From 003e4d656edb1463d4f2ce3f6cf92be3f013e4f8 Mon Sep 17 00:00:00 2001 From: asteam Date: Fri, 23 Aug 2024 16:55:50 +0300 Subject: [PATCH] 1.0.1 --- CHANGELOG.md | 138 +-- Makefile | 2 +- README.md | 2 +- ...eleted_list.md => account_list_deleted.md} | 4 +- docs/index.md | 22 +- internal/provider/data_sources_map.go | 62 +- internal/provider/provider.go | 28 +- internal/provider/provider_validation.go | 71 +- internal/provider/resources_map.go | 15 + .../data_source_account_list_deleted.go | 2 +- ...atten_data_source_account_list_deleted.go} | 0 .../cloudapi/account/resource_account.go | 2 +- .../utilities/utility_data_source_account.go | 2 +- ..._list.go => schema_data_source_lb_list.go} | 0 ... => schema_data_source_lb_list_deleted.go} | 0 .../utilities/utility_data_source_lb_list.go | 2 +- .../vins/schemas/schema_resource_vins.go | 2 +- .../utility_data_source_vins_ext_net_list.go | 2 +- .../utility_data_source_vins_list_deleted.go | 2 +- .../data_source_cb_account_audits_list.go | 91 ++ ...rce_cb_account_available_templates_list.go | 91 ++ .../data_source_cb_account_computes_list.go | 88 ++ .../data_source_cb_account_disks_list.go | 88 ++ .../data_source_cb_account_flipgroups_list.go | 88 ++ ...rce_cb_account_get_resource_consumption.go | 91 ++ .../data_source_cb_account_list_deleted.go | 88 ++ ...ce_cb_account_resource_consumption_list.go | 89 ++ .../account/data_source_cb_account_rg_list.go | 88 ++ ...tten_data_source_cb_account_audits_list.go | 59 ++ ...rce_cb_account_available_templates_list.go | 41 + ...en_data_source_cb_account_computes_list.go | 81 ++ ...atten_data_source_cb_account_disks_list.go | 66 ++ ..._data_source_cb_account_flipgroups_list.go | 81 ++ ...rce_cb_account_get_resource_consumption.go | 91 ++ ...ten_data_source_cb_account_list_deleted.go | 104 ++ ...ce_cb_account_resource_consumption_list.go | 97 ++ .../flatten_data_source_cb_account_rg_list.go | 124 +++ .../flattens/flatten_resource_cb_account.go | 171 ++++ 
...odel_data_source_cb_account_audits_list.go | 24 + ...rce_cb_account_available_templates_list.go | 15 + ...el_data_source_cb_account_computes_list.go | 50 + ...model_data_source_cb_account_disks_list.go | 36 + ..._data_source_cb_account_flipgroups_list.go | 50 + ...rce_cb_account_get_resource_consumption.go | 45 + ...del_data_source_cb_account_list_deleted.go | 68 ++ ...ce_cb_account_resource_consumption_list.go | 40 + .../model_data_source_cb_account_rg_list.go | 81 ++ .../models/model_resource_cb_account.go | 91 ++ .../account/resource_cb_account.go | 321 ++++++ ...hema_data_source_cb_account_audits_list.go | 42 + ...rce_cb_account_available_templates_list.go | 19 + ...ma_data_source_cb_account_computes_list.go | 117 +++ ...chema_data_source_cb_account_disks_list.go | 81 ++ ..._data_source_cb_account_flipgroups_list.go | 131 +++ ...rce_cb_account_get_resource_consumption.go | 131 +++ ...ema_data_source_cb_account_list_deleted.go | 181 ++++ ...ce_cb_account_resource_consumption_list.go | 113 +++ .../schema_data_source_cb_account_rg_list.go | 232 +++++ .../schemas/schema_resource_cb_account.go | 210 ++++ ...lity_data_source_cb_account_audits_list.go | 21 + ...rce_cb_account_available_templates_list.go | 21 + ...ty_data_source_cb_account_computes_list.go | 61 ++ ...ility_data_source_cb_account_disks_list.go | 51 + ..._data_source_cb_account_flipgroups_list.go | 55 + ...rce_cb_account_get_resource_consumption.go | 24 + ...ity_data_source_cb_account_list_deleted.go | 46 + ...ce_cb_account_resource_consumption_list.go | 24 + .../utility_data_source_cb_account_rg_list.go | 52 + .../utilities/utility_resource_cb_account.go | 725 ++++++++++++++ .../cloudbroker/audit/data_source_cb_audit.go | 91 ++ .../audit/data_source_cb_audit_linked_jobs.go | 91 ++ .../audit/data_source_cb_audit_list.go | 89 ++ .../flattens/flatten_data_source_cb_audit.go | 50 + ...latten_data_source_cb_audit_linked_jobs.go | 64 ++ .../flatten_data_source_cb_audit_list.go | 67 ++ 
.../models/model_data_source_cb_audit.go | 28 + .../model_data_source_cb_audit_linked_jobs.go | 27 + .../models/model_data_source_cb_audit_list.go | 33 + .../schemas/schema_data_source_cb_audit.go | 55 + ...schema_data_source_cb_audit_linked_jobs.go | 47 + .../schema_data_source_cb_audit_list.go | 65 ++ .../utilities/utility_data_source_cb_audit.go | 27 + ...tility_data_source_cb_audit_linked_jobs.go | 27 + .../utility_data_source_cb_audit_list.go | 58 ++ .../service/cloudbroker/ic/input_checks.go | 305 ++++++ .../service/cloudbroker/lb/data_source_lb.go | 91 ++ .../cloudbroker/lb/data_source_lb_list.go | 88 ++ .../lb/data_source_lb_list_deleted.go | 89 ++ .../lb/flattens/flatten_data_source_lb.go | 219 ++++ .../flattens/flatten_data_source_lb_list.go | 93 ++ .../flatten_data_source_lb_list_deleted.go | 91 ++ .../lb/flattens/flatten_resource_lb.go | 72 ++ .../flattens/flatten_resource_lb_backend.go | 44 + .../flatten_resource_lb_backend_server.go | 45 + .../flattens/flatten_resoursce_lb_frontend.go | 36 + .../flatten_resoursce_lb_frontend_bind.go | 37 + .../service/cloudbroker/lb/input_check.go | 125 +++ .../lb/models/model_data_source_lb.go | 147 +++ .../lb/models/model_data_source_lb_list.go | 64 ++ .../model_data_source_lb_list_deleted.go | 62 ++ .../lb/models/model_resource_lb.go | 52 + .../lb/models/model_resource_lb_backend.go | 24 + .../model_resource_lb_backend_server.go | 26 + .../lb/models/model_resource_lb_frontend.go | 16 + .../models/model_resource_lb_frontend_bind.go | 17 + .../service/cloudbroker/lb/resource_lb.go | 359 +++++++ .../cloudbroker/lb/resource_lb_backend.go | 267 +++++ .../lb/resource_lb_backend_server.go | 268 +++++ .../cloudbroker/lb/resource_lb_frontend.go | 208 ++++ .../lb/resource_lb_frontend_bind.go | 268 +++++ .../lb/schemas/schema_data_source_lb.go | 270 +++++ .../lb/schemas/schema_data_source_lb_list.go | 341 +++++++ .../schema_data_source_lb_list_deleted.go | 333 ++++++ .../lb/schemas/schema_resource_lb.go | 316 ++++++ 
.../lb/schemas/schema_resource_lb_backend.go | 122 +++ .../schema_resource_lb_backend_server.go | 83 ++ .../lb/schemas/schema_resource_lb_frontend.go | 49 + .../schema_resource_lb_frontend_bind.go | 36 + .../lb/utilities/utility_data_source_lb.go | 27 + .../utilities/utility_data_source_lb_list.go | 63 ++ .../utility_data_source_lb_list_deleted.go | 57 ++ .../lb/utilities/utility_resource_lb.go | 352 +++++++ .../utilities/utility_resource_lb_backend.go | 167 +++ .../utility_resource_lb_backend_server.go | 187 ++++ .../utilities/utility_resource_lb_frontend.go | 75 ++ .../utility_resource_lb_frontend_bind.go | 115 +++ .../cloudbroker/node/data_source_cb_node.go | 91 ++ .../node/data_source_cb_node_list.go | 87 ++ .../flattens/flatten_data_source_cb_node.go | 169 ++++ .../flatten_data_source_cb_node_list.go | 126 +++ .../node/models/model_data_source_cb_node.go | 87 ++ .../models/model_data_source_cb_node_list.go | 75 ++ .../schemas/schema_data_source_cb_node.go | 191 ++++ .../schema_data_source_cb_node_list.go | 287 ++++++ .../utilities/utility_data_source_cb_node.go | 27 + .../utility_data_source_cb_node_list.go | 60 ++ .../service/cloudbroker/rg/data_source_rg.go | 91 ++ .../data_source_rg_affinity_group_computes.go | 89 ++ .../rg/data_source_rg_affinity_groups_get.go | 89 ++ .../rg/data_source_rg_affinity_groups_list.go | 89 ++ .../cloudbroker/rg/data_source_rg_audits.go | 89 ++ ...data_source_rg_get_resource_consumption.go | 89 ++ .../cloudbroker/rg/data_source_rg_list.go | 89 ++ .../rg/data_source_rg_list_computes.go | 89 ++ .../rg/data_source_rg_list_deleted.go | 89 ++ .../cloudbroker/rg/data_source_rg_list_lb.go | 89 ++ .../cloudbroker/rg/data_source_rg_list_pfw.go | 89 ++ .../rg/data_source_rg_list_vins.go | 89 ++ ...ata_source_rg_resource_consumption_list.go | 89 ++ .../cloudbroker/rg/data_source_rg_usage.go | 89 ++ .../rg/flattens/flatten_data_source_rg.go | 126 +++ ..._data_source_rg_affinity_group_computes.go | 82 ++ 
...tten_data_source_rg_affinity_groups_get.go | 51 + ...ten_data_source_rg_affinity_groups_list.go | 68 ++ .../flattens/flatten_data_source_rg_audits.go | 56 ++ ...data_source_rg_get_resource_consumption.go | 97 ++ .../flattens/flatten_data_source_rg_list.go | 95 ++ .../flatten_data_source_rg_list_computes.go | 121 +++ .../flatten_data_source_rg_list_deleted.go | 94 ++ .../flatten_data_source_rg_list_lb.go | 172 ++++ .../flatten_data_source_rg_list_pfw.go | 62 ++ .../flatten_data_source_rg_list_vins.go | 80 ++ ...ata_source_rg_resource_consumption_list.go | 56 ++ .../flattens/flatten_data_source_rg_usage.go | 52 + .../rg/models/model_data_source_rg.go | 84 ++ ..._data_source_rg_affinity_group_computes.go | 29 + ...odel_data_source_rg_affinity_groups_get.go | 19 + ...del_data_source_rg_affinity_groups_list.go | 31 + .../rg/models/model_data_source_rg_audits.go | 24 + ...odel_data_source_rg_get_resource_limits.go | 55 + .../rg/models/model_data_source_rg_list.go | 61 ++ .../model_data_source_rg_list_computes.go | 76 ++ .../model_data_source_rg_list_deleted.go | 59 ++ .../rg/models/model_data_source_rg_list_lb.go | 108 ++ .../models/model_data_source_rg_list_pfw.go | 28 + .../models/model_data_source_rg_list_vins.go | 48 + ...del_data_source_rg_resource_limits_list.go | 23 + .../rg/models/model_data_source_rg_usage.go | 24 + .../rg/schemas/schema_data_source_rg.go | 164 +++ ..._data_source_rg_affinity_group_computes.go | 59 ++ ...hema_data_source_rg_affinity_groups_get.go | 29 + ...ema_data_source_rg_affinity_groups_list.go | 57 ++ .../schemas/schema_data_source_rg_audits.go | 41 + ...data_source_rg_get_resource_consumption.go | 136 +++ .../rg/schemas/schema_data_source_rg_list.go | 215 ++++ .../schema_data_source_rg_list_computes.go | 192 ++++ .../schema_data_source_rg_list_deleted.go | 207 ++++ .../schemas/schema_data_source_rg_list_lb.go | 314 ++++++ .../schemas/schema_data_source_rg_list_pfw.go | 54 + .../schema_data_source_rg_list_vins.go | 117 +++ 
...ata_source_rg_resource_consumption_list.go | 143 +++ .../rg/schemas/schema_data_source_rg_usage.go | 66 ++ .../rg/utilities/utility_data_source_rg.go | 23 + ..._data_source_rg_affinity_group_computes.go | 29 + ...lity_data_source_rg_affinity_groups_get.go | 29 + ...ity_data_source_rg_affinity_groups_list.go | 33 + .../utility_data_source_rg_audits.go | 27 + ...data_source_rg_get_resource_consumption.go | 25 + .../utilities/utility_data_source_rg_list.go | 62 ++ .../utility_data_source_rg_list_computes.go | 61 ++ .../utility_data_source_rg_list_deleted.go | 56 ++ .../utility_data_source_rg_list_lb.go | 55 + .../utility_data_source_rg_list_pfw.go | 27 + .../utility_data_source_rg_list_vins.go | 49 + ...ata_source_rg_resource_consumption_list.go | 22 + .../utilities/utility_data_source_rg_usage.go | 29 + .../vfpool/data_source_cb_vfpool.go | 91 ++ .../vfpool/data_source_cb_vfpool_list.go | 89 ++ .../flattens/flatten_data_source_cb_vfpool.go | 92 ++ .../flatten_data_source_cb_vfpool_list.go | 104 ++ .../flattens/flatten_resource_cb_vfpool.go | 146 +++ .../models/model_data_source_cb_vfpool.go | 40 + .../model_data_source_cb_vfpool_list.go | 55 + .../vfpool/models/model_resource_cb_vfpool.go | 72 ++ .../cloudbroker/vfpool/resource_cb_vfpool.go | 294 ++++++ .../schemas/schema_data_source_cb_vfpool.go | 79 ++ .../schema_data_source_cb_vfpool_list.go | 119 +++ .../schemas/schema_resource_cb_vfpool.go | 107 ++ .../utility_data_source_cb_vfpool.go | 25 + .../utility_data_source_cb_vfpool_list.go | 57 ++ .../utilities/utility_resource_cb_vfpool.go | 294 ++++++ .../cloudbroker/vins/data_source_vins.go | 91 ++ .../vins/data_source_vins_audits.go | 91 ++ .../vins/data_source_vins_ext_net_list.go | 91 ++ .../vins/data_source_vins_ip_list.go | 91 ++ .../cloudbroker/vins/data_source_vins_list.go | 88 ++ .../vins/data_source_vins_list_deleted.go | 88 ++ .../vins/data_source_vins_nat_rule_list.go | 89 ++ .../vins/data_source_vins_static_route.go | 88 ++ 
.../data_source_vins_static_route_list.go | 88 ++ .../vins/flattens/flatten_data_source_vins.go | 585 +++++++++++ .../flatten_data_source_vins_audits.go | 55 + .../flatten_data_source_vins_ext_net_list.go | 57 ++ .../flatten_data_source_vins_ip_list.go | 58 ++ .../flattens/flatten_data_source_vins_list.go | 117 +++ .../flatten_data_source_vins_list_deleted.go | 88 ++ .../flatten_data_source_vins_nat_rule_list.go | 58 ++ .../flatten_data_source_vins_static_route.go | 51 + ...tten_data_source_vins_static_route_list.go | 63 ++ .../vins/flattens/flatten_resource_vins.go | 161 +++ .../flatten_resource_vins_static_route.go | 68 ++ .../service/cloudbroker/vins/input_checks.go | 91 ++ .../vins/models/model_data_source_vins.go | 454 +++++++++ .../models/model_data_source_vins_audits.go | 24 + .../model_data_source_vins_ext_net_list.go | 26 + .../models/model_data_source_vins_ip_list.go | 27 + .../models/model_data_source_vins_list.go | 82 ++ .../model_data_source_vins_list_deleted.go | 59 ++ .../model_data_source_vins_nat_rule_list.go | 29 + .../model_data_source_vins_static_route.go | 21 + ...odel_data_source_vins_static_route_list.go | 26 + .../vins/models/model_resource_vins.go | 131 +++ .../model_resource_vins_static_route.go | 24 + .../service/cloudbroker/vins/resource_vins.go | 424 ++++++++ .../vins/resource_vins_static_route.go | 312 ++++++ .../vins/schemas/schema_data_source_vins.go | 717 +++++++++++++ .../schemas/schema_data_source_vins_audits.go | 42 + .../schema_data_source_vins_ext_net_list.go | 48 + .../schema_data_source_vins_ip_list.go | 51 + .../schemas/schema_data_source_vins_list.go | 202 ++++ .../schema_data_source_vins_list_deleted.go | 188 ++++ .../schema_data_source_vins_nat_rule_list.go | 59 ++ .../schema_data_source_vins_static_route.go | 41 + ...hema_data_source_vins_static_route_list.go | 50 + .../vins/schemas/schema_resource_vins.go | 884 ++++++++++++++++ .../schema_resource_vins_static_route.go | 51 + .../utilities/utility_data_source_vins.go | 
27 + .../utility_data_source_vins_audits.go | 27 + .../utility_data_source_vins_ext_net_list.go | 27 + .../utility_data_source_vins_ip_list.go | 27 + .../utility_data_source_vins_list.go | 59 ++ .../utility_data_source_vins_list_deleted.go | 53 + .../utility_data_source_vins_nat_rule_list.go | 34 + .../utility_data_source_vins_static_route.go | 39 + ...lity_data_source_vins_static_route_list.go | 25 + .../vins/utilities/utility_resource_vins.go | 948 ++++++++++++++++++ .../utility_resource_vins_static_route.go | 195 ++++ samples/cloudapi/account/data_account/main.tf | 4 +- .../account/data_account_audits/main.tf | 4 +- .../data_account_computes_list/main.tf | 4 +- .../data_account_consumed_units/main.tf | 4 +- .../main.tf | 4 +- .../account/data_account_deleted_list/main.tf | 4 +- .../account/data_account_disks_list/main.tf | 4 +- .../data_account_flipgroups_list/main.tf | 4 +- .../main.tf | 4 +- .../account/data_account_list/main.tf | 4 +- .../data_account_reserved_units/main.tf | 4 +- .../main.tf | 4 +- .../account/data_account_rg_list/main.tf | 4 +- .../data_account_templates_list/main.tf | 4 +- .../account/data_account_vins_list/main.tf | 4 +- .../cloudapi/account/resource_account/main.tf | 4 +- .../cloudapi/bservice/data_bservice/main.tf | 4 +- .../data_bservice_deleted_list/main.tf | 4 +- .../bservice/data_bservice_group/main.tf | 4 +- .../bservice/data_bservice_list/main.tf | 4 +- .../data_bservice_snapshot_list/main.tf | 4 +- .../bservice/resource_bservice/main.tf | 4 +- .../bservice/resource_bservice_group/main.tf | 4 +- samples/cloudapi/disk/data_disk/main.tf | 4 +- samples/cloudapi/disk/data_disk_list/main.tf | 4 +- .../disk/data_disk_list_deleted/main.tf | 4 +- .../disk/data_disk_list_types/main.tf | 4 +- .../data_disk_list_types_detailed/main.tf | 4 +- .../disk/data_disk_list_unattached/main.tf | 4 +- .../disk/data_disk_replication/main.tf | 4 +- .../cloudapi/disk/data_disk_snapshot/main.tf | 4 +- .../disk/data_disk_snapshot_list/main.tf | 4 +- 
samples/cloudapi/disk/resource_disk/main.tf | 4 +- .../disk/resource_disk_replication/main.tf | 4 +- .../disk/resource_disk_snapshot/main.tf | 4 +- samples/cloudapi/extnet/data_extnet/main.tf | 4 +- .../extnet/data_extnet_computes_list/main.tf | 4 +- .../extnet/data_extnet_default/main.tf | 4 +- .../cloudapi/extnet/data_extnet_list/main.tf | 4 +- .../cloudapi/flipgroup/data_flipgroup/main.tf | 4 +- .../flipgroup/data_flipgroup_list/maint.tf | 4 +- .../flipgroup/resource_flipgroup/main.tf | 4 +- samples/cloudapi/image/data_image/main.tf | 4 +- .../cloudapi/image/data_image_list/main.tf | 4 +- samples/cloudapi/image/resource_image/main.tf | 4 +- .../image/resource_image_virtual/main.tf | 4 +- samples/cloudapi/k8s/data_k8ci_list/main.tf | 4 +- samples/cloudapi/k8s/data_k8s/main.tf | 4 +- .../cloudapi/k8s/data_k8s_computes/main.tf | 4 +- samples/cloudapi/k8s/data_k8s_list/main.tf | 4 +- .../k8s/data_k8s_list_deleted/main.tf | 4 +- samples/cloudapi/k8s/data_k8s_wg/main.tf | 4 +- .../k8s/data_k8s_wg_cloud_init/main.tf | 4 +- samples/cloudapi/k8s/data_k8s_wg_list/main.tf | 4 +- samples/cloudapi/k8s/resource_k8s_cp/main.tf | 4 +- samples/cloudapi/k8s/resource_k8s_wg/main.tf | 4 +- samples/cloudapi/kvmvm/data_kvmvm/main.tf | 4 +- .../cloudapi/kvmvm/data_kvmvm_audits/main.tf | 4 +- .../kvmvm/data_kvmvm_get_audits/main.tf | 4 +- .../kvmvm/data_kvmvm_get_console_url/main.tf | 4 +- .../cloudapi/kvmvm/data_kvmvm_get_log/main.tf | 4 +- .../cloudapi/kvmvm/data_kvmvm_list/main.tf | 4 +- .../kvmvm/data_kvmvm_list_deleted/main.tf | 4 +- .../kvmvm/data_kvmvm_pci_device_list/main.tf | 4 +- .../kvmvm/data_kvmvm_pfw_list/main.tf | 4 +- .../kvmvm/data_kvmvm_snapshot_usage/main.tf | 4 +- .../kvmvm/data_kvmvm_user_list/main.tf | 4 +- .../kvmvm/data_kvmvm_vgpu_list/main.tf | 4 +- samples/cloudapi/kvmvm/resource_kvmvm/main.tf | 4 +- samples/cloudapi/lb/data_lb/main.tf | 6 +- samples/cloudapi/lb/data_lb_list/main.tf | 4 +- .../cloudapi/lb/data_lb_list_deleted/main.tf | 4 +- 
samples/cloudapi/lb/resource_lb/main.tf | 4 +- .../cloudapi/lb/resource_lb_backend/main.tf | 4 +- .../lb/resource_lb_backend_server/main.tf | 4 +- .../cloudapi/lb/resource_lb_frontend/main.tf | 4 +- .../lb/resource_lb_frontend_bind/main.tf | 4 +- samples/cloudapi/rg/data_rg/main.tf | 4 +- .../data_rg_affinity_group_computes/main.tf | 4 +- .../rg/data_rg_affinity_groups_get/main.tf | 4 +- .../rg/data_rg_affinity_groups_list/main.tf | 4 +- samples/cloudapi/rg/data_rg_audits/main.tf | 4 +- samples/cloudapi/rg/data_rg_list/main.tf | 4 +- .../cloudapi/rg/data_rg_list_computes/main.tf | 4 +- .../cloudapi/rg/data_rg_list_deleted/main.tf | 4 +- samples/cloudapi/rg/data_rg_list_lb/main.tf | 4 +- samples/cloudapi/rg/data_rg_list_pfw/main.tf | 4 +- samples/cloudapi/rg/data_rg_list_vins/main.tf | 4 +- .../data_rg_resource_consumption_get/main.tf | 4 +- .../data_rg_resource_consumption_list/main.tf | 4 +- samples/cloudapi/rg/data_rg_usage/main.tf | 4 +- samples/cloudapi/rg/resource_rg/main.tf | 4 +- samples/cloudapi/stack/data_stack/main.tf | 4 +- .../cloudapi/stack/data_stack_list/main.tf | 4 +- samples/cloudapi/vins/data_vins/main.tf | 4 +- .../cloudapi/vins/data_vins_audits/main.tf | 4 +- .../vins/data_vins_ext_net_list/main.tf | 4 +- .../cloudapi/vins/data_vins_ip_list/main.tf | 4 +- samples/cloudapi/vins/data_vins_list/main.tf | 4 +- .../vins/data_vins_list_deleted/main.tf | 4 +- .../vins/data_vins_nat_rule_list/main.tf | 4 +- .../vins/data_vins_static_route/main.tf | 4 +- .../vins/data_vins_static_route_list/main.tf | 4 +- samples/cloudapi/vins/resource_vins/main.tf | 39 +- .../vins/resource_vins_static_route/main.tf | 4 +- .../08.-Полезные-советы.md | 10 + wiki/1.0.0/Home.md | 1 + wiki/1.0.1/01.-Введение.md | 7 + wiki/1.0.1/02.-Пример-работы.md | 92 ++ ...лачной-платформы-DYNAMIX.md | 32 + ...erraform-провайдером-DYNAMIX.md | 6 + ...-Terraform-провайдера-DYNAMIX.md | 147 +++ ...-Terraform-провайдера-DYNAMIX.md | 64 ++ ...ние-между-группами-API.md | 38 + 
...4-Получение-gid-или-grid_id.md | 17 + ...form-провайдера-в-образ.md | 43 + wiki/1.0.1/05.-Работа-с-terraform.md | 4 + ...ортирование-ресурсов.md | 75 ++ ...02-Работа-с-таймаутами.md | 100 ++ ...становление-ресурсов.md | 29 + ...урсов.-Мета-аргументы.md | 452 +++++++++ ...05.05-Удаление-ресурсов.md | 262 +++++ .../05.06-Установка-Terraform.md | 46 + ...струкция-по-миграции.md | 708 +++++++++++++ ...-Terraform-провайдера-DYNAMIX.md | 1 + ...овательская-группа-API.md | 1 + wiki/1.0.1/06.01.01-Data_dynamix_kvmvm.md | 269 +++++ wiki/1.0.1/06.01.02-Data_dynamix_resgroup.md | 121 +++ wiki/1.0.1/06.01.03-Data_dynamix_disk.md | 110 ++ wiki/1.0.1/06.01.04-Data_dynamix_vins.md | 279 ++++++ wiki/1.0.1/06.01.05-Data_dynamix_account.md | 113 +++ .../1.0.1/06.01.06-Data_dynamix_image_list.md | 158 +++ wiki/1.0.1/06.01.07-Data_dynamix_image.md | 82 ++ .../06.01.08-Data_dynamix_locations_list.md | 76 ++ .../06.01.09-Data_dynamix_location_url.md | 23 + .../06.01.11-Data_dynamix_snapshot_list.md | 34 + wiki/1.0.1/06.01.12-Data_dynamix_rg_list.md | 122 +++ wiki/1.0.1/06.01.13-Data_dynamix_disk_list.md | 186 ++++ wiki/1.0.1/06.01.14-Data_dynamix_vins_list.md | 102 ++ .../06.01.15-Data_dynamix_extnet_list.md | 88 ++ .../06.01.16-Data_dynamix_extnet_default.md | 24 + ...01.17-Data_dynamix_extnet_computes_list.md | 76 ++ wiki/1.0.1/06.01.18-Data_dynamix_extnet.md | 53 + ....01.19-Data_dynamix_account_audits_list.md | 36 + ...1.20-Data_dynamix_account_computes_list.md | 118 +++ ....21-Data_dynamix_account_consumed_units.md | 38 + ..._dynamix_account_consumed_units_by_type.md | 44 + ...6.01.23-Data_dynamix_account_disks_list.md | 79 ++ ...24-Data_dynamix_account_flipgroups_list.md | 97 ++ ....25-Data_dynamix_account_reserved_units.md | 38 + .../06.01.26-Data_dynamix_account_rg_list.md | 93 ++ ....27-Data_dynamix_account_templates_list.md | 84 ++ ...06.01.28-Data_dynamix_account_vins_list.md | 94 ++ .../06.01.29-Data_dynamix_account_list.md | 76 ++ 
...01.30-Data_dynamix_account_deleted_list.md | 70 ++ .../06.01.31-Data_dynamix_bservice_list.md | 116 +++ ...1.32-Data_dynamix_bservice_deleted_list.md | 81 ++ ....33-Data_dynamix_bservice_snapshot_list.md | 33 + wiki/1.0.1/06.01.34-Data_dynamix_bservice.md | 92 ++ .../06.01.35-Data_dynamix_bservice_group.md | 68 ++ wiki/1.0.1/06.01.36-Data_dynamix_lb.md | 109 ++ wiki/1.0.1/06.01.37-Data_dynamix_lb_list.md | 181 ++++ .../06.01.38-Data_dynamix_lb_list_deleted.md | 167 +++ .../06.01.39-Data_dynamix_disk_list_types.md | 45 + ...0-Data_dynamix_disk_list_types_detailed.md | 52 + ...06.01.41-Data_dynamix_disk_list_deleted.md | 129 +++ ...01.42-Data_dynamix_disk_list_unattached.md | 160 +++ ...6.01.43-Data_dynamix_disk_snapshot_list.md | 32 + .../06.01.44-Data_dynamix_disk_snapshot.md | 38 + wiki/1.0.1/06.01.45-Data_dynamix_k8s.md | 121 +++ wiki/1.0.1/06.01.46-Data_dynamix_k8s_list.md | 171 ++++ .../06.01.47-Data_dynamix_k8s_list_deleted.md | 156 +++ wiki/1.0.1/06.01.48-Data_dynamix_k8s_wg.md | 66 ++ .../06.01.49-Data_dynamix_k8s_wg_list.md | 61 ++ .../06.01.50-Data_dynamix_vins_audits.md | 37 + ...06.01.51-Data_dynamix_vins_ext_net_list.md | 37 + .../06.01.52-Data_dynamix_vins_ip_list.md | 38 + ...06.01.53-Data_dynamix_vins_list_deleted.md | 96 ++ ...6.01.54-Data_dynamix_vins_nat_rule_list.md | 38 + .../06.01.55-Data_dynamix_kvmvm_audits.md | 33 + .../06.01.56-Data_dynamix_kvmvm_get_audits.md | 30 + ...1.57-Data_dynamix_kvmvm_get_console_url.md | 28 + .../06.01.58-Data_dynamix_kvmvm_get_log.md | 34 + .../1.0.1/06.01.59-Data_dynamix_kvmvm_list.md | 242 +++++ .../06.01.60-Data_dynamix_kvmvm_pfw_list.md | 35 + .../06.01.61-Data_dynamix_kvmvm_user_list.md | 41 + wiki/1.0.1/06.01.62-Data_dynamix_rg_list.md | 155 +++ ...Data_dynamix_rg_affinity_group_computes.md | 50 + ...64-Data_dynamix_rg_affinity_groups_list.md | 54 + ....65-Data_dynamix_rg_affinity_groups_get.md | 44 + wiki/1.0.1/06.01.66-Data_dynamix_rg_audits.md | 42 + .../06.01.67-Data_dynamix_rg_list_computes.md | 139 
+++ .../06.01.68-Data_dynamix_rg_list_deleted.md | 143 +++ .../1.0.1/06.01.69-Data_dynamix_rg_list_lb.md | 194 ++++ .../06.01.70-Data_dynamix_rg_list_pfw.md | 45 + .../06.01.71-Data_dynamix_rg_list_vins.md | 100 ++ wiki/1.0.1/06.01.72-Data_dynamix_rg_usage.md | 57 ++ ...01.73-Data_dynamix_kvmvm_snapshot_usage.md | 45 + .../06.01.74-Data _dynamix_k8s_computes.md | 47 + wiki/1.0.1/06.01.75-Data_dynamix_flipgroup.md | 49 + .../06.01.75-Data_dynamix_flipgroup_list.md | 125 +++ wiki/1.0.1/06.01.76-Data_dynamix_k8ci_list.md | 88 ++ ...06.01.77-Data_dynamix_vins_static_route.md | 39 + ....78-Data_dynamix_vins_static_route_list.md | 34 + ...ynamix_account_resource_consumption_get.md | 68 ++ ...namix_account_resource_consumption_list.md | 49 + ...06.01.81-Data_dynamix_k8s_wg_cloud_init.md | 39 + ...6.01.82-Data_dynamix_kvmvm_list_deleted.md | 228 +++++ ...1.83-Data_dynamix_kvmvm_pci_device_list.md | 84 ++ .../06.01.84-Data_dynamix_kvmvm_vgpu_list.md | 92 ++ ...ata_dynamix_rg_resource_consumption_get.md | 68 ++ ...ta_dynamix_rg_resource_consumption_list.md | 61 ++ .../06.01.87-Data_dynamix_disk_replication.md | 118 +++ wiki/1.0.1/06.01.87-Data_dynamix_vfpool.md | 63 ++ .../06.01.88-Data_dynamix_vfpool_list.md | 122 +++ wiki/1.0.1/06.01.89-Data_dynamix_stack.md | 42 + .../1.0.1/06.01.90-Data_dynamix_stack_list.md | 76 ++ ...-Terraform-провайдера-dynamix.md | 2 + ...овательская-группа-API.md | 1 + wiki/1.0.1/07.01.01-Resource_dynamix_kvmvm.md | 860 ++++++++++++++++ .../07.01.02-Resource_dynamix_resgroup.md | 277 +++++ wiki/1.0.1/07.01.03-Resource_dynamix_disk.md | 173 ++++ wiki/1.0.1/07.01.04-Resource_dynamix_vins.md | 455 +++++++++ .../07.01.05-Resource_dynamix_snapshot.md | 48 + wiki/1.0.1/07.01.06-Resource_dynamix_k8s.md | 571 +++++++++++ .../1.0.1/07.01.07-Resource_dynamix_k8s_wg.md | 167 +++ ...07.01.08-Resource_dynamix_image_virtual.md | 87 ++ wiki/1.0.1/07.01.09-Resource_dynamix_image.md | 165 +++ wiki/1.0.1/07.01.10-Resource_dynamix_pfw.md | 59 ++ 
.../07.01.11-Resource_dynamix_account.md | 227 +++++ .../07.01.12-Resource_dynamix_bservice.md | 154 +++ ...7.01.13-Resource_dynamix_bservice_group.md | 252 +++++ wiki/1.0.1/07.01.14-Resource_dynamix_lb.md | 203 ++++ .../07.01.15-Resource_dynamix_lb_frontend.md | 73 ++ ...01.16-Resource_dynamix_lb_frontend_bind.md | 71 ++ .../07.01.17-Resource_dynamix_lb_backend.md | 151 +++ ...1.18-Resource_dynamix_lb_backend_server.md | 154 +++ ...07.01.19-Resource_dynamix_disk_snapshot.md | 51 + .../07.01.20-Resource_dynamix_flipgroup.md | 96 ++ .../1.0.1/07.01.21-Resource_dynamix_k8s_cp.md | 357 +++++++ ...1.22-Resource_dynamix_vins_static_route.md | 60 ++ ...source_dynamix_image_from_blank_compute.md | 145 +++ ...source_dynamix_image_from_platform_disk.md | 168 ++++ ...01.25-Resource_dynamix_disk_replication.md | 167 +++ .../08.-Полезные-советы.md | 52 + wiki/1.0.1/Home.md | 295 ++++++ wiki/Home.md | 3 +- 524 files changed, 43376 insertions(+), 432 deletions(-) rename docs/data-sources/{account_deleted_list.md => account_list_deleted.md} (93%) rename internal/service/cloudapi/account/flattens/{flatten_account_list_deleted.go => flatten_data_source_account_list_deleted.go} (100%) rename internal/service/cloudapi/lb/schemas/{schema_data_soursce_lb_list.go => schema_data_source_lb_list.go} (100%) rename internal/service/cloudapi/lb/schemas/{schema_data_soursce_lb_list_deleted.go => schema_data_source_lb_list_deleted.go} (100%) create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_audits_list.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_available_templates_list.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_computes_list.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_disks_list.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_flipgroups_list.go create mode 100644 
internal/service/cloudbroker/account/data_source_cb_account_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_list_deleted.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_rg_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_audits_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_available_templates_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_computes_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_disks_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_flipgroups_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_list_deleted.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_rg_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_resource_cb_account.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_audits_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_available_templates_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_computes_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_disks_list.go create mode 100644 
internal/service/cloudbroker/account/models/model_data_source_cb_account_flipgroups_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_list_deleted.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_rg_list.go create mode 100644 internal/service/cloudbroker/account/models/model_resource_cb_account.go create mode 100644 internal/service/cloudbroker/account/resource_cb_account.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_audits_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_available_templates_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_computes_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_disks_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_flipgroups_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_list_deleted.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_rg_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_audits_list.go create mode 100644 
internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_available_templates_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_computes_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_disks_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_flipgroups_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_list_deleted.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_rg_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go create mode 100644 internal/service/cloudbroker/audit/data_source_cb_audit.go create mode 100644 internal/service/cloudbroker/audit/data_source_cb_audit_linked_jobs.go create mode 100644 internal/service/cloudbroker/audit/data_source_cb_audit_list.go create mode 100644 internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go create mode 100644 internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_linked_jobs.go create mode 100644 internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_list.go create mode 100644 internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go create mode 100644 internal/service/cloudbroker/audit/models/model_data_source_cb_audit_linked_jobs.go create mode 100644 internal/service/cloudbroker/audit/models/model_data_source_cb_audit_list.go create mode 100644 internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go create mode 100644 
internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_linked_jobs.go create mode 100644 internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_list.go create mode 100644 internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit.go create mode 100644 internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_linked_jobs.go create mode 100644 internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_list.go create mode 100644 internal/service/cloudbroker/ic/input_checks.go create mode 100644 internal/service/cloudbroker/lb/data_source_lb.go create mode 100644 internal/service/cloudbroker/lb/data_source_lb_list.go create mode 100644 internal/service/cloudbroker/lb/data_source_lb_list_deleted.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_data_source_lb.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_resource_lb.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend_server.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend.go create mode 100644 internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend_bind.go create mode 100644 internal/service/cloudbroker/lb/input_check.go create mode 100644 internal/service/cloudbroker/lb/models/model_data_source_lb.go create mode 100644 internal/service/cloudbroker/lb/models/model_data_source_lb_list.go create mode 100644 internal/service/cloudbroker/lb/models/model_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudbroker/lb/models/model_resource_lb.go create mode 100644 
internal/service/cloudbroker/lb/models/model_resource_lb_backend.go create mode 100644 internal/service/cloudbroker/lb/models/model_resource_lb_backend_server.go create mode 100644 internal/service/cloudbroker/lb/models/model_resource_lb_frontend.go create mode 100644 internal/service/cloudbroker/lb/models/model_resource_lb_frontend_bind.go create mode 100644 internal/service/cloudbroker/lb/resource_lb.go create mode 100644 internal/service/cloudbroker/lb/resource_lb_backend.go create mode 100644 internal/service/cloudbroker/lb/resource_lb_backend_server.go create mode 100644 internal/service/cloudbroker/lb/resource_lb_frontend.go create mode 100644 internal/service/cloudbroker/lb/resource_lb_frontend_bind.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_data_source_lb.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_resource_lb.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend_server.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend.go create mode 100644 internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend_bind.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_data_source_lb.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_resource_lb.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend_server.go 
create mode 100644 internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend.go create mode 100644 internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend_bind.go create mode 100644 internal/service/cloudbroker/node/data_source_cb_node.go create mode 100644 internal/service/cloudbroker/node/data_source_cb_node_list.go create mode 100644 internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node.go create mode 100644 internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node_list.go create mode 100644 internal/service/cloudbroker/node/models/model_data_source_cb_node.go create mode 100644 internal/service/cloudbroker/node/models/model_data_source_cb_node_list.go create mode 100644 internal/service/cloudbroker/node/schemas/schema_data_source_cb_node.go create mode 100644 internal/service/cloudbroker/node/schemas/schema_data_source_cb_node_list.go create mode 100644 internal/service/cloudbroker/node/utilities/utility_data_source_cb_node.go create mode 100644 internal/service/cloudbroker/node/utilities/utility_data_source_cb_node_list.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_audits.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_list.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_list_computes.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_list_deleted.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_list_lb.go create mode 100644 
internal/service/cloudbroker/rg/data_source_rg_list_pfw.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_list_vins.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/rg/data_source_rg_usage.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_audits.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_computes.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_lb.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_vins.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_usage.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_get.go create 
mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_audits.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_get_resource_limits.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_list.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_list_computes.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_list_lb.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_list_vins.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_resource_limits_list.go create mode 100644 internal/service/cloudbroker/rg/models/model_data_source_rg_usage.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_audits.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_computes.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_deleted.go create mode 100644 
internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_lb.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_vins.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/rg/schemas/schema_data_source_rg_usage.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_audits.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_computes.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_lb.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_vins.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudbroker/rg/utilities/utility_data_source_rg_usage.go create mode 100644 internal/service/cloudbroker/vfpool/data_source_cb_vfpool.go create mode 100644 
internal/service/cloudbroker/vfpool/data_source_cb_vfpool_list.go create mode 100644 internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool_list.go create mode 100644 internal/service/cloudbroker/vfpool/flattens/flatten_resource_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool_list.go create mode 100644 internal/service/cloudbroker/vfpool/models/model_resource_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/resource_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool_list.go create mode 100644 internal/service/cloudbroker/vfpool/schemas/schema_resource_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool_list.go create mode 100644 internal/service/cloudbroker/vfpool/utilities/utility_resource_cb_vfpool.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_audits.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_ip_list.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_list.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_list_deleted.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudbroker/vins/data_source_vins_static_route.go create mode 100644 
internal/service/cloudbroker/vins/data_source_vins_static_route_list.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_audits.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ip_list.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_resource_vins.go create mode 100644 internal/service/cloudbroker/vins/flattens/flatten_resource_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/input_checks.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_audits.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_ip_list.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_list.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_static_route.go create 
mode 100644 internal/service/cloudbroker/vins/models/model_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudbroker/vins/models/model_resource_vins.go create mode 100644 internal/service/cloudbroker/vins/models/model_resource_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/resource_vins.go create mode 100644 internal/service/cloudbroker/vins/resource_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_audits.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ip_list.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_resource_vins.go create mode 100644 internal/service/cloudbroker/vins/schemas/schema_resource_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_audits.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ip_list.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list.go create mode 
100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_resource_vins.go create mode 100644 internal/service/cloudbroker/vins/utilities/utility_resource_vins_static_route.go create mode 100644 wiki/1.0.1/01.-Введение.md create mode 100644 wiki/1.0.1/02.-Пример-работы.md create mode 100644 wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md create mode 100644 wiki/1.0.1/04.-Начало-работы-с-terraform-провайдером-DYNAMIX.md create mode 100644 wiki/1.0.1/04.01-Установка-Terraform-провайдера-DYNAMIX.md create mode 100644 wiki/1.0.1/04.02-Инициализация-Terraform-провайдера-DYNAMIX.md create mode 100644 wiki/1.0.1/04.03-Переключение-между-группами-API.md create mode 100644 wiki/1.0.1/04.04-Получение-gid-или-grid_id.md create mode 100644 wiki/1.0.1/04.05-Сборка-terraform-провайдера-в-образ.md create mode 100644 wiki/1.0.1/05.-Работа-с-terraform.md create mode 100644 wiki/1.0.1/05.01-Импортирование-ресурсов.md create mode 100644 wiki/1.0.1/05.02-Работа-с-таймаутами.md create mode 100644 wiki/1.0.1/05.03-Восстановление-ресурсов.md create mode 100644 wiki/1.0.1/05.04-Массовое-создание-ресурсов.-Мета-аргументы.md create mode 100644 wiki/1.0.1/05.05-Удаление-ресурсов.md create mode 100644 wiki/1.0.1/05.06-Установка-Terraform.md create mode 100644 wiki/1.0.1/05.07-Инструкция-по-миграции.md create mode 100644 wiki/1.0.1/06.-Data-source-функции-Terraform-провайдера-DYNAMIX.md create mode 100644 wiki/1.0.1/06.01-Пользовательская-группа-API.md create mode 100644 wiki/1.0.1/06.01.01-Data_dynamix_kvmvm.md create mode 100644 wiki/1.0.1/06.01.02-Data_dynamix_resgroup.md 
create mode 100644 wiki/1.0.1/06.01.03-Data_dynamix_disk.md create mode 100644 wiki/1.0.1/06.01.04-Data_dynamix_vins.md create mode 100644 wiki/1.0.1/06.01.05-Data_dynamix_account.md create mode 100644 wiki/1.0.1/06.01.06-Data_dynamix_image_list.md create mode 100644 wiki/1.0.1/06.01.07-Data_dynamix_image.md create mode 100644 wiki/1.0.1/06.01.08-Data_dynamix_locations_list.md create mode 100644 wiki/1.0.1/06.01.09-Data_dynamix_location_url.md create mode 100644 wiki/1.0.1/06.01.11-Data_dynamix_snapshot_list.md create mode 100644 wiki/1.0.1/06.01.12-Data_dynamix_rg_list.md create mode 100644 wiki/1.0.1/06.01.13-Data_dynamix_disk_list.md create mode 100644 wiki/1.0.1/06.01.14-Data_dynamix_vins_list.md create mode 100644 wiki/1.0.1/06.01.15-Data_dynamix_extnet_list.md create mode 100644 wiki/1.0.1/06.01.16-Data_dynamix_extnet_default.md create mode 100644 wiki/1.0.1/06.01.17-Data_dynamix_extnet_computes_list.md create mode 100644 wiki/1.0.1/06.01.18-Data_dynamix_extnet.md create mode 100644 wiki/1.0.1/06.01.19-Data_dynamix_account_audits_list.md create mode 100644 wiki/1.0.1/06.01.20-Data_dynamix_account_computes_list.md create mode 100644 wiki/1.0.1/06.01.21-Data_dynamix_account_consumed_units.md create mode 100644 wiki/1.0.1/06.01.22-Data_dynamix_account_consumed_units_by_type.md create mode 100644 wiki/1.0.1/06.01.23-Data_dynamix_account_disks_list.md create mode 100644 wiki/1.0.1/06.01.24-Data_dynamix_account_flipgroups_list.md create mode 100644 wiki/1.0.1/06.01.25-Data_dynamix_account_reserved_units.md create mode 100644 wiki/1.0.1/06.01.26-Data_dynamix_account_rg_list.md create mode 100644 wiki/1.0.1/06.01.27-Data_dynamix_account_templates_list.md create mode 100644 wiki/1.0.1/06.01.28-Data_dynamix_account_vins_list.md create mode 100644 wiki/1.0.1/06.01.29-Data_dynamix_account_list.md create mode 100644 wiki/1.0.1/06.01.30-Data_dynamix_account_deleted_list.md create mode 100644 wiki/1.0.1/06.01.31-Data_dynamix_bservice_list.md create mode 100644 
wiki/1.0.1/06.01.32-Data_dynamix_bservice_deleted_list.md create mode 100644 wiki/1.0.1/06.01.33-Data_dynamix_bservice_snapshot_list.md create mode 100644 wiki/1.0.1/06.01.34-Data_dynamix_bservice.md create mode 100644 wiki/1.0.1/06.01.35-Data_dynamix_bservice_group.md create mode 100644 wiki/1.0.1/06.01.36-Data_dynamix_lb.md create mode 100644 wiki/1.0.1/06.01.37-Data_dynamix_lb_list.md create mode 100644 wiki/1.0.1/06.01.38-Data_dynamix_lb_list_deleted.md create mode 100644 wiki/1.0.1/06.01.39-Data_dynamix_disk_list_types.md create mode 100644 wiki/1.0.1/06.01.40-Data_dynamix_disk_list_types_detailed.md create mode 100644 wiki/1.0.1/06.01.41-Data_dynamix_disk_list_deleted.md create mode 100644 wiki/1.0.1/06.01.42-Data_dynamix_disk_list_unattached.md create mode 100644 wiki/1.0.1/06.01.43-Data_dynamix_disk_snapshot_list.md create mode 100644 wiki/1.0.1/06.01.44-Data_dynamix_disk_snapshot.md create mode 100644 wiki/1.0.1/06.01.45-Data_dynamix_k8s.md create mode 100644 wiki/1.0.1/06.01.46-Data_dynamix_k8s_list.md create mode 100644 wiki/1.0.1/06.01.47-Data_dynamix_k8s_list_deleted.md create mode 100644 wiki/1.0.1/06.01.48-Data_dynamix_k8s_wg.md create mode 100644 wiki/1.0.1/06.01.49-Data_dynamix_k8s_wg_list.md create mode 100644 wiki/1.0.1/06.01.50-Data_dynamix_vins_audits.md create mode 100644 wiki/1.0.1/06.01.51-Data_dynamix_vins_ext_net_list.md create mode 100644 wiki/1.0.1/06.01.52-Data_dynamix_vins_ip_list.md create mode 100644 wiki/1.0.1/06.01.53-Data_dynamix_vins_list_deleted.md create mode 100644 wiki/1.0.1/06.01.54-Data_dynamix_vins_nat_rule_list.md create mode 100644 wiki/1.0.1/06.01.55-Data_dynamix_kvmvm_audits.md create mode 100644 wiki/1.0.1/06.01.56-Data_dynamix_kvmvm_get_audits.md create mode 100644 wiki/1.0.1/06.01.57-Data_dynamix_kvmvm_get_console_url.md create mode 100644 wiki/1.0.1/06.01.58-Data_dynamix_kvmvm_get_log.md create mode 100644 wiki/1.0.1/06.01.59-Data_dynamix_kvmvm_list.md create mode 100644 
wiki/1.0.1/06.01.60-Data_dynamix_kvmvm_pfw_list.md create mode 100644 wiki/1.0.1/06.01.61-Data_dynamix_kvmvm_user_list.md create mode 100644 wiki/1.0.1/06.01.62-Data_dynamix_rg_list.md create mode 100644 wiki/1.0.1/06.01.63-Data_dynamix_rg_affinity_group_computes.md create mode 100644 wiki/1.0.1/06.01.64-Data_dynamix_rg_affinity_groups_list.md create mode 100644 wiki/1.0.1/06.01.65-Data_dynamix_rg_affinity_groups_get.md create mode 100644 wiki/1.0.1/06.01.66-Data_dynamix_rg_audits.md create mode 100644 wiki/1.0.1/06.01.67-Data_dynamix_rg_list_computes.md create mode 100644 wiki/1.0.1/06.01.68-Data_dynamix_rg_list_deleted.md create mode 100644 wiki/1.0.1/06.01.69-Data_dynamix_rg_list_lb.md create mode 100644 wiki/1.0.1/06.01.70-Data_dynamix_rg_list_pfw.md create mode 100644 wiki/1.0.1/06.01.71-Data_dynamix_rg_list_vins.md create mode 100644 wiki/1.0.1/06.01.72-Data_dynamix_rg_usage.md create mode 100644 wiki/1.0.1/06.01.73-Data_dynamix_kvmvm_snapshot_usage.md create mode 100644 wiki/1.0.1/06.01.74-Data _dynamix_k8s_computes.md create mode 100644 wiki/1.0.1/06.01.75-Data_dynamix_flipgroup.md create mode 100644 wiki/1.0.1/06.01.75-Data_dynamix_flipgroup_list.md create mode 100644 wiki/1.0.1/06.01.76-Data_dynamix_k8ci_list.md create mode 100644 wiki/1.0.1/06.01.77-Data_dynamix_vins_static_route.md create mode 100644 wiki/1.0.1/06.01.78-Data_dynamix_vins_static_route_list.md create mode 100644 wiki/1.0.1/06.01.79-Data_dynamix_account_resource_consumption_get.md create mode 100644 wiki/1.0.1/06.01.80-Data_dynamix_account_resource_consumption_list.md create mode 100644 wiki/1.0.1/06.01.81-Data_dynamix_k8s_wg_cloud_init.md create mode 100644 wiki/1.0.1/06.01.82-Data_dynamix_kvmvm_list_deleted.md create mode 100644 wiki/1.0.1/06.01.83-Data_dynamix_kvmvm_pci_device_list.md create mode 100644 wiki/1.0.1/06.01.84-Data_dynamix_kvmvm_vgpu_list.md create mode 100644 wiki/1.0.1/06.01.85-Data_dynamix_rg_resource_consumption_get.md create mode 100644 
wiki/1.0.1/06.01.86-Data_dynamix_rg_resource_consumption_list.md create mode 100644 wiki/1.0.1/06.01.87-Data_dynamix_disk_replication.md create mode 100644 wiki/1.0.1/06.01.87-Data_dynamix_vfpool.md create mode 100644 wiki/1.0.1/06.01.88-Data_dynamix_vfpool_list.md create mode 100644 wiki/1.0.1/06.01.89-Data_dynamix_stack.md create mode 100644 wiki/1.0.1/06.01.90-Data_dynamix_stack_list.md create mode 100644 wiki/1.0.1/07.-Resource-функции-Terraform-провайдера-dynamix.md create mode 100644 wiki/1.0.1/07.01-Пользовательская-группа-API.md create mode 100644 wiki/1.0.1/07.01.01-Resource_dynamix_kvmvm.md create mode 100644 wiki/1.0.1/07.01.02-Resource_dynamix_resgroup.md create mode 100644 wiki/1.0.1/07.01.03-Resource_dynamix_disk.md create mode 100644 wiki/1.0.1/07.01.04-Resource_dynamix_vins.md create mode 100644 wiki/1.0.1/07.01.05-Resource_dynamix_snapshot.md create mode 100644 wiki/1.0.1/07.01.06-Resource_dynamix_k8s.md create mode 100644 wiki/1.0.1/07.01.07-Resource_dynamix_k8s_wg.md create mode 100644 wiki/1.0.1/07.01.08-Resource_dynamix_image_virtual.md create mode 100644 wiki/1.0.1/07.01.09-Resource_dynamix_image.md create mode 100644 wiki/1.0.1/07.01.10-Resource_dynamix_pfw.md create mode 100644 wiki/1.0.1/07.01.11-Resource_dynamix_account.md create mode 100644 wiki/1.0.1/07.01.12-Resource_dynamix_bservice.md create mode 100644 wiki/1.0.1/07.01.13-Resource_dynamix_bservice_group.md create mode 100644 wiki/1.0.1/07.01.14-Resource_dynamix_lb.md create mode 100644 wiki/1.0.1/07.01.15-Resource_dynamix_lb_frontend.md create mode 100644 wiki/1.0.1/07.01.16-Resource_dynamix_lb_frontend_bind.md create mode 100644 wiki/1.0.1/07.01.17-Resource_dynamix_lb_backend.md create mode 100644 wiki/1.0.1/07.01.18-Resource_dynamix_lb_backend_server.md create mode 100644 wiki/1.0.1/07.01.19-Resource_dynamix_disk_snapshot.md create mode 100644 wiki/1.0.1/07.01.20-Resource_dynamix_flipgroup.md create mode 100644 wiki/1.0.1/07.01.21-Resource_dynamix_k8s_cp.md create mode 100644 
wiki/1.0.1/07.01.22-Resource_dynamix_vins_static_route.md create mode 100644 wiki/1.0.1/07.01.23-Resource_dynamix_image_from_blank_compute.md create mode 100644 wiki/1.0.1/07.01.24-Resource_dynamix_image_from_platform_disk.md create mode 100644 wiki/1.0.1/07.01.25-Resource_dynamix_disk_replication.md create mode 100644 wiki/1.0.1/08.-Полезные-советы.md create mode 100644 wiki/1.0.1/Home.md diff --git a/CHANGELOG.md b/CHANGELOG.md index cea04b9..e8116a5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,136 +1,8 @@ -## Version 1.0.0 +## Version 1.0.1 ### Feature -#### account: -- Add data source dynamix_account_audits_list in cloudapi/account -- Add data source dynamix_account_computes_list in cloudapi/account -- Add data source dynamix_account_consumed_unit_by_type in cloudapi/account -- Add data source dynamix_account_consumed_units in cloudapi/account -- Add data source dynamix_account_disks_list in cloudapi/account -- Add data source dynamix_account_flipgroups_list in cloudapi/account -- Add data source dynamix_account_get_resource_consumption_list in cloudapi/account -- Add data source dynamix_account_get_resource_consumption in cloudapi/account -- Add data source dynamix_account_deleted_list in cloudapi/account -- Add data source dynamix_account_list in cloudapi/account -- Add data source dynamix_account_reserved_units in cloudapi/account -- Add data source dynamix_account_rg_list in cloudapi/account -- Add data source dynamix_account_templates_list in cloudapi/account -- Add data source dynamix_account_vins_list in cloudapi/account -- Add data source dynamix_account in cloudapi/account -- Add resource dynamix_account in cloudapi/account - -#### bservice: -- Add data source dynamix_bservice_deleted_list in cloudapi/bservice -- Add data source dynamix_bservice_group in cloudapi/bservice -- Add data source dynamix_bservice_list in cloudapi/bservice -- Add data source dynamix_bservice_snapshot_list in cloudapi/bservice -- Add data source dynamix_bservice in 
cloudapi/bservice -- Add resource dynamix_bservice_group in cloudapi/bservice -- Add resource dynamix_bservice in cloudapi/bservice - -#### disks: -- Add data source dynamix_disk in cloudapi/disks -- Add data source dynamix_disk_list in cloudapi/disks -- Add data source dynamix_disk_list_deleted in cloudapi/disks -- Add data source dynamix_disk_list_types in cloudapi/disks -- Add data source dynamix_disk_list_types_detailed in cloudapi/disks -- Add data source dynamix_disk_list_unattached in cloudapi/disks -- Add data source dynamix_disk_replication in cloudapi/disks -- Add data source dynamix_disk_snapshot in cloudapi/disks -- Add data source dynamix_disk_snapshot_list in cloudapi/disks -- Add resource dynamix_disk in cloudapi/disks -- Add resource dynamix_disk_replication in cloudapi/disks -- Add resource dynamix_disk_snapshot in cloudapi/disks - -#### extnet: -- Add data source dynamix_extnet in cloudapi/extnet -- Add data source dynamix_extnet_computes_list in cloudapi/extnet -- Add data source dynamix_extnet_default in cloudapi/extnet -- Add data source dynamix_extnet_list in cloudapi/extnet - -#### flipgroup: -- Add data source dynamix_flipgroup in cloudapi/flipgroup -- Add data source dynamix_flipgroup_list in cloudapi/flipgroup -- Add resource dynamix_flipgroup in cloudapi/flipgroup - -#### image: -- Add data source dynamix_image in cloudapi/image -- Add data source dynamix_image_list in cloudapi/image -- Add resource dynamix_image in cloudapi/image -- Add resource dynamix_image_virtual in cloudapi/image - -#### k8s: -- Add data source dynamix_k8ci_list in cloudapi/k8s -- Add data source dynamix_k8s in cloudapi/k8s -- Add data source dynamix_k8s_computes in cloudapi/k8s -- Add data source dynamix_k8s_list in cloudapi/k8s -- Add data source dynamix_k8s_list_deleted in cloudapi/k8s -- Add data source dynamix_k8s_wg in cloudapi/k8s -- Add data source dynamix_k8s_wg_cloud_init in cloudapi/k8s -- Add data source dynamix_k8s_wg_list in cloudapi/k8s -- Add 
resource dynamix_k8s_cp in cloudapi/k8s -- Add resource dynamix_k8s_wg in cloudapi/k8s - -#### kvmvm: -- Add data source dynamix_kvmvm in cloudapi/kvmvm -- Add data source dynamix_kvmvm_audits in cloudapi/kvmvm -- Add data source dynamix_kvmvm_get_audits in cloudapi/kvmvm -- Add data source dynamix_kvmvm_get_console_url in cloudapi/kvmvm -- Add data source dynamix_kvmvm_get_log in cloudapi/kvmvm -- Add data source dynamix_kvmvm_list in cloudapi/kvmvm -- Add data source dynamix_kvmvm_list_deleted in cloudapi/kvmvm -- Add data source dynamix_kvmvm_pci_device_list in cloudapi/kvmvm -- Add data source dynamix_kvmvm_pfw_list in cloudapi/kvmvm -- Add data source dynamix_kvmvm_snapshot_usage in cloudapi/kvmvm -- Add data source dynamix_kvmvm_user_list in cloudapi/kvmvm -- Add data source dynamix_kvmvm_vgpu_list in cloudapi/kvmvm -- Add resource dynamix_kvmvm in cloudapi/kvmvm - -#### lb: -- Add data source dynamix_data_source_lb_list_deleted in cloudapi/lb -- Add data source dynamix_lb_list_deleted in cloudapi/lb -- Add data source dynamix_lb in cloudapi/lb -- Add resource dynamix_lb_backend_server in cloudapi/lb -- Add resource dynamix_lb_backend in cloudapi/lb -- Add resource dynamix_lb_frontend_bind in cloudapi/lb -- Add resource dynamix_lb_frontend in cloudapi/lb -- Add resource dynamix_lb in cloudapi/lb - -#### rg: -- Add data source dynamix_rg in cloudapi/rg -- Add data source dynamix_rg_affinity_group_computes in cloudapi/rg -- Add data source dynamix_rg_affinity_groups_get in cloudapi/rg -- Add data source dynamix_rg_affinity_groups_list in cloudapi/rg -- Add data source dynamix_rg_audits in cloudapi/rg -- Add data source dynamix_rg_resource_consumption_get in cloudapi/rg -- Add data source dynamix_rg_list in cloudapi/rg -- Add data source dynamix_rg_list_computes in cloudapi/rg -- Add data source dynamix_rg_list_deleted in cloudapi/rg -- Add data source dynamix_rg_list_lb in cloudapi/rg -- Add data source dynamix_rg_list_pfw in cloudapi/rg -- Add data source 
dynamix_rg_list_vins in cloudapi/rg -- Add data source dynamix_rg_resource_consumption_list in cloudapi/rg -- Add data source dynamix_rg_usage in cloudapi/rg -- Add resource dynamix_rg in cloudapi/rg - -#### stack: -- Add data source dynamix_stack in cloudapi/stack -- Add data source dynamix_stack_list in cloudapi/stack - -#### vfpool: -- Add data source dynamix_vfpool in cloudapi/vfpool -- Add data source dynamix_vfpool_list in cloudapi/vfpool - -#### vins: -- Add data source dynamix_vins in cloudapi/vins -- Add data source dynamix_vins_audits in cloudapi/vins -- Add data source dynamix_vins_ext_net_list in cloudapi/vins -- Add data source dynamix_vins_ip_list in cloudapi/vins -- Add data source dynamix_vins_list in cloudapi/vins -- Add data source dynamix_vins_list_deleted in cloudapi/vins -- Add data source dynamix_vins_nat_rule_list in cloudapi/vins -- Add data source dynamix_vins_static_route in cloudapi/vins -- Add data source dynamix_vins_static_route_list in cloudapi/vins -- Add resource dynamix_vins in cloudapi/vins -- Add resource dynamix_vins_static_route in cloudapi/vins +### Bugfix +- Fixed dynamix_lb_list data source in cloudapi/lb for the optional field 'rg_id' to work correctly +- Fixed reading configuration data from environment variables +- Fixed configuration validation \ No newline at end of file diff --git a/Makefile b/Makefile index b090019..fac9d61 100644 --- a/Makefile +++ b/Makefile @@ -8,7 +8,7 @@ ZIPDIR = ./zip BINARY=${NAME} WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${SECONDNAMESPACE}/${VERSION}/${OS_ARCH} MAINPATH = ./cmd/dynamix/ -VERSION=1.0.0 +VERSION=1.0.1 OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH) FILES = ${BINARY}_${VERSION}_darwin_amd64\ diff --git a/README.md b/README.md index 56ac2c0..a84cea8 100644 --- a/README.md +++ b/README.md @@ -140,4 +140,4 @@ | data source dynamix_lb_list | server_default_settings, server_settings, primary_node, secondary_node | | data source 
dynamix_lb_list_deleted | server_default_settings, server_settings, primary_node, secondary_node | | resource dynamix_lb | server_default_settings, server_settings, primary_node, secondary_node | -| resource dynamix_lb_backend | server_settings | \ No newline at end of file +| resource dynamix_lb_backend | server_settings | diff --git a/docs/data-sources/account_deleted_list.md b/docs/data-sources/account_list_deleted.md similarity index 93% rename from docs/data-sources/account_deleted_list.md rename to docs/data-sources/account_list_deleted.md index 1eb0783..4d4f9af 100644 --- a/docs/data-sources/account_deleted_list.md +++ b/docs/data-sources/account_list_deleted.md @@ -1,12 +1,12 @@ --- # generated by https://github.com/hashicorp/terraform-plugin-docs -page_title: "dynamix_account_deleted_list Data Source - terraform-provider-dynamix" +page_title: "dynamix_account_list_deleted Data Source - terraform-provider-dynamix" subcategory: "" description: |- --- -# dynamix_account_deleted_list (Data Source) +# dynamix_account_list_deleted (Data Source) diff --git a/docs/index.md b/docs/index.md index 3fe3326..f082db2 100644 --- a/docs/index.md +++ b/docs/index.md @@ -17,21 +17,21 @@ description: |- ### Required -- `authenticator` (String) Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'. -- `controller_url` (String) URL of DECORT Cloud controller to use. API calls will be directed to this URL. +- `authenticator` (String) Authentication mode to use when connecting to DYNAMIX cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'. +- `controller_url` (String) URL of DYNAMIX Cloud controller to use. API calls will be directed to this URL. ### Optional -- `allow_unverified_ssl` (Boolean) If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false. 
-- `app_id` (String) Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode. -- `app_secret` (String) Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode. -- `bvs_password` (String) User password for DECORT cloud API operations in 'bvs' authentication mode. -- `bvs_user` (String) User name for DECORT cloud API operations in 'bvs' authentication mode. -- `domain` (String) User password for DECORT cloud API operations in 'bvs' authentication mode. -- `jwt` (String) JWT to access DECORT cloud API in 'jwt' authentication mode. +- `allow_unverified_ssl` (Boolean) If true, DYNAMIX API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false. +- `app_id` (String) Application ID to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode. +- `app_secret` (String) Application secret to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode. +- `bvs_password` (String) User password for DYNAMIX cloud API operations in 'bvs' authentication mode. +- `bvs_user` (String) User name for DYNAMIX cloud API operations in 'bvs' authentication mode. +- `domain` (String) User password for DYNAMIX cloud API operations in 'bvs' authentication mode. +- `jwt` (String) JWT to access DYNAMIX cloud API in 'jwt' authentication mode. - `oauth2_url` (String) OAuth2 application URL in 'decs3o' and 'bvs' authentication mode. -- `password` (String) User password for DECORT cloud API operations in 'legacy' authentication mode. +- `password` (String) User password for DYNAMIX cloud API operations in 'legacy' authentication mode. - `path_config` (String) The path of the configuration file entry. - `path_token` (String) The path of the token file entry. - `time_to_refresh` (Number) The number of minutes before the expiration of the token, a refresh will be made. -- `user` (String) User name for DECORT cloud API operations in 'legacy' authentication mode. 
+- `user` (String) User name for DYNAMIX cloud API operations in 'legacy' authentication mode. diff --git a/internal/provider/data_sources_map.go b/internal/provider/data_sources_map.go index db23b2e..82925e7 100644 --- a/internal/provider/data_sources_map.go +++ b/internal/provider/data_sources_map.go @@ -15,8 +15,14 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/stack" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vfpool" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins" - cbaccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account" + cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account" + cbAudit "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit" + cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb" + cbNode "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node" + cbRG "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg" cbStack "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/stack" + cbVFpool "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool" + cbVins "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins" ) func newDataSourcesMap() []func() datasource.DataSource { @@ -111,6 +117,9 @@ func newDataSourcesMap() []func() datasource.DataSource { vfpool.NewDataSourceVFPool, vfpool.NewDataSourceVFPoolList, + cbVFpool.NewDataSourceVFPool, + cbVFpool.NewDataSourceVFPoolList, + vins.NewDataSourceVINS, vins.NewDataSourceVINSAudits, vins.NewDataSourceVINSExtNetList, @@ -121,11 +130,56 @@ func newDataSourcesMap() []func() datasource.DataSource { 
vins.NewDataSourceVINSStaticRoute, vins.NewDataSourceVINSStaticRouteList, - cbaccount.NewDataSourceAccount, - cbaccount.NewDataSourceAccountList, - cbaccount.NewDataSourceAccountVinsList, + cbAccount.NewDataSourceAccount, + cbAccount.NewDataSourceAccountList, + cbAccount.NewDataSourceAccountVinsList, + cbAccount.NewDataSourceAccountAuditsList, + cbAccount.NewDataSourceAccountAvailableTemplatesListDataSource, + cbAccount.NewDataSourceAccountComputesList, + cbAccount.NewDataSourceAccountDisksList, + cbAccount.NewDataSourceAccountFlipgroupsList, + cbAccount.NewDataSourceAccountGetResourceConsumption, + cbAccount.NewDataSourceAccountListDeleted, + cbAccount.NewDataSourceAccountGetResourceConsumptionList, + cbAccount.NewDataSourceAccountRGList, + + cbAudit.NewDataSourceAudit, + cbAudit.NewDataSourceAuditLinkedJobs, + cbAudit.NewDataSourceAuditList, + + cbNode.NewDataSourceNode, + cbNode.NewDataSourceNodeList, + + cbLb.NewDataSourceLB, + cbLb.NewDataSourceLBList, + cbLb.NewDataSourceLBListDeleted, cbStack.NewDataSourceStack, cbStack.NewDataSourceStackList, + + cbVins.NewDataSourceVINS, + cbVins.NewDataSourceVINSAudits, + cbVins.NewDataSourceVINSExtNetList, + cbVins.NewDataSourceVINSIPList, + cbVins.NewDataSourceVINSList, + cbVins.NewDataSourceVINSListDeleted, + cbVins.NewDataSourceVINSNATRuleList, + cbVins.NewDataSourceVINSStaticRoute, + cbVins.NewDataSourceVINSStaticRouteList, + + cbRG.NewDataSourceRG, + cbRG.NewDataSourceRGList, + cbRG.NewDataSourceRGListDeleted, + cbRG.NewDataSourceRGAffinityGroupComputes, + cbRG.NewDataSourceRGAffinityGroupsGet, + cbRG.NewDataSourceRGAffinityGroupsList, + cbRG.NewDataSourceRGAudits, + cbRG.NewDataSourceRGGetResourceConsumption, + cbRG.NewDataSourceRGResourceConsumptionList, + cbRG.NewDataSourceRGUsage, + cbRG.NewDataSourceRGListComputes, + cbRG.NewDataSourceRGListLB, + cbRG.NewDataSourceRGListPFW, + cbRG.NewDataSourceRGListVins, } } diff --git a/internal/provider/provider.go b/internal/provider/provider.go index b7d6da4..6f3ec58 
100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -65,7 +65,7 @@ func (p *DynamixProvider) Schema(_ context.Context, _ provider.SchemaRequest, re resp.Schema = schema.Schema{ Attributes: map[string]schema.Attribute{ "authenticator": schema.StringAttribute{ - MarkdownDescription: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.", + MarkdownDescription: "Authentication mode to use when connecting to DYNAMIX cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.", Required: true, Validators: []validator.String{ stringvalidator.OneOfCaseInsensitive("decs3o", "legacy", "jwt", "bvs"), // ignore case while validating @@ -76,43 +76,43 @@ func (p *DynamixProvider) Schema(_ context.Context, _ provider.SchemaRequest, re Optional: true, }, "controller_url": schema.StringAttribute{ - MarkdownDescription: "URL of DECORT Cloud controller to use. API calls will be directed to this URL.", + MarkdownDescription: "URL of DYNAMIX Cloud controller to use. 
API calls will be directed to this URL.", Required: true, }, "user": schema.StringAttribute{ - MarkdownDescription: "User name for DECORT cloud API operations in 'legacy' authentication mode.", + MarkdownDescription: "User name for DYNAMIX cloud API operations in 'legacy' authentication mode.", Optional: true, }, "password": schema.StringAttribute{ - MarkdownDescription: "User password for DECORT cloud API operations in 'legacy' authentication mode.", + MarkdownDescription: "User password for DYNAMIX cloud API operations in 'legacy' authentication mode.", Optional: true, }, "bvs_user": schema.StringAttribute{ - MarkdownDescription: "User name for DECORT cloud API operations in 'bvs' authentication mode.", + MarkdownDescription: "User name for DYNAMIX cloud API operations in 'bvs' authentication mode.", Optional: true, }, "bvs_password": schema.StringAttribute{ - MarkdownDescription: "User password for DECORT cloud API operations in 'bvs' authentication mode.", + MarkdownDescription: "User password for DYNAMIX cloud API operations in 'bvs' authentication mode.", Optional: true, }, "domain": schema.StringAttribute{ - MarkdownDescription: "User password for DECORT cloud API operations in 'bvs' authentication mode.", + MarkdownDescription: "User password for DYNAMIX cloud API operations in 'bvs' authentication mode.", Optional: true, }, "app_id": schema.StringAttribute{ - MarkdownDescription: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.", + MarkdownDescription: "Application ID to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode.", Optional: true, }, "app_secret": schema.StringAttribute{ - MarkdownDescription: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.", + MarkdownDescription: "Application secret to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode.", Optional: true, }, "jwt": schema.StringAttribute{ - MarkdownDescription: "JWT to access DECORT 
cloud API in 'jwt' authentication mode.", + MarkdownDescription: "JWT to access DYNAMIX cloud API in 'jwt' authentication mode.", Optional: true, }, "allow_unverified_ssl": schema.BoolAttribute{ - MarkdownDescription: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.", + MarkdownDescription: "If true, DYNAMIX API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.", Optional: true, }, "path_config": schema.StringAttribute{ @@ -137,7 +137,8 @@ func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureR diags := req.Config.Get(ctx, &config) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { - log.Debugf("Provider Configure error after req.Config.Get") + resp.Diagnostics.AddError("Provider Configure: ", "error after req.Config.Get") + log.Error("Provider Configure error after req.Config.Get") return } @@ -148,7 +149,8 @@ func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureR // Validate and set up authentication mode mode, err := dynamixConfig.validateAuthenticator() if err != nil { - log.Debug(err) + log.Error(err) + resp.Diagnostics.AddError("Provider Configure: validate error:", err.Error()) return } diff --git a/internal/provider/provider_validation.go b/internal/provider/provider_validation.go index f567be4..51524da 100644 --- a/internal/provider/provider_validation.go +++ b/internal/provider/provider_validation.go @@ -6,7 +6,6 @@ import ( "os" "strings" - log "github.com/sirupsen/logrus" sdk_config "repository.basistech.ru/BASIS/decort-golang-sdk/config" ) @@ -38,82 +37,82 @@ type dynamixProviderConfig struct { func (d *dynamixProviderConfig) new(config dynamixProviderModel) { d.authenticator = strings.ToLower(config.Authenticator.ValueString()) - if config.Oauth2Url.IsUnknown() { - d.oauth2Url = os.Getenv("DECORT_OAUTH2_URL") + if config.Oauth2Url.IsNull() { + 
d.oauth2Url = os.Getenv("DYNAMIX_OAUTH2_URL") } else { d.oauth2Url = config.Oauth2Url.ValueString() } d.oauth2Url = strings.ToLower(d.oauth2Url) - d.controllerUrl = strings.ToLower(config.ControllerUrl.ValueString()) - if d.controllerUrl == "" { - log.Debugf("empty DECORT cloud controller URL provided") - return + if config.ControllerUrl.IsNull() { + d.controllerUrl = os.Getenv("DYNAMIX_CONTROLLER_URL") + } else { + d.controllerUrl = strings.ToLower(config.ControllerUrl.ValueString()) } - if config.User.IsUnknown() { - d.user = os.Getenv("DECORT_USER") + if config.User.IsNull() { + d.user = os.Getenv("DYNAMIX_USER") } else { d.user = config.User.ValueString() } - if config.Password.IsUnknown() { - d.password = os.Getenv("DECORT_PASSWORD") + if config.Password.IsNull() { + d.password = os.Getenv("DYNAMIX_PASSWORD") } else { d.password = config.Password.ValueString() } - if config.BvsUser.IsUnknown() { - d.bvsUser = os.Getenv("DECORT_BVS_USER") + if config.BvsUser.IsNull() { + d.bvsUser = os.Getenv("DYNAMIX_BVS_USER") } else { d.bvsUser = config.BvsUser.ValueString() } - if config.BvsPassword.IsUnknown() { - d.bvsPassword = os.Getenv("DECORT_BVS_PASSWORD") + if config.BvsPassword.IsNull() { + d.bvsPassword = os.Getenv("DYNAMIX_BVS_PASSWORD") } else { d.bvsPassword = config.BvsPassword.ValueString() } - if config.Domain.IsUnknown() { - d.domain = os.Getenv("DECORT_DOMAIN") + if config.Domain.IsNull() { + d.domain = os.Getenv("DYNAMIX_DOMAIN") } else { d.domain = config.Domain.ValueString() } - if config.AppId.IsUnknown() { - d.appId = os.Getenv("DECORT_APP_ID") + if config.AppId.IsNull() { + d.appId = os.Getenv("DYNAMIX_APP_ID") } else { d.appId = config.AppId.ValueString() } - if config.AppSecret.IsUnknown() { - d.appSecret = os.Getenv("DECORT_APP_SECRET") + if config.AppSecret.IsNull() { + d.appSecret = os.Getenv("DYNAMIX_APP_SECRET") } else { d.appSecret = config.AppSecret.ValueString() } - if config.Jwt.IsUnknown() { - d.jwt = os.Getenv("DECORT_JWT") + if 
config.Jwt.IsNull() { + d.jwt = os.Getenv("DYNAMIX_JWT") } else { d.jwt = config.Jwt.ValueString() } - if config.AllowUnverifiedSsl.IsUnknown() { + if config.AllowUnverifiedSsl.IsNull() { d.allowUnverifiedSsl = false // default false } else { d.allowUnverifiedSsl = config.AllowUnverifiedSsl.ValueBool() } - if !config.PathConfig.IsUnknown() { + if !config.PathConfig.IsNull() { d.pathConfig = config.PathConfig.ValueString() } - if !config.PathToken.IsUnknown() { + if !config.PathToken.IsNull() { d.pathToken = config.PathToken.ValueString() } - if !config.TimeToRefresh.IsUnknown() { + if !config.TimeToRefresh.IsNull() { d.timeToRefresh = config.TimeToRefresh.ValueInt64() } @@ -125,6 +124,12 @@ func (d *dynamixProviderConfig) new(config dynamixProviderModel) { // reason, the method will return mode = MODE_UNDEF and error. func (d *dynamixProviderConfig) validateAuthenticator() (int, error) { var mode = MODE_UNDEF + if d.oauth2Url == "" { + return mode, fmt.Errorf("OAuth2 URL is required") + } + if d.controllerUrl == "" { + return mode, fmt.Errorf("controllerURL is required") + } switch d.authenticator { case "jwt": if d.jwt == "" { @@ -132,9 +137,6 @@ func (d *dynamixProviderConfig) validateAuthenticator() (int, error) { } mode = MODE_JWT case "decs3o": - if d.oauth2Url == "" { - return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no OAuth2 URL provided") - } if d.appId == "" { return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no Application ID provided") } @@ -149,6 +151,12 @@ func (d *dynamixProviderConfig) validateAuthenticator() (int, error) { if d.password == "" { return mode, fmt.Errorf("authenticator mode 'legacy' specified but no password provided") } + if d.appId == "" { + return mode, fmt.Errorf("authenticator mode 'legacy' specified but no Application ID provided") + } + if d.appSecret == "" { + return mode, fmt.Errorf("authenticator mode 'legacy' specified but no Secret ID provided") + } mode = MODE_LEGACY case "bvs": if 
d.bvsUser == "" { @@ -157,9 +165,6 @@ func (d *dynamixProviderConfig) validateAuthenticator() (int, error) { if d.bvsPassword == "" { return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs password provided") } - if d.oauth2Url == "" { - return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs URL provided") - } if d.appId == "" { return mode, fmt.Errorf("authenticator mode 'bvs' specified but no Application ID provided") } diff --git a/internal/provider/resources_map.go b/internal/provider/resources_map.go index 53d3e3c..d2da980 100644 --- a/internal/provider/resources_map.go +++ b/internal/provider/resources_map.go @@ -12,6 +12,10 @@ import ( "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/lb" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg" "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins" + cbLb "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb" + cbAccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account" + cbVFpool "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool" + cbVins "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins" ) func newResourcesMap() []func() resource.Resource { @@ -36,5 +40,16 @@ func newResourcesMap() []func() resource.Resource { vins.NewResourceVINSStaticRoute, bservice.NewResourceBService, bservice.NewResourceBServiceGroup, + + cbAccount.NewResourceAccount, + + cbLb.NewResourceLB, + cbLb.NewResourceLBBackend, + cbLb.NewResourceLBBackendServer, + cbLb.NewResourceLBFrontend, + cbLb.NewResourceLBFrontendBind, + cbVFpool.NewResourceVFPool, + cbVins.NewResourceVINS, + cbVins.NewResourceVINSStaticRoute, } } diff --git a/internal/service/cloudapi/account/data_source_account_list_deleted.go 
b/internal/service/cloudapi/account/data_source_account_list_deleted.go index 955447b..62a78fa 100644 --- a/internal/service/cloudapi/account/data_source_account_list_deleted.go +++ b/internal/service/cloudapi/account/data_source_account_list_deleted.go @@ -77,7 +77,7 @@ func (d *dataSourceAccountListDeleted) Schema(ctx context.Context, _ datasource. } func (d *dataSourceAccountListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { - resp.TypeName = req.ProviderTypeName + "_account_deleted_list" + resp.TypeName = req.ProviderTypeName + "_account_list_deleted" } // Configure adds the provider configured client to the data source. diff --git a/internal/service/cloudapi/account/flattens/flatten_account_list_deleted.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_list_deleted.go similarity index 100% rename from internal/service/cloudapi/account/flattens/flatten_account_list_deleted.go rename to internal/service/cloudapi/account/flattens/flatten_data_source_account_list_deleted.go diff --git a/internal/service/cloudapi/account/resource_account.go b/internal/service/cloudapi/account/resource_account.go index 6eb908e..129459f 100644 --- a/internal/service/cloudapi/account/resource_account.go +++ b/internal/service/cloudapi/account/resource_account.go @@ -41,7 +41,7 @@ type resourceAccount struct { func (r *resourceAccount) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { resp.Diagnostics.AddError( "Only users with admin privileges are able to create accounts. Contact your platform administrator or import existing account.", - "Use 'terraform import basis_account. ' command to import existing account configuration", + "Use 'terraform import dynamix_account. 
' command to import existing account configuration", ) return } diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account.go b/internal/service/cloudapi/account/utilities/utility_data_source_account.go index 8723b95..99216ed 100644 --- a/internal/service/cloudapi/account/utilities/utility_data_source_account.go +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account.go @@ -14,7 +14,7 @@ func AccountDataSourceCheckPresence(ctx context.Context, accountId uint64, c *de recordAccount, err := c.CloudAPI().Account().Get(ctx, account.GetRequest{AccountID: accountId}) if err != nil { - return nil, fmt.Errorf("cannot get info about extnet with error: %w", err) + return nil, fmt.Errorf("cannot get info about account with error: %w", err) } tflog.Info(ctx, "AccountDataSourceCheckPresence: response from CloudAPI().Account().Get", diff --git a/internal/service/cloudapi/lb/schemas/schema_data_soursce_lb_list.go b/internal/service/cloudapi/lb/schemas/schema_data_source_lb_list.go similarity index 100% rename from internal/service/cloudapi/lb/schemas/schema_data_soursce_lb_list.go rename to internal/service/cloudapi/lb/schemas/schema_data_source_lb_list.go diff --git a/internal/service/cloudapi/lb/schemas/schema_data_soursce_lb_list_deleted.go b/internal/service/cloudapi/lb/schemas/schema_data_source_lb_list_deleted.go similarity index 100% rename from internal/service/cloudapi/lb/schemas/schema_data_soursce_lb_list_deleted.go rename to internal/service/cloudapi/lb/schemas/schema_data_source_lb_list_deleted.go diff --git a/internal/service/cloudapi/lb/utilities/utility_data_source_lb_list.go b/internal/service/cloudapi/lb/utilities/utility_data_source_lb_list.go index 8339102..9c6fdb1 100644 --- a/internal/service/cloudapi/lb/utilities/utility_data_source_lb_list.go +++ b/internal/service/cloudapi/lb/utilities/utility_data_source_lb_list.go @@ -24,7 +24,7 @@ func LBListDataSourceCheckPresence(ctx context.Context, plan 
*models.DataSourceL listLBReq.AccountID = uint64(plan.AccountID.ValueInt64()) } if !plan.RgID.IsNull() { - listLBReq.RGID = uint64(plan.ByID.ValueInt64()) + listLBReq.RGID = uint64(plan.RgID.ValueInt64()) } if !plan.TechStatus.IsNull() { listLBReq.TechStatus = plan.TechStatus.ValueString() diff --git a/internal/service/cloudapi/vins/schemas/schema_resource_vins.go b/internal/service/cloudapi/vins/schemas/schema_resource_vins.go index eec344b..2c3965d 100644 --- a/internal/service/cloudapi/vins/schemas/schema_resource_vins.go +++ b/internal/service/cloudapi/vins/schemas/schema_resource_vins.go @@ -113,7 +113,7 @@ func MakeSchemaResourceVINS() map[string]schema.Attribute { Required: true, Description: "type of the reservation", Validators: []validator.String{ - stringvalidator.OneOfCaseInsensitive("DHCP", "VIP", "EXCLUDE"), // case is ignored + stringvalidator.OneOfCaseInsensitive("DHCP", "VIP", "EXCLUDED"), // case is ignored }, }, "ip_addr": schema.StringAttribute{ diff --git a/internal/service/cloudapi/vins/utilities/utility_data_source_vins_ext_net_list.go b/internal/service/cloudapi/vins/utilities/utility_data_source_vins_ext_net_list.go index 87e782c..0199d60 100644 --- a/internal/service/cloudapi/vins/utilities/utility_data_source_vins_ext_net_list.go +++ b/internal/service/cloudapi/vins/utilities/utility_data_source_vins_ext_net_list.go @@ -17,7 +17,7 @@ func VINSExtNetListDataSourceCheckPresence(ctx context.Context, vinsId uint64, c extnetList, err := c.CloudAPI().VINS().ExtNetList(ctx, vins.ExtNetListRequest{VINSID: vinsId}) if err != nil { - diags.AddError(fmt.Sprintf("Cannot get info about ext net list for vins with ID %v", vinsId), err.Error()) + diags.AddError(fmt.Sprintf("Cannot get info about extnet list for vins with ID %v", vinsId), err.Error()) return nil, diags } diff --git a/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list_deleted.go b/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list_deleted.go index 
871eb8e..dcd21e1 100644 --- a/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list_deleted.go +++ b/internal/service/cloudapi/vins/utilities/utility_data_source_vins_list_deleted.go @@ -47,7 +47,7 @@ func VINSListDeletedDataSourceCheckPresence(ctx context.Context, plan *models.Da tflog.Info(ctx, "VINSListDeletedDataSourceCheckPresence: before call CloudAPI().VINS().ListDeleted", map[string]any{"req": listReq}) list, err := c.CloudAPI().VINS().ListDeleted(ctx, listReq) if err != nil { - diags.AddError("Cannot get info about vins list", err.Error()) + diags.AddError("Cannot get info about vins deleted list", err.Error()) return nil, diags } tflog.Info(ctx, "VINSListDeletedDataSourceCheckPresence: successfull response from CloudAPI().VINS().ListDeleted") diff --git a/internal/service/cloudbroker/account/data_source_cb_account_audits_list.go b/internal/service/cloudbroker/account/data_source_cb_account_audits_list.go new file mode 100644 index 0000000..664c165 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_audits_list.go @@ -0,0 +1,91 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the 
expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountAuditsList{} +) + +func NewDataSourceAccountAuditsList() datasource.DataSource { + return &dataSourceAccountAuditsList{} +} + +// dataSourceAccountAuditsList is the data source implementation. +type dataSourceAccountAuditsList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountAuditsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountAuditsListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountAuditsList: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountAuditsList: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountAuditsDataSourceList(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountAuditsList", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountAuditsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountAuditsList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountAuditsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_audits_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountAuditsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountAuditsList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountAuditsList successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_available_templates_list.go b/internal/service/cloudbroker/account/data_source_cb_account_available_templates_list.go new file mode 100644 index 0000000..bbcb1d6 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_available_templates_list.go @@ -0,0 +1,91 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountAvailableTemplatesListDataSource{} +) + +func NewDataSourceAccountAvailableTemplatesListDataSource() datasource.DataSource { + return &dataSourceAccountAvailableTemplatesListDataSource{} +} + +// dataSourceAccountAvailableTemplatesListDataSource is the data source implementation. +type dataSourceAccountAvailableTemplatesListDataSource struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountAvailableTemplatesListDataSource) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountAvailableTemplatesListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error get state") + return + } + accountID := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read DataSourceAccountAvailableTemplatesListModel: got state successfully", map[string]any{"account_id": accountID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error set timeout") + return + } + tflog.Info(ctx, "Read DataSourceAccountAvailableTemplatesListModel: set timeouts successfully", map[string]any{ + "account_id": accountID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountAvailableTemplatesListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error flatten data source AccountAvailableTemplatesListDataSource") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read DataSourceAccountAvailableTemplatesListModel: Error set state") + return + } + tflog.Info(ctx, "End read DataSourceAccountAvailableTemplatesListModel", map[string]any{"account_id": accountID}) +} + +func (d *dataSourceAccountAvailableTemplatesListDataSource) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountAvailableTemplatesList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountAvailableTemplatesListDataSource) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_available_templates_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountAvailableTemplatesListDataSource) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure DataSourceAccountAvailableTemplatesListModel") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure DataSourceAccountAvailableTemplatesListModel successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_computes_list.go b/internal/service/cloudbroker/account/data_source_cb_account_computes_list.go new file mode 100644 index 0000000..05fb091 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_computes_list.go @@ -0,0 +1,88 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountComputesList{} +) + +func NewDataSourceAccountComputesList() datasource.DataSource { + return &dataSourceAccountComputesList{} +} + +// dataSourceAccountComputesList is the data source implementation. 
+type dataSourceAccountComputesList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountComputesList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.ListComputesModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountComputesList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountComputesList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountComputesListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountComputesList") +} + +func (d *dataSourceAccountComputesList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountComputesList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountComputesList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_computes_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountComputesList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountComputesList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountComputesList successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_disks_list.go b/internal/service/cloudbroker/account/data_source_cb_account_disks_list.go new file mode 100644 index 0000000..a89440d --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_disks_list.go @@ -0,0 +1,88 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountDisksList{} +) + +func NewDataSourceAccountDisksList() datasource.DataSource { + return &dataSourceAccountDisksList{} +} + +// dataSourceAccountDisksList is the data source implementation. +type dataSourceAccountDisksList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountDisksList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountDisksListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountDisksList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountDisksList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountDisksListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error flatten data source account disks list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountDisksList") +} + +func (d *dataSourceAccountDisksList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountDisksList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountDisksList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_disks_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountDisksList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountDisksList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountDisksList successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_flipgroups_list.go b/internal/service/cloudbroker/account/data_source_cb_account_flipgroups_list.go new file mode 100644 index 0000000..e0fec9c --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_flipgroups_list.go @@ -0,0 +1,88 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountFlipgroupsList{} +) + +func NewDataSourceAccountFlipgroupsList() datasource.DataSource { + return &dataSourceAccountFlipgroupsList{} +} + +// dataSourceAccountFlipgroupsList is the data source implementation. +type dataSourceAccountFlipgroupsList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountFlipgroupsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountFlipgroupsListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountFlipgroupsList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountFlipgroupsList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountFlipgroupsListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountFlipgroupsList") +} + +func (d *dataSourceAccountFlipgroupsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountFlipgroupsList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountFlipgroupsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_flipgroups_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountFlipgroupsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountFlipgroupsList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountFlipgroupsList successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_get_resource_consumption.go b/internal/service/cloudbroker/account/data_source_cb_account_get_resource_consumption.go new file mode 100644 index 0000000..9a7badd --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_get_resource_consumption.go @@ -0,0 +1,91 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountGetResourceConsumption{} +) + +func NewDataSourceAccountGetResourceConsumption() datasource.DataSource { + return &dataSourceAccountGetResourceConsumption{} +} + +// dataSourceAccountGetResourceConsumption is the data source implementation. 
+type dataSourceAccountGetResourceConsumption struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountGetResourceConsumption) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.AccountGetResourceConsumptionModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumption: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumption: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountGetResourceConsumptionDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error flatten data source account") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountGetResourceConsumption", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountGetResourceConsumption) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountGetResourceConsumption(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountGetResourceConsumption) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_resource_consumption_get" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountGetResourceConsumption) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountGetResourceConsumption") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountGetResourceConsumption successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_list_deleted.go b/internal/service/cloudbroker/account/data_source_cb_account_list_deleted.go new file mode 100644 index 0000000..7c71fd8 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_list_deleted.go @@ -0,0 +1,88 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountListDeleted{} +) + +func NewDataSourceAccountListDeleted() datasource.DataSource { + return &dataSourceAccountListDeleted{} +} + +// dataSourceAccountListDeleted is the data source implementation. +type dataSourceAccountListDeleted struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountListDeletedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountListDeleted: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountListDeletedDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountListDeleted") +} + +func (d *dataSourceAccountListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_list_deleted" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountListDeleted successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_resource_consumption_list.go b/internal/service/cloudbroker/account/data_source_cb_account_resource_consumption_list.go new file mode 100644 index 0000000..b3f3f40 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_resource_consumption_list.go @@ -0,0 +1,89 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountGetResourceConsumptionList{} +) + +func NewDataSourceAccountGetResourceConsumptionList() datasource.DataSource { + return &dataSourceAccountGetResourceConsumptionList{} +} + +// dataSourceAccountGetResourceConsumptionList is the data source implementation. +type dataSourceAccountGetResourceConsumptionList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountGetResourceConsumptionList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.AccountGetResourceConsumptionListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumptionList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumptionList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountGetResourceConsumptionList(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error flatten data source account") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountGetResourceConsumptionList") +} + +func (d *dataSourceAccountGetResourceConsumptionList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountGetResourceListConsumption(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountGetResourceConsumptionList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_resource_consumption_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountGetResourceConsumptionList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountGetResourceConsumptionList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountGetResourceConsumptionList successfully") +} diff --git a/internal/service/cloudbroker/account/data_source_cb_account_rg_list.go b/internal/service/cloudbroker/account/data_source_cb_account_rg_list.go new file mode 100644 index 0000000..9799155 --- /dev/null +++ b/internal/service/cloudbroker/account/data_source_cb_account_rg_list.go @@ -0,0 +1,88 @@ +package cbAccount + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountRGList{} +) + +func NewDataSourceAccountRGList() datasource.DataSource { + return &dataSourceAccountRGList{} +} + +// dataSourceAccountRGList is the data source implementation. 
+type dataSourceAccountRGList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountRGList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountRGListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountRGList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountRGList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountRGListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountRGList") +} + +func (d *dataSourceAccountRGList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountRGList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountRGList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account_rg_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountRGList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountRGList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountRGList successfully") +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_audits_list.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_audits_list.go new file mode 100644 index 0000000..d0a6395 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_audits_list.go @@ -0,0 +1,59 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountAuditsDataSourceList 
flattens data source for account audits. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountAuditsDataSourceList(ctx context.Context, state *models.DataSourceAccountAuditsListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountAuditsDataSourceList") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + auditsList, err := utilities.AccountAuditsListDataSourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account audits with account ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountAuditsDataSourceList: before flatten", map[string]any{"account_id": accountId}) + + id := uuid.New() + *state = models.DataSourceAccountAuditsListModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemAuditModel, 0, len(*auditsList)) + for _, item := range *auditsList { + i := models.ItemAuditModel{ + Call: types.StringValue(item.Call), + ResponseTime: types.Float64Value(item.ResponseTime), + StatusCode: types.Int64Value(int64(item.StatusCode)), + Timestamp: types.Float64Value(item.Timestamp), + User: types.StringValue(item.User), + } + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountAuditsDataSourceList: after flatten", map[string]any{"account_id": state.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.AccountAuditsDataSourceList", map[string]any{"account_id": state.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_available_templates_list.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_available_templates_list.go new file mode 100644 index 0000000..8e262a7 --- /dev/null +++ 
b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_available_templates_list.go @@ -0,0 +1,41 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountAvailableTemplatesListDataSource flattens data source for templates list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountAvailableTemplatesListDataSource(ctx context.Context, state *models.DataSourceAccountAvailableTemplatesListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountAvailableTemplatesListDataSource") + + diags := diag.Diagnostics{} + + templatesList, err := utilities.AccountAvailableTemplatesListDataSourceCheckPresence(ctx, uint64(state.AccountID.ValueInt64()), c) + if err != nil { + diags.AddError("Cannot get info about templates list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountAvailableTemplatesListDataSource: before flatten") + + *state = models.DataSourceAccountAvailableTemplatesListModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + Items: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, templatesList), + } + + tflog.Info(ctx, "flattens.AccountAvailableTemplatesListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountAvailableTemplatesListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_computes_list.go 
b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_computes_list.go new file mode 100644 index 0000000..8d4d9a2 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_computes_list.go @@ -0,0 +1,81 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountComputesListDataSource flattens data source for account computes list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountComputesListDataSource(ctx context.Context, state *models.ListComputesModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountComputesListDataSource") + + diags := diag.Diagnostics{} + + cList, err := utilities.AccountComputesListDataSourceCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about computes list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountComputesListDataSource: before flatten") + + *state = models.ListComputesModel{ + AccountID: state.AccountID, + Name: state.Name, + RGName: state.RGName, + RGID: state.RGID, + TechStatus: state.TechStatus, + IpAddress: state.IpAddress, + ExtNetName: state.ExtNetName, + ExtNetID: state.ExtNetID, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + EntryCount: types.Int64Value(int64(cList.EntryCount)), + } + + data := make([]models.ItemComputeModel, 0, len(cList.Data)) + + for _, v := range cList.Data { + item := models.ItemComputeModel{ + 
AccountID: types.Int64Value(int64(v.AccountID)), + AccountName: types.StringValue(v.AccountName), + CPUs: types.Int64Value(int64(v.CPUs)), + CreatedBy: types.StringValue(v.CreatedBy), + CreatedTime: types.Int64Value(int64(v.CreatedTime)), + DeletedBy: types.StringValue(v.DeletedBy), + DeletedTime: types.Int64Value(int64(v.DeletedTime)), + ID: types.Int64Value(int64(v.ID)), + Name: types.StringValue(v.Name), + RAM: types.Int64Value(int64(v.RAM)), + Registered: types.BoolValue(v.Registered), + RGID: types.Int64Value(int64(v.RGID)), + RgName: types.StringValue(v.RgName), + Status: types.StringValue(v.Status), + TechStatus: types.StringValue(v.TechStatus), + TotalDisksSize: types.Int64Value(int64(v.TotalDisksSize)), + UpdatedBy: types.StringValue(v.UpdatedBy), + UpdatedTime: types.Int64Value(int64(v.UpdatedTime)), + UserManaged: types.BoolValue(v.UserManaged), + VINSConnected: types.Int64Value(int64(v.VINSConnected)), + } + data = append(data, item) + } + + state.Items = data + + tflog.Info(ctx, "flattens.AccountComputesListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountComputesListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_disks_list.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_disks_list.go new file mode 100644 index 0000000..c7732c8 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_disks_list.go @@ -0,0 +1,66 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountDisksListDataSource flattens data source for account disks list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountDisksListDataSource(ctx context.Context, state *models.DataSourceAccountDisksListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountDisksListDataSource") + + diags := diag.Diagnostics{} + + disksList, err := utilities.AccountDisksListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("flattens.AccountDisksListDataSource: Cannot get info", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountDisksListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountDisksListModel{ + AccountID: state.AccountID, + DiskID: state.DiskID, + Name: state.Name, + DiskMaxSize: state.DiskMaxSize, + Type: state.Type, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(disksList.EntryCount)), + } + + items := make([]models.ItemDiskModel, 0, len(disksList.Data)) + for _, item := range disksList.Data { + i := models.ItemDiskModel{ + DiskID: types.Int64Value(int64(item.ID)), + DiskName: types.StringValue(item.Name), + Pool: types.StringValue(item.Pool), + SEPID: types.Int64Value(int64(item.SepID)), + Shareable: types.BoolValue(item.Shareable), + SizeMax: types.Int64Value(int64(item.SizeMax)), + Type: types.StringValue(item.Type), + } + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountDisksListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountDisksListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_flipgroups_list.go 
b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_flipgroups_list.go new file mode 100644 index 0000000..da159e1 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_flipgroups_list.go @@ -0,0 +1,81 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountFlipgroupsListDataSource flattens data source for account flipgroups list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountFlipgroupsListDataSource(ctx context.Context, state *models.DataSourceAccountFlipgroupsListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountFlipgroupsListDataSource") + + diags := diag.Diagnostics{} + + flipgroups, err := utilities.AccountFlipgroupsListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account flipgroups list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountFlipgroupsListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountFlipgroupsListModel{ + AccountID: state.AccountID, + Name: state.Name, + VINSID: state.VINSID, + VINSName: state.VINSName, + ExtNetID: state.ExtNetID, + ByIP: state.ByIP, + FLIPGroupID: state.FLIPGroupID, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(flipgroups.EntryCount)), + } + + items := 
make([]models.ItemAccountFlipgroupModel, 0, len(flipgroups.Data)) + for _, item := range flipgroups.Data { + i := models.ItemAccountFlipgroupModel{ + AccountID: types.Int64Value(int64(item.AccountID)), + ClientType: types.StringValue(item.ClientType), + ConnType: types.StringValue(item.ConnType), + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DefaultGW: types.StringValue(item.DefaultGW), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + Description: types.StringValue(item.Description), + GID: types.Int64Value(int64(item.GID)), + GUID: types.Int64Value(int64(item.GUID)), + ID: types.Int64Value(int64(item.ID)), + IP: types.StringValue(item.IP), + Milestones: types.Int64Value(int64(item.Milestones)), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetType: types.StringValue(item.NetType), + NetMask: types.Int64Value(int64(item.Netmask)), + Status: types.StringValue(item.Status), + UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + } + + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "End flattens.AccountFlipgroupsListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_get_resource_consumption.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_get_resource_consumption.go new file mode 100644 index 0000000..353c669 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_get_resource_consumption.go @@ -0,0 +1,91 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountGetResourceConsumptionDataSource flattens data source for account. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountGetResourceConsumptionDataSource(ctx context.Context, state *models.AccountGetResourceConsumptionModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountGetResourceConsumptionDataSource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + record, err := utilities.AccountGetResourceConsumptionDataSourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionDataSource: before flatten", map[string]any{"account_id": accountId, "record": record}) + + *state = models.AccountGetResourceConsumptionModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + Consumed: &models.ResourceConsumptionModel{ + CPU: types.Int64Value(record.Consumed.CPU), + DiskSize: types.Float64Value(record.Consumed.DiskSize), + DiskSizeMax: types.Float64Value(record.Consumed.DiskSizeMax), + ExtIPs: types.Int64Value(record.Consumed.ExtIPs), + ExtTraffic: types.Int64Value(record.Consumed.ExtTraffic), + GPU: types.Int64Value(record.Consumed.GPU), + RAM: types.Int64Value(record.Consumed.RAM), + SEPs: flattenResourceConsumptionSep(ctx, record.Consumed.SEPs), + }, + Limits: &models.ResourceConsumptionLimitsModel{ + CUC: types.Float64Value(record.ResourceLimits.CuC), + CUD: types.Float64Value(record.ResourceLimits.CuD), + CUI: types.Float64Value(record.ResourceLimits.CuI), + 
CUM: types.Float64Value(record.ResourceLimits.CuM), + CUDM: types.Float64Value(record.ResourceLimits.CuDM), + CUNP: types.Float64Value(record.ResourceLimits.CuNP), + GPUUnits: types.Float64Value(record.ResourceLimits.GPUUnits), + }, + Reserved: &models.ResourceConsumptionModel{ + CPU: types.Int64Value(record.Reserved.CPU), + DiskSize: types.Float64Value(record.Reserved.DiskSize), + DiskSizeMax: types.Float64Value(record.Reserved.DiskSizeMax), + ExtIPs: types.Int64Value(record.Reserved.ExtIPs), + ExtTraffic: types.Int64Value(record.Reserved.ExtTraffic), + GPU: types.Int64Value(record.Reserved.GPU), + RAM: types.Int64Value(record.Reserved.RAM), + SEPs: flattenResourceConsumptionSep(ctx, record.Reserved.SEPs), + }, + } + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionDataSource: after flatten", map[string]any{"account_id": state.AccountID.ValueInt64()}) + + tflog.Info(ctx, "End flattens.AccountGetResourceConsumptionDataSource", map[string]any{"account_id": state.AccountID.ValueInt64()}) + return nil +} + +func flattenResourceConsumptionSep(ctx context.Context, seps map[string]map[string]account.DiskUsage) []models.ResourceConsumptionSepModel { + tflog.Info(ctx, "Start flattenResourceConsumption") + res := make([]models.ResourceConsumptionSepModel, 0, len(seps)) + for sepId := range seps { + for poolName, diskData := range seps[sepId] { + s := models.ResourceConsumptionSepModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(poolName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + res = append(res, s) + } + } + tflog.Info(ctx, "End flattenResourceConsumption") + return res +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_list_deleted.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_list_deleted.go new file mode 100644 index 0000000..eb41e69 --- /dev/null +++ 
b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_list_deleted.go @@ -0,0 +1,104 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountListDeletedDataSource flattens data source for account list deleted. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountListDeletedDataSource(ctx context.Context, state *models.DataSourceAccountListDeletedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountListDeletedDataSource") + + diags := diag.Diagnostics{} + + accListDel, err := utilities.AccountListDeletedCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account list deleted", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountListDeletedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountListDeletedModel{ + ByID: state.ByID, + Name: state.Name, + ACL: state.ACL, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(accListDel.EntryCount)), + } + + items := make([]models.ItemAccountListDeletedModel, 0, len(accListDel.Data)) + for _, item := range accListDel.Data { + i := models.ItemAccountListDeletedModel{ + DCLocation: types.StringValue(item.DCLocation), + Ckey: types.StringValue(item.CKey), + Meta: 
flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.Meta), + ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.ComputeFeatures), + Company: types.StringValue(item.Company), + Companyurl: types.StringValue(item.CompanyURL), + CpuAllocationParameter: types.StringValue(item.CPUAllocationParameter), + CpuAllocationRatio: types.Float64Value(float64(item.CPUAllocationRatio)), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + CreatedBy: types.StringValue(item.CreatedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + DeletedBy: types.StringValue(item.DeletedBy), + AccountID: types.Int64Value(int64(item.ID)), + GUID: types.Int64Value(int64(item.GUID)), + AccountName: types.StringValue(item.Name), + Status: types.StringValue(item.Status), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + ResourceLimits: &models.AccountLimitsModel{ + CUC: types.Float64Value(float64(item.ResourceLimits.CuC)), + CUD: types.Float64Value(float64(item.ResourceLimits.CuD)), + CUI: types.Float64Value(float64(item.ResourceLimits.CuI)), + CUM: types.Float64Value(float64(item.ResourceLimits.CuM)), + CUDM: types.Float64Value(float64(item.ResourceLimits.CuDM)), + CUNP: types.Float64Value(float64(item.ResourceLimits.CuNP)), + GPUUnits: types.Float64Value(float64(item.ResourceLimits.GPUUnits)), + }, + ResourceTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.ResTypes), + SendAccessEmails: types.BoolValue(item.SendAccessEmails), + UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, item.UniqPools), + Version: types.Int64Value(int64(item.Version)), + Vins: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.VINS), + } + + aclList := make([]models.RecordACLModel, 0, len(item.ACL)) + for _, acl := range item.ACL { + a := models.RecordACLModel{ + Explicit: types.BoolValue(acl.Explicit), + GUID: types.StringValue(acl.GUID), + Right: types.StringValue(acl.Right), + Status: types.StringValue(acl.Status), 
+ Type: types.StringValue(acl.Type), + UserGroupID: types.StringValue(acl.UserGroupID), + } + aclList = append(aclList, a) + } + + i.ACL = aclList + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountListDeletedDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountListDeletedDataSource") + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_resource_consumption_list.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_resource_consumption_list.go new file mode 100644 index 0000000..8e422aa --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_resource_consumption_list.go @@ -0,0 +1,97 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountGetResourceConsumptionList flattens data source for rg get resource consumption. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountGetResourceConsumptionList(ctx context.Context, state *models.AccountGetResourceConsumptionListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountGetResourceConsumptionList") + + diags := diag.Diagnostics{} + + resConsList, err := utilities.AccountGetResourceConsumptionListDataSourceCheckPresence(ctx, c) + if err != nil { + diags.AddError("Cannot get info about resource consumptions", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionList: before flatten") + + *state = models.AccountGetResourceConsumptionListModel{ + EntryCount: state.EntryCount, + Timeouts: state.Timeouts, + } + + items := make([]models.AccountGetResourceConsumptionListItemModel, 0, len(resConsList.Data)) + for _, resConsItem := range resConsList.Data { + item := models.AccountGetResourceConsumptionListItemModel{ + AccountId: types.Int64Value(int64(resConsItem.AccountID)), + Consumed: &models.ResourceConsumptionListModel{ + CPU: types.Int64Value(resConsItem.Consumed.CPU), + DiskSize: types.Float64Value(resConsItem.Consumed.DiskSize), + DiskSizeMax: types.Float64Value(resConsItem.Consumed.DiskSizeMax), + ExtIPs: types.Int64Value(resConsItem.Consumed.ExtIPs), + ExtTraffic: types.Int64Value(resConsItem.Consumed.ExtTraffic), + GPU: types.Int64Value(resConsItem.Consumed.GPU), + RAM: types.Int64Value(resConsItem.Consumed.RAM), + }, + Reserved: &models.ResourceConsumptionListModel{ + CPU: types.Int64Value(resConsItem.Reserved.CPU), + DiskSize: types.Float64Value(resConsItem.Reserved.DiskSize), + DiskSizeMax: types.Float64Value(resConsItem.Reserved.DiskSizeMax), + ExtIPs: types.Int64Value(resConsItem.Reserved.ExtIPs), + ExtTraffic: types.Int64Value(resConsItem.Reserved.ExtTraffic), + GPU: types.Int64Value(resConsItem.Reserved.GPU), + RAM: types.Int64Value(resConsItem.Reserved.RAM), + }, + } + + sepsConsumed := make([]models.ResourceConsumptionSepListModel, 0, len(resConsItem.Consumed.SEPs)) + for sepId, data := 
range resConsItem.Consumed.SEPs { + for dataName, diskData := range data { + sepItem := models.ResourceConsumptionSepListModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(dataName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + sepsConsumed = append(sepsConsumed, sepItem) + } + } + item.Consumed.SEPs = sepsConsumed + + sepsReserved := make([]models.ResourceConsumptionSepListModel, 0, len(resConsItem.Reserved.SEPs)) + for sepId, data := range resConsItem.Reserved.SEPs { + for dataName, diskData := range data { + sepItem := models.ResourceConsumptionSepListModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(dataName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + sepsReserved = append(sepsReserved, sepItem) + } + } + item.Reserved.SEPs = sepsReserved + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(resConsList.EntryCount)) + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionList: after flatten") + + tflog.Info(ctx, "End flattens.AccountGetResourceConsumptionList") + return nil +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_rg_list.go b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_rg_list.go new file mode 100644 index 0000000..7225d87 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_rg_list.go @@ -0,0 +1,124 @@ +package flattens + +import ( + "context" + + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountRGListDataSource flattens data source for account rg list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountRGListDataSource(ctx context.Context, state *models.DataSourceAccountRGListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountRGListDataSource") + + diags := diag.Diagnostics{} + + rgList, err := utilities.AccountRGListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account rg list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountRGListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountRGListModel{ + AccountID: state.AccountID, + RGID: state.RGID, + VinsID: state.VinsID, + VMID: state.VMID, + Name: state.Name, + Status: state.Status, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(rgList.EntryCount)), + } + + items := make([]models.ItemAccountRGModel, 0, len(rgList.Data)) + for _, item := range rgList.Data { + i := models.ItemAccountRGModel{ + Computes: &models.RGComputesModel{ + Started: types.Int64Value(int64(item.Computes.Started)), + Stopped: types.Int64Value(int64(item.Computes.Stopped)), + }, + Resources: &models.RGResourcesModel{ + Consumed: &models.ResourceModel{ + CPU: types.Int64Value(item.Resources.Consumed.CPU), + DiskSize: types.Float64Value(item.Resources.Consumed.DiskSize), + DiskSizeMax: types.Float64Value(item.Resources.Consumed.DiskSizeMax), + ExtIPs: types.Int64Value(item.Resources.Consumed.ExtIPs), + ExtTraffic: types.Int64Value(item.Resources.Consumed.ExtTraffic), + GPU: 
types.Int64Value(item.Resources.Consumed.GPU), + RAM: types.Int64Value(item.Resources.Consumed.RAM), + SEPs: flattenSep(item.Resources.Consumed.SEPs), + }, + Limits: &models.LimitsRGModel{ + CPU: types.Int64Value(item.Resources.Limits.CPU), + DiskSize: types.Int64Value(item.Resources.Limits.DiskSize), + DiskSizeMax: types.Int64Value(item.Resources.Limits.DiskSizeMax), + ExtIPs: types.Int64Value(item.Resources.Limits.ExtIPs), + ExtTraffic: types.Int64Value(item.Resources.Limits.ExtTraffic), + GPU: types.Int64Value(item.Resources.Limits.GPU), + RAM: types.Int64Value(item.Resources.Limits.RAM), + SEPs: types.Int64Value(int64(item.Resources.Limits.SEPs)), + }, + Reserved: &models.ResourceModel{ + CPU: types.Int64Value(item.Resources.Reserved.CPU), + DiskSize: types.Float64Value(item.Resources.Reserved.DiskSize), + DiskSizeMax: types.Float64Value(item.Resources.Reserved.DiskSizeMax), + ExtIPs: types.Int64Value(item.Resources.Reserved.ExtIPs), + ExtTraffic: types.Int64Value(item.Resources.Reserved.ExtTraffic), + GPU: types.Int64Value(item.Resources.Reserved.GPU), + RAM: types.Int64Value(item.Resources.Reserved.RAM), + SEPs: flattenSep(item.Resources.Reserved.SEPs), + }, + }, + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + RGID: types.Int64Value(int64(item.ID)), + Milestones: types.Int64Value(int64(item.Milestones)), + RGName: types.StringValue(item.Name), + Status: types.StringValue(item.Status), + UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + VINSes: types.Int64Value(int64(item.VINSes)), + } + + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "End flattens.AccountRGListDataSource") + return nil +} + +func flattenSep(seps map[string]map[string]account.DiskUsage) []models.SepModel { + res := make([]models.SepModel, 0, 
len(seps)) + for sepId := range seps { + for poolName, diskData := range seps[sepId] { + s := models.SepModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(poolName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + res = append(res, s) + } + } + return res +} diff --git a/internal/service/cloudbroker/account/flattens/flatten_resource_cb_account.go b/internal/service/cloudbroker/account/flattens/flatten_resource_cb_account.go new file mode 100644 index 0000000..fb9a790 --- /dev/null +++ b/internal/service/cloudbroker/account/flattens/flatten_resource_cb_account.go @@ -0,0 +1,171 @@ +package flattens + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// AccountResource flattens resource for account. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountResource(ctx context.Context, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountResource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + if accountId == 0 { + id, err := strconv.Atoi(state.Id.ValueString()) + if err != nil { + diags.AddError( + "flattens.AccountResource: cannot parse resource ID from state", + err.Error()) + return diags + } + accountId = uint64(id) + } + + recordAccount, err := utilities.AccountResourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("flattens.AccountResource: Cannot get info about resource with ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountResource: before flatten", map[string]any{"account_id": accountId, "recordAccount": recordAccount}) + + *state = models.ResourceAccountModel{ + AccountName: types.StringValue(recordAccount.Name), + Username: state.Username, + EmailAddress: state.EmailAddress, + SendAccessEmails: state.SendAccessEmails, + Users: state.Users, + Restore: state.Restore, + Permanently: state.Permanently, + Enable: state.Enable, + ResourceLimits: flattenResourceLimitsInAccountResource(ctx, recordAccount.ResourceLimits, state), + AvailableTemplates: state.AvailableTemplates, + Timeouts: state.Timeouts, + Id: types.StringValue(strconv.Itoa(int(accountId))), + AccountID: types.Int64Value(int64(recordAccount.ID)), + DCLocation: types.StringValue(recordAccount.DCLocation), + CKey: types.StringValue(recordAccount.CKey), + ACL: resourceFlattenACLInAccount(ctx, recordAccount.ACL), + Company: types.StringValue(recordAccount.Company), + CompanyURL: types.StringValue(recordAccount.CompanyURL), + CPUAllocationParameter: types.StringValue(recordAccount.CPUAllocationParameter), + CPUAllocationRatio: types.Float64Value(recordAccount.CPUAllocationRatio), + CreatedBy: types.StringValue(recordAccount.CreatedBy), + CreatedTime: 
types.Int64Value(int64(recordAccount.CreatedTime)), + DeactivationTime: types.Float64Value(recordAccount.DeactivationTime), + DeletedBy: types.StringValue(recordAccount.DeletedBy), + DeletedTime: types.Int64Value(int64(recordAccount.DeletedTime)), + DisplayName: types.StringValue(recordAccount.DisplayName), + GUID: types.Int64Value(int64(recordAccount.GUID)), + Status: types.StringValue(recordAccount.Status), + UpdatedTime: types.Int64Value(int64(recordAccount.UpdatedTime)), + Version: types.Int64Value(int64(recordAccount.Version)), + } + + state.VINS, diags = types.ListValueFrom(ctx, types.Int64Type, recordAccount.VINS) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.VINS to state.VINS", diags)) + } + + state.UniqPools, diags = types.ListValueFrom(ctx, types.StringType, recordAccount.UniqPools) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.UniqPools to state.UniqPools", diags)) + } + + state.ResourceTypes, diags = types.ListValueFrom(ctx, types.Int64Type, recordAccount.ResTypes) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.ResTypes to state.ResourceTypes", diags)) + } + + state.ComputeFeatures, diags = types.SetValueFrom(ctx, types.StringType, recordAccount.ComputeFeatures) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.ComputeFeatures to state.ComputeFeatures", diags)) + } + + tflog.Info(ctx, "flattens.AccountResource: after flatten", map[string]any{"account_id": state.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.AccountResource", map[string]any{"account_id": state.Id.ValueString()}) + return nil +} + +func flattenResourceLimitsInAccountResource(ctx context.Context, limits account.ResourceLimits, state *models.ResourceAccountModel) types.Object { + tflog.Info(ctx, "Start 
flattenResourceLimitsInAccountResource") + + diags := diag.Diagnostics{} + + var resourceLimits models.ResourceLimitsInAccountResourceModel + diags.Append(state.ResourceLimits.As(ctx, &resourceLimits, basetypes.ObjectAsOptions{})...) + if diags.HasError() { + tflog.Error(ctx, "flattenResourceLimitsInAccountResource: cannot populate resourceLimits with plan.ResourceLimits object element") + } + + if resourceLimits.CUC.ValueFloat64() == 0 { + resourceLimits.CUC = types.Float64Value(limits.CuC) + } + if resourceLimits.CUD.ValueFloat64() == 0 { + resourceLimits.CUD = types.Float64Value(limits.CuD) + } + if resourceLimits.CUI.ValueFloat64() == 0 { + resourceLimits.CUI = types.Float64Value(limits.CuI) + } + if resourceLimits.CUM.ValueFloat64() == 0 { + resourceLimits.CUM = types.Float64Value(limits.CuM) + } + if resourceLimits.CUNP.ValueFloat64() == 0 { + resourceLimits.CUNP = types.Float64Value(limits.CuNP) + } + if resourceLimits.GPUUnits.ValueFloat64() == 0 { + resourceLimits.GPUUnits = types.Float64Value(limits.GPUUnits) + } + + res, err := types.ObjectValueFrom(ctx, models.ItemResourceLimitsInAccountResource, resourceLimits) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenResourceLimitsInAccountResource struct to obj", err)) + } + + tflog.Info(ctx, "End flattenResourceLimitsInAccountResource") + return res +} + +func resourceFlattenACLInAccount(ctx context.Context, aclList []account.ACL) types.List { + tflog.Info(ctx, "Start flattenACLInAccount") + tempSlice := make([]types.Object, 0, len(aclList)) + for _, item := range aclList { + temp := models.ACLInAccountModel{ + Explicit: types.BoolValue(item.Explicit), + GUID: types.StringValue(item.GUID), + Right: types.StringValue(item.Right), + Status: types.StringValue(item.Status), + Type: types.StringValue(item.Type), + UserGroupID: types.StringValue(item.UserGroupID), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemACLInAccount, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error 
flattenACLInAccount struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACLInAccount}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenACLInAccount", diags)) + } + + tflog.Info(ctx, "End flattenACLInAccount") + return res +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_audits_list.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_audits_list.go new file mode 100644 index 0000000..e26b0b3 --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_audits_list.go @@ -0,0 +1,24 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAccountAuditsListModel struct { + // request fields + AccountID types.Int64 `tfsdk:"account_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemAuditModel `tfsdk:"items"` +} + +type ItemAuditModel struct { + Call types.String `tfsdk:"call"` + ResponseTime types.Float64 `tfsdk:"responsetime"` + StatusCode types.Int64 `tfsdk:"statuscode"` + Timestamp types.Float64 `tfsdk:"timestamp"` + User types.String `tfsdk:"user"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_available_templates_list.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_available_templates_list.go new file mode 100644 index 0000000..be82c6f --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_available_templates_list.go @@ -0,0 +1,15 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type 
DataSourceAccountAvailableTemplatesListModel struct { + // request fields + AccountID types.Int64 `tfsdk:"account_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Items types.List `tfsdk:"items"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_computes_list.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_computes_list.go new file mode 100644 index 0000000..7bbee25 --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_computes_list.go @@ -0,0 +1,50 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ListComputesModel struct { + // request fields + AccountID types.Int64 `tfsdk:"account_id"` + ComputeID types.Int64 `tfsdk:"compute_id"` + Name types.String `tfsdk:"name"` + RGName types.String `tfsdk:"rg_name"` + RGID types.Int64 `tfsdk:"rg_id"` + TechStatus types.String `tfsdk:"tech_status"` + IpAddress types.String `tfsdk:"ip_address"` + ExtNetName types.String `tfsdk:"extnet_name"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Items []ItemComputeModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemComputeModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + CPUs types.Int64 `tfsdk:"cpus"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + ID types.Int64 `tfsdk:"compute_id"` + Name types.String `tfsdk:"compute_name"` + RAM types.Int64 `tfsdk:"ram"` + Registered types.Bool `tfsdk:"registered"` + RGID types.Int64 
`tfsdk:"rg_id"` + RgName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + TotalDisksSize types.Int64 `tfsdk:"total_disks_size"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VINSConnected types.Int64 `tfsdk:"vins_connected"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_disks_list.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_disks_list.go new file mode 100644 index 0000000..d87f54a --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_disks_list.go @@ -0,0 +1,36 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAccountDisksListModel struct { + // required fields + AccountID types.Int64 `tfsdk:"account_id"` + + // optional fields + DiskID types.Int64 `tfsdk:"disk_id"` + Name types.String `tfsdk:"name"` + DiskMaxSize types.Int64 `tfsdk:"disk_max_size"` + Type types.String `tfsdk:"type"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + SortBy types.String `tfsdk:"sort_by"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemDiskModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemDiskModel struct { + DiskID types.Int64 `tfsdk:"disk_id"` + DiskName types.String `tfsdk:"disk_name"` + Pool types.String `tfsdk:"pool"` + SEPID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + Type types.String `tfsdk:"type"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_flipgroups_list.go 
b/internal/service/cloudbroker/account/models/model_data_source_cb_account_flipgroups_list.go new file mode 100644 index 0000000..fc7020c --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_flipgroups_list.go @@ -0,0 +1,50 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAccountFlipgroupsListModel struct { + // optional and required fields + AccountID types.Int64 `tfsdk:"account_id"` + Name types.String `tfsdk:"name"` + VINSID types.Int64 `tfsdk:"vins_id"` + VINSName types.String `tfsdk:"vins_name"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + ByIP types.String `tfsdk:"by_ip"` + FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemAccountFlipgroupModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemAccountFlipgroupModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + ClientType types.String `tfsdk:"client_type"` + ConnType types.String `tfsdk:"conn_type"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefaultGW types.String `tfsdk:"default_gw"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"fg_id"` + IP types.String `tfsdk:"ip"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"fg_name"` + NetID types.Int64 `tfsdk:"net_id"` + NetType types.String `tfsdk:"net_type"` + NetMask types.Int64 `tfsdk:"netmask"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String 
`tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_get_resource_consumption.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_get_resource_consumption.go new file mode 100644 index 0000000..8984f8f --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_get_resource_consumption.go @@ -0,0 +1,45 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AccountGetResourceConsumptionModel struct { + // request fields + AccountID types.Int64 `tfsdk:"account_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Consumed *ResourceConsumptionModel `tfsdk:"consumed"` + Reserved *ResourceConsumptionModel `tfsdk:"reserved"` + Limits *ResourceConsumptionLimitsModel `tfsdk:"resource_limits"` +} + +type ResourceConsumptionModel struct { + CPU types.Int64 `tfsdk:"cpu"` + DiskSize types.Float64 `tfsdk:"disksize"` + DiskSizeMax types.Float64 `tfsdk:"disksizemax"` + ExtIPs types.Int64 `tfsdk:"extips"` + ExtTraffic types.Int64 `tfsdk:"exttraffic"` + GPU types.Int64 `tfsdk:"gpu"` + RAM types.Int64 `tfsdk:"ram"` + SEPs []ResourceConsumptionSepModel `tfsdk:"seps"` +} + +type ResourceConsumptionSepModel struct { + SepID types.String `tfsdk:"sep_id"` + PoolName types.String `tfsdk:"data_name"` + DiskSize types.Float64 `tfsdk:"disk_size"` + DiskSizeMax types.Float64 `tfsdk:"disk_size_max"` +} + +type ResourceConsumptionLimitsModel struct { + CUC types.Float64 `tfsdk:"cu_c"` + CUD types.Float64 `tfsdk:"cu_d"` + CUI types.Float64 `tfsdk:"cu_i"` + CUM types.Float64 `tfsdk:"cu_m"` + CUDM types.Float64 `tfsdk:"cu_dm"` + CUNP types.Float64 `tfsdk:"cu_np"` + GPUUnits types.Float64 `tfsdk:"gpu_units"` +} diff --git 
a/internal/service/cloudbroker/account/models/model_data_source_cb_account_list_deleted.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_list_deleted.go new file mode 100644 index 0000000..740ee9e --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_list_deleted.go @@ -0,0 +1,68 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAccountListDeletedModel struct { + // optional fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + ACL types.String `tfsdk:"acl"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + SortBy types.String `tfsdk:"sort_by"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemAccountListDeletedModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemAccountListDeletedModel struct { + DCLocation types.String `tfsdk:"dc_location"` + Ckey types.String `tfsdk:"ckey"` + Meta types.List `tfsdk:"meta"` + ACL []RecordACLModel `tfsdk:"acl"` + ComputeFeatures types.List `tfsdk:"compute_features"` + Company types.String `tfsdk:"company"` + Companyurl types.String `tfsdk:"companyurl"` + CpuAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"` + CpuAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"` + CreatedTime types.Int64 `tfsdk:"created_time"` + CreatedBy types.String `tfsdk:"created_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + AccountID types.Int64 `tfsdk:"account_id"` + GUID types.Int64 `tfsdk:"guid"` + AccountName types.String `tfsdk:"account_name"` + Status types.String `tfsdk:"status"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + ResourceLimits *AccountLimitsModel `tfsdk:"resource_limits"` + ResourceTypes types.List 
`tfsdk:"resource_types"` + SendAccessEmails types.Bool `tfsdk:"send_access_emails"` + UniqPools types.List `tfsdk:"uniq_pools"` + Version types.Int64 `tfsdk:"version"` + Vins types.List `tfsdk:"vins"` +} + +type RecordACLModel struct { + Explicit types.Bool `tfsdk:"explicit"` + GUID types.String `tfsdk:"guid"` + Right types.String `tfsdk:"right"` + Status types.String `tfsdk:"status"` + Type types.String `tfsdk:"type"` + UserGroupID types.String `tfsdk:"user_group_id"` +} + +type AccountLimitsModel struct { + CUC types.Float64 `tfsdk:"cu_c"` + CUD types.Float64 `tfsdk:"cu_d"` + CUI types.Float64 `tfsdk:"cu_i"` + CUM types.Float64 `tfsdk:"cu_m"` + CUDM types.Float64 `tfsdk:"cu_dm"` + CUNP types.Float64 `tfsdk:"cu_np"` + GPUUnits types.Float64 `tfsdk:"gpu_units"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_resource_consumption_list.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_resource_consumption_list.go new file mode 100644 index 0000000..42a2f53 --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_resource_consumption_list.go @@ -0,0 +1,40 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type AccountGetResourceConsumptionListModel struct { + // request fields + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Items []AccountGetResourceConsumptionListItemModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type AccountGetResourceConsumptionListItemModel struct { + // response fields + AccountId types.Int64 `tfsdk:"account_id"` + Consumed *ResourceConsumptionListModel `tfsdk:"consumed"` + Reserved *ResourceConsumptionListModel `tfsdk:"reserved"` +} + +type ResourceConsumptionListModel struct { + CPU types.Int64 `tfsdk:"cpu"` + DiskSize types.Float64 `tfsdk:"disksize"` + DiskSizeMax 
types.Float64 `tfsdk:"disksizemax"` + ExtIPs types.Int64 `tfsdk:"extips"` + ExtTraffic types.Int64 `tfsdk:"exttraffic"` + GPU types.Int64 `tfsdk:"gpu"` + RAM types.Int64 `tfsdk:"ram"` + SEPs []ResourceConsumptionSepListModel `tfsdk:"seps"` +} + +type ResourceConsumptionSepListModel struct { + SepID types.String `tfsdk:"sep_id"` + PoolName types.String `tfsdk:"data_name"` + DiskSize types.Float64 `tfsdk:"disk_size"` + DiskSizeMax types.Float64 `tfsdk:"disk_size_max"` +} diff --git a/internal/service/cloudbroker/account/models/model_data_source_cb_account_rg_list.go b/internal/service/cloudbroker/account/models/model_data_source_cb_account_rg_list.go new file mode 100644 index 0000000..27cf31a --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_data_source_cb_account_rg_list.go @@ -0,0 +1,81 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAccountRGListModel struct { + // optional and required fields + AccountID types.Int64 `tfsdk:"account_id"` + RGID types.Int64 `tfsdk:"rg_id"` + VinsID types.Int64 `tfsdk:"vins_id"` + VMID types.Int64 `tfsdk:"vm_id"` + Name types.String `tfsdk:"name"` + Status types.String `tfsdk:"status"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + SortBy types.String `tfsdk:"sort_by"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemAccountRGModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemAccountRGModel struct { + Computes *RGComputesModel `tfsdk:"computes"` + Resources *RGResourcesModel `tfsdk:"resources"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + RGID types.Int64 `tfsdk:"rg_id"` + Milestones types.Int64 
`tfsdk:"milestones"` + RGName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VINSes types.Int64 `tfsdk:"vinses"` +} + +type RGComputesModel struct { + Started types.Int64 `tfsdk:"started"` + Stopped types.Int64 `tfsdk:"stopped"` +} + +type RGResourcesModel struct { + Consumed *ResourceModel `tfsdk:"consumed"` + Limits *LimitsRGModel `tfsdk:"limits"` + Reserved *ResourceModel `tfsdk:"reserved"` +} + +type LimitsRGModel struct { + CPU types.Int64 `tfsdk:"cpu"` + DiskSize types.Int64 `tfsdk:"disksize"` + DiskSizeMax types.Int64 `tfsdk:"disksizemax"` + ExtIPs types.Int64 `tfsdk:"extips"` + ExtTraffic types.Int64 `tfsdk:"exttraffic"` + GPU types.Int64 `tfsdk:"gpu"` + RAM types.Int64 `tfsdk:"ram"` + SEPs types.Int64 `tfsdk:"seps"` +} + +type ResourceModel struct { + CPU types.Int64 `tfsdk:"cpu"` + DiskSize types.Float64 `tfsdk:"disksize"` + DiskSizeMax types.Float64 `tfsdk:"disksizemax"` + ExtIPs types.Int64 `tfsdk:"extips"` + ExtTraffic types.Int64 `tfsdk:"exttraffic"` + GPU types.Int64 `tfsdk:"gpu"` + RAM types.Int64 `tfsdk:"ram"` + SEPs []SepModel `tfsdk:"seps"` +} + +type SepModel struct { + SepID types.String `tfsdk:"sep_id"` + PoolName types.String `tfsdk:"data_name"` + DiskSize types.Float64 `tfsdk:"disk_size"` + DiskSizeMax types.Float64 `tfsdk:"disk_size_max"` +} diff --git a/internal/service/cloudbroker/account/models/model_resource_cb_account.go b/internal/service/cloudbroker/account/models/model_resource_cb_account.go new file mode 100644 index 0000000..f2e0812 --- /dev/null +++ b/internal/service/cloudbroker/account/models/model_resource_cb_account.go @@ -0,0 +1,91 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceAccountModel struct { + // request 
fields - required + AccountName types.String `tfsdk:"account_name"` + Username types.String `tfsdk:"username"` + + // request fields - optional + EmailAddress types.String `tfsdk:"emailaddress"` + SendAccessEmails types.Bool `tfsdk:"send_access_emails"` + Users types.List `tfsdk:"users"` + UniqPools types.List `tfsdk:"uniq_pools"` + CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"` + CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"` + AvailableTemplates types.Set `tfsdk:"available_templates"` + Restore types.Bool `tfsdk:"restore"` + Permanently types.Bool `tfsdk:"permanently"` + Enable types.Bool `tfsdk:"enable"` + ResourceLimits types.Object `tfsdk:"resource_limits"` + ComputeFeatures types.Set `tfsdk:"compute_features"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + DCLocation types.String `tfsdk:"dc_location"` + AccountID types.Int64 `tfsdk:"account_id"` + CKey types.String `tfsdk:"ckey"` + ACL types.List `tfsdk:"acl"` + Company types.String `tfsdk:"company"` + CompanyURL types.String `tfsdk:"companyurl"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeactivationTime types.Float64 `tfsdk:"deactivation_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DisplayName types.String `tfsdk:"displayname"` + GUID types.Int64 `tfsdk:"guid"` + ResourceTypes types.List `tfsdk:"resource_types"` + Status types.String `tfsdk:"status"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + Version types.Int64 `tfsdk:"version"` + VINS types.List `tfsdk:"vins"` +} + +type UsersModel struct { + UserID types.String `tfsdk:"user_id"` + AccessType types.String `tfsdk:"access_type"` + RecursiveDelete types.Bool `tfsdk:"recursive_delete"` +} + +type ResourceLimitsInAccountResourceModel struct { + CUC types.Float64 `tfsdk:"cu_c"` + CUD types.Float64 `tfsdk:"cu_d"` + CUI types.Float64 `tfsdk:"cu_i"` + 
CUM types.Float64 `tfsdk:"cu_m"` + CUNP types.Float64 `tfsdk:"cu_np"` + GPUUnits types.Float64 `tfsdk:"gpu_units"` +} + +var ItemResourceLimitsInAccountResource = map[string]attr.Type{ + "cu_c": types.Float64Type, + "cu_d": types.Float64Type, + "cu_i": types.Float64Type, + "cu_m": types.Float64Type, + "cu_np": types.Float64Type, + "gpu_units": types.Float64Type, +} + +type ACLInAccountModel struct { + Explicit types.Bool `tfsdk:"explicit"` + GUID types.String `tfsdk:"guid"` + Right types.String `tfsdk:"right"` + Status types.String `tfsdk:"status"` + Type types.String `tfsdk:"type"` + UserGroupID types.String `tfsdk:"user_group_id"` +} + +var ItemACLInAccount = map[string]attr.Type{ + "explicit": types.BoolType, + "guid": types.StringType, + "right": types.StringType, + "status": types.StringType, + "type": types.StringType, + "user_group_id": types.StringType, +} diff --git a/internal/service/cloudbroker/account/resource_cb_account.go b/internal/service/cloudbroker/account/resource_cb_account.go new file mode 100644 index 0000000..0b66a6d --- /dev/null +++ b/internal/service/cloudbroker/account/resource_cb_account.go @@ -0,0 +1,321 @@ +package cbAccount + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceAccount{} + _ resource.ResourceWithImportState = &resourceAccount{} +) + +// NewResourceAccount is a helper function to simplify the provider implementation. +func NewResourceAccount() resource.Resource { + return &resourceAccount{} +} + +// resourceAccount is the resource implementation. +type resourceAccount struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceAccount) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + tflog.Info(ctx, "Create resourceAccount: start creating") + var plan models.ResourceAccountModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceAccount: Error receiving the plan") + return + } + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceAccount: Error set timeout") + return + } + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + tflog.Info(ctx, "Create resourceAccount: set timeouts successfully", map[string]any{ + "createTimeout": createTimeout}) + + diags, id := utilities.UtilityAccountCreate(ctx, &plan, r.client) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceAccount: error with UtilityAccountCreate") + return + } + + plan.Id = types.StringValue(strconv.Itoa(int(*id))) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.AccountResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + tflog.Info(ctx, "resourceAccount: Account created", map[string]any{"account_id": id}) + + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceAccount) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceAccountModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error get state") + return + } + tflog.Info(ctx, "Read resourceAccount: got state successfully", map[string]any{"account_id": state.Id.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceAccount: set timeouts successfully", map[string]any{ + "account_id": state.Id.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.AccountReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error reading status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.AccountResource(ctx, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error set state") + return + } + tflog.Info(ctx, "End read resourceAccount") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceAccount) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceAccountModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error receiving the plan") + return + } + + logMap := map[string]any{"account_id": plan.Id.ValueString()} + tflog.Info(ctx, "Update resourceAccount: got plan successfully", logMap) + + // Retrieve values from state + var state models.ResourceAccountModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceAccount: got state successfully", logMap) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceAccount: set timeouts successfully", map[string]any{ + "account_id": state.Id.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + accountId, err := strconv.Atoi(state.Id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceAccount: Cannot parse ID from state", err.Error()) + return + } + + // enable/disable account + if !plan.Enable.Equal(state.Enable) && !plan.Enable.IsNull() { + resp.Diagnostics.Append(utilities.EnableDisableAccount(ctx, uint64(accountId), plan.Enable.ValueBool(), r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error enabling/disabling account") + return + } + } + + // general update account + resp.Diagnostics.Append(utilities.UpdateAccount(ctx, uint64(accountId), &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error with general account update") + return + } + + // cpu_allocation_parameter + if !plan.CPUAllocationParameter.Equal(state.CPUAllocationParameter) && !plan.CPUAllocationParameter.IsNull() && plan.CPUAllocationParameter.ValueString() != "" { + resp.Diagnostics.Append(utilities.UtilityAccountCPUParameterUpdate(ctx, uint64(state.AccountID.ValueInt64()), &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error update CPUAllocationParameter ") + return + } + } + + // cpu_allocation_ratio + if !plan.CPUAllocationRatio.Equal(state.CPUAllocationRatio) && !plan.CPUAllocationRatio.IsNull() { + resp.Diagnostics.Append(utilities.UtilityAccountCPURatioUpdate(ctx, uint64(accountId), &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error update CPUAllocationRatio ") + return + } + } + + // add/delete users + if !plan.Users.Equal(state.Users) { + resp.Diagnostics.Append(utilities.AddDeleteUsersAccount(ctx, uint64(accountId), &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error adding/deleting users to/from account") + return + } + } + + // available_templates + if !plan.AvailableTemplates.Equal(state.AvailableTemplates) { + resp.Diagnostics.Append(utilities.UtilityAccountAvailiableTemplatesUpdate(ctx, &state, &plan, false, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error update AvailiableTemplates") + return + } + } + + // compute_features + if !plan.ComputeFeatures.Equal(state.ComputeFeatures) { + resp.Diagnostics.Append(utilities.UtilityAccountComputeFeaturesUpdate(ctx, uint64(accountId), &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error update ComputeFeatures") + return + } + } + + tflog.Info(ctx, "Update resourceAccount: account update is completed", logMap) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.AccountResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceAccount) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceAccountModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceAccount: Error get state") + return + } + tflog.Info(ctx, "Delete resourceAccount: got state successfully", map[string]any{"account_id": state.Id.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceAccount: set timeouts successfully", map[string]any{ + "account_id": state.Id.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + permanently := state.Permanently.ValueBool() + if state.Permanently.IsNull() { + permanently = true + } // default true + + // Delete existing resource group + delReq := account.DeleteRequest{ + AccountID: uint64(state.AccountID.ValueInt64()), + Permanently: permanently, + } + + tflog.Info(ctx, "Delete resourceAccount: before CloudBroker().Account().Delete", map[string]any{"req": delReq}) + _, err := r.client.CloudBroker().Account().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceAccount: Error deleting account with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceAccount", map[string]any{"account_id": state.Id.ValueString()}) +} + +// Schema defines the schema for the resource. +func (r *resourceAccount) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceAccount(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. 
+func (r *resourceAccount) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_account" +} + +// Configure adds the provider configured client to the resource. +func (r *resourceAccount) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceAccount") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceAccount successfully") +} + +func (r *resourceAccount) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_audits_list.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_audits_list.go new file mode 100644 index 0000000..ae570f3 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_audits_list.go @@ -0,0 +1,42 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountAuditsList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "call": schema.StringAttribute{ + Computed: true, + }, + "responsetime": schema.Float64Attribute{ + Computed: true, + }, + "statuscode": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Float64Attribute{ + Computed: true, + }, + "user": 
schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_available_templates_list.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_available_templates_list.go new file mode 100644 index 0000000..8777881 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_available_templates_list.go @@ -0,0 +1,19 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceAccountAvailableTemplatesList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "items": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + } + +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_computes_list.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_computes_list.go new file mode 100644 index 0000000..4c6508c --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_computes_list.go @@ -0,0 +1,117 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountComputesList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "compute_id": schema.Int64Attribute{ + Optional: true, + }, + "name": schema.StringAttribute{ + Optional: true, + }, + "rg_name": schema.StringAttribute{ + Optional: true, + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + }, + "tech_status": schema.StringAttribute{ + Optional: true, + }, + "ip_address": schema.StringAttribute{ + Optional: true, + }, + "extnet_name": schema.StringAttribute{ + Optional: true, + 
}, + "extnet_id": schema.Int64Attribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "cpus": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "registered": schema.BoolAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "total_disks_size": schema.Int64Attribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vins_connected": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } + +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_disks_list.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_disks_list.go new file mode 100644 index 0000000..7931a2a --- /dev/null +++ 
b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_disks_list.go @@ -0,0 +1,81 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountDisksList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // optional attributes + "disk_id": schema.Int64Attribute{ + Optional: true, + Description: "find by disk id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + Description: "find by max size disk", + }, + "type": schema.StringAttribute{ + Optional: true, + Description: "find by type of the disks", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_flipgroups_list.go 
b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_flipgroups_list.go new file mode 100644 index 0000000..5a0f7a6 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_flipgroups_list.go @@ -0,0 +1,131 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountFlipgroupsList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the account", + }, + + // optional attributes + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "vins_id": schema.Int64Attribute{ + Optional: true, + Description: "find by vins ID", + }, + "vins_name": schema.StringAttribute{ + Optional: true, + Description: "find by vins name", + }, + "extnet_id": schema.Int64Attribute{ + Optional: true, + Description: "find by extnet ID", + }, + "by_ip": schema.StringAttribute{ + Optional: true, + Description: "find by ip address", + }, + "flipgroup_id": schema.Int64Attribute{ + Optional: true, + Description: "find by flipgroup id", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "client_type": schema.StringAttribute{ + Computed: true, + }, + "conn_type": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + 
"created_time": schema.Int64Attribute{ + Computed: true, + }, + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "fg_id": schema.Int64Attribute{ + Computed: true, + }, + "ip": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "fg_name": schema.StringAttribute{ + Computed: true, + }, + "net_id": schema.Int64Attribute{ + Computed: true, + }, + "net_type": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_get_resource_consumption.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_get_resource_consumption.go new file mode 100644 index 0000000..b70a8d5 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_get_resource_consumption.go @@ -0,0 +1,131 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountGetResourceConsumption() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disksize": schema.Float64Attribute{ 
+ Computed: true, + }, + "disksizemax": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "ext_ips": schema.Int64Attribute{ + Computed: true, + }, + "ext_traffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + + "resource_limits": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": 
schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_list_deleted.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_list_deleted.go new file mode 100644 index 0000000..1085214 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_list_deleted.go @@ -0,0 +1,181 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceAccountListDeleted() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "filter by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "filter by name", + }, + "acl": schema.StringAttribute{ + Optional: true, + Description: "filter by acl", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "dc_location": schema.StringAttribute{ + Computed: true, + }, + "ckey": schema.StringAttribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "acl": schema.ListNestedAttribute{ + Computed: true, + NestedObject: 
schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "explicit": schema.BoolAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "right": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "user_group_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "compute_features": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "company": schema.StringAttribute{ + Computed: true, + }, + "companyurl": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "resource_limits": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + }, + }, + 
"resource_types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "send_access_emails": schema.BoolAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "uniq_pools": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "version": schema.Int64Attribute{ + Computed: true, + }, + "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_resource_consumption_list.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_resource_consumption_list.go new file mode 100644 index 0000000..1fa35e5 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_resource_consumption_list.go @@ -0,0 +1,113 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountGetResourceListConsumption() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disksize": schema.Float64Attribute{ + Computed: true, + }, + "disksizemax": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: 
schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disksize": schema.Float64Attribute{ + Computed: true, + }, + "disksizemax": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_rg_list.go b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_rg_list.go new file mode 100644 index 0000000..91d1935 --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_rg_list.go @@ -0,0 +1,232 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountRGList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": 
schema.Int64Attribute{ + Required: true, + Description: "ID of the account", + }, + + // optional attributes + "rg_id": schema.Int64Attribute{ + Optional: true, + Description: "find by rg id", + }, + "vins_id": schema.Int64Attribute{ + Optional: true, + Description: "find by vins id", + }, + "vm_id": schema.Int64Attribute{ + Optional: true, + Description: "find by vm id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "find by status", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "computes": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "started": schema.Int64Attribute{ + Computed: true, + }, + "stopped": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "resources": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disksize": schema.Int64Attribute{ + Computed: true, + }, + "disksizemax": schema.Int64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + 
NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disksize": schema.Int64Attribute{ + Computed: true, + }, + "disksizemax": schema.Int64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disksize": schema.Float64Attribute{ + Computed: true, + }, + "disksizemax": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + 
Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vinses": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go b/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go new file mode 100644 index 0000000..793a60f --- /dev/null +++ b/internal/service/cloudbroker/account/schemas/schema_resource_cb_account.go @@ -0,0 +1,210 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/setvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceAccount() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_name": schema.StringAttribute{ + Required: true, + Description: "name of the account", + }, + "username": schema.StringAttribute{ + Required: true, + Description: "username of owner the account", + }, + + // optional attributes + "emailaddress": schema.StringAttribute{ + Optional: true, + Description: "email", + }, + 
"send_access_emails": schema.BoolAttribute{ + Optional: true, + Description: "if true send emails when a user is granted access to resources", + }, + "uniq_pools": schema.ListAttribute{ + Optional: true, + Computed: true, + ElementType: types.StringType, + }, + "users": schema.ListNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "user_id": schema.StringAttribute{ + Required: true, + }, + "access_type": schema.StringAttribute{ + Required: true, + }, + "recursive_delete": schema.BoolAttribute{ + Optional: true, + // default is false + }, + }, + }, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + Optional: true, + Description: "set cpu allocation parameter", + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: true, + Optional: true, + Description: "set cpu allocation ratio", + }, + "available_templates": schema.SetAttribute{ + Optional: true, + ElementType: types.Int64Type, + Description: "Share images with account", + }, + "restore": schema.BoolAttribute{ + Optional: true, + Description: "restore a deleted account", + }, + "permanently": schema.BoolAttribute{ + Optional: true, + Description: "whether to completely delete the account", + // default is false + }, + "enable": schema.BoolAttribute{ + Optional: true, + Description: "enable/disable account", + }, + "resource_limits": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Optional: true, + Computed: true, + }, + }, + 
}, + "compute_features": schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, + Computed: true, + Validators: []validator.Set{ + setvalidator.ValueStringsAre(stringvalidator.OneOfCaseInsensitive("hugepages", "numa", "cpupin", "vfnic")), + }, + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "dc_location": schema.StringAttribute{ + Computed: true, + }, + "ckey": schema.StringAttribute{ + Computed: true, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "acl": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "explicit": schema.BoolAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "right": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "user_group_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "company": schema.StringAttribute{ + Computed: true, + }, + "companyurl": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deactivation_time": schema.Float64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "displayname": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "resource_types": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "version": schema.Int64Attribute{ + Computed: true, + }, 
+ "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + } +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_audits_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_audits_list.go new file mode 100644 index 0000000..f3a54d3 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_audits_list.go @@ -0,0 +1,21 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" +) + +func AccountAuditsListDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.ListAudits, error) { + tflog.Info(ctx, fmt.Sprintf("AccountAuditsListDataSourceCheckPresence: Get info about account audits with account ID - %v", accountId)) + + auditsList, err := c.CloudBroker().Account().Audits(ctx, account.AuditsRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about account audits with error: %w", err) + } + + return &auditsList, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_available_templates_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_available_templates_list.go new file mode 100644 index 0000000..c0232ae --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_available_templates_list.go @@ -0,0 +1,21 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" +) + +func AccountAvailableTemplatesListDataSourceCheckPresence(ctx context.Context, accountId uint64, c 
*decort.DecortClient) ([]uint64, error) { + tflog.Info(ctx, fmt.Sprintf("AccountAvailableTemplatesListDataSourceCheckPresence: Get info about templates with account ID - %v", accountId)) + + templatesList, err := c.CloudBroker().Account().ListAvailableTemplates(ctx, account.ListAvailableTemplatesRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about templated with error: %w", err) + } + + return templatesList, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_computes_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_computes_list.go new file mode 100644 index 0000000..c5a8a19 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_computes_list.go @@ -0,0 +1,61 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" +) + +func AccountComputesListDataSourceCheckPresence(ctx context.Context, plan *models.ListComputesModel, c *decort.DecortClient) (*account.ListComputes, error) { + tflog.Info(ctx, "AccountComputesListDataSourceCheckPresence: Get info about list accounts") + req := account.ListComputesRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + if !plan.ComputeID.IsNull() { + req.ComputeID = uint64(plan.ComputeID.ValueInt64()) + } + if !plan.Name.IsNull() { + req.Name = plan.Name.ValueString() + } + if !plan.RGName.IsNull() { + req.RGName = plan.RGName.ValueString() + } + if !plan.RGID.IsNull() { + req.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.TechStatus.IsNull() { + req.TechStatus = plan.TechStatus.ValueString() + } + if !plan.IpAddress.IsNull() { + req.IPAddress = 
plan.IpAddress.ValueString() + } + if !plan.ExtNetName.IsNull() { + req.ExtNetName = plan.ExtNetName.ValueString() + } + if !plan.ExtNetID.IsNull() { + req.ExtNetID = uint64(plan.ExtNetID.ValueInt64()) + } + if !plan.Page.IsNull() { + req.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + req.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + req.SortBy = plan.SortBy.ValueString() + } + tflog.Info(ctx, "AccountComputesListDataSourceCheckPresence: before call CloudBroker().Account().List", map[string]any{"req": req}) + + listComputes, err := c.CloudBroker().Account().ListComputes(ctx, req) + if err != nil { + return nil, fmt.Errorf("cannot get list computes with error: %w", err) + } + tflog.Info(ctx, "AccountComputesListDataSourceCheckPresence: response from CloudBroker().Account().List") + + return listComputes, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_disks_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_disks_list.go new file mode 100644 index 0000000..5d1ffe2 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_disks_list.go @@ -0,0 +1,51 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" +) + +func AccountDisksListCheckPresence(ctx context.Context, plan *models.DataSourceAccountDisksListModel, c *decort.DecortClient) (*account.ListDisks, error) { + tflog.Info(ctx, "AccountDisksListCheckPresence: Get info about account disks list") + + disksListReq := account.ListDisksRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + if !plan.DiskID.IsNull() { + disksListReq.DiskID = 
uint64(plan.DiskID.ValueInt64()) + } + if !plan.Name.IsNull() { + disksListReq.Name = plan.Name.ValueString() + } + if !plan.DiskMaxSize.IsNull() { + disksListReq.DiskMaxSize = uint64(plan.DiskMaxSize.ValueInt64()) + } + if !plan.Type.IsNull() { + disksListReq.Type = plan.Type.ValueString() + } + if !plan.Page.IsNull() { + disksListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + disksListReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + disksListReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountDisksListCheckPresence: before call CloudBroker().Account().ListDisks", map[string]any{"req": disksListReq}) + disksList, err := c.CloudBroker().Account().ListDisks(ctx, disksListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account disks list with error: %w", err) + } + + tflog.Info(ctx, "AccountDisksListCheckPresence: response from CloudBroker().Account().ListDisks") + + return disksList, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_flipgroups_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_flipgroups_list.go new file mode 100644 index 0000000..239d266 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_flipgroups_list.go @@ -0,0 +1,55 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" +) + +func AccountFlipgroupsListCheckPresence(ctx context.Context, plan *models.DataSourceAccountFlipgroupsListModel, c *decort.DecortClient) (*account.ListFLIPGroups, error) { + tflog.Info(ctx, "AccountFlipgroupsListCheckPresence: Get info about account flipgroups 
list") + + flipgroupsListReq := account.ListFLIPGroupsRequest{AccountID: uint64(plan.AccountID.ValueInt64())} + + if !plan.Name.IsNull() { + flipgroupsListReq.Name = plan.Name.ValueString() + } + if !plan.VINSID.IsNull() { + flipgroupsListReq.VINSID = uint64(plan.VINSID.ValueInt64()) + } + if !plan.VINSName.IsNull() { + flipgroupsListReq.VINSName = plan.VINSName.ValueString() + } + if !plan.ExtNetID.IsNull() { + flipgroupsListReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64()) + } + if !plan.ByIP.IsNull() { + flipgroupsListReq.ByIP = plan.ByIP.ValueString() + } + if !plan.FLIPGroupID.IsNull() { + flipgroupsListReq.FLIPGroupID = uint64(plan.FLIPGroupID.ValueInt64()) + } + if !plan.SortBy.IsNull() { + flipgroupsListReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + flipgroupsListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + flipgroupsListReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "AccountListCheckPresence: before call CloudBroker().Account().ListFLIPGroups", map[string]any{"req": flipgroupsListReq}) + flipgroupsList, err := c.CloudBroker().Account().ListFLIPGroups(ctx, flipgroupsListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account flipgroups list with error: %w", err) + } + + tflog.Info(ctx, "AccountListCheckPresence: response from CloudBroker().Account().ListFLIPGroups") + + return flipgroupsList, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_get_resource_consumption.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_get_resource_consumption.go new file mode 100644 index 0000000..bc96d41 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_get_resource_consumption.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" +) + +func AccountGetResourceConsumptionDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordResourceConsumption, error) { + tflog.Info(ctx, fmt.Sprintf("AccountGetResourceConsumptionDataSourceCheckPresence: Get info about account with ID - %v", accountId)) + + record, err := c.CloudBroker().Account().GetResourceConsumption(ctx, account.GetResourceConsumptionRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource with error: %w", err) + } + + tflog.Info(ctx, "AccountGetResourceConsumptionDataSourceCheckPresence: response from CloudBroker().Account().GetResourceConsumption", + map[string]any{"account_id": accountId, "response": record}) + + return record, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_list_deleted.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_list_deleted.go new file mode 100644 index 0000000..cc62fd3 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_list_deleted.go @@ -0,0 +1,46 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" +) + +func AccountListDeletedCheckPresence(ctx context.Context, plan *models.DataSourceAccountListDeletedModel, c *decort.DecortClient) (*account.ListAccounts, error) { + tflog.Info(ctx, "AccountListDeletedCheckPresence: Get info about account list deleted") + + accListDelReq := account.ListDeletedRequest{} + + if !plan.ByID.IsNull() { + accListDelReq.ByID = 
uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + accListDelReq.Name = plan.Name.ValueString() + } + if !plan.ACL.IsNull() { + accListDelReq.ACL = plan.ACL.ValueString() + } + if !plan.Page.IsNull() { + accListDelReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + accListDelReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + accListDelReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountListDeletedCheckPresence: before call CloudBroker().Account().ListDeleted", map[string]any{"req": accListDelReq}) + accListDel, err := c.CloudBroker().Account().ListDeleted(ctx, accListDelReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account with error: %w", err) + } + + tflog.Info(ctx, "AccountListDeletedCheckPresence: response from CloudBroker().Account().ListDeleted") + + return accListDel, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_resource_consumption_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_resource_consumption_list.go new file mode 100644 index 0000000..171b278 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_resource_consumption_list.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" +) + +func AccountGetResourceConsumptionListDataSourceCheckPresence(ctx context.Context, c *decort.DecortClient) (*account.ListResources, error) { + tflog.Info(ctx, "AccountGetResourceConsumptionListDataSourceCheckPresence: Get info about account resource consumption list") + + record, err := c.CloudBroker().Account().ListResourceConsumption(ctx) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource with error: %w", 
err) + } + + tflog.Info(ctx, "AccountGetResourceConsumptionListDataSourceCheckPresence: response from CloudBroker().Account().ListResourceConsumption", + map[string]any{"response": record}) + + return record, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_rg_list.go b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_rg_list.go new file mode 100644 index 0000000..e5ab2b3 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_rg_list.go @@ -0,0 +1,52 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" +) + +func AccountRGListCheckPresence(ctx context.Context, plan *models.DataSourceAccountRGListModel, c *decort.DecortClient) (*account.ListRG, error) { + tflog.Info(ctx, "AccountRGListCheckPresence: Get info about account rg list") + + rgListReq := account.ListRGRequest{AccountID: uint64(plan.AccountID.ValueInt64())} + + if !plan.RGID.IsNull() { + rgListReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.VinsID.IsNull() { + rgListReq.VINSID = uint64(plan.VinsID.ValueInt64()) + } + if !plan.VMID.IsNull() { + rgListReq.VMID = uint64(plan.VMID.ValueInt64()) + } + if !plan.Name.IsNull() { + rgListReq.Name = plan.Name.ValueString() + } + if !plan.Status.IsNull() { + rgListReq.Status = plan.Status.ValueString() + } + if !plan.Page.IsNull() { + rgListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + rgListReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + rgListReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountRGListCheckPresence: before call CloudBroker().Account().ListRG", 
map[string]any{"req": rgListReq}) + rgList, err := c.CloudBroker().Account().ListRG(ctx, rgListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account with error: %w", err) + } + + tflog.Info(ctx, "AccountRGListCheckPresence: response from CloudBroker().Account().ListRG") + + return rgList, err +} diff --git a/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go b/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go new file mode 100644 index 0000000..883be06 --- /dev/null +++ b/internal/service/cloudbroker/account/utilities/utility_resource_cb_account.go @@ -0,0 +1,725 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" +) + +// AccountResourceCheckPresence checks if account with accountId exists +func AccountResourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordAccount, error) { + tflog.Info(ctx, fmt.Sprintf("AccountResourceCheckPresence: Get info about resource with ID - %v", accountId)) + + accountRecord, err := c.CloudBroker().Account().Get(ctx, account.GetRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("AccountResourceCheckPresence: cannot get info about resource with error: %w", err) + } + + tflog.Info(ctx, "AccountResourceCheckPresence: response from CloudBroker().Account().Get", map[string]any{"account_id": accountId, 
"response": accountRecord}) + + return accountRecord, err +} + +// AccountReadStatus loads account resource by its id, gets it current status. Performs restore and enable if needed for +// Deleted status. +// In case of failure returns errors. +func AccountReadStatus(ctx context.Context, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "AccountReadStatus: Read status resource with ID", map[string]any{"account_id": state.Id.ValueString()}) + + diags := diag.Diagnostics{} + + accountId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64) + if err != nil { + diags.AddError("AccountReadStatus: Cannot parse resource ID from state", err.Error()) + return diags + } + + recordAccount, err := AccountResourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError("AccountReadStatus: Unable to Read account before status check", err.Error()) + return diags + } + + // check resource status + switch recordAccount.Status { + case status.Disabled: + tflog.Info(ctx, "The account is in status Disabled, troubles may occur with update. Please, enable account first.") + + case status.Deleted: + restore := state.Restore.ValueBool() + if state.Restore.IsNull() { + restore = true + } // default true + if restore { + // attempt to restore account + tflog.Info(ctx, "AccountReadStatus: account with status.Deleted is being read, attempt to restore it", map[string]any{ + "account_id": accountId, + "status": recordAccount.Status}) + diags.Append(RestoreAccount(ctx, accountId, c)...) 
+ if diags.HasError() { + tflog.Error(ctx, "AccountReadStatus: cannot restore account") + return diags + } + tflog.Info(ctx, "AccountReadStatus: account restored successfully", map[string]any{"account_id": accountId}) + } else { + tflog.Info(ctx, "AccountReadStatus: account is i status Deleted but restore is not specified") + } + + case status.Destroyed: + diags.AddError( + "AccountReadStatus: Account is in status Destroyed", + fmt.Sprintf("the resource with account_id %d cannot be read or updated because it has been destroyed", accountId), + ) + return diags + + case status.Destroying: + diags.AddError( + "AccountReadStatus: Account is in progress with status Destroying", + fmt.Sprintf("the resource with account_id %d cannot be read or updated because it is currently being destroyed", accountId), + ) + return diags + } + + return nil +} + +// RestoreAccount performs account Restore request. +// Returns error in case of failures. +func RestoreAccount(ctx context.Context, accountId uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + restoreReq := account.RestoreRequest{ + AccountID: accountId, + } + + tflog.Info(ctx, "RestoreAccount: before calling CloudBroker().Account().Restore", map[string]any{"account_id": accountId, "req": restoreReq}) + + res, err := c.CloudBroker().Account().Restore(ctx, restoreReq) + if err != nil { + diags.AddError( + "RestoreAccount: cannot restore account", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "RestoreAccount: response from CloudBroker().Account().Restore", map[string]any{"account_id": accountId, "response": res}) + + return nil +} + +// EnableDisableAccount performs account Enable/Disable request. +// Returns error in case of failures. 
+func EnableDisableAccount(ctx context.Context, accountId uint64, enable bool, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start EnableDisableAccount", map[string]any{"account_id": accountId}) + + diags := diag.Diagnostics{} + + if enable { + tflog.Info(ctx, "EnableDisableAccount: before calling CloudBroker().Account().Enable", map[string]any{"account_id": accountId}) + res, err := c.CloudBroker().Account().Enable(ctx, account.EnableRequest{AccountID: accountId}) + if err != nil { + diags.AddError( + "EnableDisableAccount: cannot enable account", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableAccount: response from CloudBroker().Account().Enable", map[string]any{"account_id": accountId, "response": res}) + + return nil + } + + tflog.Info(ctx, "EnableDisableAccount: before calling CloudBroker().Account().Disable", map[string]any{"account_id": accountId}) + res, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{AccountID: accountId}) + if err != nil { + diags.AddError( + "EnableDisableAccount: cannot disable account", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableAccount: response from CloudBroker().Account().Disable", map[string]any{"account_id": accountId, "response": res}) + + return nil +} + +func UtilityAccountCreate(ctx context.Context, plan *models.ResourceAccountModel, c *decort.DecortClient) (diag.Diagnostics, *uint64) { + tflog.Info(ctx, "Start UtilityAccountCreate", map[string]any{"account_id": plan.AccountID}) + diags := diag.Diagnostics{} + + req := account.CreateRequest{ + Name: plan.AccountName.ValueString(), + Username: plan.Username.ValueString(), + } + + if !plan.EmailAddress.IsUnknown() { + req.EmailAddress = plan.EmailAddress.ValueString() + } + + if !plan.SendAccessEmails.IsUnknown() { + req.SendAccessEmails = plan.SendAccessEmails.ValueBool() + } + + if !plan.UniqPools.IsUnknown() { + var uniqPools []string + diags.Append(plan.UniqPools.ElementsAs(ctx, 
&uniqPools, true)...) + if diags.HasError() { + tflog.Error(ctx, "UtilityAccountCreate: cannot populate UniqPools with plan.UniqPools object element") + return diags, nil + } + req.UniqPools = uniqPools + } + + if !plan.ResourceLimits.IsUnknown() { + var resourceLimitsPlan models.ResourceLimitsInAccountResourceModel + diags.Append(plan.ResourceLimits.As(ctx, &resourceLimitsPlan, basetypes.ObjectAsOptions{})...) + if diags.HasError() { + tflog.Error(ctx, "UtilityAccountCreate: cannot populate ResourceLimits with plan.ResourceLimits object element") + return diags, nil + } + + if resourceLimitsPlan.CUM.ValueFloat64() == 0 { + req.MaxMemoryCapacity = -1 + } else { + req.MaxMemoryCapacity = int64(resourceLimitsPlan.CUM.ValueFloat64()) + } + + if resourceLimitsPlan.CUD.ValueFloat64() == 0 { + req.MaxVDiskCapacity = -1 + } else { + req.MaxVDiskCapacity = int64(resourceLimitsPlan.CUD.ValueFloat64()) + } + + if resourceLimitsPlan.CUC.ValueFloat64() == 0 { + req.MaxCPUCapacity = -1 + } else { + req.MaxCPUCapacity = int64(resourceLimitsPlan.CUC.ValueFloat64()) + } + + if resourceLimitsPlan.CUI.ValueFloat64() == 0 { + req.MaxNumPublicIP = -1 + } else { + req.MaxNumPublicIP = int64(resourceLimitsPlan.CUI.ValueFloat64()) + } + + if resourceLimitsPlan.CUNP.ValueFloat64() == 0 { + req.MaxNetworkPeerTransfer = -1 + } else { + req.MaxNetworkPeerTransfer = int64(resourceLimitsPlan.CUNP.ValueFloat64()) + } + + if resourceLimitsPlan.GPUUnits.ValueFloat64() == 0 { + req.GPUUnits = -1 + } else { + req.GPUUnits = int64(resourceLimitsPlan.GPUUnits.ValueFloat64()) + } + } + + accountId, err := c.CloudBroker().Account().Create(ctx, req) + if err != nil { + diags.AddError("UtilityAccountCreate: Unable to create account", + err.Error()) + return diags, nil + } + + if !plan.Users.IsUnknown() { + usersPlan := make([]models.UsersModel, 0, len(plan.Users.Elements())) + + diagsI := plan.Users.ElementsAs(ctx, &usersPlan, true) + if diagsI.HasError() { + tflog.Error(ctx, "UtilityAccountCreate: 
cannot populate usersPlan with plan.Users list elements") + diags.AddWarning("UtilityAccountCreate: cannot populate usersPlan with plan.Users list elements", + fmt.Sprintf("%v", diagsI)) + return diags, nil + } + for _, v := range usersPlan { + req := account.AddUserRequest{ + AccountID: accountId, + Username: v.UserID.ValueString(), + AccessType: v.AccessType.ValueString(), + } + + _, err := c.CloudBroker().Account().AddUser(ctx, req) + if err != nil { + diags.AddWarning("UtilityAccountCreate: Unable to add users", + err.Error()) + return diags, nil + } + } + } + + if !plan.CPUAllocationParameter.IsUnknown() { + req := account.SetCPUAllocationParameterRequest{ + AccountID: accountId, + StrictLoose: plan.CPUAllocationParameter.ValueString(), + } + _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, req) + if err != nil { + diags.AddWarning("UtilityAccountCreate: Unable to set CPUAllocationParameter ", + err.Error()) + return diags, nil + } + } + + if !plan.CPUAllocationRatio.IsUnknown() { + req := account.SetCPUAllocationRatioRequest{ + AccountID: accountId, + Ratio: plan.CPUAllocationRatio.ValueFloat64(), + } + + _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, req) + if err != nil { + diags.AddWarning("UtilityAccountCreate: Unable to set CPUAllocationRatio ", + err.Error()) + return diags, nil + } + } + + if !plan.Enable.IsUnknown() && !plan.Enable.ValueBool() { + _, err := c.CloudBroker().Account().Disable(ctx, account.DisableRequest{ + AccountID: accountId, + }) + if err != nil { + diags.AddWarning("UtilityAccountCreate: Unable to disable account", + err.Error()) + return diags, nil + } + } + + if !plan.AvailableTemplates.IsUnknown() { + diagsI := UtilityAccountAvailiableTemplatesUpdate(ctx, plan, plan, true, c) + if diagsI.HasError() { + tflog.Error(ctx, "UtilityAccountCreate: error with utilityAccountAvailiableTemplatesUpdate") + diags.AddWarning("UtilityAccountCreate: cannot populate usersPlan with plan.Users list elements", 
fmt.Sprintf("%v", diagsI)) + return diags, nil + } + } + tflog.Info(ctx, "End UtilityAccountCreate", map[string]any{"account_id": plan.AccountID.ValueInt64()}) + return diags, &accountId +} + +// UpdateAccount updates disk data: account_name, resource_limits, send_access_emails. +// Returns error in case of failures. +func UpdateAccount(ctx context.Context, accountId uint64, plan, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start UpdateAccount", map[string]any{"account_id": accountId}) + + var diags diag.Diagnostics + var updateNeeded bool + + updateReq := account.UpdateRequest{ + AccountID: accountId, + } + + // check if account_name was changed + if !plan.AccountName.Equal(state.AccountName) { + updateReq.Name = plan.AccountName.ValueString() + updateNeeded = true + } + + // check if resource_limits were changed + if !plan.ResourceLimits.Equal(state.ResourceLimits) && !plan.ResourceLimits.IsUnknown() { + tflog.Info(ctx, "UpdateAccount: new ResourceLimits specified", map[string]any{"account_id": accountId}) + var resourceLimitsPlan models.ResourceLimitsInAccountResourceModel + diags.Append(plan.ResourceLimits.As(ctx, &resourceLimitsPlan, basetypes.ObjectAsOptions{})...) 
+ if diags.HasError() { + tflog.Error(ctx, "UpdateAccount: cannot populate ResourceLimits with plan.ResourceLimits object element") + return diags + } + + if resourceLimitsPlan.CUM.ValueFloat64() == 0 { + updateReq.MaxMemoryCapacity = -1 + } else { + updateReq.MaxMemoryCapacity = int64(resourceLimitsPlan.CUM.ValueFloat64()) + } + + if resourceLimitsPlan.CUD.ValueFloat64() == 0 { + updateReq.MaxVDiskCapacity = -1 + } else { + updateReq.MaxVDiskCapacity = int64(resourceLimitsPlan.CUD.ValueFloat64()) + } + + if resourceLimitsPlan.CUC.ValueFloat64() == 0 { + updateReq.MaxCPUCapacity = -1 + } else { + updateReq.MaxCPUCapacity = int64(resourceLimitsPlan.CUC.ValueFloat64()) + } + + if resourceLimitsPlan.CUI.ValueFloat64() == 0 { + updateReq.MaxNumPublicIP = -1 + } else { + updateReq.MaxNumPublicIP = int64(resourceLimitsPlan.CUI.ValueFloat64()) + } + + if resourceLimitsPlan.CUNP.ValueFloat64() == 0 { + updateReq.MaxNetworkPeerTransfer = -1 + } else { + updateReq.MaxNetworkPeerTransfer = int64(resourceLimitsPlan.CUNP.ValueFloat64()) + } + + if resourceLimitsPlan.GPUUnits.ValueFloat64() == 0 { + updateReq.GPUUnits = -1 + } else { + updateReq.GPUUnits = int64(resourceLimitsPlan.GPUUnits.ValueFloat64()) + } + + updateNeeded = true + } + + // check if send_access_emails was changed + if !plan.SendAccessEmails.Equal(state.SendAccessEmails) && !plan.SendAccessEmails.IsNull() { + updateReq.SendAccessEmails = plan.SendAccessEmails.ValueBool() + updateNeeded = true + } + + if !updateNeeded { + tflog.Info(ctx, "UpdateAccount: no general account update is needed because neither account_name, nor resource_limits, nor send_access_emails were changed.", map[string]any{ + "account_id": plan.Id.ValueString(), + }) + return nil + } + + // perform account update + tflog.Info(ctx, "UpdateAccount: before calling CloudBroker().Account().Update", map[string]any{ + "account_id": accountId, + "req": updateReq, + }) + res, err := c.CloudBroker().Account().Update(ctx, updateReq) + if err != nil { + 
diags.AddError("UpdateAccount: Unable to update account", + err.Error()) + return diags + } + tflog.Info(ctx, "UpdateAccount: response from CloudBroker().Account().Update", map[string]any{ + "account_id": accountId, + "response": res}) + + return nil +} + +// AddDeleteUsersAccount adds/deletes users to/from account. +// In case of failure returns errors. +func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start AddDeleteUsersAccount: new users specified", map[string]any{"account_id": accountId}) + diags := diag.Diagnostics{} + + usersPlan := make([]models.UsersModel, 0, len(plan.Users.Elements())) + diags.Append(plan.Users.ElementsAs(ctx, &usersPlan, true)...) + if diags.HasError() { + tflog.Error(ctx, "AddDeleteUsersAccount: cannot populate usersPlan with plan.Users list elements") + return diags + } + + usersState := make([]models.UsersModel, 0, len(state.Users.Elements())) + diags.Append(state.Users.ElementsAs(ctx, &usersState, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "AddDeleteUsersAccount: cannot populate usersState with state.Users list elements") + return diags + } + + // define users to be deleted, added and updated + var deletedUsers, addedUsers, updatedUsers []models.UsersModel + for _, user := range usersState { + if !containsUser(usersPlan, user) { + deletedUsers = append(deletedUsers, user) + } + } + for _, user := range usersPlan { + if !containsUser(usersState, user) { + addedUsers = append(addedUsers, user) + } else if isChangedUser(usersState, user) { + updatedUsers = append(updatedUsers, user) + } + } + + // delete users + if len(deletedUsers) == 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: no users need to be deleted", map[string]any{"account_id": accountId}) + } + if len(deletedUsers) > 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: users need to be deleted", map[string]any{ + "accountId": accountId, + "deletedUsers": deletedUsers}) + + for _, user := range deletedUsers { + delUserReq := account.DeleteUserRequest{ + AccountID: accountId, + UserName: user.UserID.ValueString(), + RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false + } + + tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq}) + res, err := c.CloudBroker().Account().DeleteUser(ctx, delUserReq) + tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudBroker().Account().DeleteUser", map[string]any{"account_id": accountId, "response": res}) + if err != nil { + diags.AddError( + "AddDeleteUsersAccount: can not delete user from account", + err.Error()) + } + } + } + + // add users + if len(addedUsers) == 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: no users needs to be added", map[string]any{"account_id": accountId}) + } + if len(addedUsers) > 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: users need to be added", map[string]any{"account_id": accountId}) + + for _, user := range addedUsers { + 
addUserReq := account.AddUserRequest{ + AccountID: accountId, + Username: user.UserID.ValueString(), + AccessType: user.AccessType.ValueString(), + } + + tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().AddUser", map[string]any{ + "account_id": accountId, + "addUserReq": addUserReq}) + res, err := c.CloudBroker().Account().AddUser(ctx, addUserReq) + if err != nil { + diags.AddError("AddDeleteUsersAccount: Unable to add users to account", + err.Error()) + } + tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudBroker().Account().AddUser", map[string]any{ + "account_id": accountId, + "response": res}) + } + } + + // update users + if len(updatedUsers) == 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: no users needs to be updated", map[string]any{"account_id": accountId}) + } + if len(updatedUsers) > 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: users need to be updated", map[string]any{"account_id": accountId}) + + for _, user := range updatedUsers { + updUserReq := account.UpdateUserRequest{ + AccountID: accountId, + UserID: user.UserID.ValueString(), + AccessType: user.AccessType.ValueString(), + } + + tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudBroker().Account().UpdateUser", map[string]any{ + "account_id": accountId, + "updatedUsers": updatedUsers}) + res, err := c.CloudBroker().Account().UpdateUser(ctx, updUserReq) + if err != nil { + diags.AddError("AddDeleteUsersAccount: Unable to update users", + err.Error()) + } + tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudBroker().Account().UpdateUser", map[string]any{ + "account_id": accountId, + "response": res}) + } + } + + return diags +} + +func containsUser(users []models.UsersModel, target models.UsersModel) bool { + for _, user := range users { + if target.UserID == user.UserID { + return true + } + } + return false +} + +func isChangedUser(users []models.UsersModel, target models.UsersModel) bool { + for _, user := range users { + if 
user.UserID.Equal(target.UserID) && !user.AccessType.Equal(target.AccessType) { + return true + } + } + return false +} + +func UtilityAccountCPUParameterUpdate(ctx context.Context, accountID uint64, plan *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start utilityAccountCPUParameterUpdate", map[string]any{"account_id": plan.AccountID}) + diags := diag.Diagnostics{} + + _, err := c.CloudBroker().Account().SetCPUAllocationParameter(ctx, account.SetCPUAllocationParameterRequest{ + AccountID: accountID, + StrictLoose: plan.CPUAllocationParameter.ValueString(), + }) + + if err != nil { + diags.AddError("utilityAccountCPUParameterUpdate: Unable to update CPUAllocationParameter", + err.Error()) + return diags + } + + return diags +} + +func UtilityAccountCPURatioUpdate(ctx context.Context, accountID uint64, plan *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start utilityAccountCPURatioUpdate", map[string]any{"account_id": plan.AccountID}) + diags := diag.Diagnostics{} + + _, err := c.CloudBroker().Account().SetCPUAllocationRatio(ctx, account.SetCPUAllocationRatioRequest{ + AccountID: accountID, + Ratio: plan.CPUAllocationRatio.ValueFloat64(), + }) + if err != nil { + diags.AddError("utilityAccountCPURatioUpdate: Unable to update CPUAllocationRatio", + err.Error()) + return diags + } + return diags +} + +func UtilityAccountAvailiableTemplatesUpdate(ctx context.Context, state, plan *models.ResourceAccountModel, afterCreate bool, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start utilityAccountAvailiableTemplatesUpdate", map[string]any{"account_id": plan.AccountID}) + diags := diag.Diagnostics{} + + if afterCreate { + + imageIds := make([]uint64, 0, len(plan.AvailableTemplates.Elements())) + + diags.Append(plan.AvailableTemplates.ElementsAs(ctx, &imageIds, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "UpdateAccount: cannot populate AvailableTemplates with plan.AvailableTemplates object element") + return diags + } + + if len(imageIds) == 0 { + diags.AddError( + "you have not been granted access to any images", + "len(imageIds) == 0", + ) + return diags + } + tflog.Error(ctx, "you have not been granted access to any images") + if err := ic.ExistImages(ctx, imageIds, c); err != nil { + diags.AddError( + fmt.Sprintf("can not grant access for available templates: %s", err.Error()), + err.Error(), + ) + return diags + } + tflog.Error(ctx, fmt.Sprint(imageIds)) + + req := account.GrantAccessTemplatesRequest{ + AccountID: uint64(state.AccountID.ValueInt64()), + ImageIDs: imageIds, + } + + _, err := c.CloudBroker().Account().GrantAccessTemplates(ctx, req) + if err != nil { + diags.AddError( + fmt.Sprintf("can not grant access for available templates: %s", err.Error()), + err.Error(), + ) + return diags + } + + return diags + } + + var oldSet, newSet []int + diags.Append(plan.AvailableTemplates.ElementsAs(ctx, &newSet, true)...) + diags.Append(state.AvailableTemplates.ElementsAs(ctx, &oldSet, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "UtilityAccountCreate: cannot populate newSet or oldSet with AvailableTemplates") + return diags + } + + revokeAT := setDifference(oldSet, newSet) + if len(revokeAT) > 0 { + imageIds := make([]uint64, 0, len(revokeAT)) + + for _, imageId := range revokeAT { + imageIds = append(imageIds, imageId) + } + + if err := ic.ExistImages(ctx, imageIds, c); err != nil { + diags.AddError(fmt.Sprintf("can not grant access for available templates: %s", err), err.Error()) + return diags + } + + req := account.RevokeAccessTemplatesRequest{ + AccountID: uint64(state.AccountID.ValueInt64()), + ImageIDs: imageIds, + } + + _, err := c.CloudBroker().Account().RevokeAccessTemplates(ctx, req) + if err != nil { + diags.AddError("UtilityAccountCreate: error with RevokeAccessTemplates", err.Error()) + return diags + } + } + + addedAT := setDifference(newSet, oldSet) + if len(addedAT) > 0 { + imageIds := make([]uint64, 0, len(addedAT)) + + imageIds = append(imageIds, addedAT...) 
+ + if err := ic.ExistImages(ctx, imageIds, c); err != nil { + diags.AddError(fmt.Sprintf("can grant access for available templates: %s", err.Error()), err.Error()) + return diags + } + + req := account.GrantAccessTemplatesRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + ImageIDs: imageIds, + } + + _, err := c.CloudBroker().Account().GrantAccessTemplates(ctx, req) + if err != nil { + diags.AddError("UtilityAccountCreate: error with GrantAccessTemplates", err.Error()) + return diags + } + } + + return diags +} + +func setDifference(set, check []int) []uint64 { + mapCheck := make(map[int]struct{}) + for _, id := range check { + mapCheck[id] = struct{}{} + } + var diff []uint64 + for _, id := range set { + if _, ok := mapCheck[id]; !ok { + diff = append(diff, uint64(id)) + } + } + return diff +} + +func UtilityAccountComputeFeaturesUpdate(ctx context.Context, accountID uint64, plan *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + var compFeatures []string + + diags.Append(plan.ComputeFeatures.ElementsAs(ctx, &compFeatures, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "utilityAccountComputeFeaturesUpdate: cannot populate compFeatures with plan.ComputeFeatures object element") + return diags + } + + req := account.UpdateComputeFeaturesRequest{ + AccountID: accountID, + ComputeFeatures: compFeatures, + } + + _, err := c.CloudBroker().Account().UpdateComputeFeatures(ctx, req) + if err != nil { + diags.AddError("utilityAccountComputeFeaturesUpdate: error with CloudBroker().Account().UpdateComputeFeatures", err.Error()) + return diags + } + + return diags +} diff --git a/internal/service/cloudbroker/audit/data_source_cb_audit.go b/internal/service/cloudbroker/audit/data_source_cb_audit.go new file mode 100644 index 0000000..365776e --- /dev/null +++ b/internal/service/cloudbroker/audit/data_source_cb_audit.go @@ -0,0 +1,91 @@ +package audit + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAudit{} +) + +func NewDataSourceAudit() datasource.DataSource { + return &dataSourceAudit{} +} + +// dataSourceAudit is the data source implementation. 
+type dataSourceAudit struct { + client *decort.DecortClient +} + +func (d *dataSourceAudit) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAudit + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAudit: Error get state") + return + } + auditGuid := state.AuditGuid.ValueString() + tflog.Info(ctx, "Read dataSourceAudit: got state successfully", map[string]any{"audit_guid": auditGuid}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAudit: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAudit: set timeouts successfully", map[string]any{ + "audit_guid": auditGuid, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AuditDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAudit: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAudit: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAudit", map[string]any{"audit_guid": auditGuid}) +} + +func (d *dataSourceAudit) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAudit(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAudit) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_audit" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAudit) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAudit") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAudit successfully") +} diff --git a/internal/service/cloudbroker/audit/data_source_cb_audit_linked_jobs.go b/internal/service/cloudbroker/audit/data_source_cb_audit_linked_jobs.go new file mode 100644 index 0000000..098fd75 --- /dev/null +++ b/internal/service/cloudbroker/audit/data_source_cb_audit_linked_jobs.go @@ -0,0 +1,91 @@ +package audit + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAuditLinkedJobs{} +) + +func NewDataSourceAuditLinkedJobs() datasource.DataSource { + return &dataSourceAuditLinkedJobs{} +} + +// dataSourceAuditLinkedJobs is the data source implementation. +type dataSourceAuditLinkedJobs struct { + client *decort.DecortClient +} + +func (d *dataSourceAuditLinkedJobs) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAuditLinkedJobs + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error get state") + return + } + auditGuid := state.AuditGuid.ValueString() + tflog.Info(ctx, "Read dataSourceAudit: got state successfully", map[string]any{"audit_guid": auditGuid}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAuditLinkedJobs: set timeouts successfully", map[string]any{ + "audit_guid": auditGuid, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AuditLinkedJobsDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditLinkedJobs: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAuditLinkedJobs", map[string]any{"audit_guid": auditGuid}) +} + +func (d *dataSourceAuditLinkedJobs) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAuditLinkedJobs(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAuditLinkedJobs) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_audit_linked_jobs" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAuditLinkedJobs) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAuditLinkedJobs") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAuditLinkedJobs successfully") +} diff --git a/internal/service/cloudbroker/audit/data_source_cb_audit_list.go b/internal/service/cloudbroker/audit/data_source_cb_audit_list.go new file mode 100644 index 0000000..873d425 --- /dev/null +++ b/internal/service/cloudbroker/audit/data_source_cb_audit_list.go @@ -0,0 +1,89 @@ +package audit + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAuditList{} +) + +func NewDataSourceAuditList() datasource.DataSource { + return &dataSourceAuditList{} +} + +// dataSourceAuditList is the data source implementation. +type dataSourceAuditList struct { + client *decort.DecortClient +} + +func (d *dataSourceAuditList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAuditList + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAuditList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAuditList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AuditListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAuditList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAuditList") +} + +func (d *dataSourceAuditList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAuditList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAuditList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_audit_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAuditList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAuditList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAuditList successfully") +} diff --git a/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go new file mode 100644 index 0000000..c886dd1 --- /dev/null +++ b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit.go @@ -0,0 +1,50 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/utilities" +) + +func AuditDataSource(ctx context.Context, state *models.DataSourceAudit, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start 
flattens.AuditDataSource") + + diags := diag.Diagnostics{} + + auditGuid := state.AuditGuid.ValueString() + + recordAudit, diags := utilities.AuditDataSourceCheckPresence(ctx, auditGuid, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.AuditDataSource: before flatten", map[string]any{"audit_guid": auditGuid}) + + *state = models.DataSourceAudit{ + AuditGuid: state.AuditGuid, + Timeouts: state.Timeouts, + + Apitask: types.StringValue(recordAudit.Apitask), + Arguments: types.StringValue(recordAudit.Arguments), + Call: types.StringValue(recordAudit.Call), + GUID: types.StringValue(recordAudit.GUID), + ID: types.StringValue(recordAudit.GUID), + Kwargs: types.StringValue(recordAudit.Kwargs), + RemoteAddr: types.StringValue(recordAudit.RemoteAddr), + ResponseTime: types.Float64Value(recordAudit.ResponseTime), + Result: types.StringValue(recordAudit.Result), + StatusCode: types.Int64Value(int64(recordAudit.StatusCode)), + Tags: types.StringValue(recordAudit.Tags), + Timestamp: types.Float64Value(recordAudit.Timestamp), + TimestampEnd: types.Float64Value(recordAudit.TimestampEnd), + User: types.StringValue(recordAudit.User), + } + + tflog.Info(ctx, "End flattens.AuditDataSource", map[string]any{"audit_guid": auditGuid}) + return nil +} diff --git a/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_linked_jobs.go b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_linked_jobs.go new file mode 100644 index 0000000..b052f40 --- /dev/null +++ b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_linked_jobs.go @@ -0,0 +1,64 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/utilities" +) + +func AuditLinkedJobsDataSource(ctx context.Context, state *models.DataSourceAuditLinkedJobs, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AuditLinkedJobsDataSource") + + diags := diag.Diagnostics{} + + auditGuid := state.AuditGuid.ValueString() + + recordAudit, diags := utilities.AuditLinkedJobsDataSourceCheckPresence(ctx, auditGuid, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.AuditLinkedJobsDataSource: before flatten", map[string]any{"audit_guid": auditGuid}) + + *state = models.DataSourceAuditLinkedJobs{ + AuditGuid: state.AuditGuid, + Timeouts: state.Timeouts, + + ID: types.StringValue(uuid.New().String()), + Items: flattenLinkedJobs(ctx, *recordAudit), + } + + tflog.Info(ctx, "End flattens.AuditLinkedJobsDataSource", map[string]any{"audit_guid": auditGuid}) + return nil +} + +func flattenLinkedJobs(ctx context.Context, linkedJobs []audit.ItemLinkedJobs) []models.LinkedJob { + tflog.Info(ctx, "Start flattenLinkedJobs") + + res := make([]models.LinkedJob, 0, len(linkedJobs)) + + for _, item := range linkedJobs { + temp := models.LinkedJob{ + CMD: types.StringValue(item.CMD), + GUID: types.StringValue(item.GUID), + NID: types.Int64Value(int64(item.NID)), + State: types.StringValue(item.State), + TimeCreate: types.Int64Value(int64(item.TimeCreate)), + TimeStart: types.Int64Value(int64(item.TimeStart)), + TimeStop: types.Int64Value(int64(item.TimeStop)), + Timeout: types.Int64Value(int64(item.Timeout)), + } + + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenLinkedJobs") + return res +} diff --git a/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_list.go 
b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_list.go new file mode 100644 index 0000000..bfac2e0 --- /dev/null +++ b/internal/service/cloudbroker/audit/flattens/flatten_data_source_cb_audit_list.go @@ -0,0 +1,67 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/utilities" +) + +func AuditListDataSource(ctx context.Context, state *models.DataSourceAuditList, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AuditListDataSource") + + diags := diag.Diagnostics{} + + recordAuditList, diags := utilities.AuditListDataSourceCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.AuditListDataSource: before flatten") + + *state = models.DataSourceAuditList{ + TimestampAt: state.TimestampAt, + TimestampTo: state.TimestampTo, + User: state.User, + Call: state.Call, + MinStatusCode: state.MinStatusCode, + MaxStatusCode: state.MaxStatusCode, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + EntryCount: types.Int64Value(int64(recordAuditList.EntryCount)), + Items: flattenAuditItems(ctx, recordAuditList.Data), + } + + tflog.Info(ctx, "End flattens.AuditListDataSource") + return nil +} + +func flattenAuditItems(ctx context.Context, auditList []audit.ItemAudit) []models.ItemAudit { + tflog.Info(ctx, "Start flattenAuditItems") + + res := make([]models.ItemAudit, 0, len(auditList)) + + for _, item := range auditList { + temp := models.ItemAudit{ + 
Call: types.StringValue(item.Call), + GUID: types.StringValue(item.GUID), + ResponseTime: types.Float64Value(item.ResponseTime), + StatusCode: types.Int64Value(int64(item.StatusCode)), + Timestamp: types.Float64Value(item.Timestamp), + User: types.StringValue(item.User), + } + + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenAuditItems") + return res +} diff --git a/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go new file mode 100644 index 0000000..769e979 --- /dev/null +++ b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit.go @@ -0,0 +1,28 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAudit struct { + //required field + AuditGuid types.String `tfsdk:"audit_guid"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //response field + Apitask types.String `tfsdk:"apitask"` + Arguments types.String `tfsdk:"args"` + Call types.String `tfsdk:"call"` + GUID types.String `tfsdk:"guid"` + ID types.String `tfsdk:"id"` + Kwargs types.String `tfsdk:"kwargs"` + RemoteAddr types.String `tfsdk:"remote_addr"` + ResponseTime types.Float64 `tfsdk:"responsetime"` + Result types.String `tfsdk:"result"` + StatusCode types.Int64 `tfsdk:"status_code"` + Tags types.String `tfsdk:"tags"` + Timestamp types.Float64 `tfsdk:"timestamp"` + TimestampEnd types.Float64 `tfsdk:"timestamp_end"` + User types.String `tfsdk:"user"` +} diff --git a/internal/service/cloudbroker/audit/models/model_data_source_cb_audit_linked_jobs.go b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit_linked_jobs.go new file mode 100644 index 0000000..a020a68 --- /dev/null +++ b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit_linked_jobs.go @@ -0,0 +1,27 @@ +package models + +import ( + 
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAuditLinkedJobs struct { + //required field + AuditGuid types.String `tfsdk:"audit_guid"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //response field + ID types.String `tfsdk:"id"` + Items []LinkedJob `tfsdk:"items"` +} + +type LinkedJob struct { + CMD types.String `tfsdk:"cmd"` + GUID types.String `tfsdk:"guid"` + NID types.Int64 `tfsdk:"nid"` + State types.String `tfsdk:"state"` + TimeCreate types.Int64 `tfsdk:"time_create"` + TimeStart types.Int64 `tfsdk:"time_start"` + TimeStop types.Int64 `tfsdk:"time_stop"` + Timeout types.Int64 `tfsdk:"timeout"` +} diff --git a/internal/service/cloudbroker/audit/models/model_data_source_cb_audit_list.go b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit_list.go new file mode 100644 index 0000000..cb2e482 --- /dev/null +++ b/internal/service/cloudbroker/audit/models/model_data_source_cb_audit_list.go @@ -0,0 +1,33 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceAuditList struct { + //request field + TimestampAt types.Int64 `tfsdk:"timestamp_at"` + TimestampTo types.Int64 `tfsdk:"timestamp_to"` + User types.String `tfsdk:"user"` + Call types.String `tfsdk:"call"` + MinStatusCode types.Int64 `tfsdk:"min_status_code"` + MaxStatusCode types.Int64 `tfsdk:"max_status_code"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //response field + EntryCount types.Int64 `tfsdk:"entry_count"` + Items []ItemAudit `tfsdk:"items"` +} + +type ItemAudit struct { + Call types.String `tfsdk:"call"` + GUID types.String `tfsdk:"guid"` + ResponseTime types.Float64 `tfsdk:"responsetime"` + StatusCode types.Int64 
`tfsdk:"status_code"` + Timestamp types.Float64 `tfsdk:"timestamp"` + User types.String `tfsdk:"user"` +} diff --git a/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go new file mode 100644 index 0000000..2a400ec --- /dev/null +++ b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit.go @@ -0,0 +1,55 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAudit() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "audit_guid": schema.StringAttribute{ + Required: true, + }, + "apitask": schema.StringAttribute{ + Computed: true, + }, + "args": schema.StringAttribute{ + Computed: true, + }, + "call": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "kwargs": schema.StringAttribute{ + Computed: true, + }, + "remote_addr": schema.StringAttribute{ + Computed: true, + }, + "responsetime": schema.Float64Attribute{ + Computed: true, + }, + "result": schema.StringAttribute{ + Computed: true, + }, + "status_code": schema.Int64Attribute{ + Computed: true, + }, + "tags": schema.StringAttribute{ + Computed: true, + }, + "timestamp": schema.Float64Attribute{ + Computed: true, + }, + "timestamp_end": schema.Float64Attribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_linked_jobs.go b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_linked_jobs.go new file mode 100644 index 0000000..2e29186 --- /dev/null +++ b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_linked_jobs.go @@ -0,0 +1,47 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + 
+func MakeSchemaDataSourceAuditLinkedJobs() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "audit_guid": schema.StringAttribute{ + Required: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cmd": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "nid": schema.Int64Attribute{ + Computed: true, + }, + "state": schema.StringAttribute{ + Computed: true, + }, + "time_create": schema.Int64Attribute{ + Computed: true, + }, + "time_start": schema.Int64Attribute{ + Computed: true, + }, + "time_stop": schema.Int64Attribute{ + Computed: true, + }, + "timeout": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_list.go b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_list.go new file mode 100644 index 0000000..f6b2c2d --- /dev/null +++ b/internal/service/cloudbroker/audit/schemas/schema_data_source_cb_audit_list.go @@ -0,0 +1,65 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAuditList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "timestamp_at": schema.Int64Attribute{ + Optional: true, + }, + "timestamp_to": schema.Int64Attribute{ + Optional: true, + }, + "user": schema.StringAttribute{ + Optional: true, + }, + "call": schema.StringAttribute{ + Optional: true, + }, + "min_status_code": schema.Int64Attribute{ + Optional: true, + }, + "max_status_code": schema.Int64Attribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ 
+ Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "call": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "responsetime": schema.Float64Attribute{ + Computed: true, + }, + "status_code": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Float64Attribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit.go b/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit.go new file mode 100644 index 0000000..494b658 --- /dev/null +++ b/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit" +) + +func AuditDataSourceCheckPresence(ctx context.Context, auditGuid string, c *decort.DecortClient) (*audit.RecordAudit, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("AuditDataSourceCheckPresence: Get info about audit with ID - %v", auditGuid)) + + diags := diag.Diagnostics{} + + recordAudit, err := c.CloudBroker().Audit().Get(ctx, audit.GetRequest{AuditGuid: auditGuid}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about audit with ID %v", auditGuid), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "AuditDataSourceCheckPresence: response from CloudBroker().Audit().Get", map[string]any{"audit_guid": auditGuid, "response": recordAudit}) + + return recordAudit, nil +} diff --git a/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_linked_jobs.go 
b/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_linked_jobs.go new file mode 100644 index 0000000..1b5742e --- /dev/null +++ b/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_linked_jobs.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit" +) + +func AuditLinkedJobsDataSourceCheckPresence(ctx context.Context, auditGuid string, c *decort.DecortClient) (*audit.ListLinkedJobs, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("AuditLinkedJobsDataSourceCheckPresence: Get info about audit linked jobs with ID - %v", auditGuid)) + + diags := diag.Diagnostics{} + + linkedJobsList, err := c.CloudBroker().Audit().LinkedJobs(ctx, audit.LinkedJobsRequest{AuditGuid: auditGuid}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about audit linked jobs with ID %v", auditGuid), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "AuditLinkedJobsDataSourceCheckPresence: response from CloudBroker().Audit().LinkedJobs", map[string]any{"audit_guid": auditGuid, "response": linkedJobsList}) + + return linkedJobsList, nil +} diff --git a/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_list.go b/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_list.go new file mode 100644 index 0000000..f9402e9 --- /dev/null +++ b/internal/service/cloudbroker/audit/utilities/utility_data_source_cb_audit_list.go @@ -0,0 +1,58 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/audit" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/audit/models" +) + +func AuditListDataSourceCheckPresence(ctx context.Context, state *models.DataSourceAuditList, c *decort.DecortClient) (*audit.ListAudits, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("AuditListDataSourceCheckPresence: Get info about audit list")) + + diags := diag.Diagnostics{} + + req := audit.ListRequest{} + + if !state.TimestampAt.IsNull() { + req.TimestampAt = uint64(state.TimestampAt.ValueInt64()) + } + if !state.TimestampTo.IsNull() { + req.TimestampTo = uint64(state.TimestampTo.ValueInt64()) + } + if !state.User.IsNull() { + req.User = state.User.ValueString() + } + if !state.Call.IsNull() { + req.Call = state.Call.ValueString() + } + if !state.MaxStatusCode.IsNull() { + req.MaxStatusCode = uint64(state.MaxStatusCode.ValueInt64()) + } + if !state.MinStatusCode.IsNull() { + req.MinStatusCode = uint64(state.MinStatusCode.ValueInt64()) + } + if !state.SortBy.IsNull() { + req.SortBy = state.SortBy.ValueString() + } + if !state.Page.IsNull() { + req.Page = uint64(state.Page.ValueInt64()) + } + if !state.Size.IsNull() { + req.Size = uint64(state.Size.ValueInt64()) + } + + recordAuditList, err := c.CloudBroker().Audit().List(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about audit list"), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "AuditDataSourceCheckPresence: response from CloudBroker().Audit().List", map[string]any{"response": recordAuditList}) + + return recordAuditList, nil +} diff --git a/internal/service/cloudbroker/ic/input_checks.go b/internal/service/cloudbroker/ic/input_checks.go new file mode 100644 index 0000000..8307696 --- /dev/null +++ b/internal/service/cloudbroker/ic/input_checks.go @@ -0,0 +1,305 @@ +// Input checks +package ic + +import ( + "context" + "fmt" + + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + account 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/account" + extnet "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/extnet" + grid "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/grid" + image "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/image" + lb "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + rg "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + vins "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func ExistRG(ctx context.Context, rgId uint64, c *decort.DecortClient) error { + req := rg.ListRequest{ + ByID: rgId, + IncludeDeleted: false, + } + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return err + } + + if len(rgList.Data) == 0 { + return fmt.Errorf("RG with id %v not found", rgId) + } + + return nil +} + +func ExistAccount(ctx context.Context, accountId uint64, c *decort.DecortClient) error { + req := account.ListRequest{ + ByID: accountId, + } + + accountList, err := c.CloudBroker().Account().List(ctx, req) + if err != nil { + return err + } + + if len(accountList.Data) == 0 { + return fmt.Errorf("account with id %d not found", accountId) + } + + return nil +} + +func ExistAccounts(ctx context.Context, accountIds []uint64, c *decort.DecortClient) error { + req := account.ListRequest{} + + accountList, err := c.CloudBroker().Account().List(ctx, req) + if err != nil { + return err + } + if len(accountList.Data) == 0 { + return fmt.Errorf("you have not been granted access to any account") + } + + notFound := make([]uint64, 0, len(accountIds)) + + for _, accID := range accountIds { + found := false + + for _, acc := range accountList.Data { + if accID == acc.ID { + found = true + break + } + } + + if !found { + notFound = append(notFound, accID) + } + } + + if len(notFound) > 0 { + return fmt.Errorf("Accounts with ids %v not found", notFound) + } + + return nil +} + +func ExistRGs(ctx 
context.Context, rgIDs []uint64, c *decort.DecortClient) error { + req := rg.ListRequest{ + IncludeDeleted: false, + } + + rgList, err := c.CloudBroker().RG().List(ctx, req) + if err != nil { + return err + } + + if len(rgList.Data) == 0 { + return fmt.Errorf("you have not been granted access to any resource group") + } + + notFound := make([]uint64, 0, len(rgIDs)) + + for _, rgID := range rgIDs { + found := false + + for _, rg := range rgList.Data { + if rgID == rg.ID { + found = true + break + } + } + + if !found { + notFound = append(notFound, rgID) + } + } + + if len(notFound) > 0 { + return fmt.Errorf("RGs with ids %v not found", notFound) + } + + return nil +} + +func ExistLB(ctx context.Context, lbId uint64, c *decort.DecortClient) error { + req := lb.ListRequest{ + ByID: lbId, + } + + lbList, err := c.CloudBroker().LB().List(ctx, req) + if err != nil { + return err + } + + if len(lbList.Data) == 0 { + return fmt.Errorf("LB with ID %v not found", lbId) + } + + return nil +} + +func ExistLBFrontend(ctx context.Context, lbId uint64, fName string, c *decort.DecortClient) error { + lb, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId}) + if err != nil { + return err + } + + frontends := lb.Frontends + for _, f := range frontends { + if f.Name == fName { + return nil + } + } + + return fmt.Errorf("frontend with name %v not found", fName) +} + +func ExistLBBackend(ctx context.Context, lbId uint64, bName string, c *decort.DecortClient) error { + lb, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId}) + if err != nil { + return err + } + + backends := lb.Backends + for _, b := range backends { + if b.Name == bName { + return nil + } + } + + return fmt.Errorf("backend with name %v not found", bName) +} + +func ExistExtNetInLb(ctx context.Context, extNetId uint64, c *decort.DecortClient) error { + if extNetId == 0 { + return nil + } + req := extnet.ListRequest{ + ByID: extNetId, + } + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, 
req) + if err != nil { + return err + } + + if len(extNetList.Data) == 0 { + return fmt.Errorf("EXTNET with ID %v not found", extNetId) + } + + return nil +} + +func ExistVinsInLb(ctx context.Context, vinsId uint64, c *decort.DecortClient) error { + if vinsId == 0 { + return nil + } + + req := vins.ListRequest{ + ByID: vinsId, + } + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return err + } + + if len(vinsList.Data) == 0 { + return fmt.Errorf("VINS with ID %v not found", vinsId) + } + + return nil +} + +func ExistExtNetInVins(ctx context.Context, extNetId int, c *decort.DecortClient) error { + if extNetId == 0 || extNetId == -1 { + return nil + } + req := extnet.ListRequest{ + ByID: uint64(extNetId), + } + + extNetList, err := c.CloudBroker().ExtNet().List(ctx, req) + if err != nil { + return err + } + + if len(extNetList.Data) == 0 { + return fmt.Errorf("EXTNET with ID %v not found", extNetId) + } + + return nil +} + +func ExistGID(ctx context.Context, gid uint64, c *decort.DecortClient) error { + req := grid.ListRequest{} + + gridList, err := c.CloudBroker().Grid().List(ctx, req) + if err != nil { + return err + } + + for _, grid := range gridList.Data { + if grid.GID == gid { + return nil + } + } + + return fmt.Errorf("GID with id %v not found", gid) +} + +func ExistVins(ctx context.Context, vinsId uint64, c *decort.DecortClient) error { + req := vins.ListRequest{ + ByID: vinsId, + IncludeDeleted: false, + } + + vinsList, err := c.CloudBroker().VINS().List(ctx, req) + if err != nil { + return err + } + + if len(vinsList.Data) == 0 { + return fmt.Errorf("vins with ID %v not found", vinsId) + } + + return nil +} + +func ExistImages(ctx context.Context, imageIDs []uint64, c *decort.DecortClient) error { + req := image.ListRequest{} + + listImages, err := c.CloudBroker().Image().List(ctx, req) + if err != nil { + return err + } + + if len(listImages.Data) == 0 { + return fmt.Errorf("you have not been granted access to any images") + 
} + + notFound := make([]uint64, 0, len(imageIDs)) + + for _, imageID := range imageIDs { + found := false + + for _, image := range listImages.Data { + if imageID == image.ID { + found = true + break + } + } + + if !found { + notFound = append(notFound, imageID) + } + } + + if len(notFound) > 0 { + return fmt.Errorf("images with ids %v not found", notFound) + } + + return nil +} diff --git a/internal/service/cloudbroker/lb/data_source_lb.go b/internal/service/cloudbroker/lb/data_source_lb.go new file mode 100644 index 0000000..4c7a1b2 --- /dev/null +++ b/internal/service/cloudbroker/lb/data_source_lb.go @@ -0,0 +1,91 @@ +package lb + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceLB{} +) + +func NewDataSourceLB() datasource.DataSource { + return &dataSourceLB{} +} + +// dataSourceLB is the data source implementation. 
+type dataSourceLB struct { + client *decort.DecortClient +} + +func (d *dataSourceLB) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceLB + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLB: Error get state") + return + } + lbID := uint64(state.ID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceLB: got state successfully", map[string]any{"lb_id": lbID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLB: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceLB: set timeouts successfully", map[string]any{ + "lb_id": lbID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.LBDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLB: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLB: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceLB", map[string]any{"lb_id": lbID}) +} + +func (d *dataSourceLB) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceLB(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceLB) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceLB) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceLB") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceLB successfully") +} diff --git a/internal/service/cloudbroker/lb/data_source_lb_list.go b/internal/service/cloudbroker/lb/data_source_lb_list.go new file mode 100644 index 0000000..129d52b --- /dev/null +++ b/internal/service/cloudbroker/lb/data_source_lb_list.go @@ -0,0 +1,88 @@ +package lb + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceLBList{} +) + +func NewDataSourceLBList() datasource.DataSource { + return &dataSourceLBList{} +} + +// dataSourceLBList is the data source implementation. +type dataSourceLBList struct { + client *decort.DecortClient +} + +func (d *dataSourceLBList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceLBList + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBList: Error get state") + return + } + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceLBList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.LBListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceLBList") +} + +func (d *dataSourceLBList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceLBList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceLBList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceLBList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceLBList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceLBList successfully") +} diff --git a/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go new file mode 100644 index 0000000..11605c5 --- /dev/null +++ b/internal/service/cloudbroker/lb/data_source_lb_list_deleted.go @@ -0,0 +1,89 @@ +package lb + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceLBListDeleted{} +) + +func NewDataSourceLBListDeleted() datasource.DataSource { + return &dataSourceLBListDeleted{} +} + +// dataSourceLBListDeleted is the data source implementation. +type dataSourceLBListDeleted struct { + client *decort.DecortClient +} + +func (d *dataSourceLBListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceLBListDeleted + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBListDeleted: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceLBListDeleted: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceLBListDeleted: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.LBListDeletedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBListDeleted: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceLBListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceLBListDeleted") +} + +func (d *dataSourceLBListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceLBListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceLBListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb_list_deleted" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceLBListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceLBListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceLBListDeleted successfully") +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb.go b/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb.go new file mode 100644 index 0000000..890717a --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb.go @@ -0,0 +1,219 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +// LBDataSource flattens data source for lb. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func LBDataSource(ctx context.Context, state *models.DataSourceLB, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBDataSource") + + diags := diag.Diagnostics{} + + lbID := uint64(state.LBID.ValueInt64()) + + recordLB, diags := utilities.LBDataSourceCheckPresence(ctx, lbID, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.LBDataSource: before flatten", map[string]any{"lb_id": lbID}) + + acl, _ := json.Marshal(recordLB.ACL) + + *state = models.DataSourceLB{ + LBID: state.LBID, + Timeouts: state.Timeouts, + + HAMode: types.BoolValue(recordLB.HAMode), + ACL: types.StringValue(string(acl)), + BackendHAIP: types.StringValue(recordLB.BackendHAIP), + Backends: flattenBackendsInLB(ctx, recordLB.Backends), + CKey: types.StringValue(recordLB.CKey), + Description: types.StringValue(recordLB.Description), + DPAPIUser: types.StringValue(recordLB.DPAPIUser), + DPAPIPassword: types.StringValue(recordLB.DPAPIPassword), + ExtNetID: types.Int64Value(int64(recordLB.ExtNetID)), + FrontendHAIP: types.StringValue(recordLB.FrontendHAIP), + Frontends: flattenFrontendsInLB(ctx, recordLB.Frontends), + GID: types.Int64Value(int64(recordLB.GID)), + GUID: types.Int64Value(int64(recordLB.GUID)), + ID: types.Int64Value(int64(recordLB.ID)), + ImageID: types.Int64Value(int64(recordLB.ImageID)), + ManagerId: types.Int64Value(int64(recordLB.ManagerId)), + ManagerType: types.StringValue(recordLB.ManagerType), + Meta: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordLB.Meta), + Milestones: types.Int64Value(int64(recordLB.Milestones)), + Name: types.StringValue(recordLB.Name), + PartK8s: types.BoolValue(recordLB.PartK8s), + PrimaryNode: flattenNodeInLB(ctx, 
recordLB.PrimaryNode), + RGID: types.Int64Value(int64(recordLB.RGID)), + SecondaryNode: flattenNodeInLB(ctx, recordLB.SecondaryNode), + Status: types.StringValue(recordLB.Status), + TechStatus: types.StringValue(recordLB.TechStatus), + UserManaged: types.BoolValue(recordLB.UserManaged), + VINSID: types.Int64Value(int64(recordLB.VINSID)), + } + + tflog.Info(ctx, "End flattens.LBDataSource", map[string]any{"lb_id": state.ID.ValueInt64()}) + return nil +} + +func flattenBackendsInLB(ctx context.Context, backends []lb.ItemBackend) types.List { + tflog.Info(ctx, "Start flattenBackendsInLB") + tempSlice := make([]types.Object, 0, len(backends)) + for _, backend := range backends { + b := models.ItemBackendModel{ + Algorithm: types.StringValue(backend.Algorithm), + GUID: types.StringValue(backend.GUID), + Name: types.StringValue(backend.Name), + ServerDefaultSettings: flattenServersSettings(ctx, backend.ServerDefaultSettings), + Servers: flattenServersInLB(ctx, backend.Servers), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemBackend, b) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenBackendsInLB struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemBackend}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenBackendsInLB", diags)) + } + + tflog.Info(ctx, "End flattenBackendsInLB") + return res +} + +func flattenFrontendsInLB(ctx context.Context, frontends []lb.ItemFrontend) types.List { + tflog.Info(ctx, "Start flattenFrontendsInLB") + tempSlice := make([]types.Object, 0, len(frontends)) + for _, frontend := range frontends { + b := models.ItemFrontendModel{ + Backend: types.StringValue(frontend.Backend), + Bindings: flattenBindingsInLB(ctx, frontend.Bindings), + GUID: types.StringValue(frontend.GUID), + Name: types.StringValue(frontend.Name), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemFrontend, b) + if diags 
!= nil { + tflog.Error(ctx, fmt.Sprint("Error flattenFrontendsInLB struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemFrontend}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenFrontendsInLB", diags)) + } + + tflog.Info(ctx, "End flattenFrontendsInLB") + return res +} + +func flattenNodeInLB(ctx context.Context, node lb.Node) types.Object { + tflog.Info(ctx, "Start flattenNodeInLB") + n := models.RecordNodeModel{ + BackendIP: types.StringValue(node.BackendIP), + ComputeID: types.Int64Value(int64(node.ComputeID)), + FrontendIP: types.StringValue(node.FrontendIP), + GUID: types.StringValue(node.GUID), + MGMTIP: types.StringValue(node.MGMTIP), + NetworkID: types.Int64Value(int64(node.NetworkID)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemNode, n) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenNodeInLB struct to obj", diags)) + } + tflog.Info(ctx, "End flattenNodeInLB") + return obj +} + +func flattenServersSettings(ctx context.Context, settings lb.ServerSettings) types.Object { + tflog.Info(ctx, "Start flattenServersSettings") + s := models.RecordServerSettingsModel{ + Inter: types.Int64Value(int64(settings.Inter)), + GUID: types.StringValue(settings.GUID), + DownInter: types.Int64Value(int64(settings.DownInter)), + Rise: types.Int64Value(int64(settings.Rise)), + Fall: types.Int64Value(int64(settings.Fall)), + SlowStart: types.Int64Value(int64(settings.SlowStart)), + MaxConn: types.Int64Value(int64(settings.MaxConn)), + MaxQueue: types.Int64Value(int64(settings.MaxQueue)), + Weight: types.Int64Value(int64(settings.Weight)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemServerSettings, s) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenServersSettings struct to obj", diags)) + } + tflog.Info(ctx, "End flattenServersSettings") + return obj +} + +func flattenServersInLB(ctx 
context.Context, servers []lb.ItemServer) types.List { + tflog.Info(ctx, "Start flattenServersInLBBackend") + tempSlice := make([]types.Object, 0, len(servers)) + for _, server := range servers { + s := models.RecordServerModel{ + Address: types.StringValue(server.Address), + Check: types.StringValue(server.Check), + GUID: types.StringValue(server.GUID), + Name: types.StringValue(server.Name), + Port: types.Int64Value(int64(server.Port)), + ServerSettings: flattenServersSettings(ctx, server.ServerSettings), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemServers, s) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenServersInLBBackend struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemServers}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenServersInLBBackend", diags)) + } + + tflog.Info(ctx, "End flattenServersInLBBackend") + return res +} + +func flattenBindingsInLB(ctx context.Context, bindings []lb.ItemBinding) types.List { + tflog.Info(ctx, "Start flattenBindingsInLBFrontend") + tempSlice := make([]types.Object, 0, len(bindings)) + for _, binding := range bindings { + s := models.ItemBindingModel{ + Address: types.StringValue(binding.Address), + GUID: types.StringValue(binding.GUID), + Name: types.StringValue(binding.Name), + Port: types.Int64Value(int64(binding.Port)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemBindings, s) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenBindingsInLBFrontend struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemBindings}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenBindingsInLBFrontend", diags)) + } + + tflog.Info(ctx, "End flattenBindingsInLBFrontend") + return res +} diff --git 
a/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list.go b/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list.go new file mode 100644 index 0000000..f4bf653 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list.go @@ -0,0 +1,93 @@ +package flattens + +import ( + "context" + "encoding/json" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBListDataSource(ctx context.Context, state *models.DataSourceLBList, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBListDataSource") + + diags := diag.Diagnostics{} + + lbList, err := utilities.LBListDataSourceCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about list lb", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.LBListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceLBList{ + ByID: state.ByID, + Name: state.Name, + AccountID: state.AccountID, + RgID: state.RgID, + TechStatus: state.TechStatus, + Status: state.Status, + FrontIP: state.FrontIP, + BackIP: state.BackIP, + IncludeDeleted: state.IncludeDeleted, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsLBListModel, 0, len(lbList.Data)) + for _, lbItem := range lbList.Data { + acl, _ := json.Marshal(lbItem.ACL) + + item := models.ItemsLBListModel{ + HAMode: types.BoolValue(lbItem.HAMode), + ACL: types.StringValue(string(acl)), + BackendHAIP: 
types.StringValue(lbItem.BackendHAIP), + Backends: flattenBackendsInLB(ctx, lbItem.Backends), + CreatedBy: types.StringValue(lbItem.CreatedBy), + CreatedTime: types.Int64Value(int64(lbItem.CreatedTime)), + DeletedBy: types.StringValue(lbItem.DeletedBy), + DeletedTime: types.Int64Value(int64(lbItem.DeletedTime)), + Description: types.StringValue(lbItem.Description), + DPAPIUser: types.StringValue(lbItem.DPAPIUser), + DPAPIPassword: types.StringValue(lbItem.DPAPIPassword), + ExtNetID: types.Int64Value(int64(lbItem.ExtNetID)), + FrontendHAIP: types.StringValue(lbItem.FrontendHAIP), + Frontends: flattenFrontendsInLB(ctx, lbItem.Frontends), + GID: types.Int64Value(int64(lbItem.GID)), + GUID: types.Int64Value(int64(lbItem.GUID)), + LBID: types.Int64Value(int64(lbItem.ID)), + Milestones: types.Int64Value(int64(lbItem.Milestones)), + Name: types.StringValue(lbItem.Name), + PrimaryNode: flattenNodeInLB(ctx, lbItem.PrimaryNode), + RGID: types.Int64Value(int64(lbItem.RGID)), + RGName: types.StringValue(lbItem.RGName), + SecondaryNode: flattenNodeInLB(ctx, lbItem.SecondaryNode), + Status: types.StringValue(lbItem.Status), + TechStatus: types.StringValue(lbItem.TechStatus), + UpdatedBy: types.StringValue(lbItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)), + VINSID: types.Int64Value(int64(lbItem.VINSID)), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(lbList.EntryCount)) + + tflog.Info(ctx, "flattens.LBListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.LBListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list_deleted.go new file mode 100644 index 0000000..31a6677 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_data_source_lb_list_deleted.go @@ -0,0 +1,91 @@ +package flattens + +import ( + "context" + "encoding/json" + 
+ "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBListDeletedDataSource(ctx context.Context, state *models.DataSourceLBListDeleted, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBListDeletedDataSource") + + diags := diag.Diagnostics{} + + lbList, err := utilities.LBListDeletedDataSourceCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about list deleted", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.LBListDeletedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceLBListDeleted{ + ByID: state.ByID, + Name: state.Name, + AccountID: state.AccountID, + RgID: state.RgID, + TechStatus: state.TechStatus, + FrontIP: state.FrontIP, + BackIP: state.BackIP, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsLBListDeletedModel, 0, len(lbList.Data)) + for _, lbItem := range lbList.Data { + acl, _ := json.Marshal(lbItem.ACL) + + item := models.ItemsLBListDeletedModel{ + HAMode: types.BoolValue(lbItem.HAMode), + ACL: types.StringValue(string(acl)), + BackendHAIP: types.StringValue(lbItem.BackendHAIP), + Backends: flattenBackendsInLB(ctx, lbItem.Backends), + CreatedBy: types.StringValue(lbItem.CreatedBy), + CreatedTime: types.Int64Value(int64(lbItem.CreatedTime)), + DeletedBy: types.StringValue(lbItem.DeletedBy), + DeletedTime: types.Int64Value(int64(lbItem.DeletedTime)), + Description: types.StringValue(lbItem.Description), + DPAPIUser: 
types.StringValue(lbItem.DPAPIUser), + DPAPIPassword: types.StringValue(lbItem.DPAPIPassword), + ExtNetID: types.Int64Value(int64(lbItem.ExtNetID)), + FrontendHAIP: types.StringValue(lbItem.FrontendHAIP), + Frontends: flattenFrontendsInLB(ctx, lbItem.Frontends), + GID: types.Int64Value(int64(lbItem.GID)), + GUID: types.Int64Value(int64(lbItem.GUID)), + LBID: types.Int64Value(int64(lbItem.ID)), + Milestones: types.Int64Value(int64(lbItem.Milestones)), + Name: types.StringValue(lbItem.Name), + PrimaryNode: flattenNodeInLB(ctx, lbItem.PrimaryNode), + RGID: types.Int64Value(int64(lbItem.RGID)), + RGName: types.StringValue(lbItem.RGName), + SecondaryNode: flattenNodeInLB(ctx, lbItem.SecondaryNode), + Status: types.StringValue(lbItem.Status), + TechStatus: types.StringValue(lbItem.TechStatus), + UpdatedBy: types.StringValue(lbItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)), + VINSID: types.Int64Value(int64(lbItem.VINSID)), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(lbList.EntryCount)) + + tflog.Info(ctx, "flattens.LBListDeletedDataSource: after flatten") + + tflog.Info(ctx, "End flattens.LBListDeletedDataSource") + return nil +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_resource_lb.go b/internal/service/cloudbroker/lb/flattens/flatten_resource_lb.go new file mode 100644 index 0000000..c360b18 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_resource_lb.go @@ -0,0 +1,72 @@ +package flattens + +import ( + "context" + "encoding/json" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBResource(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBResource") + + diags := diag.Diagnostics{} + + recordItemLB, diags := utilities.LBResourceCheckPresence(ctx, plan, c) + if diags.HasError() { + return diags + } + + acl, _ := json.Marshal(recordItemLB.ACL) + + *plan = models.ResourceLBModel{ + RGID: plan.RGID, + Name: plan.Name, + ExtNetID: plan.ExtNetID, + VINSID: plan.VINSID, + Start: plan.Start, + ID: plan.ID, + + HAMode: types.BoolValue(recordItemLB.HAMode), + Safe: plan.Safe, + Timeouts: plan.Timeouts, + SysctlParams: plan.SysctlParams, + Permanently: plan.Permanently, + Restart: plan.Restart, + Enable: plan.Enable, + ConfigReset: plan.ConfigReset, + + ACL: types.StringValue(string(acl)), + BackendHAIP: types.StringValue(recordItemLB.BackendHAIP), + Backends: flattenBackendsInLB(ctx, recordItemLB.Backends), + CKey: types.StringValue(recordItemLB.CKey), + Description: types.StringValue(recordItemLB.Description), + DPAPIUser: types.StringValue(recordItemLB.DPAPIUser), + DPAPIPassword: types.StringValue(recordItemLB.DPAPIPassword), + FrontendHAIP: types.StringValue(recordItemLB.FrontendHAIP), + Frontends: flattenFrontendsInLB(ctx, recordItemLB.Frontends), + GID: types.Int64Value(int64(recordItemLB.GID)), + GUID: types.Int64Value(int64(recordItemLB.GUID)), + ImageID: types.Int64Value(int64(recordItemLB.ImageID)), + LBID: types.Int64Value(int64(recordItemLB.ID)), + Meta: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordItemLB.Meta), + Milestones: types.Int64Value(int64(recordItemLB.Milestones)), + ManagerId: types.Int64Value(int64(recordItemLB.ManagerId)), + ManagerType: types.StringValue(recordItemLB.ManagerType), + PartK8s: types.BoolValue(recordItemLB.PartK8s), + PrimaryNode: flattenNodeInLB(ctx, recordItemLB.PrimaryNode), + SecondaryNode: 
flattenNodeInLB(ctx, recordItemLB.SecondaryNode), + Status: types.StringValue(recordItemLB.Status), + TechStatus: types.StringValue(recordItemLB.TechStatus), + UserManaged: types.BoolValue(recordItemLB.UserManaged), + } + + tflog.Info(ctx, "End flattens.LBResource", map[string]any{"id": plan.ID.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend.go b/internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend.go new file mode 100644 index 0000000..449fd56 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend.go @@ -0,0 +1,44 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBBackendResource(ctx context.Context, plan *models.ResourceLBBackendModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBBackendResource") + + diags := diag.Diagnostics{} + + recordItemBackend, diags := utilities.LBBackendResourceCheckPresence(ctx, plan, c) + if diags.HasError() { + return diags + } + + *plan = models.ResourceLBBackendModel{ + LBID: plan.LBID, + Name: plan.Name, + ID: plan.ID, + Timeouts: plan.Timeouts, + GUID: types.StringValue(recordItemBackend.GUID), + Algorithm: types.StringValue(recordItemBackend.Algorithm), + DownInter: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.DownInter)), + Fall: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Fall)), + Inter: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Inter)), + MaxConn: 
types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.MaxConn)), + MaxQueue: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.MaxQueue)), + Rise: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Rise)), + SlowStart: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.SlowStart)), + Weight: types.Int64Value(int64(recordItemBackend.ServerDefaultSettings.Weight)), + Servers: flattenServersInLB(ctx, recordItemBackend.Servers), + } + + tflog.Info(ctx, "End flattens.LBBackendResource", map[string]any{"name": plan.Name.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend_server.go b/internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend_server.go new file mode 100644 index 0000000..2485f65 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_resource_lb_backend_server.go @@ -0,0 +1,45 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBBackendServerResource(ctx context.Context, plan *models.ResourceLBBackendServerModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBBackendServerResource") + + diags := diag.Diagnostics{} + + recordItemServer, diags := utilities.LBBackendServerResourceCheckPresence(ctx, plan, c) + if diags.HasError() { + return diags + } + + *plan = models.ResourceLBBackendServerModel{ + LBID: plan.LBID, + Backend: plan.Backend, + Name: plan.Name, + Address: plan.Address, + Port: plan.Port, + ID: plan.ID, + Timeouts: plan.Timeouts, + Check: 
types.StringValue(recordItemServer.Check), + Inter: types.Int64Value(int64(recordItemServer.ServerSettings.Inter)), + DownInter: types.Int64Value(int64(recordItemServer.ServerSettings.DownInter)), + Rise: types.Int64Value(int64(recordItemServer.ServerSettings.Rise)), + Fall: types.Int64Value(int64(recordItemServer.ServerSettings.Fall)), + SlowStart: types.Int64Value(int64(recordItemServer.ServerSettings.SlowStart)), + MaxConn: types.Int64Value(int64(recordItemServer.ServerSettings.MaxConn)), + MaxQueue: types.Int64Value(int64(recordItemServer.ServerSettings.MaxQueue)), + Weight: types.Int64Value(int64(recordItemServer.ServerSettings.Weight)), + } + + tflog.Info(ctx, "End flattens.LBBackendServerResource", map[string]any{"name": plan.Name.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend.go b/internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend.go new file mode 100644 index 0000000..7b99717 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend.go @@ -0,0 +1,36 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBFrontendResource(ctx context.Context, plan *models.ResourceLBFrontendModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBFrontendResource") + + diags := diag.Diagnostics{} + + recordItemFrontend, diags := utilities.LBFrontendResourceCheckPresence(ctx, plan, c) + if diags.HasError() { + return diags + } + + *plan = models.ResourceLBFrontendModel{ + LBID: plan.LBID, + Name: 
plan.Name, + Backend: plan.Backend, + ID: plan.ID, + Timeouts: plan.Timeouts, + GUID: types.StringValue(recordItemFrontend.GUID), + Bindings: flattenBindingsInLB(ctx, recordItemFrontend.Bindings), + } + + tflog.Info(ctx, "End flattens.LBFrontendResource", map[string]any{"name": plan.Name.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend_bind.go b/internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend_bind.go new file mode 100644 index 0000000..7cb1178 --- /dev/null +++ b/internal/service/cloudbroker/lb/flattens/flatten_resoursce_lb_frontend_bind.go @@ -0,0 +1,37 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +func LBFrontendBindResource(ctx context.Context, plan *models.ResourceLBFrontendBindModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.LBFrontendBindResource") + + diags := diag.Diagnostics{} + + recordItemFrontendBind, diags := utilities.LBFrontendBindResourceCheckPresence(ctx, plan, c) + if diags.HasError() { + return diags + } + + *plan = models.ResourceLBFrontendBindModel{ + Address: plan.Address, + Frontend: plan.Frontend, + LBID: plan.LBID, + Name: plan.Name, + ID: plan.ID, + Timeouts: plan.Timeouts, + GUID: types.StringValue(recordItemFrontendBind.GUID), + Port: plan.Port, + } + + tflog.Info(ctx, "End flattens.LBFrontendBindResource", map[string]any{"name": plan.Name.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/lb/input_check.go b/internal/service/cloudbroker/lb/input_check.go new file 
mode 100644 index 0000000..e4bd7a6 --- /dev/null +++ b/internal/service/cloudbroker/lb/input_check.go @@ -0,0 +1,125 @@ +package lb + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +// resourceLBInputChecks checks if rg_id, extnet_id and vins_id are valid. +func resourceLBInputChecks(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + extNetId := uint64(plan.ExtNetID.ValueInt64()) + vinsId := uint64(plan.VINSID.ValueInt64()) + + if extNetId == 0 && vinsId == 0 { + diags.AddError(fmt.Sprintf("Unable to validate vins_id and extnet_id"), "vins_id and ext_net_id cannot be both in the value 0") + return diags + } + + rgID := uint64(plan.RGID.ValueInt64()) + tflog.Info(ctx, "resourceLBInputChecks: exist resource rg", map[string]any{"rg_id": rgID}) + err := ic.ExistRG(ctx, rgID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about rg with ID %v", rgID), err.Error()) + } + + tflog.Info(ctx, "resourceLBInputChecks: exist resource extNet", map[string]any{" extnet_id": extNetId}) + err = ic.ExistExtNetInLb(ctx, extNetId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about extNet with ID %v", extNetId), err.Error()) + } + + tflog.Info(ctx, "resourceLBInputChecks: exist resource VINS", map[string]any{" vins_id": vinsId}) + err = ic.ExistVinsInLb(ctx, vinsId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about VINS with ID %v", vinsId), err.Error()) + } + + return diags +} + +// resourceLBFrontendBindInputChecks checks if lb_id and backend_name are valid. 
+func resourceLBFrontendBindInputChecks(ctx context.Context, plan *models.ResourceLBFrontendBindModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + lbID := uint64(plan.LBID.ValueInt64()) + fName := plan.Frontend.ValueString() + tflog.Info(ctx, "resourceLBFrontendBindInputChecks: exist resource lb", map[string]any{"lb_id": lbID}) + err := ic.ExistLB(ctx, lbID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error()) + return diags + } + tflog.Info(ctx, "resourceLBFrontendInputChecks: exist resource lbFrontend", map[string]any{"name": fName}) + err = ic.ExistLBFrontend(ctx, lbID, fName, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about frontend with name %v", fName), err.Error()) + return diags + } + + return diags +} + +// resourceLBFrontendInputChecks checks if lb_id and backend_name are valid. +func resourceLBFrontendInputChecks(ctx context.Context, plan *models.ResourceLBFrontendModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + lbID := uint64(plan.LBID.ValueInt64()) + bName := plan.Backend.ValueString() + tflog.Info(ctx, "resourceLBFrontendInputChecks: exist resource lb", map[string]any{"lb_id": lbID}) + err := ic.ExistLB(ctx, lbID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error()) + return diags + } + tflog.Info(ctx, "resourceLBFrontendInputChecks: exist resource lbBackend", map[string]any{"name": bName}) + err = ic.ExistLBBackend(ctx, lbID, bName, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about backend with name %v", bName), err.Error()) + return diags + } + + return diags +} + +// resourceLBBackendServerInputChecks checks if lb_id and backend_name are valid. 
+func resourceLBBackendServerInputChecks(ctx context.Context, plan *models.ResourceLBBackendServerModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + lbID := uint64(plan.LBID.ValueInt64()) + bName := plan.Backend.ValueString() + tflog.Info(ctx, "resourceLBBackendServerInputChecks: exist resource lb", map[string]any{"lb_id": lbID}) + err := ic.ExistLB(ctx, lbID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error()) + } else { + tflog.Info(ctx, "resourceLBBackendServerInputChecks: exist resource lbBackend", map[string]any{"name": bName}) + err = ic.ExistLBBackend(ctx, lbID, bName, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about backend with name %v", bName), err.Error()) + } + } + + return diags +} + +// resourceLBBackendInputChecks checks if lb_id are valid. +func resourceLBBackendInputChecks(ctx context.Context, plan *models.ResourceLBBackendModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + lbID := uint64(plan.LBID.ValueInt64()) + tflog.Info(ctx, "resourceLBBackendInputChecks: exist resource lb", map[string]any{"lb_id": lbID}) + err := ic.ExistLB(ctx, lbID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbID), err.Error()) + } + + return diags +} diff --git a/internal/service/cloudbroker/lb/models/model_data_source_lb.go b/internal/service/cloudbroker/lb/models/model_data_source_lb.go new file mode 100644 index 0000000..3fc7a0e --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_data_source_lb.go @@ -0,0 +1,147 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceLB struct { + // required fields + Timeouts timeouts.Value `tfsdk:"timeouts"` + LBID types.Int64 `tfsdk:"lb_id"` + 
+ // response fields + HAMode types.Bool `tfsdk:"ha_mode"` + ACL types.String `tfsdk:"acl"` + BackendHAIP types.String `tfsdk:"backend_haip"` + Backends types.List `tfsdk:"backends"` + CKey types.String `tfsdk:"ckey"` + Description types.String `tfsdk:"desc"` + DPAPIUser types.String `tfsdk:"dp_api_user"` + DPAPIPassword types.String `tfsdk:"dp_api_password"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + FrontendHAIP types.String `tfsdk:"frontend_haip"` + Frontends types.List `tfsdk:"frontends"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"id"` + ImageID types.Int64 `tfsdk:"image_id"` + ManagerId types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Meta types.List `tfsdk:"meta"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + PartK8s types.Bool `tfsdk:"part_k8s"` + PrimaryNode types.Object `tfsdk:"primary_node"` + RGID types.Int64 `tfsdk:"rg_id"` + SecondaryNode types.Object `tfsdk:"secondary_node"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UserManaged types.Bool `tfsdk:"user_managed"` + VINSID types.Int64 `tfsdk:"vins_id"` +} + +type ItemBackendModel struct { + Algorithm types.String `tfsdk:"algorithm"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + ServerDefaultSettings types.Object `tfsdk:"server_default_settings"` + Servers types.List `tfsdk:"servers"` +} + +type ItemFrontendModel struct { + Backend types.String `tfsdk:"backend"` + Bindings types.List `tfsdk:"bindings"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` +} + +type RecordNodeModel struct { + BackendIP types.String `tfsdk:"backend_ip"` + ComputeID types.Int64 `tfsdk:"compute_id"` + FrontendIP types.String `tfsdk:"frontend_ip"` + GUID types.String `tfsdk:"guid"` + MGMTIP types.String `tfsdk:"mgmt_ip"` + NetworkID types.Int64 `tfsdk:"network_id"` +} + +type RecordServerSettingsModel struct { + Inter 
types.Int64 `tfsdk:"inter"` + GUID types.String `tfsdk:"guid"` + DownInter types.Int64 `tfsdk:"downinter"` + Rise types.Int64 `tfsdk:"rise"` + Fall types.Int64 `tfsdk:"fall"` + SlowStart types.Int64 `tfsdk:"slowstart"` + MaxConn types.Int64 `tfsdk:"maxconn"` + MaxQueue types.Int64 `tfsdk:"maxqueue"` + Weight types.Int64 `tfsdk:"weight"` +} + +type RecordServerModel struct { + Address types.String `tfsdk:"address"` + Check types.String `tfsdk:"check"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + Port types.Int64 `tfsdk:"port"` + ServerSettings types.Object `tfsdk:"server_settings"` +} + +type ItemBindingModel struct { + Address types.String `tfsdk:"address"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + Port types.Int64 `tfsdk:"port"` +} + +var ItemNode = map[string]attr.Type{ + "backend_ip": types.StringType, + "compute_id": types.Int64Type, + "frontend_ip": types.StringType, + "guid": types.StringType, + "mgmt_ip": types.StringType, + "network_id": types.Int64Type, +} + +var ItemBackend = map[string]attr.Type{ + "algorithm": types.StringType, + "guid": types.StringType, + "name": types.StringType, + "server_default_settings": types.ObjectType{AttrTypes: ItemServerSettings}, + "servers": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemServers}}, +} + +var ItemFrontend = map[string]attr.Type{ + "backend": types.StringType, + "bindings": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemBindings}}, + "guid": types.StringType, + "name": types.StringType, +} + +var ItemServers = map[string]attr.Type{ + "address": types.StringType, + "check": types.StringType, + "guid": types.StringType, + "name": types.StringType, + "port": types.Int64Type, + "server_settings": types.ObjectType{AttrTypes: ItemServerSettings}, +} + +var ItemServerSettings = map[string]attr.Type{ + "inter": types.Int64Type, + "guid": types.StringType, + "downinter": types.Int64Type, + "rise": types.Int64Type, + "fall": types.Int64Type, 
+ "slowstart": types.Int64Type, + "maxconn": types.Int64Type, + "maxqueue": types.Int64Type, + "weight": types.Int64Type, +} + +var ItemBindings = map[string]attr.Type{ + "address": types.StringType, + "name": types.StringType, + "port": types.Int64Type, + "guid": types.StringType, +} diff --git a/internal/service/cloudbroker/lb/models/model_data_source_lb_list.go b/internal/service/cloudbroker/lb/models/model_data_source_lb_list.go new file mode 100644 index 0000000..8f8a954 --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_data_source_lb_list.go @@ -0,0 +1,64 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceLBList struct { + + // optional fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountID types.Int64 `tfsdk:"account_id"` + RgID types.Int64 `tfsdk:"rg_id"` + TechStatus types.String `tfsdk:"tech_status"` + Status types.String `tfsdk:"status"` + FrontIP types.String `tfsdk:"front_ip"` + BackIP types.String `tfsdk:"back_ip"` + IncludeDeleted types.Bool `tfsdk:"include_deleted"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsLBListModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsLBListModel struct { + HAMode types.Bool `tfsdk:"ha_mode"` + ACL types.String `tfsdk:"acl"` + BackendHAIP types.String `tfsdk:"backend_haip"` + Backends types.List `tfsdk:"backends"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DPAPIUser types.String `tfsdk:"dp_api_user"` + DPAPIPassword types.String 
`tfsdk:"dp_api_password"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + FrontendHAIP types.String `tfsdk:"frontend_haip"` + Frontends types.List `tfsdk:"frontends"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + LBID types.Int64 `tfsdk:"lb_id"` + ManagerId types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + PartK8s types.Bool `tfsdk:"part_k8s"` + PrimaryNode types.Object `tfsdk:"primary_node"` + RGName types.String `tfsdk:"rg_name"` + RGID types.Int64 `tfsdk:"rg_id"` + SecondaryNode types.Object `tfsdk:"secondary_node"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VINSID types.Int64 `tfsdk:"vins_id"` +} diff --git a/internal/service/cloudbroker/lb/models/model_data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/models/model_data_source_lb_list_deleted.go new file mode 100644 index 0000000..0516b0d --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_data_source_lb_list_deleted.go @@ -0,0 +1,62 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceLBListDeleted struct { + + // optional fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountID types.Int64 `tfsdk:"account_id"` + RgID types.Int64 `tfsdk:"rg_id"` + TechStatus types.String `tfsdk:"tech_status"` + FrontIP types.String `tfsdk:"front_ip"` + BackIP types.String `tfsdk:"back_ip"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items 
[]ItemsLBListDeletedModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsLBListDeletedModel struct { + HAMode types.Bool `tfsdk:"ha_mode"` + ACL types.String `tfsdk:"acl"` + BackendHAIP types.String `tfsdk:"backend_haip"` + Backends types.List `tfsdk:"backends"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DPAPIUser types.String `tfsdk:"dp_api_user"` + DPAPIPassword types.String `tfsdk:"dp_api_password"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + FrontendHAIP types.String `tfsdk:"frontend_haip"` + Frontends types.List `tfsdk:"frontends"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + LBID types.Int64 `tfsdk:"lb_id"` + ManagerId types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + PartK8s types.Bool `tfsdk:"part_k8s"` + PrimaryNode types.Object `tfsdk:"primary_node"` + RGName types.String `tfsdk:"rg_name"` + RGID types.Int64 `tfsdk:"rg_id"` + SecondaryNode types.Object `tfsdk:"secondary_node"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VINSID types.Int64 `tfsdk:"vins_id"` +} diff --git a/internal/service/cloudbroker/lb/models/model_resource_lb.go b/internal/service/cloudbroker/lb/models/model_resource_lb.go new file mode 100644 index 0000000..39268ad --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_resource_lb.go @@ -0,0 +1,52 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type 
ResourceLBModel struct { + // required fields + RGID types.Int64 `tfsdk:"rg_id"` + Name types.String `tfsdk:"name"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + VINSID types.Int64 `tfsdk:"vins_id"` + Start types.Bool `tfsdk:"start"` + + // optional fields + HAMode types.Bool `tfsdk:"ha_mode"` + ACL types.String `tfsdk:"acl"` + Description types.String `tfsdk:"desc"` + Enable types.Bool `tfsdk:"enable"` + Restart types.Bool `tfsdk:"restart"` + ConfigReset types.Bool `tfsdk:"config_reset"` + Permanently types.Bool `tfsdk:"permanently"` + Restore types.Bool `tfsdk:"restore"` + Safe types.Bool `tfsdk:"safe"` + SysctlParams types.List `tfsdk:"sysctl_params"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + BackendHAIP types.String `tfsdk:"backend_haip"` + Backends types.List `tfsdk:"backends"` + CKey types.String `tfsdk:"ckey"` + DPAPIUser types.String `tfsdk:"dp_api_user"` + DPAPIPassword types.String `tfsdk:"dp_api_password"` + FrontendHAIP types.String `tfsdk:"frontend_haip"` + Frontends types.List `tfsdk:"frontends"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.String `tfsdk:"id"` + ImageID types.Int64 `tfsdk:"image_id"` + LBID types.Int64 `tfsdk:"lb_id"` + ManagerId types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Meta types.List `tfsdk:"meta"` + Milestones types.Int64 `tfsdk:"milestones"` + PartK8s types.Bool `tfsdk:"part_k8s"` + PrimaryNode types.Object `tfsdk:"primary_node"` + SecondaryNode types.Object `tfsdk:"secondary_node"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UserManaged types.Bool `tfsdk:"user_managed"` +} diff --git a/internal/service/cloudbroker/lb/models/model_resource_lb_backend.go b/internal/service/cloudbroker/lb/models/model_resource_lb_backend.go new file mode 100644 index 0000000..97892d5 --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_resource_lb_backend.go @@ -0,0 +1,24 @@ +package models + 
+import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceLBBackendModel struct { + Algorithm types.String `tfsdk:"algorithm"` + LBID types.Int64 `tfsdk:"lb_id"` + Name types.String `tfsdk:"name"` + ID types.String `tfsdk:"id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + GUID types.String `tfsdk:"guid"` + Inter types.Int64 `tfsdk:"inter"` + DownInter types.Int64 `tfsdk:"downinter"` + Rise types.Int64 `tfsdk:"rise"` + Fall types.Int64 `tfsdk:"fall"` + SlowStart types.Int64 `tfsdk:"slowstart"` + MaxConn types.Int64 `tfsdk:"maxconn"` + MaxQueue types.Int64 `tfsdk:"maxqueue"` + Weight types.Int64 `tfsdk:"weight"` + Servers types.List `tfsdk:"servers"` +} diff --git a/internal/service/cloudbroker/lb/models/model_resource_lb_backend_server.go b/internal/service/cloudbroker/lb/models/model_resource_lb_backend_server.go new file mode 100644 index 0000000..fd7e82c --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_resource_lb_backend_server.go @@ -0,0 +1,26 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceLBBackendServerModel struct { + LBID types.Int64 `tfsdk:"lb_id"` + Backend types.String `tfsdk:"backend_name"` + Name types.String `tfsdk:"name"` + Address types.String `tfsdk:"address"` + Port types.Int64 `tfsdk:"port"` + Check types.String `tfsdk:"check"` + Inter types.Int64 `tfsdk:"inter"` + GUID types.String `tfsdk:"guid"` + DownInter types.Int64 `tfsdk:"downinter"` + Rise types.Int64 `tfsdk:"rise"` + Fall types.Int64 `tfsdk:"fall"` + SlowStart types.Int64 `tfsdk:"slowstart"` + MaxConn types.Int64 `tfsdk:"maxconn"` + MaxQueue types.Int64 `tfsdk:"maxqueue"` + Weight types.Int64 `tfsdk:"weight"` + ID types.String `tfsdk:"id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} diff --git 
a/internal/service/cloudbroker/lb/models/model_resource_lb_frontend.go b/internal/service/cloudbroker/lb/models/model_resource_lb_frontend.go new file mode 100644 index 0000000..8c3d474 --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_resource_lb_frontend.go @@ -0,0 +1,16 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceLBFrontendModel struct { + Backend types.String `tfsdk:"backend_name"` + Bindings types.List `tfsdk:"bindings"` + LBID types.Int64 `tfsdk:"lb_id"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + ID types.String `tfsdk:"id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} diff --git a/internal/service/cloudbroker/lb/models/model_resource_lb_frontend_bind.go b/internal/service/cloudbroker/lb/models/model_resource_lb_frontend_bind.go new file mode 100644 index 0000000..f5da79f --- /dev/null +++ b/internal/service/cloudbroker/lb/models/model_resource_lb_frontend_bind.go @@ -0,0 +1,17 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceLBFrontendBindModel struct { + LBID types.Int64 `tfsdk:"lb_id"` + Frontend types.String `tfsdk:"frontend_name"` + Address types.String `tfsdk:"address"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + Port types.Int64 `tfsdk:"port"` + ID types.String `tfsdk:"id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} diff --git a/internal/service/cloudbroker/lb/resource_lb.go b/internal/service/cloudbroker/lb/resource_lb.go new file mode 100644 index 0000000..9befe27 --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb.go @@ -0,0 +1,359 @@ +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +var ( + _ resource.Resource = &resourceLB{} + _ resource.ResourceWithImportState = &resourceLB{} +) + +// NewResourceLB is a helper function to simplify the provider implementation. +func NewResourceLB() resource.Resource { + return &resourceLB{} +} + +// resourceLB is the resource implementation. +type resourceLB struct { + client *decort.DecortClient +} + +func (r *resourceLB) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create lb + var plan models.ResourceLBModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLB: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceLB: start creating", map[string]any{"name": plan.Name.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLB: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceLB: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceLB: starting input checks", map[string]any{"name": plan.Name.ValueString()}) + resp.Diagnostics.Append(resourceLBInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLB: Error input checks") + return + } + tflog.Info(ctx, "Create resourceLB: input checks successful", map[string]any{"name": plan.Name.ValueString()}) + + // Make create request and get response for creation + lbId, diags := utilities.CreateResourceLB(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceLB: Error response for create resource LB") + return + } + plan.ID = types.StringValue(strconv.Itoa(int(lbId))) + + // additional settings after lb creation: in case of failures, warnings are added to resp.Diagnostics, + // because additional settings failure is not critical. If errors were added instead of warnings, terraform + // framework would mark resource as tainted and delete it, which would be unwanted behaviour. + + // enable or disable lb, warnings added to resp.Diagnostics in case of failure. 
+ if !plan.Enable.IsNull() { // Enable is optional + diags := utilities.LBEnableDisable(ctx, &plan, r.client) + for _, d := range diags { + if d.Severity() == diag.SeverityError { + resp.Diagnostics.AddWarning(d.Summary(), d.Detail()) + } + } + } + + tflog.Info(ctx, "Create resourceLB: resource creation is completed", map[string]any{"id": lbId}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLB) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceLBModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLB: Error get state") + return + } + tflog.Info(ctx, "Read resourceLB: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLB: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceLB: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Read status lb and if it is necessary to restore it + resp.Diagnostics.Append(utilities.LBReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error read status or restore") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.LBResource(ctx, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLB: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLB: Error set state") + return + } + tflog.Info(ctx, "End read resourceLB") +} + +func (r *resourceLB) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceLBModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceLB: got plan successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Retrieve values from state + var state models.ResourceLBModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceLB: got state successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error set timeout") + return + } + tflog.Info(ctx, "Update resourceLB: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking for values in the platform + tflog.Info(ctx, "Update resourceLB: starting input checks", map[string]any{"ID": plan.ID.ValueString()}) + resp.Diagnostics.Append(resourceLBInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error input checks") + return + } + tflog.Info(ctx, "Update resourceLB: input checks successful", map[string]any{"ID": plan.ID.ValueString()}) + + // Read status lb and if it is necessary to restore it + resp.Diagnostics.Append(utilities.LBReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error read status or restore") + return + } + + // Update ha mode lb + if !plan.HAMode.Equal(state.HAMode) && plan.HAMode.ValueBool() { + resp.Diagnostics.Append(utilities.LBUpdateHaMode(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error update ha mode") + return + } + } + + // Update sysctl LB params + if !plan.SysctlParams.Equal(state.SysctlParams) { + resp.Diagnostics.Append(utilities.LBUpdateSysctlParams(ctx, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error update sysctl LB params") + return + } + } + + plan.ID = types.StringValue(strconv.Itoa(int(state.LBID.ValueInt64()))) + + // Enable/disable LB + if !plan.Enable.Equal(state.Enable) { + resp.Diagnostics.Append(utilities.LBEnableDisable(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error enable/disable LB") + return + } + } + + // Start/stop LB + if !plan.Start.Equal(state.Start) { + resp.Diagnostics.Append(utilities.LBStartStop(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error start/stop LB") + return + } + } + + // Update description LB + if !plan.Description.IsUnknown() && !plan.Description.Equal(state.Description) { + resp.Diagnostics.Append(utilities.LBUpdateDescription(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error update LB description") + return + } + } + + // Restart LB + if !plan.Restart.Equal(state.Restart) && plan.Restart.ValueBool() { + resp.Diagnostics.Append(utilities.LBRestart(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error restart LB") + return + } + } + + // Config reset + if !plan.ConfigReset.Equal(state.ConfigReset) && plan.ConfigReset.ValueBool() { + resp.Diagnostics.Append(utilities.LBConfigReset(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLB: Error reset LB config") + return + } + } + + tflog.Info(ctx, "Update resourceLB: resource update is completed", map[string]any{"ID": plan.ID.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLB) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceLBModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLB: Error get state") + return + } + tflog.Info(ctx, "Delete resourceLB: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLB: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceLB: set timeouts successfully", map[string]any{ + "id": state.ID.ValueString(), + "deleteTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + var permanently bool + if state.Permanently.IsNull() { + permanently = true + } else { + permanently = state.Permanently.ValueBool() + } + + // Delete existing lb + delReq := lb.DeleteRequest{ + LBID: uint64(state.LBID.ValueInt64()), + Permanently: permanently, + } + + tflog.Info(ctx, "Delete resourceLB: calling cloudbroker().LB().Delete", map[string]any{ + "ID": state.ID.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().LB().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceLB: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resource lb ", map[string]any{"id": state.ID.ValueString()}) +} + +func (r *resourceLB) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceLB(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +func (r *resourceLB) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceLB) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceLB") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceLB successfully") +} + +func (r *resourceLB) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/lb/resource_lb_backend.go b/internal/service/cloudbroker/lb/resource_lb_backend.go new file mode 100644 index 0000000..4dbad17 --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_backend.go @@ -0,0 +1,267 @@ +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +var ( + _ resource.Resource = &resourceLBBackend{} + _ 
resource.ResourceWithImportState = &resourceLBBackend{} +) + +// NewResourceLBBackend is a helper function to simplify the provider implementation. +func NewResourceLBBackend() resource.Resource { + return &resourceLBBackend{} +} + +// resourceLBBackend is the resource implementation. +type resourceLBBackend struct { + client *decort.DecortClient +} + +func (r *resourceLBBackend) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create lb backend + var plan models.ResourceLBBackendModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBBackend: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceLBBackend: start creating", map[string]any{"name": plan.Name.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBBackend: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceLBBackend: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceLBBackend: starting input checks", map[string]any{"name": plan.Name.ValueString()}) + resp.Diagnostics.Append(resourceLBBackendInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBBackend: Error input checks") + return + } + tflog.Info(ctx, "Create resourceLBBackend: input checks successful", map[string]any{"name": plan.Name.ValueString()}) + + // Make create request and get response for creation + diags = utilities.CreateResourceLBBackend(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ tflog.Error(ctx, "Create resourceLBBackend: Error response for create resource LBBackend") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Name.ValueString()) + + tflog.Info(ctx, "Create resourceLBBackend: resource creation is completed", map[string]any{"name": plan.Name.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBBackendResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBBackend) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceLBBackendModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackend: Error get state") + return + } + tflog.Info(ctx, "Read resourceLBBackend: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackend: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceLBBackend: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.LBBackendResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackend: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackend: Error set state") + return + } + tflog.Info(ctx, "End read resourceLBBackend") +} + +func (r *resourceLBBackend) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceLBBackendModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackend: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceLBBackend: got plan successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Retrieve values from state + var state models.ResourceLBBackendModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackend: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceLBBackend: got state successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error set timeout") + return + } + tflog.Info(ctx, "Update resourceLBBackend: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking for values in the platform + tflog.Info(ctx, "Update resourceLBBackend: starting input checks", map[string]any{"ID": plan.ID.ValueString()}) + resp.Diagnostics.Append(resourceLBBackendInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackend: Error input checks") + return + } + tflog.Info(ctx, "Update resourceLBBackend: input checks successful", map[string]any{"ID": plan.ID.ValueString()}) + + // Check and update resource + resp.Diagnostics.Append(utilities.UpdateLBBackend(ctx, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackend: Error editing lb backend") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Name.ValueString()) + tflog.Info(ctx, "Update resourceLBBackend: resource update is completed", map[string]any{"ID": plan.ID.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBBackendResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBBackend) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceLBBackendModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBBackend: Error get state") + return + } + tflog.Info(ctx, "Delete resourceLBBackend: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBBackend: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceLBBackend: set timeouts successfully", map[string]any{ + "id": state.ID.ValueString(), + "deleteTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Delete existing lb backend + delReq := lb.BackendDeleteRequest{ + LBID: uint64(state.LBID.ValueInt64()), + BackendName: state.Name.ValueString(), + } + + tflog.Info(ctx, "Delete resourceLBBackend: calling cloudbroker().LB().BackendDelete", map[string]any{ + "ID": state.ID.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().LB().BackendDelete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceLBBackend: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resource lb backend", map[string]any{"id": state.ID.ValueString()}) +} + +func (r *resourceLBBackend) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceLBBackend(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +func (r *resourceLBBackend) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb_backend" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceLBBackend) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceLBBackend") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceLBBackend successfully") +} + +func (r *resourceLBBackend) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/lb/resource_lb_backend_server.go b/internal/service/cloudbroker/lb/resource_lb_backend_server.go new file mode 100644 index 0000000..1e9945e --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_backend_server.go @@ -0,0 +1,268 @@ +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +var ( + _ resource.Resource = 
&resourceLBBackendServer{} + _ resource.ResourceWithImportState = &resourceLBBackendServer{} +) + +// NewResourceLBBackendServer is a helper function to simplify the provider implementation. +func NewResourceLBBackendServer() resource.Resource { + return &resourceLBBackendServer{} +} + +// resourceLBBackendServer is the resource implementation. +type resourceLBBackendServer struct { + client *decort.DecortClient +} + +func (r *resourceLBBackendServer) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create lb backend server + var plan models.ResourceLBBackendServerModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBBackendServer: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceLBBackendServer: start creating", map[string]any{"name": plan.Name.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBBackendServer: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceLBBackendServer: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceLBBackendServer: starting input checks", map[string]any{"name": plan.Name.ValueString()}) + resp.Diagnostics.Append(resourceLBBackendServerInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBBackendServer: Error input checks") + return + } + tflog.Info(ctx, "Create resourceLBBackendServer: input checks successful", map[string]any{"name": plan.Name.ValueString()}) + + // Make create request and get response for creation + diags = utilities.CreateResourceLBBackendServer(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceLBBackendServer: Error response for create resource LBBackendServer") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Backend.ValueString() + "#" + plan.Name.ValueString()) + + tflog.Info(ctx, "Create resourceLBBackendServer: resource creation is completed", map[string]any{"name": plan.Name.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBBackendServerResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBBackendServer) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceLBBackendServerModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackendServer: Error get state") + return + } + tflog.Info(ctx, "Read resourceLBBackendServer: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackendServer: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceLBBackendServer: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.LBBackendServerResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackendServer: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBBackendServer: Error set state") + return + } + tflog.Info(ctx, "End read resourceLBBackendServer") +} + +func (r *resourceLBBackendServer) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceLBBackendServerModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackendServer: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceLBBackendServer: got plan successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Retrieve values from state + var state models.ResourceLBBackendServerModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackendServer: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceLBBackendServer: got state successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error set timeout") + return + } + tflog.Info(ctx, "Update resourceLBBackendServer: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking for values in the platform + tflog.Info(ctx, "Update resourceLBBackendServer: starting input checks", map[string]any{"ID": plan.ID.ValueString()}) + resp.Diagnostics.Append(resourceLBBackendServerInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackendServer: Error input checks") + return + } + tflog.Info(ctx, "Update resourceLBBackendServer: input checks successful", map[string]any{"ID": plan.ID.ValueString()}) + + // Check and update resource + resp.Diagnostics.Append(utilities.UpdateLBBackendServer(ctx, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBBackendServer: Error editing lb backend server") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Backend.ValueString() + "#" + plan.Name.ValueString()) + tflog.Info(ctx, "Update resourceLBBackendServer: resource update is completed", map[string]any{"ID": plan.ID.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBBackendServerResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBBackendServer) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceLBBackendServerModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBBackendServer: Error get state") + return + } + tflog.Info(ctx, "Delete resourceLBBackendServer: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBBackendServer: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceLBBackendServer: set timeouts successfully", map[string]any{ + "id": state.ID.ValueString(), + "deleteTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Delete existing lb backend server + delReq := lb.BackendServerDeleteRequest{ + LBID: uint64(state.LBID.ValueInt64()), + BackendName: state.Backend.ValueString(), + ServerName: state.Name.ValueString(), + } + + tflog.Info(ctx, "Delete resourceLBBackendServer: calling cloudbroker().LB().BackendServerDelete", map[string]any{ + "ID": state.ID.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().LB().BackendServerDelete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceLBBackendServer: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resource lb backend server", map[string]any{"id": state.ID.ValueString()}) +} + +func (r *resourceLBBackendServer) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceLBBackendServer(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +func (r *resourceLBBackendServer) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb_backend_server" +} + +// Configure adds 
the provider configured client to the resource. +func (r *resourceLBBackendServer) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceLBBackendServer") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceLBBackendServer successfully") +} + +func (r *resourceLBBackendServer) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/lb/resource_lb_frontend.go b/internal/service/cloudbroker/lb/resource_lb_frontend.go new file mode 100644 index 0000000..a982681 --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_frontend.go @@ -0,0 +1,208 @@ +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +var ( + _ resource.Resource = &resourceLBFrontend{} + _ resource.ResourceWithImportState = &resourceLBFrontend{} +) + +// NewResourceLBFrontend is a helper function to simplify the provider implementation. +func NewResourceLBFrontend() resource.Resource { + return &resourceLBFrontend{} +} + +// resourceLBFrontend is the resource implementation. +type resourceLBFrontend struct { + client *decort.DecortClient +} + +func (r *resourceLBFrontend) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create lb frontend + var plan models.ResourceLBFrontendModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBFrontend: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceLBFrontend: start creating", map[string]any{"name": plan.Name.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBFrontend: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceLBFrontend: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceLBFrontend: starting input checks", map[string]any{"name": plan.Name.ValueString()}) + resp.Diagnostics.Append(resourceLBFrontendInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBFrontend: Error input checks") + return + } + tflog.Info(ctx, "Create resourceLBFrontend: input checks successful", map[string]any{"name": plan.Name.ValueString()}) + + // Make create request and get response for creation + diags = utilities.CreateResourceLBFrontend(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceLBFrontend: Error response for create resource lb frontend") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Name.ValueString()) + + tflog.Info(ctx, "Create resourceLBFrontend: resource creation is completed", map[string]any{"name": plan.Name.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBFrontendResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBFrontend) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceLBFrontendModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontend: Error get state") + return + } + tflog.Info(ctx, "Read resourceLBFrontend: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontend: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceLBFrontend: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.LBFrontendResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontend: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontend: Error set state") + return + } + tflog.Info(ctx, "End read resourceLBFrontend") +} + +func (r *resourceLBFrontend) Update(ctx context.Context, _ resource.UpdateRequest, resp *resource.UpdateResponse) { + tflog.Error(ctx, "Update resourceLBFrontend: This resource cannot be updated") + resp.Diagnostics.AddError("This resource cannot be updated", "") + return +} + +func (r *resourceLBFrontend) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceLBFrontendModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBFrontend: Error get state") + return + } + tflog.Info(ctx, "Delete resourceLBFrontend: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBFrontend: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceLBFrontend: set timeouts successfully", map[string]any{ + "id": state.ID.ValueString(), + "deleteTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Delete existing lb frontend + delReq := lb.FrontendDeleteRequest{ + LBID: uint64(state.LBID.ValueInt64()), + FrontendName: state.Name.ValueString(), + } + + tflog.Info(ctx, "Delete resourceLBFrontend: calling cloudbroker().LB().FrontendDelete", map[string]any{ + "ID": state.ID.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().LB().FrontendDelete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceLBFrontend: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resource lb frontend", map[string]any{"id": state.ID.ValueString()}) +} + +func (r *resourceLBFrontend) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceLBFrontend(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +func (r *resourceLBFrontend) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb_frontend" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceLBFrontend) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceLBFrontend") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceLBFrontend successfully") +} + +func (r *resourceLBFrontend) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go b/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go new file mode 100644 index 0000000..a5af402 --- /dev/null +++ b/internal/service/cloudbroker/lb/resource_lb_frontend_bind.go @@ -0,0 +1,268 @@ +package lb + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/utilities" +) + +var ( + _ resource.Resource = 
&resourceLBFrontendBind{} + _ resource.ResourceWithImportState = &resourceLBFrontendBind{} +) + +// NewResourceLBFrontendBind is a helper function to simplify the provider implementation. +func NewResourceLBFrontendBind() resource.Resource { + return &resourceLBFrontendBind{} +} + +// resourceLBFrontendBind is the resource implementation. +type resourceLBFrontendBind struct { + client *decort.DecortClient +} + +func (r *resourceLBFrontendBind) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create lb frontend bind + var plan models.ResourceLBFrontendBindModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBFrontendBind: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceLBFrontendBind: start creating", map[string]any{"name": plan.Name.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBFrontendBind: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceLBFrontendBind: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceLBFrontendBind: starting input checks", map[string]any{"name": plan.Name.ValueString()}) + resp.Diagnostics.Append(resourceLBFrontendBindInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceLBFrontendBind: Error input checks") + return + } + tflog.Info(ctx, "Create resourceLBFrontendBind: input checks successful", map[string]any{"name": plan.Name.ValueString()}) + + // Make create request and get response for creation + diags = utilities.CreateResourceLBFrontendBind(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceLBFrontendBind: Error response for create resource flipgroup") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Frontend.ValueString() + "#" + plan.Name.ValueString()) + + tflog.Info(ctx, "Create resourceLBFrontendBind: resource creation is completed", map[string]any{"name": plan.Name.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBFrontendBindResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBFrontendBind) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceLBFrontendBindModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontendBind: Error get state") + return + } + tflog.Info(ctx, "Read resourceLBFrontendBind: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontendBind: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceLBFrontendBind: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.LBFrontendBindResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontendBind: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceLBFrontendBind: Error set state") + return + } + tflog.Info(ctx, "End read resourceLBFrontendBind") +} + +func (r *resourceLBFrontendBind) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceLBFrontendBindModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBFrontendBind: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceLBFrontendBind: got plan successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Retrieve values from state + var state models.ResourceLBFrontendBindModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBFrontendBind: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceLBFrontendBind: got state successfully", map[string]any{"ID": plan.ID.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error set timeout") + return + } + tflog.Info(ctx, "Update resourceLBFrontendBind: set timeouts successfully", map[string]any{ + "ID": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking for values in the platform + tflog.Info(ctx, "Update resourceLBFrontendBind: starting input checks", map[string]any{"ID": plan.ID.ValueString()}) + resp.Diagnostics.Append(resourceLBFrontendBindInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBFrontendBind: Error input checks") + return + } + tflog.Info(ctx, "Update resourceLBFrontendBind: input checks successful", map[string]any{"ID": plan.ID.ValueString()}) + + // Check and update resource + resp.Diagnostics.Append(utilities.UpdateLBFrontendBind(ctx, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceLBFrontendBind: Error editing lb backend") + return + } + + plan.ID = types.StringValue(strconv.Itoa(int(plan.LBID.ValueInt64())) + "#" + plan.Frontend.ValueString() + "#" + plan.Name.ValueString()) + tflog.Info(ctx, "Update resourceLBFrontendBind: resource update is completed", map[string]any{"ID": plan.ID.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.LBFrontendBindResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +func (r *resourceLBFrontendBind) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceLBFrontendBindModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBFrontendBind: Error get state") + return + } + tflog.Info(ctx, "Delete resourceLBFrontendBind: got state successfully", map[string]any{"ID": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceLBFrontendBind: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceLBFrontendBind: set timeouts successfully", map[string]any{ + "id": state.ID.ValueString(), + "deleteTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Delete existing flipgroup + delReq := lb.FrontendBindDeleteRequest{ + LBID: uint64(state.LBID.ValueInt64()), + FrontendName: state.Frontend.ValueString(), + BindingName: state.Name.ValueString(), + } + + tflog.Info(ctx, "Delete resourceLBFrontendBind: calling cloudbroker().LB().FrontendBindDelete", map[string]any{ + "ID": state.ID.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().LB().FrontendBindDelete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceLBFrontendBind: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resource lb frontend bind", map[string]any{"id": state.ID.ValueString()}) +} + +func (r *resourceLBFrontendBind) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceLBFrontendBind(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +func (r *resourceLBFrontendBind) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_lb_frontend_bind" +} + +// Configure adds the provider 
configured client to the resource. +func (r *resourceLBFrontendBind) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceLBFrontendBind") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceLBFrontendBind successfully") +} + +func (r *resourceLBFrontendBind) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_data_source_lb.go b/internal/service/cloudbroker/lb/schemas/schema_data_source_lb.go new file mode 100644 index 0000000..9562803 --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_data_source_lb.go @@ -0,0 +1,270 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceLB() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "lb_id": schema.Int64Attribute{ + Required: true, + }, + "ha_mode": schema.BoolAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "backend_haip": schema.StringAttribute{ + Computed: true, + }, + "backends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "algorithm": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "server_default_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + 
Computed: true, + }, + "inter": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "servers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "check": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "server_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "inter": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "ckey": schema.StringAttribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dp_api_user": schema.StringAttribute{ + Computed: true, + }, + "dp_api_password": schema.StringAttribute{ + Computed: true, + }, + "extnet_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_haip": schema.StringAttribute{ + Computed: true, + }, + "frontends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: 
map[string]schema.Attribute{ + "backend": schema.StringAttribute{ + Computed: true, + }, + "bindings": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "part_k8s": schema.BoolAttribute{ + Computed: true, + }, + "primary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "secondary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: 
true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list.go b/internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list.go new file mode 100644 index 0000000..ff83d3c --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list.go @@ -0,0 +1,341 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceLBList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "find by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "find by account ID", + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + Description: "find by RG ID", + }, + "tech_status": schema.StringAttribute{ + Optional: true, + Description: "find by tech status", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "find by status", + }, + "front_ip": schema.StringAttribute{ + Optional: true, + Description: "find by frontend Ip", + }, + "back_ip": schema.StringAttribute{ + Optional: true, + Description: "find by backend Ip", + }, + "include_deleted": schema.BoolAttribute{ + Optional: true, + Description: "included deleted LBs. 
If using field 'status', then include_deleted will be ignored", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + // computed attributes + "id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "ha_mode": schema.BoolAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "backend_haip": schema.StringAttribute{ + Computed: true, + }, + "backends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "algorithm": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "server_default_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "inter": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "servers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "check": schema.StringAttribute{ 
+ Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "server_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "inter": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dp_api_user": schema.StringAttribute{ + Computed: true, + }, + "dp_api_password": schema.StringAttribute{ + Computed: true, + }, + "extnet_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_haip": schema.StringAttribute{ + Computed: true, + }, + "frontends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "backend": schema.StringAttribute{ + Computed: true, + }, + "bindings": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": 
schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "lb_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "part_k8s": schema.BoolAttribute{ + Computed: true, + }, + "primary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "secondary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: 
true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list_deleted.go new file mode 100644 index 0000000..2169bca --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_data_source_lb_list_deleted.go @@ -0,0 +1,333 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceLBListDeleted() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "find by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "find by account ID", + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + Description: "find by RG ID", + }, + "tech_status": schema.StringAttribute{ + Optional: true, + Description: "find by tech status", + }, + "front_ip": schema.StringAttribute{ + Optional: true, + Description: "find by frontend Ip", + }, + "back_ip": schema.StringAttribute{ + Optional: true, + Description: "find by backend Ip", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + // computed attributes + "id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "ha_mode": 
schema.BoolAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "backend_haip": schema.StringAttribute{ + Computed: true, + }, + "backends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "algorithm": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "server_default_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "inter": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "servers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "check": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "server_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "inter": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": 
schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dp_api_user": schema.StringAttribute{ + Computed: true, + }, + "dp_api_password": schema.StringAttribute{ + Computed: true, + }, + "extnet_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_haip": schema.StringAttribute{ + Computed: true, + }, + "frontends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "backend": schema.StringAttribute{ + Computed: true, + }, + "bindings": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "lb_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "part_k8s": 
schema.BoolAttribute{ + Computed: true, + }, + "primary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "secondary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_resource_lb.go b/internal/service/cloudbroker/lb/schemas/schema_resource_lb.go new file mode 100644 index 0000000..a79830e --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_resource_lb.go @@ -0,0 +1,316 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceLB() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "extnet_id": schema.Int64Attribute{ + Required: true, + }, + "vins_id": schema.Int64Attribute{ + Required: true, + }, + "start": schema.BoolAttribute{ + Required: true, + }, + + // optional attributes + "ha_mode": schema.BoolAttribute{ + Optional: true, + Computed: true, + }, + "desc": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "enable": schema.BoolAttribute{ + Optional: true, + }, + "restart": schema.BoolAttribute{ + Optional: true, + }, + "config_reset": schema.BoolAttribute{ + Optional: true, + }, + "permanently": schema.BoolAttribute{ + Optional: true, + }, + "restore": schema.BoolAttribute{ + Optional: true, + }, + "safe": schema.BoolAttribute{ + // Default: true + Optional: true, + }, + "sysctl_params": schema.ListNestedAttribute{ + Optional: true, + Description: "Custom sysctl values for Load Balancer instance. 
Applied on boot.", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Required: true, + }, + "value": schema.StringAttribute{ + Required: true, + }, + }, + }, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "backend_haip": schema.StringAttribute{ + Computed: true, + }, + "backends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "algorithm": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "server_default_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "inter": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "servers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "check": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "server_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + 
}, + "guid": schema.StringAttribute{ + Computed: true, + }, + "inter": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "ckey": schema.StringAttribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "dp_api_user": schema.StringAttribute{ + Computed: true, + }, + "dp_api_password": schema.StringAttribute{ + Computed: true, + }, + "frontend_haip": schema.StringAttribute{ + Computed: true, + }, + "frontends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "backend": schema.StringAttribute{ + Computed: true, + }, + "bindings": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "lb_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "image_id": 
schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "part_k8s": schema.BoolAttribute{ + Computed: true, + }, + "primary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "secondary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend.go b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend.go new file mode 100644 index 0000000..bb6d138 --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend.go @@ -0,0 +1,122 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +func MakeSchemaResourceLBBackend() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "lb_id": schema.Int64Attribute{ + Required: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "algorithm": schema.StringAttribute{ + Optional: true, + Computed: true, + Validators: []validator.String{ + stringvalidator.OneOf("roundrobin", "static-rr", "leastconn"), + }, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "downinter": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "fall": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "inter": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "rise": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "weight": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "servers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "check": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "server_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "downinter": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: 
true, + }, + "inter": schema.Int64Attribute{ + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend_server.go b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend_server.go new file mode 100644 index 0000000..8f38281 --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_backend_server.go @@ -0,0 +1,83 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" +) + +func MakeSchemaResourceLBBackendServer() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "lb_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the LB instance to backendCreate", + }, + "backend_name": schema.StringAttribute{ + Required: true, + Description: "Must be unique among all backends of this LB - name of the new backend to create", + }, + "name": schema.StringAttribute{ + Required: true, + Description: "Must be unique among all servers defined for this backend - name of the server definition to add.", + }, + "address": schema.StringAttribute{ + Required: true, + Description: "IP address of the server.", + }, + "port": schema.Int64Attribute{ + Required: true, + Description: "Port number on the server", + }, + "check": schema.StringAttribute{ + Computed: true, + Optional: true, + Validators: 
[]validator.String{ + stringvalidator.OneOf("disabled", "enabled"), + }, + Description: "set to disabled if this server should be used regardless of its state.", + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "downinter": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "fall": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "inter": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "maxconn": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "maxqueue": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "rise": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "slowstart": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "weight": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend.go b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend.go new file mode 100644 index 0000000..289df04 --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend.go @@ -0,0 +1,49 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func MakeSchemaResourceLBFrontend() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "lb_id": schema.Int64Attribute{ + Required: true, + }, + "backend_name": schema.StringAttribute{ + Required: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "bindings": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + 
"address": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend_bind.go b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend_bind.go new file mode 100644 index 0000000..82383f0 --- /dev/null +++ b/internal/service/cloudbroker/lb/schemas/schema_resource_lb_frontend_bind.go @@ -0,0 +1,36 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func MakeSchemaResourceLBFrontendBind() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "lb_id": schema.Int64Attribute{ + Required: true, + }, + "frontend_name": schema.StringAttribute{ + Required: true, + }, + "address": schema.StringAttribute{ + Required: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Required: true, + }, + "port": schema.Int64Attribute{ + Required: true, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_data_source_lb.go b/internal/service/cloudbroker/lb/utilities/utility_data_source_lb.go new file mode 100644 index 0000000..446cc77 --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_data_source_lb.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" 
+ "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" +) + +func LBDataSourceCheckPresence(ctx context.Context, lbId uint64, c *decort.DecortClient) (*lb.RecordLB, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("LBDataSourceCheckPresence: Get info about lb with ID - %v", lbId)) + + diags := diag.Diagnostics{} + + recordLB, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "LBDataSourceCheckPresence: response from CloudBroker().LB().Get", map[string]any{"lb_id": lbId, "response": recordLB}) + + return recordLB, nil +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list.go b/internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list.go new file mode 100644 index 0000000..60090df --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list.go @@ -0,0 +1,63 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +func LBListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceLBList, c *decort.DecortClient) (*lb.ListLB, error) { + + listLBReq := lb.ListRequest{} + + if !plan.ByID.IsNull() { + listLBReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + listLBReq.Name = plan.Name.ValueString() + } + if !plan.AccountID.IsNull() { + listLBReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.RgID.IsNull() { + 
listLBReq.RGID = uint64(plan.RgID.ValueInt64()) + } + if !plan.TechStatus.IsNull() { + listLBReq.TechStatus = plan.TechStatus.ValueString() + } + if !plan.Status.IsNull() { + listLBReq.Status = plan.Status.ValueString() + } + if !plan.FrontIP.IsNull() { + listLBReq.FrontIP = plan.FrontIP.ValueString() + } + if !plan.BackIP.IsNull() { + listLBReq.BackIP = plan.BackIP.ValueString() + } + if plan.Status.IsNull() && !plan.IncludeDeleted.IsNull() { + listLBReq.IncludeDeleted = plan.IncludeDeleted.ValueBool() + } + if !plan.SortBy.IsNull() { + listLBReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listLBReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listLBReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "LBListDataSourceCheckPresence: before call CloudBroker().LB().List", map[string]any{"response": listLBReq}) + lbList, err := c.CloudBroker().LB().List(ctx, listLBReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about data source list lb with error: %w", err) + } + + tflog.Info(ctx, "LBListDataSourceCheckPresence: response from CloudBroker().LB().List", map[string]any{"response": lbList}) + + return lbList, err +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list_deleted.go b/internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list_deleted.go new file mode 100644 index 0000000..f3a9b1c --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_data_source_lb_list_deleted.go @@ -0,0 +1,57 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +func LBListDeletedDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceLBListDeleted, c 
*decort.DecortClient) (*lb.ListLB, error) { + + req := lb.ListDeletedRequest{} + + if !plan.ByID.IsNull() { + req.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + req.Name = plan.Name.ValueString() + } + if !plan.AccountID.IsNull() { + req.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.RgID.IsNull() { + req.RGID = uint64(plan.RgID.ValueInt64()) + } + if !plan.TechStatus.IsNull() { + req.TechStatus = plan.TechStatus.ValueString() + } + if !plan.FrontIP.IsNull() { + req.FrontIP = plan.FrontIP.ValueString() + } + if !plan.BackIP.IsNull() { + req.BackIP = plan.BackIP.ValueString() + } + if !plan.SortBy.IsNull() { + req.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + req.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + req.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "LBListDeletedDataSourceCheckPresence: before call CloudBroker().LB().ListDeleted", map[string]any{"response": req}) + lbDelList, err := c.CloudBroker().LB().ListDeleted(ctx, req) + if err != nil { + return nil, fmt.Errorf("cannot get info about data source list lb with error: %w", err) + } + + tflog.Info(ctx, "LBListDeletedDataSourceCheckPresence: response from CloudBroker().LB().ListDeleted", map[string]any{"response": lbDelList}) + + return lbDelList, err +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_resource_lb.go b/internal/service/cloudbroker/lb/utilities/utility_resource_lb.go new file mode 100644 index 0000000..23f7395 --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_resource_lb.go @@ -0,0 +1,352 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" +) + +func CreateResourceLB(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) (uint64, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("Start create ResourceLB: name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := lb.CreateRequest{ + Name: plan.Name.ValueString(), + RGID: uint64(plan.RGID.ValueInt64()), + ExtNetID: uint64(plan.ExtNetID.ValueInt64()), + VINSID: uint64(plan.VINSID.ValueInt64()), + Start: plan.Start.ValueBool(), + } + + if plan.HAMode.IsUnknown() { // HAMode is optional & computed + createReq.HighlyAvailable = false + } else { + createReq.HighlyAvailable = plan.HAMode.ValueBool() + } + + if !plan.Description.IsNull() { // Description is optional & computed + createReq.Description = plan.Description.ValueString() + } + + if !plan.SysctlParams.IsNull() { + result := make([]map[string]interface{}, 0, len(plan.SysctlParams.Elements())) + for _, val := range plan.SysctlParams.Elements() { + objVal := val.(types.Object) + valMap := objVal.Attributes() + mapKey := valMap["key"].(types.String).ValueString() + mapVal := valMap["value"].(types.String).ValueString() + tempMap := make(map[string]interface{}) + tempMap[mapKey] = mapVal + result = append(result, tempMap) + } + createReq.SysctlParams = result + } + + tflog.Info(ctx, "CreateResourceLB: before call CloudBroker().LB().Create", map[string]any{"req": createReq}) + + lbId, err := c.CloudBroker().LB().Create(ctx, createReq) + if err != nil { + diags.AddError("CreateResourceLB: unable to create LB", err.Error()) + return 0, diags + } + tflog.Info(ctx, "CreateResourceLB: LB created", map[string]any{"lb_id": lbId, "name": plan.Name.ValueString()}) + + return lbId, nil +} + +func LBResourceCheckPresence(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) 
(*lb.RecordLB, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("LBCheckPresence: Get info about LB with ID - %v", plan.ID.ValueString())) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return nil, diags + } + + lbItem, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbId), err.Error()) + return nil, diags + } + + return lbItem, nil +} + +func LBEnableDisable(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "EnableDisable lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + diags := diag.Diagnostics{} + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return diags + } + if plan.Enable.IsNull() || plan.Enable.ValueBool() { + tflog.Info(ctx, "Enable lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + _, err := c.CloudBroker().LB().Enable(ctx, lb.EnableRequest{LBID: lbId}) + if err != nil { + diags.AddError("EnableDisableLB: error to enable LB", err.Error()) + return diags + } + } else { + tflog.Info(ctx, "Disable lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + _, err := c.CloudBroker().LB().Disable(ctx, lb.DisableRequest{LBID: lbId}) + if err != nil { + diags.AddError("EnableDisableLB: error to disable LB", err.Error()) + return diags + } + } + return diags +} + +func LBReadStatus(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Read status lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + 
return diags + } + + lbItem, err := c.CloudBroker().LB().Get(ctx, lb.GetRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", lbItem), err.Error()) + return diags + } + + switch lbItem.Status { + case status.Modeled: + diags.AddError("Error:", fmt.Sprintf("The lb is in status: %s, please, contact support for more information", lbItem.Status)) + return diags + case status.Deleted: + if plan.Restore.ValueBool() || plan.Restore.IsNull() { + diags = LBRestore(ctx, plan, c) + if diags.HasError() { + tflog.Error(ctx, "Error restore lb", map[string]any{"lb_id": plan.ID.ValueString()}) + return diags + } + } else { + diags.AddError("LB in status Deleted:", "please clean state, or restore lb") + return diags + } + if plan.Enable.ValueBool() || plan.Enable.IsNull() { + diags = LBEnableDisable(ctx, plan, c) + if diags.HasError() { + tflog.Error(ctx, "Error enable/disable lb", map[string]any{"lb_id": plan.ID.ValueString()}) + return diags + } + if plan.Start.ValueBool() || plan.Start.IsNull() { + diags = LBStartStop(ctx, plan, c) + if diags.HasError() { + tflog.Error(ctx, "Error start/stop lb", map[string]any{"lb_id": plan.ID.ValueString()}) + return diags + } + } + } + case status.Destroying: + diags.AddError("Error:", fmt.Sprintf("The lb is in progress with status: %s", lbItem.Status)) + return diags + case status.Destroyed: + diags.AddError("Error:", "The resource cannot be updated because it has been destroyed") + return diags + } + + tflog.Info(ctx, "Read status lb successfully", map[string]any{"lb_id": plan.ID.ValueString()}) + return diags +} + +func LBRestore(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Restore lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + 
return diags + } + + _, err = c.CloudBroker().LB().Restore(ctx, lb.RestoreRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot restore lb with ID - %s", plan.ID.ValueString()), err.Error()) + return diags + } + + tflog.Info(ctx, "Restore lb successfully", map[string]any{"lb_id": plan.ID.ValueString()}) + return diags +} + +func LBStartStop(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "StartStop lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + diags := diag.Diagnostics{} + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return diags + } + if plan.Enable.IsNull() || plan.Enable.ValueBool() { + if plan.Start.ValueBool() || plan.Start.IsNull() { + tflog.Info(ctx, "Start lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + _, err := c.CloudBroker().LB().Start(ctx, lb.StartRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot start lb with ID - %s", plan.ID.ValueString()), err.Error()) + return diags + } + } else { + tflog.Info(ctx, "Stop lb with ID", map[string]any{"lb_id": plan.ID.ValueString()}) + _, err := c.CloudBroker().LB().Stop(ctx, lb.StopRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot stop lb with ID - %s", plan.ID.ValueString()), err.Error()) + return diags + } + } + } + + return diags +} + +func LBUpdateHaMode(ctx context.Context, state *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Update ha mode from lb with ID", map[string]any{"id": state.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(state.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return diags + } + + _, err = c.CloudBroker().LB().HighlyAvailable(ctx, lb.HighlyAvailableRequest{LBID: lbId}) + if err != 
nil { + diags.AddError(fmt.Sprintf("Cannot update ha mode from lb with ID - %s", state.ID.ValueString()), err.Error()) + return diags + } + + tflog.Info(ctx, "Update ha mode from LB with ID successfully", map[string]any{"id": state.ID.ValueString()}) + + return diags +} + +func LBUpdateSysctlParams(ctx context.Context, plan *models.ResourceLBModel, state *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Update sysctl parameters from LB with ID", map[string]any{"id": state.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(state.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return diags + } + + sysctlParams := make([]map[string]interface{}, 0, len(plan.SysctlParams.Elements())) + for _, val := range plan.SysctlParams.Elements() { + objVal := val.(types.Object) + valMap := objVal.Attributes() + mapKey := valMap["key"].(types.String).ValueString() + mapVal := valMap["value"].(types.String).ValueString() + tempMap := make(map[string]interface{}) + tempMap[mapKey] = mapVal + sysctlParams = append(sysctlParams, tempMap) + } + + req := lb.UpdateSysctParamsRequest{ + LBID: lbId, + SysctlParams: sysctlParams, + } + + _, err = c.CloudBroker().LB().UpdateSysctlParams(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot update sysctl parameters from LB with ID - %s", state.ID.ValueString()), err.Error()) + return diags + } + + tflog.Info(ctx, "Update sysctl parameters from LB with ID successfully", map[string]any{"id": state.ID.ValueString()}) + return diags +} + +func LBUpdateDescription(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Update description from lb with ID", map[string]any{"id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb 
from state", err.Error()) + return diags + } + + _, err = c.CloudBroker().LB().Update(ctx, lb.UpdateRequest{LBID: lbId, Description: plan.Description.ValueString()}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot update description from lb with ID - %s", plan.ID.ValueString()), err.Error()) + return diags + } + + tflog.Info(ctx, "Update description from LB with ID successfully", map[string]any{"id": plan.ID.ValueString()}) + + return diags +} + +func LBRestart(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Restart lb with ID", map[string]any{"id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return diags + } + + req := lb.RestartRequest{ + LBID: lbId, + Safe: false, + } + + if plan.Safe.ValueBool() || plan.Safe.IsNull() { + req.Safe = true + } + + _, err = c.CloudBroker().LB().Restart(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot restart lb with ID - %s", plan.ID.ValueString()), err.Error()) + return diags + } + + tflog.Info(ctx, "Restart LB with ID successfully", map[string]any{"id": plan.ID.ValueString()}) + + return diags +} + +func LBConfigReset(ctx context.Context, plan *models.ResourceLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Reset config from lb with ID", map[string]any{"id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + lbId, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parsed ID lb from state", err.Error()) + return diags + } + + _, err = c.CloudBroker().LB().ConfigReset(ctx, lb.ConfigResetRequest{LBID: lbId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot reset config from lb with ID - %s", plan.ID.ValueString()), err.Error()) + return diags + } + + tflog.Info(ctx, "Reset config from LB with ID 
successfully", map[string]any{"id": plan.ID.ValueString()}) + + return diags +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend.go b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend.go new file mode 100644 index 0000000..451ba46 --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend.go @@ -0,0 +1,167 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +func CreateResourceLBBackend(ctx context.Context, plan *models.ResourceLBBackendModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("Start create ResourceLBBackend: backend_name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := lb.BackendCreateRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + BackendName: plan.Name.ValueString(), + } + + if !plan.Algorithm.IsUnknown() { // Algorithm is optional & computed + createReq.Algorithm = plan.Algorithm.ValueString() + } + + if !plan.Inter.IsUnknown() { // Inter is optional & computed + createReq.Inter = uint64(plan.Inter.ValueInt64()) + } + + if !plan.DownInter.IsUnknown() { // DownInter is optional & computed + createReq.DownInter = uint64(plan.DownInter.ValueInt64()) + } + + if !plan.Rise.IsUnknown() { // Rise is optional & computed + createReq.Rise = uint64(plan.Rise.ValueInt64()) + } + + if !plan.Fall.IsUnknown() { // Fall is optional & computed + createReq.Fall = uint64(plan.Fall.ValueInt64()) + } + + if !plan.SlowStart.IsUnknown() { // SlowStart is optional & computed + createReq.SlowStart = uint64(plan.SlowStart.ValueInt64()) + } + + if 
!plan.MaxConn.IsUnknown() { // MaxConn is optional & computed + createReq.MaxConn = uint64(plan.MaxConn.ValueInt64()) + } + + if !plan.MaxQueue.IsUnknown() { // MaxQueue is optional & computed + createReq.MaxQueue = uint64(plan.MaxQueue.ValueInt64()) + } + + if !plan.Weight.IsUnknown() { // Weight is optional & computed + createReq.Weight = uint64(plan.Weight.ValueInt64()) + } + + tflog.Info(ctx, "CreateResourceLBBackend: before call CloudBroker().LB().BackendCreate", map[string]any{"req": createReq}) + + resp, err := c.CloudBroker().LB().BackendCreate(ctx, createReq) + if err != nil { + diags.AddError("CreateResourceLBBackend: unable to create LB Backend", err.Error()) + return diags + } + tflog.Info(ctx, "CreateResourceLBBackend: LB Backend created", map[string]any{"responce": resp, "name": plan.Name.ValueString()}) + + return nil +} + +func LBBackendResourceCheckPresence(ctx context.Context, plan *models.ResourceLBBackendModel, c *decort.DecortClient) (*lb.ItemBackend, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("LBBackendCheckPresence: Get info about LB Backend with ID - %v", plan.ID.ValueString())) + + diags := diag.Diagnostics{} + req := lb.GetRequest{} + bName := plan.Name.ValueString() + + if !plan.LBID.IsNull() { + req.LBID = uint64(plan.LBID.ValueInt64()) + } else { + parameters := strings.Split(plan.ID.ValueString(), "#") + if len(parameters) != 2 { + diags.AddError("LBBackendResourceCheckPresence: broken state id", + fmt.Sprintf("state id expected: #, got: %v", plan.ID.ValueString())) + return nil, diags + } + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + bName = parameters[1] + } + + lb, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", req.LBID), err.Error()) + return nil, diags + } + + backends := lb.Backends + for _, b := range backends { + if b.Name == bName { + return &b, diags + } + } + + diags.AddError(fmt.Sprintf("can not find backend 
with name: %s for lb: %d", bName, lb.ID), "") + return nil, diags +} + +func UpdateLBBackend(ctx context.Context, plan, state *models.ResourceLBBackendModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("UpdateLBBackend: Start edit lb backend with ID - %v", state.ID.ValueString())) + + diags := diag.Diagnostics{} + + req := lb.BackendUpdateRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + BackendName: plan.Name.ValueString(), + } + + if !plan.Algorithm.IsUnknown() { + req.Algorithm = plan.Algorithm.ValueString() + } + + if !plan.Inter.IsUnknown() { + req.Inter = uint64(plan.Inter.ValueInt64()) + } + + if !plan.DownInter.IsUnknown() { + req.DownInter = uint64(plan.DownInter.ValueInt64()) + } + + if !plan.Rise.IsUnknown() { + req.Rise = uint64(plan.Rise.ValueInt64()) + } + + if !plan.Fall.IsUnknown() { + req.Fall = uint64(plan.Fall.ValueInt64()) + } + + if !plan.SlowStart.IsUnknown() { + req.SlowStart = uint64(plan.SlowStart.ValueInt64()) + } + + if !plan.MaxConn.IsUnknown() { + req.MaxConn = uint64(plan.MaxConn.ValueInt64()) + } + + if !plan.MaxQueue.IsUnknown() { + req.MaxQueue = uint64(plan.MaxQueue.ValueInt64()) + } + + if !plan.Weight.IsUnknown() { + req.Weight = uint64(plan.Weight.ValueInt64()) + } + + _, err := c.CloudBroker().LB().BackendUpdate(ctx, req) + if err != nil { + diags.AddError("UpdateLBBackend: cannot edit lb backend", err.Error()) + return diags + } + + tflog.Info(ctx, fmt.Sprintf("UpdateLBBackend: Finish edit lb backend with name - %v", req.BackendName)) + + return nil +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend_server.go b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend_server.go new file mode 100644 index 0000000..0853370 --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_backend_server.go @@ -0,0 +1,187 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "strings" + + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +func CreateResourceLBBackendServer(ctx context.Context, plan *models.ResourceLBBackendServerModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("Start create ResourceLBBackendServer: name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := lb.BackendServerAddRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + BackendName: plan.Backend.ValueString(), + ServerName: plan.Name.ValueString(), + Address: plan.Address.ValueString(), + Port: uint64(plan.Port.ValueInt64()), + } + + if !plan.Check.IsUnknown() { // Check is optional & computed + createReq.Check = plan.Check.ValueString() + } + + if !plan.Inter.IsUnknown() { // Inter is optional & computed + createReq.Inter = uint64(plan.Inter.ValueInt64()) + } + + if !plan.DownInter.IsUnknown() { // DownInter is optional & computed + createReq.DownInter = uint64(plan.DownInter.ValueInt64()) + } + + if !plan.Rise.IsUnknown() { // Rise is optional & computed + createReq.Rise = uint64(plan.Rise.ValueInt64()) + } + + if !plan.Fall.IsUnknown() { // Fall is optional & computed + createReq.Fall = uint64(plan.Fall.ValueInt64()) + } + + if !plan.SlowStart.IsUnknown() { // SlowStart is optional & computed + createReq.SlowStart = uint64(plan.SlowStart.ValueInt64()) + } + + if !plan.MaxConn.IsUnknown() { // MaxConn is optional & computed + createReq.MaxConn = uint64(plan.MaxConn.ValueInt64()) + } + + if !plan.MaxQueue.IsUnknown() { // MaxQueue is optional & computed + createReq.MaxQueue = uint64(plan.MaxQueue.ValueInt64()) + } + + if !plan.Weight.IsUnknown() { // Weight is optional & computed + createReq.Weight = uint64(plan.Weight.ValueInt64()) + 
} + + tflog.Info(ctx, "ResourceLBBackendServer: before call CloudBroker().LB().BackendServerAdd", map[string]any{"req": createReq}) + + resp, err := c.CloudBroker().LB().BackendServerAdd(ctx, createReq) + if err != nil { + diags.AddError("ResourceLBBackendServer: unable to create LB Backend Server", err.Error()) + return diags + } + tflog.Info(ctx, "ResourceLBBackendServer: LB Backend Server created", map[string]any{"responce": resp, "name": plan.Name.ValueString()}) + + return nil +} + +func LBBackendServerResourceCheckPresence(ctx context.Context, plan *models.ResourceLBBackendServerModel, c *decort.DecortClient) (*lb.ItemServer, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("LBBackendServerCheckPresence: Get info about LB Backend Server with ID - %v", plan.ID.ValueString())) + + diags := diag.Diagnostics{} + req := lb.GetRequest{} + bName := plan.Backend.ValueString() + sName := plan.Name.ValueString() + + if !plan.LBID.IsNull() { + req.LBID = uint64(plan.LBID.ValueInt64()) + } else { + parameters := strings.Split(plan.ID.ValueString(), "#") + if len(parameters) != 3 { + diags.AddError("LBBackendServerResourceCheckPresence: broken state id", + fmt.Sprintf("state id expected: ##, got: %v", plan.ID.ValueString())) + return nil, diags + } + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + bName = parameters[1] + sName = parameters[2] + } + + lbItem, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", req.LBID), err.Error()) + return nil, diags + } + + backend := &lb.ItemBackend{} + backends := lbItem.Backends + for i, b := range backends { + if b.Name == bName { + backend = &backends[i] + break + } + } + if backend.Name == "" { + diags.AddError(fmt.Sprintf("can not find backend with name: %s for lb: %d", bName, lbItem.ID), "") + return nil, diags + } + + for _, s := range backend.Servers { + if s.Name == sName { + return &s, nil + } + } + + 
diags.AddError(fmt.Sprintf("can not find server with name: %s for backend: %s for lb: %d", sName, bName, lbItem.ID), "") + return nil, diags +} + +func UpdateLBBackendServer(ctx context.Context, plan, state *models.ResourceLBBackendServerModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("UpdateLBBackendServer: Start edit lb backend server with ID - %v", state.ID.ValueString())) + + diags := diag.Diagnostics{} + + req := lb.BackendServerUpdateRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + BackendName: plan.Backend.ValueString(), + ServerName: plan.Name.ValueString(), + Address: plan.Address.ValueString(), + Port: uint64(plan.Port.ValueInt64()), + } + + if !plan.Check.IsUnknown() { + req.Check = plan.Check.ValueString() + } + + if !plan.Inter.IsUnknown() { + req.Inter = uint64(plan.Inter.ValueInt64()) + } + + if !plan.DownInter.IsUnknown() { + req.DownInter = uint64(plan.DownInter.ValueInt64()) + } + + if !plan.Rise.IsUnknown() { + req.Rise = uint64(plan.Rise.ValueInt64()) + } + + if !plan.Fall.IsUnknown() { + req.Fall = uint64(plan.Fall.ValueInt64()) + } + + if !plan.SlowStart.IsUnknown() { + req.SlowStart = uint64(plan.SlowStart.ValueInt64()) + } + + if !plan.MaxConn.IsUnknown() { + req.MaxConn = uint64(plan.MaxConn.ValueInt64()) + } + + if !plan.MaxQueue.IsUnknown() { + req.MaxQueue = uint64(plan.MaxQueue.ValueInt64()) + } + + if !plan.Weight.IsUnknown() { + req.Weight = uint64(plan.Weight.ValueInt64()) + } + + _, err := c.CloudBroker().LB().BackendServerUpdate(ctx, req) + if err != nil { + diags.AddError("UpdateLBBackendServer: cannot edit lb backend server", err.Error()) + return diags + } + + tflog.Info(ctx, fmt.Sprintf("UpdateLBBackendServer: Finish edit lb backend server with name - %v", req.BackendName)) + + return nil +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend.go b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend.go new file mode 100644 index 0000000..f35128c 
--- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend.go @@ -0,0 +1,75 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +func CreateResourceLBFrontend(ctx context.Context, plan *models.ResourceLBFrontendModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("Start create ResourceLBFrontend: frontend_name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := lb.FrontendCreateRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + BackendName: plan.Backend.ValueString(), + FrontendName: plan.Name.ValueString(), + } + + tflog.Info(ctx, "ResourceLBFrontend: before call CloudBroker().LB().FrontendCreate", map[string]any{"req": createReq}) + + resp, err := c.CloudBroker().LB().FrontendCreate(ctx, createReq) + if err != nil { + diags.AddError("ResourceLBFrontend: unable to create LB Frontend", err.Error()) + return diags + } + tflog.Info(ctx, "ResourceLBFrontend: LB Frontend created", map[string]any{"responce": resp, "name": plan.Name.ValueString()}) + + return nil +} + +func LBFrontendResourceCheckPresence(ctx context.Context, plan *models.ResourceLBFrontendModel, c *decort.DecortClient) (*lb.ItemFrontend, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("LBFrontendCheckPresence: Get info about LB Frontend with ID - %v", plan.ID.ValueString())) + + diags := diag.Diagnostics{} + req := lb.GetRequest{} + fName := plan.Name.ValueString() + + if !plan.LBID.IsNull() { + req.LBID = uint64(plan.LBID.ValueInt64()) + } else { + parameters := strings.Split(plan.ID.ValueString(), "#") + if len(parameters) != 2 { + 
diags.AddError("LBFrontendResourceCheckPresence: broken state id", + fmt.Sprintf("state id expected: #, got: %v", plan.ID.ValueString())) + return nil, diags + } + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + fName = parameters[1] + } + + lb, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", req.LBID), err.Error()) + return nil, diags + } + + frontends := lb.Frontends + for _, f := range frontends { + if f.Name == fName { + return &f, diags + } + } + + diags.AddError(fmt.Sprintf("can not find frontend with name: %s for lb: %d", fName, lb.ID), "") + return nil, diags +} diff --git a/internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend_bind.go b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend_bind.go new file mode 100644 index 0000000..38699d6 --- /dev/null +++ b/internal/service/cloudbroker/lb/utilities/utility_resource_lb_frontend_bind.go @@ -0,0 +1,115 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/lb/models" +) + +func CreateResourceLBFrontendBind(ctx context.Context, plan *models.ResourceLBFrontendBindModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("Start create ResourceLBFrontendBind: frontend_name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := lb.FrontendBindRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + FrontendName: plan.Frontend.ValueString(), + BindingName: plan.Name.ValueString(), + BindingAddress: plan.Address.ValueString(), + BindingPort: uint64(plan.Port.ValueInt64()), + } + + 
tflog.Info(ctx, "ResourceLBFrontendBind: before call CloudBroker().LB().FrontendBind", map[string]any{"req": createReq}) + + resp, err := c.CloudBroker().LB().FrontendBind(ctx, createReq) + if err != nil { + diags.AddError("ResourceLBFrontendBind: unable to create LB Frontend Bind", err.Error()) + return diags + } + tflog.Info(ctx, "ResourceLBFrontendBind: LB Frontend Bind created", map[string]any{"responce": resp, "name": plan.Name.ValueString()}) + + return nil +} + +func LBFrontendBindResourceCheckPresence(ctx context.Context, plan *models.ResourceLBFrontendBindModel, c *decort.DecortClient) (*lb.ItemBinding, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("LBFrontendBindCheckPresence: Get info about LB Frontend Bind with ID - %v", plan.ID.ValueString())) + + diags := diag.Diagnostics{} + req := lb.GetRequest{} + fName := plan.Frontend.ValueString() + bName := plan.Name.ValueString() + + if !plan.LBID.IsNull() { + req.LBID = uint64(plan.LBID.ValueInt64()) + } else { + parameters := strings.Split(plan.ID.ValueString(), "#") + if len(parameters) != 3 { + diags.AddError("LBFrontendResourceCheckPresence: broken state id", + fmt.Sprintf("state id expected: ##, got: %v", plan.ID.ValueString())) + return nil, diags + } + lbId, _ := strconv.ParseUint(parameters[0], 10, 64) + req.LBID = lbId + fName = parameters[1] + bName = parameters[2] + } + + lbItem, err := c.CloudBroker().LB().Get(ctx, req) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about lb with ID %v", req.LBID), err.Error()) + return nil, diags + } + + frontend := &lb.ItemFrontend{} + frontends := lbItem.Frontends + for i, f := range frontends { + if f.Name == fName { + frontend = &frontends[i] + break + } + } + if frontend.Name == "" { + diags.AddError(fmt.Sprintf("can not find frontend with name: %s for lb: %d", fName, lbItem.ID), "") + return nil, diags + } + + for _, b := range frontend.Bindings { + if b.Name == bName { + return &b, nil + } + } + + diags.AddError(fmt.Sprintf("can not 
find bind with name: %s for frontend: %s for lb: %d", bName, fName, lbItem.ID), "") + return nil, diags +} + +func UpdateLBFrontendBind(ctx context.Context, plan, state *models.ResourceLBFrontendBindModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, fmt.Sprintf("UpdateLBFrontendBind: Start edit lb frontend bind with ID - %v", state.ID.ValueString())) + + diags := diag.Diagnostics{} + + req := lb.FrontendBindUpdateRequest{ + LBID: uint64(plan.LBID.ValueInt64()), + FrontendName: plan.Frontend.ValueString(), + BindingName: plan.Name.ValueString(), + BindingAddress: plan.Address.ValueString(), + BindingPort: uint64(plan.Port.ValueInt64()), + } + + _, err := c.CloudBroker().LB().FrontendBindUpdate(ctx, req) + if err != nil { + diags.AddError("UpdateLBFrontendBind: cannot edit lb frontend bind", err.Error()) + return diags + } + + tflog.Info(ctx, fmt.Sprintf("UpdateLBFrontendBind: Finish edit lb frontend bind with name - %v", req.BindingName)) + + return nil +} diff --git a/internal/service/cloudbroker/node/data_source_cb_node.go b/internal/service/cloudbroker/node/data_source_cb_node.go new file mode 100644 index 0000000..1a5f22f --- /dev/null +++ b/internal/service/cloudbroker/node/data_source_cb_node.go @@ -0,0 +1,91 @@ +package node + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceNode{} +) + +func NewDataSourceNode() datasource.DataSource { + return &dataSourceNode{} +} + +// dataSourceNode is the data source implementation. +type dataSourceNode struct { + client *decort.DecortClient +} + +func (d *dataSourceNode) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceNode + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNode: Error get state") + return + } + nodeID := uint64(state.NodeID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceNode: got state successfully", map[string]any{"node_id": nodeID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNode: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceNode: set timeouts successfully", map[string]any{ + "node_id": nodeID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.NodeDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNode: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNode: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceNode", map[string]any{"node_id": nodeID}) +} + +func (d *dataSourceNode) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceNode(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceNode) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_node" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceNode) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceNode") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceNode successfully") +} diff --git a/internal/service/cloudbroker/node/data_source_cb_node_list.go b/internal/service/cloudbroker/node/data_source_cb_node_list.go new file mode 100644 index 0000000..71576b1 --- /dev/null +++ b/internal/service/cloudbroker/node/data_source_cb_node_list.go @@ -0,0 +1,87 @@ +package node + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceNodeList{} +) + +func NewDataSourceNodeList() datasource.DataSource { + return &dataSourceNodeList{} +} + +// dataSourceNodeList is the data source implementation. +type dataSourceNodeList struct { + client *decort.DecortClient +} + +func (d *dataSourceNodeList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceNodeList + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNodeList: Error get state") + return + } + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNodeList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceNodeList: set timeouts successfully") + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.NodeListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNodeList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceNodeList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceNodeList") +} + +func (d *dataSourceNodeList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceNodeList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceNodeList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_node_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceNodeList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceNodeList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceNodeList successfully") +} diff --git a/internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node.go b/internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node.go new file mode 100644 index 0000000..348086b --- /dev/null +++ b/internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node.go @@ -0,0 +1,169 @@ +package flattens + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/node" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/utilities" +) + +func 
NodeDataSource(ctx context.Context, state *models.DataSourceNode, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.NodeDataSource") + + diags := diag.Diagnostics{} + + nodeID := uint64(state.NodeID.ValueInt64()) + + recordNode, diags := utilities.NodeDataSourceCheckPresence(ctx, nodeID, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.NodeDataSource: before flatten", map[string]any{"node_id": nodeID}) + + *state = models.DataSourceNode{ + NodeID: state.NodeID, + Timeouts: state.Timeouts, + + Consumption: flattenConsumpion(ctx, &recordNode.Consumption), + CpuInfo: flattenCpuInfo(ctx, &recordNode.CpuInfo), + CPUAllocationRatio: types.Int64Value(int64(recordNode.CPUAllocationRatio)), + GID: types.Int64Value(int64(recordNode.GID)), + ID: types.StringValue(strconv.Itoa(int(recordNode.ID))), + IPAddr: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordNode.IPAddr), + IsolatedCPUs: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordNode.IsolatedCpus), + Name: types.StringValue(recordNode.Name), + NeedReboot: types.BoolValue(recordNode.NeedReboot), + NicInfo: flattenNicInfo(ctx, recordNode.NicInfo), + NumaTopology: flattenNumaTopology(ctx, &recordNode.NumaTopology), + ReservedCPUs: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordNode.ReservedCPUs), + Roles: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordNode.Roles), + SriovEnabled: types.BoolValue(recordNode.SriovEnabled), + StackId: types.Int64Value(int64(recordNode.StackID)), + Status: types.StringValue(recordNode.Status), + Version: types.StringValue(recordNode.Version), + } + + tflog.Info(ctx, "End flattens.NodeDataSource", map[string]any{"Node_id": state.NodeID.ValueInt64()}) + return nil +} + +func flattenConsumpion(ctx context.Context, consumpion *node.ConsumptionInfo) *models.ConsumptionModel { + tflog.Info(ctx, "Start flattenConsumpion") + + res := models.ConsumptionModel{ + Consumed: 
&models.ConsumedModel{ + RAM: types.Int64Value(int64(consumpion.Consumed.RAM)), + Computes: types.Int64Value(int64(consumpion.Consumed.Computes)), + Routers: types.Int64Value(int64(consumpion.Consumed.Routers)), + VCPU: types.Int64Value(int64(consumpion.Consumed.VCPU)), + }, + Free: &models.ResourcesInfo{ + RAM: types.Int64Value(int64(consumpion.Free.RAM)), + }, + Reserved: &models.ResourcesInfo{ + RAM: types.Int64Value(int64(consumpion.Reserved.RAM)), + }, + Total: &models.ResourcesInfo{ + RAM: types.Int64Value(int64(consumpion.Total.RAM)), + }, + Hostname: types.StringValue(consumpion.Hostname), + } + + tflog.Info(ctx, "End flattenConsumpion") + return &res +} + +func flattenCpuInfo(ctx context.Context, cpuInfo *node.CpuInfo) *models.CpuInfoModel { + tflog.Info(ctx, "Start flattenCpuInfo") + + res := models.CpuInfoModel{ + ClockSpeed: types.Int64Value(int64(cpuInfo.ClockSpeed)), + CoreCount: types.Int64Value(int64(cpuInfo.CoreCount)), + PhysCount: types.Int64Value(int64(cpuInfo.PhysCount)), + } + + tflog.Info(ctx, "End flattenCpuInfo") + return &res +} + +func flattenNicInfo(ctx context.Context, nicInfo []node.ItemNicInfo) []models.NicInfoModel { + tflog.Info(ctx, "Start flattenNicInfo") + + res := make([]models.NicInfoModel, 0, len(nicInfo)) + + for _, item := range nicInfo { + temp := models.NicInfoModel{ + Driver: types.StringValue(item.Driver), + MaxVFS: types.Int64Value(int64(item.MaxVFS)), + NumaNode: types.Int64Value(int64(item.NumaNode)), + NumVFS: types.Int64Value(int64(item.NumVFS)), + OSName: types.StringValue(item.OSName), + PCISlot: types.StringValue(item.PCISlot), + VFList: flattenVFList(ctx, item.VFList), + } + + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenNicInfo") + return res +} + +func flattenVFList(ctx context.Context, vfMap []interface{}) []models.VFList { + tflog.Info(ctx, "Start flattenVFList") + + vfList := make([]models.VFList, 0, len(vfMap)) + + for _, item := range vfMap { + itemMap := item.(map[string]interface{}) + vf 
:= models.VFList{ + FnID: types.Int64Value(itemMap["fnId"].(int64)), + PCISlot: types.StringValue(itemMap["pciSlot"].(string)), + } + vfList = append(vfList, vf) + } + + tflog.Info(ctx, "End flattenVFList") + return vfList +} + +func flattenNumaTopology(ctx context.Context, numaTopolpgy *node.NumaTopologyInfo) *models.NumaTopologyModel { + tflog.Info(ctx, "Start flattenNumaTopology") + + res := models.NumaTopologyModel{ + NodeNum: types.Int64Value(int64(numaTopolpgy.NodeNum)), + Nodes: flattenNumaTopologyNodes(ctx, numaTopolpgy.Nodes), + } + + tflog.Info(ctx, "End flattenNumaTopology") + return &res +} + +func flattenNumaTopologyNodes(ctx context.Context, nodes map[string]node.NodeInfo) []models.NumaTopologyNodes { + tflog.Info(ctx, "Start flattenNumaTopologyNodes") + + res := make([]models.NumaTopologyNodes, 0, len(nodes)) + + for _, item := range nodes { + temp := models.NumaTopologyNodes{ + CPUList: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.CPUList), + Memory: &models.NumaTopologyNodesMemory{ + OneG: types.Int64Value(int64(item.Memory.OneG)), + TwoM: types.Int64Value(int64(item.Memory.TwoM)), + Total: types.Int64Value(int64(item.Memory.Total)), + }, + } + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenNumaTopologyNodes") + return res +} diff --git a/internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node_list.go b/internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node_list.go new file mode 100644 index 0000000..627578f --- /dev/null +++ b/internal/service/cloudbroker/node/flattens/flatten_data_source_cb_node_list.go @@ -0,0 +1,126 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/node" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/utilities" +) + +func NodeListDataSource(ctx context.Context, state *models.DataSourceNodeList, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.NodeDataListSource") + + diags := diag.Diagnostics{} + + recordList, diags := utilities.NodeListDataSourceCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.NodeListDataSource: before flatten") + + id := uuid.New() + + *state = models.DataSourceNodeList{ + ByID: state.ByID, + Name: state.Name, + Version: state.Version, + Release: state.Release, + SepID: state.SepID, + Role: state.Role, + Status: state.Status, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + Items: flattenItemsList(ctx, recordList), + EntryCount: types.Int64Value(int64(recordList.EntryCount)), + } + + tflog.Info(ctx, "End flattens.NodeListDataSource") + return nil +} + +func flattenItemsList(ctx context.Context, recordList *node.ListNodes) []models.ItemNodeModel { + tflog.Info(ctx, "Start flattenItemsList") + + res := make([]models.ItemNodeModel, 0, len(recordList.Data)) + + for _, item := range recordList.Data { + temp := models.ItemNodeModel{ + AdditionalPkgs: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.AdditionalPkgs), + CpuInfo: flattenCpuInfo(ctx, &item.CpuInfo), + Description: types.StringValue(item.Description), + GID: types.Int64Value(int64(item.GID)), + GUID: types.StringValue(item.GUID), + HostKey: types.StringValue(item.HostKey), + IPAddr: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.IPAddr), + IsolatedCPUs: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.IsolatedCpus), 
+ LastCheck: types.Int64Value(int64(item.LastCheck)), + MachineGUID: types.StringValue(item.MachineGUID), + MainboardSN: types.StringValue(item.MainboardSN), + Memory: types.Int64Value(int64(item.Memory)), + Milestones: types.Int64Value(int64(item.Milestones)), + Model: types.StringValue(item.Model), + Name: types.StringValue(item.Name), + NeedReboot: types.BoolValue(item.NeedReboot), + NetAddr: flattenNetAddr(ctx, item.NetAddr), + NetworkMode: types.StringValue(item.NetworkMode), + NicInfo: flattenNicInfo(ctx, item.NicInfo), + NodeUUID: types.StringValue(item.NodeUUID), + NodeID: types.Int64Value(int64(item.ID)), + NumaTopology: flattenNumaTopology(ctx, &item.NumaTopology), + PeerBackup: types.Int64Value(int64(item.PeerBackup)), + PeerLog: types.Int64Value(int64(item.PeerLog)), + PeerStats: types.Int64Value(int64(item.PeerStats)), + Pgpus: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.Pgpus), + PublicKeys: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.PublicKeys), + Release: types.StringValue(item.Release), + ReservedCPUs: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.ReservedCPUs), + Roles: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.Roles), + SEPs: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &item.Seps), + SerialNum: types.StringValue(item.SerialNum), + SriovEnabled: types.BoolValue(item.SriovEnabled), + StackId: types.Int64Value(int64(item.StackID)), + Status: types.StringValue(item.Status), + Tags: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.Tags), + Type: types.StringValue(item.Type), + Version: types.StringValue(item.Version), + } + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenItemsList") + return res +} + +func flattenNetAddr(ctx context.Context, netAddr node.ListNetAddr) []models.NetAddrModel { + tflog.Info(ctx, "Start flattenNetAddr") + + res := make([]models.NetAddrModel, 0, len(netAddr)) + + for _, item := range netAddr { + temp := 
models.NetAddrModel{ + CIDR: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.CIDR), + Index: types.Int64Value(int64(item.Index)), + IP: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &item.IP), + MAC: types.StringValue(item.Mac), + MTU: types.Int64Value(int64(item.MTU)), + Name: types.StringValue(item.Name), + } + + res = append(res, temp) + } + + tflog.Info(ctx, "End flattenNetAddr") + return res +} diff --git a/internal/service/cloudbroker/node/models/model_data_source_cb_node.go b/internal/service/cloudbroker/node/models/model_data_source_cb_node.go new file mode 100644 index 0000000..6a22598 --- /dev/null +++ b/internal/service/cloudbroker/node/models/model_data_source_cb_node.go @@ -0,0 +1,87 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceNode struct { + //required field + NodeID types.Int64 `tfsdk:"node_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //response field + Consumption *ConsumptionModel `tfsdk:"consumption"` + CpuInfo *CpuInfoModel `tfsdk:"cpu_info"` + CPUAllocationRatio types.Int64 `tfsdk:"cpu_allocation_ratio"` + GID types.Int64 `tfsdk:"gid"` + ID types.String `tfsdk:"id"` + IPAddr types.List `tfsdk:"ipaddr"` + IsolatedCPUs types.List `tfsdk:"isolated_cpus"` + Name types.String `tfsdk:"name"` + NeedReboot types.Bool `tfsdk:"need_reboot"` + NicInfo []NicInfoModel `tfsdk:"nic_info"` + NumaTopology *NumaTopologyModel `tfsdk:"numa_topology"` + ReservedCPUs types.List `tfsdk:"reserved_cpus"` + Roles types.List `tfsdk:"roles"` + SriovEnabled types.Bool `tfsdk:"sriov_enabled"` + StackId types.Int64 `tfsdk:"stack_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` +} + +type ConsumptionModel struct { + Consumed *ConsumedModel `tfsdk:"consumed"` + Free *ResourcesInfo `tfsdk:"free"` + Reserved *ResourcesInfo `tfsdk:"reserved"` + Total *ResourcesInfo 
`tfsdk:"total"` + Hostname types.String `tfsdk:"hostname"` +} + +type ConsumedModel struct { + RAM types.Int64 `tfsdk:"ram"` + Computes types.Int64 `tfsdk:"computes"` + Routers types.Int64 `tfsdk:"routers"` + VCPU types.Int64 `tfsdk:"vcpu"` +} + +type ResourcesInfo struct { + RAM types.Int64 `tfsdk:"ram"` +} + +type CpuInfoModel struct { + ClockSpeed types.Int64 `tfsdk:"clock_speed"` + CoreCount types.Int64 `tfsdk:"core_count"` + PhysCount types.Int64 `tfsdk:"phys_count"` +} + +type NicInfoModel struct { + Driver types.String `tfsdk:"driver"` + MaxVFS types.Int64 `tfsdk:"max_vfs"` + NumaNode types.Int64 `tfsdk:"numa_node"` + NumVFS types.Int64 `tfsdk:"num_vfs"` + OSName types.String `tfsdk:"os_name"` + PCISlot types.String `tfsdk:"pci_slot"` + VFList []VFList `tfsdk:"vf_list"` +} + +type VFList struct { + FnID types.Int64 `tfsdk:"fn_id"` + PCISlot types.String `tfsdk:"pci_slot"` +} + +type NumaTopologyModel struct { + NodeNum types.Int64 `tfsdk:"node_num"` + Nodes []NumaTopologyNodes `tfsdk:"nodes"` +} + +type NumaTopologyNodes struct { + CPUList types.List `tfsdk:"cpu_list"` + Memory *NumaTopologyNodesMemory `tfsdk:"memory"` +} + +type NumaTopologyNodesMemory struct { + OneG types.Int64 `tfsdk:"one_g"` + TwoM types.Int64 `tfsdk:"two_m"` + Total types.Int64 `tfsdk:"total"` +} diff --git a/internal/service/cloudbroker/node/models/model_data_source_cb_node_list.go b/internal/service/cloudbroker/node/models/model_data_source_cb_node_list.go new file mode 100644 index 0000000..aaa478a --- /dev/null +++ b/internal/service/cloudbroker/node/models/model_data_source_cb_node_list.go @@ -0,0 +1,75 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceNodeList struct { + // request fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + Version types.String `tfsdk:"version"` + Release types.String `tfsdk:"release"` + 
SepID types.Int64 `tfsdk:"sep_id"` + Role types.String `tfsdk:"role"` + Status types.String `tfsdk:"status"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + // response fields + Id types.String `tfsdk:"id"` + Items []ItemNodeModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemNodeModel struct { + AdditionalPkgs types.List `tfsdk:"additional_pkgs"` + CpuInfo *CpuInfoModel `tfsdk:"cpu_info"` + Description types.String `tfsdk:"description"` + GID types.Int64 `tfsdk:"gid"` + GUID types.String `tfsdk:"guid"` + HostKey types.String `tfsdk:"hostkey"` + IPAddr types.List `tfsdk:"ipaddr"` + IsolatedCPUs types.List `tfsdk:"isolated_cpus"` + LastCheck types.Int64 `tfsdk:"lastcheck"` + MachineGUID types.String `tfsdk:"machine_guid"` + MainboardSN types.String `tfsdk:"mainboard_sn"` + Memory types.Int64 `tfsdk:"memory"` + Milestones types.Int64 `tfsdk:"milestones"` + Model types.String `tfsdk:"model"` + Name types.String `tfsdk:"name"` + NeedReboot types.Bool `tfsdk:"need_reboot"` + NetAddr []NetAddrModel `tfsdk:"net_addr"` + NetworkMode types.String `tfsdk:"network_mode"` + NicInfo []NicInfoModel `tfsdk:"nic_info"` + NodeUUID types.String `tfsdk:"node_uuid"` + NodeID types.Int64 `tfsdk:"node_id"` + NumaTopology *NumaTopologyModel `tfsdk:"numa_topology"` + PeerBackup types.Int64 `tfsdk:"peer_backup"` + PeerLog types.Int64 `tfsdk:"peer_log"` + PeerStats types.Int64 `tfsdk:"peer_stats"` + Pgpus types.List `tfsdk:"pgpus"` + PublicKeys types.List `tfsdk:"public_keys"` + Release types.String `tfsdk:"release"` + ReservedCPUs types.List `tfsdk:"reserved_cpus"` + Roles types.List `tfsdk:"roles"` + SEPs types.List `tfsdk:"seps"` + SerialNum types.String `tfsdk:"serial_num"` + SriovEnabled types.Bool `tfsdk:"sriov_enabled"` + StackId types.Int64 `tfsdk:"stack_id"` + Status types.String `tfsdk:"status"` + Tags types.List `tfsdk:"tags"` + Type 
types.String `tfsdk:"type"` + Version types.String `tfsdk:"version"` +} + +type NetAddrModel struct { + CIDR types.List `tfsdk:"cidr"` + Index types.Int64 `tfsdk:"index"` + IP types.List `tfsdk:"ip"` + MAC types.String `tfsdk:"mac"` + MTU types.Int64 `tfsdk:"mtu"` + Name types.String `tfsdk:"name"` +} diff --git a/internal/service/cloudbroker/node/schemas/schema_data_source_cb_node.go b/internal/service/cloudbroker/node/schemas/schema_data_source_cb_node.go new file mode 100644 index 0000000..502b50b --- /dev/null +++ b/internal/service/cloudbroker/node/schemas/schema_data_source_cb_node.go @@ -0,0 +1,191 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceNode() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "node_id": schema.Int64Attribute{ + Required: true, + }, + "consumption": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ram": schema.Int64Attribute{ + Computed: true, + }, + "computes": schema.Int64Attribute{ + Computed: true, + }, + "routers": schema.Int64Attribute{ + Computed: true, + }, + "vcpu": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "free": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ram": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ram": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "total": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ram": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "hostname": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "cpu_info": 
schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "clock_speed": schema.Int64Attribute{ + Computed: true, + }, + "core_count": schema.Int64Attribute{ + Computed: true, + }, + "phys_count": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "cpu_allocation_ratio": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "ipaddr": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "isolated_cpus": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "need_reboot": schema.BoolAttribute{ + Computed: true, + }, + "nic_info": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "driver": schema.StringAttribute{ + Computed: true, + }, + "max_vfs": schema.Int64Attribute{ + Computed: true, + }, + "numa_node": schema.Int64Attribute{ + Computed: true, + }, + "num_vfs": schema.Int64Attribute{ + Computed: true, + }, + "os_name": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.StringAttribute{ + Computed: true, + }, + "vf_list": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "fn_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "numa_topology": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "node_num": schema.Int64Attribute{ + Computed: true, + }, + "nodes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cpu_list": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "memory": 
schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "one_g": schema.Int64Attribute{ + Computed: true, + }, + "two_m": schema.Int64Attribute{ + Computed: true, + }, + "total": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + "reserved_cpus": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "roles": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "sriov_enabled": schema.BoolAttribute{ + Computed: true, + }, + "stack_id": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "version": schema.StringAttribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/node/schemas/schema_data_source_cb_node_list.go b/internal/service/cloudbroker/node/schemas/schema_data_source_cb_node_list.go new file mode 100644 index 0000000..4ce2489 --- /dev/null +++ b/internal/service/cloudbroker/node/schemas/schema_data_source_cb_node_list.go @@ -0,0 +1,287 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceNodeList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "find node by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find node by name", + }, + "version": schema.StringAttribute{ + Optional: true, + Description: "find node by version", + }, + "release": schema.StringAttribute{ + Optional: true, + Description: "find node by release version", + }, + "sep_id": schema.Int64Attribute{ + Optional: true, + Description: "find node by sepID", + }, + "role": schema.StringAttribute{ + Optional: true, + Description: "find node by node roles", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "find node by node 
status", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size, maximum - 100", + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "additional_pkgs": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "cpu_info": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "clock_speed": schema.Int64Attribute{ + Computed: true, + }, + "core_count": schema.Int64Attribute{ + Computed: true, + }, + "phys_count": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "hostkey": schema.StringAttribute{ + Computed: true, + }, + "node_id": schema.Int64Attribute{ + Computed: true, + }, + "ipaddr": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "isolated_cpus": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "lastcheck": schema.Int64Attribute{ + Computed: true, + }, + "machine_guid": schema.StringAttribute{ + Computed: true, + }, + "mainboard_sn": schema.StringAttribute{ + Computed: true, + }, + "memory": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "model": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "need_reboot": schema.BoolAttribute{ + Computed: true, + }, + "net_addr": schema.ListNestedAttribute{ + Computed: true, + NestedObject: 
schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cidr": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "index": schema.Int64Attribute{ + Computed: true, + }, + "ip": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "mac": schema.StringAttribute{ + Computed: true, + }, + "mtu": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "network_mode": schema.StringAttribute{ + Computed: true, + }, + "nic_info": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "driver": schema.StringAttribute{ + Computed: true, + }, + "max_vfs": schema.Int64Attribute{ + Computed: true, + }, + "numa_node": schema.Int64Attribute{ + Computed: true, + }, + "num_vfs": schema.Int64Attribute{ + Computed: true, + }, + "os_name": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.StringAttribute{ + Computed: true, + }, + "vf_list": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "fn_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "node_uuid": schema.StringAttribute{ + Computed: true, + }, + "numa_topology": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "node_num": schema.Int64Attribute{ + Computed: true, + }, + "nodes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "cpu_list": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "memory": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "one_g": schema.Int64Attribute{ + Computed: true, + }, + "two_m": 
schema.Int64Attribute{ + Computed: true, + }, + "total": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + "peer_backup": schema.Int64Attribute{ + Computed: true, + }, + "peer_log": schema.Int64Attribute{ + Computed: true, + }, + "peer_stats": schema.Int64Attribute{ + Computed: true, + }, + "pgpus": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "public_keys": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "release": schema.StringAttribute{ + Computed: true, + }, + "reserved_cpus": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "roles": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "seps": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "serial_num": schema.StringAttribute{ + Computed: true, + }, + "sriov_enabled": schema.BoolAttribute{ + Computed: true, + }, + "stack_id": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tags": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "version": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/node/utilities/utility_data_source_cb_node.go b/internal/service/cloudbroker/node/utilities/utility_data_source_cb_node.go new file mode 100644 index 0000000..83f493e --- /dev/null +++ b/internal/service/cloudbroker/node/utilities/utility_data_source_cb_node.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/node" +) + 
+func NodeDataSourceCheckPresence(ctx context.Context, nodeId uint64, c *decort.DecortClient) (*node.RecordNode, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("NodeDataSourceCheckPresence: Get info about Node with ID - %v", nodeId)) + + diags := diag.Diagnostics{} + + recordNode, err := c.CloudBroker().Node().Get(ctx, node.GetRequest{NID: nodeId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about Node with ID %v", nodeId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "NodeDataSourceCheckPresence: response from CloudBroker().Node().Get", map[string]any{"node_id": nodeId, "response": recordNode}) + + return recordNode, nil +} diff --git a/internal/service/cloudbroker/node/utilities/utility_data_source_cb_node_list.go b/internal/service/cloudbroker/node/utilities/utility_data_source_cb_node_list.go new file mode 100644 index 0000000..877eaa7 --- /dev/null +++ b/internal/service/cloudbroker/node/utilities/utility_data_source_cb_node_list.go @@ -0,0 +1,60 @@ +package utilities + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/node" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/node/models" +) + +func NodeListDataSourceCheckPresence(ctx context.Context, state *models.DataSourceNodeList, c *decort.DecortClient) (*node.ListNodes, diag.Diagnostics) { + tflog.Info(ctx, "NodeListDataSourceCheckPresence: Get node list info") + + diags := diag.Diagnostics{} + + req := node.ListRequest{} + + if !state.ByID.IsNull() { + req.ByID = uint64(state.ByID.ValueInt64()) + } + if !state.Name.IsNull() { + req.Name = state.Name.ValueString() + } + if !state.Version.IsNull() { + req.Version = state.Version.ValueString() + } + if !state.Release.IsNull() { + req.Release = state.Release.ValueString() + } + if 
!state.SepID.IsNull() { + req.SepID = uint64(state.SepID.ValueInt64()) + } + if !state.Role.IsNull() { + req.Role = state.Role.ValueString() + } + if !state.Status.IsNull() { + req.Status = state.Status.ValueString() + } + if !state.SortBy.IsNull() { + req.SortBy = state.SortBy.ValueString() + } + if !state.Page.IsNull() { + req.Page = uint64(state.Page.ValueInt64()) + } + if !state.Size.IsNull() { + req.Size = uint64(state.Size.ValueInt64()) + } + + recordNodeList, err := c.CloudBroker().Node().List(ctx, req) + if err != nil { + diags.AddError("Cannot get info about Node list", err.Error()) + return nil, diags + } + + tflog.Info(ctx, "NodeListDataSourceCheckPresence: response from CloudBroker().Node().List") + + return recordNodeList, nil +} diff --git a/internal/service/cloudbroker/rg/data_source_rg.go b/internal/service/cloudbroker/rg/data_source_rg.go new file mode 100644 index 0000000..0f33759 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg.go @@ -0,0 +1,91 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. 
+var ( + _ datasource.DataSource = &dataSourceRG{} +) + +func NewDataSourceRG() datasource.DataSource { + return &dataSourceRG{} +} + +// dataSourceRG is the data source implementation. +type dataSourceRG struct { + client *decort.DecortClient +} + +func (d *dataSourceRG) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRG: Error get state") + return + } + rgId := uint64(state.RGID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceRG: got state successfully", map[string]any{"rg_id": rgId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRG: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRG: set timeouts successfully", map[string]any{ + "rg_id": rgId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRG: Error flatten data source rg") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRG: Error set state") + return + } + tflog.Info(ctx, "End read resource group", map[string]any{"rg_id": rgId}) +} + +func (d *dataSourceRG) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRG(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRG) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRG) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRG") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRG successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..7c8bb90 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_group_computes.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGAffinityGroupComputes{} +) + +func NewDataSourceRGAffinityGroupComputes() datasource.DataSource { + return &dataSourceRGAffinityGroupComputes{} +} + +// dataSourceRGAffinityGroupComputes is the data source implementation. +type dataSourceRGAffinityGroupComputes struct { + client *decort.DecortClient +} + +func (d *dataSourceRGAffinityGroupComputes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGAffinityGroupComputesModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGAffinityGroupComputes: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGAffinityGroupComputes: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGAffinityGroupComputesDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error flatten data source rg affinity group computes") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupComputes: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGAffinityGroupComputes") +} + +func (d *dataSourceRGAffinityGroupComputes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupComputes(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGAffinityGroupComputes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_affinity_group_computes" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGAffinityGroupComputes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupComputes") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupComputes successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..81ac2dd --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_get.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGAffinityGroupsGet{} +) + +func NewDataSourceRGAffinityGroupsGet() datasource.DataSource { + return &dataSourceRGAffinityGroupsGet{} +} + +// dataSourceRGAffinityGroupsGet is the data source implementation. +type dataSourceRGAffinityGroupsGet struct { + client *decort.DecortClient +} + +func (d *dataSourceRGAffinityGroupsGet) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGAffinityGroupsGetModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGAffinityGroupsGet: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGAffinityGroupsGet: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGAffinityGroupsGetDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error flatten data source rg affinity group get") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsGet: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGAffinityGroupsGet") +} + +func (d *dataSourceRGAffinityGroupsGet) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupsGet(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGAffinityGroupsGet) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_affinity_groups_get" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceRGAffinityGroupsGet) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupsGet") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupsGet successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..21dc619 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_affinity_groups_list.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGAffinityGroupsList{} +) + +func NewDataSourceRGAffinityGroupsList() datasource.DataSource { + return &dataSourceRGAffinityGroupsList{} +} + +// dataSourceRGAffinityGroupsList is the data source implementation. 
+type dataSourceRGAffinityGroupsList struct { + client *decort.DecortClient +} + +func (d *dataSourceRGAffinityGroupsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGAffinityGroupsListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGAffinityGroupsList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGAffinityGroupsList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGAffinityGroupsListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error flatten data source rg affinity groups list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAffinityGroupsList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGAffinityGroupsList") +} + +func (d *dataSourceRGAffinityGroupsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGAffinityGroupsList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGAffinityGroupsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_affinity_groups_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGAffinityGroupsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGAffinityGroupsList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGAffinityGroupsList successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_audits.go b/internal/service/cloudbroker/rg/data_source_rg_audits.go new file mode 100644 index 0000000..b7f75ca --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_audits.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGAudits{} +) + +func NewDataSourceRGAudits() datasource.DataSource { + return &dataSourceRGAudits{} +} + +// dataSourceRGAudits is the data source implementation. +type dataSourceRGAudits struct { + client *decort.DecortClient +} + +func (d *dataSourceRGAudits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGAuditsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAudits: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGAudits: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAudits: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGAudits: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGAuditsDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAudits: Error flatten data source rg audits") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGAudits: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGAudits") +} + +func (d *dataSourceRGAudits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGAudits(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGAudits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_audits" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGAudits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGAudits") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGAudits successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_get_resource_consumption.go b/internal/service/cloudbroker/rg/data_source_rg_get_resource_consumption.go new file mode 100644 index 0000000..b7f7327 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_get_resource_consumption.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGGetResourceConsumption{} +) + +func NewDataSourceRGGetResourceConsumption() datasource.DataSource { + return &dataSourceRGGetResourceConsumption{} +} + +// dataSourceRGGetResourceConsumption is the data source implementation. +type dataSourceRGGetResourceConsumption struct { + client *decort.DecortClient +} + +func (d *dataSourceRGGetResourceConsumption) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGGetResourceConsumptionModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGGetResourceConsumption: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGGetResourceConsumption: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGGetResourceConsumptionDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error flatten data source rg get resource consumption") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGGetResourceConsumption: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGGetResourceConsumption") +} + +func (d *dataSourceRGGetResourceConsumption) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGGetResourceConsumption(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGGetResourceConsumption) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_resource_consumption_get" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGGetResourceConsumption) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGGetResourceConsumption") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGGetResourceConsumption successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list.go b/internal/service/cloudbroker/rg/data_source_rg_list.go new file mode 100644 index 0000000..1c046bd --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_list.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGList{} +) + +func NewDataSourceRGList() datasource.DataSource { + return &dataSourceRGList{} +} + +// dataSourceRGList is the data source implementation. +type dataSourceRGList struct { + client *decort.DecortClient +} + +func (d *dataSourceRGList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGList: Error flatten data source rg list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGList") +} + +func (d *dataSourceRGList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGList successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list_computes.go b/internal/service/cloudbroker/rg/data_source_rg_list_computes.go new file mode 100644 index 0000000..aa5d382 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_list_computes.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGListComputes{} +) + +func NewDataSourceRGListComputes() datasource.DataSource { + return &dataSourceRGListComputes{} +} + +// dataSourceRGListComputes is the data source implementation. +type dataSourceRGListComputes struct { + client *decort.DecortClient +} + +func (d *dataSourceRGListComputes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGListComputesModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListComputes: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGListComputes: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListComputes: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGListComputes: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGListComputesDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListComputes: Error flatten data source rg list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListComputes: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGListComputes") +} + +func (d *dataSourceRGListComputes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGListComputes(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGListComputes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_list_computes" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGListComputes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGListComputes") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGListComputes successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list_deleted.go b/internal/service/cloudbroker/rg/data_source_rg_list_deleted.go new file mode 100644 index 0000000..addb810 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_list_deleted.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGListDeleted{} +) + +func NewDataSourceRGListDeleted() datasource.DataSource { + return &dataSourceRGListDeleted{} +} + +// dataSourceRGListDeleted is the data source implementation. +type dataSourceRGListDeleted struct { + client *decort.DecortClient +} + +func (d *dataSourceRGListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGListDeletedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListDeleted: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGListDeleted: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGListDeleted: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGListDeletedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListDeleted: Error flatten data source rg list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGListDeleted") +} + +func (d *dataSourceRGListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_list_deleted" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGListDeleted successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list_lb.go b/internal/service/cloudbroker/rg/data_source_rg_list_lb.go new file mode 100644 index 0000000..8523a81 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_list_lb.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGListLB{} +) + +func NewDataSourceRGListLB() datasource.DataSource { + return &dataSourceRGListLB{} +} + +// dataSourceRGListLB is the data source implementation. +type dataSourceRGListLB struct { + client *decort.DecortClient +} + +func (d *dataSourceRGListLB) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGListLBModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListLB: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGListLB: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListLB: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGListLB: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGListLBDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListLB: Error flatten data source rg list lb") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListLB: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGListLB") +} + +func (d *dataSourceRGListLB) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGListLB(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGListLB) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_list_lb" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGListLB) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGListLB") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGListLB successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list_pfw.go b/internal/service/cloudbroker/rg/data_source_rg_list_pfw.go new file mode 100644 index 0000000..e9b9bd6 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_list_pfw.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGListPFW{} +) + +func NewDataSourceRGListPFW() datasource.DataSource { + return &dataSourceRGListPFW{} +} + +// dataSourceRGListPFW is the data source implementation. +type dataSourceRGListPFW struct { + client *decort.DecortClient +} + +func (d *dataSourceRGListPFW) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGListPFWModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListPFW: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGListPFW: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListPFW: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGListPFW: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGListPFWDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListPFW: Error flatten data source rg list pfw") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListPFW: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGListPFW") +} + +func (d *dataSourceRGListPFW) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGListPFW(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGListPFW) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_list_pfw" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGListPFW) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGListPFW") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGListPFW successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_list_vins.go b/internal/service/cloudbroker/rg/data_source_rg_list_vins.go new file mode 100644 index 0000000..9039f03 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_list_vins.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGListVins{} +) + +func NewDataSourceRGListVins() datasource.DataSource { + return &dataSourceRGListVins{} +} + +// dataSourceRGListVins is the data source implementation. +type dataSourceRGListVins struct { + client *decort.DecortClient +} + +func (d *dataSourceRGListVins) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGListVinsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListVins: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGListVins: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListVins: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGListVins: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGListVinsDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListVins: Error flatten data source rg list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGListVins: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGListVins") +} + +func (d *dataSourceRGListVins) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGListVins(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGListVins) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_list_vins" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceRGListVins) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGListVins") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGListVins successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_resource_consumption_list.go b/internal/service/cloudbroker/rg/data_source_rg_resource_consumption_list.go new file mode 100644 index 0000000..6b62813 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_resource_consumption_list.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGResourceConsumptionList{} +) + +func NewDataSourceRGResourceConsumptionList() datasource.DataSource { + return &dataSourceRGResourceConsumptionList{} +} + +// dataSourceRGResourceConsumptionList is the data source implementation. +type dataSourceRGResourceConsumptionList struct { + client *decort.DecortClient +} + +func (d *dataSourceRGResourceConsumptionList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGResourceConsumptionListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGResourceConsumptionList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout180s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGResourceConsumptionList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGResourceConsumptionListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error flatten data source rg resource consumption list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGResourceConsumptionList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGResourceConsumptionList") +} + +func (d *dataSourceRGResourceConsumptionList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGResourceConsumptionList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGResourceConsumptionList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_resource_consumption_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceRGResourceConsumptionList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGResourceConsumptionList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGResourceConsumptionList successfully") +} diff --git a/internal/service/cloudbroker/rg/data_source_rg_usage.go b/internal/service/cloudbroker/rg/data_source_rg_usage.go new file mode 100644 index 0000000..343a3f3 --- /dev/null +++ b/internal/service/cloudbroker/rg/data_source_rg_usage.go @@ -0,0 +1,89 @@ +package rg + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceRGUsage{} +) + +func NewDataSourceRGUsage() datasource.DataSource { + return &dataSourceRGUsage{} +} + +// dataSourceRGUsage is the data source implementation. 
+type dataSourceRGUsage struct { + client *decort.DecortClient +} + +func (d *dataSourceRGUsage) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceRGUsageModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGUsage: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceRGUsage: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGUsage: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceRGUsage: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.RGUsageDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGUsage: Error flatten data source rg usage") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceRGUsage: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceRGUsage") +} + +func (d *dataSourceRGUsage) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceRGUsage(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceRGUsage) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_rg_usage" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceRGUsage) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceRGUsage") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceRGUsage successfully") +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg.go new file mode 100644 index 0000000..f58953f --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg.go @@ -0,0 +1,126 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGDataSource flattens data source for rg (resource group). +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGDataSource(ctx context.Context, state *models.DataSourceRGModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGDataSource") + + diags := diag.Diagnostics{} + + rgId := uint64(state.RGID.ValueInt64()) + + recordRG, err := utilities.RGCheckPresence(ctx, rgId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about resource group with ID %v", rgId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGDataSource: before flatten", map[string]any{"rg_id": rgId, "recordRG": recordRG}) + + id := uuid.New() + *state = models.DataSourceRGModel{ + RGID: state.RGID, + Reason: state.Reason, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + AccountID: types.Int64Value(int64(recordRG.AccountID)), + AccountName: types.StringValue(recordRG.AccountName), + ACL: flattenACL(ctx, &recordRG.ACL), + ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.ComputeFeatures), + Computes: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordRG.VMs), + CPUAllocationParameter: types.StringValue(recordRG.CPUAllocationParameter), + CPUAllocationRatio: types.Float64Value(recordRG.CPUAllocationRatio), + CreatedBy: types.StringValue(recordRG.CreatedBy), + CreatedTime: types.Int64Value(int64(recordRG.CreatedTime)), + DefNetID: types.Int64Value(recordRG.DefNetID), + DefNetType: types.StringValue(recordRG.DefNetType), + DeletedBy: types.StringValue(recordRG.DeletedBy), + DeletedTime: types.Int64Value(int64(recordRG.DeletedTime)), + Description: types.StringValue(recordRG.Description), + Dirty: types.BoolValue(recordRG.Dirty), + GID: types.Int64Value(int64(recordRG.GID)), + GUID: types.Int64Value(int64(recordRG.GUID)), + LockStatus: types.StringValue(recordRG.LockStatus), + Milestones: types.Int64Value(int64(recordRG.Milestones)), + Name: types.StringValue(recordRG.Name), + RegisterComputes: types.BoolValue(recordRG.RegisterComputes), + ResourceLimits: flattenResourceLimits(ctx, 
&recordRG.ResourceLimits), + ResourceTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.ResTypes), + Secret: types.StringValue(recordRG.Secret), + Status: types.StringValue(recordRG.Status), + UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &recordRG.UniqPools), + UpdatedBy: types.StringValue(recordRG.UpdatedBy), + UpdatedTime: types.Int64Value(int64(recordRG.UpdatedTime)), + VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &recordRG.VINS), + } + + tflog.Info(ctx, "flattens.RGDataSource: after flatten", map[string]any{"rg_id": state.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.RGDataSource", map[string]any{"rg_id": state.Id.ValueString()}) + return nil +} + +func flattenACL(ctx context.Context, item *rg.ListACL) types.List { + tflog.Info(ctx, "Start flattenACLItems") + tempSlice := make([]types.Object, 0, len(*item)) + for _, aclItem := range *item { + temp := models.ItemACLModel{ + Explicit: types.BoolValue(aclItem.Explicit), + GUID: types.StringValue(aclItem.GUID), + Right: types.StringValue(aclItem.Right), + Status: types.StringValue(aclItem.Status), + Type: types.StringValue(aclItem.Type), + UserGroupID: types.StringValue(aclItem.UserGroupID), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemACL, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenACLItems struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACL}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenACLItems", diags)) + } + + tflog.Info(ctx, "End flattenACLItems") + return res +} + +func flattenResourceLimits(ctx context.Context, item *rg.ResourceLimits) types.Object { + tflog.Info(ctx, "Start flattenResourceLimits") + temp := models.ResourceLimitsModel{ + CUC: types.Float64Value(item.CUC), + CUD: types.Float64Value(item.CuD), + CUDM: types.Float64Value(item.CUDM), + CUI: 
types.Float64Value(item.CUI), + CUM: types.Float64Value(item.CUM), + CUNP: types.Float64Value(item.CUNP), + GPUUnits: types.Float64Value(item.GPUUnits), + } + res, diags := types.ObjectValueFrom(ctx, models.ItemResourseModel, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenResourceLimits struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenResourceLimits") + return res +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..d9db610 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_group_computes.go @@ -0,0 +1,82 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGAffinityGroupComputesDataSource flattens data source for rg affinity group computes. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGAffinityGroupComputesDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupComputesModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGAffinityGroupComputesDataSource") + + diags := diag.Diagnostics{} + + agCompsList, err := utilities.RGAffinityGroupComputesCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group affinity group computes", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGAffinityGroupComputesDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGAffinityGroupComputesModel{ + RGID: state.RGID, + AffinityGroup: state.AffinityGroup, + + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + } + + var diagsItem diag.Diagnostics + items := make([]models.ItemAffinityGroupComputeModel, 0, len(*agCompsList)) + for _, comp := range *agCompsList { + item := models.ItemAffinityGroupComputeModel{ + ComputeID: types.Int64Value(int64(comp.ComputeID)), + } + + item.OtherNode, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNode) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNode to item.OtherNode", diags)) + } + item.OtherNodeIndirect, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeIndirect) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeIndirect to item.OtherNodeIndirect", diags)) + } + item.OtherNodeIndirectSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeIndirectSoft) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeIndirectSoft to item.OtherNodeIndirectSoft", diags)) + } + item.OtherNodeSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.OtherNodeSoft) + if diagsItem.HasError() { + 
tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.OtherNodeSoft to item.OtherNodeSoft", diags)) + } + item.SameNode, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.SameNode) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.SameNode to item.SameNode", diags)) + } + item.SameNodeSoft, diagsItem = types.ListValueFrom(ctx, types.Int64Type, comp.SameNodeSoft) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupComputesDataSource: cannot flatten comp.SameNodeSoft to item.SameNodeSoft", diags)) + } + + items = append(items, item) + } + + state.Items = items + + tflog.Info(ctx, "flattens.RGAffinityGroupComputesDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGAffinityGroupComputesDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..22ad5ac --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_get.go @@ -0,0 +1,51 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGAffinityGroupsGetDataSource flattens data source for rg affinity groups get. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGAffinityGroupsGetDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupsGetModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGAffinityGroupsGetDataSource") + + diags := diag.Diagnostics{} + + agItem, err := utilities.RGAffinityGroupsGetCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group affinity groups get", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGAffinityGroupsGetDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGAffinityGroupsGetModel{ + RGID: state.RGID, + AffinityGroup: state.AffinityGroup, + + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + } + + var diagsItem diag.Diagnostics + state.Ids, diagsItem = types.ListValueFrom(ctx, types.Int64Type, agItem) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.RGAffinityGroupsGetDataSource: cannot flatten agItem to state.Ids", diags)) + } + + tflog.Info(ctx, "flattens.RGAffinityGroupsGetDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGAffinityGroupsGetDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..5186717 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_affinity_groups_list.go @@ -0,0 +1,68 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGAffinityGroupsListDataSource flattens data source for rg affinity groups list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func RGAffinityGroupsListDataSource(ctx context.Context, state *models.DataSourceRGAffinityGroupsListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGAffinityGroupsListDataSource") + + diags := diag.Diagnostics{} + + agList, err := utilities.RGAffinityGroupsListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group affinity groups list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGAffinityGroupsListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGAffinityGroupsListModel{ + RGID: state.RGID, + + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemAffinityGroupModel, 0, len(agList.Data)) + for _, data := range agList.Data { + for agLabel, listAG := range data { + item := models.ItemAffinityGroupModel{ + Label: types.StringValue(agLabel), + } + + ids := make([]models.ItemIDModel, 0, len(listAG)) + for _, agItem := range listAG { + idItem := models.ItemIDModel{ + Id: types.Int64Value(int64(agItem.ID)), + NodeId: types.Int64Value(int64(agItem.NodeID)), + } + ids = append(ids, idItem) + } + item.Ids = ids + items = append(items, item) + } + } + state.AffinityGroups = items + state.EntryCount = types.Int64Value(int64(agList.EntryCount)) + + tflog.Info(ctx, "flattens.RGAffinityGroupsListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGAffinityGroupsListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_audits.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_audits.go new 
file mode 100644 index 0000000..e87d158 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_audits.go @@ -0,0 +1,56 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGAuditsDataSource flattens data source for rg audits. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func RGAuditsDataSource(ctx context.Context, state *models.DataSourceRGAuditsModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGAuditsDataSource") + + diags := diag.Diagnostics{} + + rgAudits, err := utilities.RGAuditsCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group audits", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGAuditsDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGAuditsModel{ + RGID: state.RGID, + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGAuditModel, 0, len(*rgAudits)) + for _, auditItem := range *rgAudits { + item := models.ItemsRGAuditModel{ + Call: types.StringValue(auditItem.Call), + ResponseTime: types.Float64Value(auditItem.ResponseTime), + StatusCode: types.Int64Value(int64(auditItem.StatusCode)), + Timestamp: types.Float64Value(auditItem.Timestamp), + User: types.StringValue(auditItem.User), + } + + items = append(items, item) + } + state.Items = items + + tflog.Info(ctx, "flattens.RGAuditsDataSource: after flatten") + + tflog.Info(ctx, "End 
flattens.RGAuditsDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_get_resource_consumption.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_get_resource_consumption.go new file mode 100644 index 0000000..d573009 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_get_resource_consumption.go @@ -0,0 +1,97 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGGetResourceConsumptionDataSource flattens data source for rg get resource consumption. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGGetResourceConsumptionDataSource(ctx context.Context, state *models.DataSourceRGGetResourceConsumptionModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGGetResourceConsumptionDataSource") + + diags := diag.Diagnostics{} + + resourceItem, err := utilities.RGGetResourceConsumptionCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group get resource consumption", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGGetResourceConsumptionDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGGetResourceConsumptionModel{ + RGID: state.RGID, + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + + Consumed: flattenResource(ctx, &resourceItem.Consumed), + Reserved: flattenResource(ctx, &resourceItem.Reserved), + ResourceLimits: flattenResourceLimits(ctx, &resourceItem.ResourceLimits), + } + + tflog.Info(ctx, "flattens.RGGetResourceConsumptionDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGGetResourceConsumptionDataSource") + return nil +} + +func flattenResource(ctx context.Context, item *rg.Reservation) types.Object { + tflog.Info(ctx, "Start flattenResource") + temp := models.ResourceModel{ + CPU: types.Int64Value(item.CPU), + DiskSize: types.Float64Value(item.DiskSize), + DiskSizeMax: types.Float64Value(item.DiskSizeMax), + ExtIPs: types.Int64Value(item.ExtIPs), + ExtTraffic: types.Int64Value(item.ExtTraffic), + GPU: types.Int64Value(item.GPU), + RAM: types.Int64Value(item.RAM), + SEPs: flattenSEPs(ctx, item.SEPs), + } + res, diags := types.ObjectValueFrom(ctx, models.ItemResource, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenResource struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenResource") + return res +} + +func flattenSEPs(ctx context.Context, seps map[string]map[string]rg.DiskUsage) types.List { + tflog.Info(ctx, "Start flattenSEPs") + tempSlice := make([]types.Object, 0, 
len(seps)) + for sepId, data := range seps { + for dataName, diskData := range data { + sepItem := models.SEPsModel{ + SepID: types.StringValue(sepId), + DataName: types.StringValue(dataName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + obj, err := types.ObjectValueFrom(ctx, models.ItemSEPs, sepItem) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenSEPs struct to obj", err)) + } + tempSlice = append(tempSlice, obj) + } + } + + res, err := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSEPs}, tempSlice) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenSEPs", err)) + } + + tflog.Info(ctx, "End flattenSEPs") + return res +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list.go new file mode 100644 index 0000000..1a6687b --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list.go @@ -0,0 +1,95 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGListDataSource flattens data source for rg list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGListDataSource(ctx context.Context, state *models.DataSourceRGListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGListDataSource") + + diags := diag.Diagnostics{} + + rgList, err := utilities.RGListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGListModel{ + ById: state.ById, + Name: state.Name, + AccountId: state.AccountId, + AccountName: state.AccountName, + CreatedAfter: state.CreatedAfter, + CreatedBefore: state.CreatedBefore, + Status: state.Status, + LockStatus: state.LockStatus, + IncludeDeleted: state.IncludeDeleted, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGListModel, 0, len(rgList.Data)) + for _, rgItem := range rgList.Data { + item := models.ItemsRGListModel{ + AccountACL: flattenACL(ctx, &rgItem.ACL), + AccountID: types.Int64Value(int64(rgItem.AccountID)), + AccountName: types.StringValue(rgItem.AccountName), + ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ComputeFeatures), + CPUAllocationParameter: types.StringValue(rgItem.CPUAllocationParameter), + CPUAllocationRatio: types.Float64Value(rgItem.CPUAllocationRatio), + CreatedBy: types.StringValue(rgItem.CreatedBy), + CreatedTime: types.Int64Value(int64(rgItem.CreatedTime)), + DefNetID: types.Int64Value(rgItem.DefNetID), + DefNetType: types.StringValue(rgItem.DefNetType), + DeletedBy: types.StringValue(rgItem.DeletedBy), + DeletedTime: types.Int64Value(int64(rgItem.DeletedTime)), + Description: types.StringValue(rgItem.Description), + Dirty: types.BoolValue(rgItem.Dirty), + GID: types.Int64Value(int64(rgItem.GID)), + GUID: types.Int64Value(int64(rgItem.GUID)), + RGID: 
types.Int64Value(int64(rgItem.ID)), + LockStatus: types.StringValue(rgItem.LockStatus), + Milestones: types.Int64Value(int64(rgItem.Milestones)), + Name: types.StringValue(rgItem.Name), + ResTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ResTypes), + RegisterComputes: types.BoolValue(rgItem.RegisterComputes), + ResourceLimits: flattenResourceLimits(ctx, &rgItem.ResourceLimits), + Secret: types.StringValue(rgItem.Secret), + Status: types.StringValue(rgItem.Status), + UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.UniqPools), + UpdatedBy: types.StringValue(rgItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(rgItem.UpdatedTime)), + VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VINS), + VMS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VMs), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(rgList.EntryCount)) + + tflog.Info(ctx, "flattens.RGListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_computes.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_computes.go new file mode 100644 index 0000000..baec43a --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_computes.go @@ -0,0 +1,121 @@ +package flattens + +import ( + "context" + "fmt" + + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGListComputesDataSource flattens data source for rg list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func RGListComputesDataSource(ctx context.Context, state *models.DataSourceRGListComputesModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGListComputesDataSource") + + diags := diag.Diagnostics{} + + rgListComputes, err := utilities.RGListComputesCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group list computes", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGListComputesDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGListComputesModel{ + RGID: state.RGID, + ComputeID: state.ComputeID, + Name: state.Name, + AccountID: state.AccountID, + TechStatus: state.TechStatus, + Status: state.Status, + IPAddress: state.IPAddress, + ExtNetName: state.ExtNetName, + ExtNetID: state.ExtNetID, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGListComputeModel, 0, len(rgListComputes.Data)) + for _, compItem := range rgListComputes.Data { + item := models.ItemsRGListComputeModel{ + AccountID: types.Int64Value(int64(compItem.AccountID)), + AccountName: types.StringValue(compItem.AccountName), + AffinityLabel: types.StringValue(compItem.AffinityLabel), + AffinityRules: flattenAffinityRules(ctx, &compItem.AffinityRules), + AffinityWeight: types.Int64Value(int64(compItem.AffinityWeight)), + AntiAffinityRules: flattenAffinityRules(ctx, &compItem.AntiAffinityRules), + CPUs: types.Int64Value(int64(compItem.CPUs)), + CreatedBy: types.StringValue(compItem.CreatedBy), + CreatedTime: types.Int64Value(int64(compItem.CreatedTime)), + DeletedBy: 
types.StringValue(compItem.DeletedBy), + DeletedTime: types.Int64Value(int64(compItem.DeletedTime)), + ID: types.Int64Value(int64(compItem.ID)), + Name: types.StringValue(compItem.Name), + RAM: types.Int64Value(int64(compItem.RAM)), + Registered: types.BoolValue(compItem.Registered), + RGID: types.Int64Value(int64(compItem.RGID)), + RGName: types.StringValue(compItem.RGName), + Status: types.StringValue(compItem.Status), + TechStatus: types.StringValue(compItem.TechStatus), + TotalDisksSize: types.Int64Value(int64(compItem.TotalDisksSize)), + UpdatedBy: types.StringValue(compItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(compItem.UpdatedTime)), + UserManaged: types.BoolValue(compItem.UserManaged), + VINSConnected: types.Int64Value(int64(compItem.VINSConnected)), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(rgListComputes.EntryCount)) + + tflog.Info(ctx, "flattens.RGListComputesDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGListComputesDataSource") + return nil +} + +func flattenAffinityRules(ctx context.Context, items *rg.ListRules) types.List { + tflog.Info(ctx, "Start flattenAffinityRules") + tempSlice := make([]types.Object, 0, len(*items)) + for _, ruleItem := range *items { + temp := models.AffinityRuleModel{ + GUID: types.StringValue(ruleItem.GUID), + Key: types.StringValue(ruleItem.Key), + Mode: types.StringValue(ruleItem.Mode), + Policy: types.StringValue(ruleItem.Policy), + Topology: types.StringValue(ruleItem.Topology), + Value: types.StringValue(ruleItem.Value), + } + + obj, diags := types.ObjectValueFrom(ctx, models.ItemAffinityRule, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenAffinityRules struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemAffinityRule}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error 
flattenAffinityRules", diags)) + } + + tflog.Info(ctx, "End flattenAffinityRules") + return res +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_deleted.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_deleted.go new file mode 100644 index 0000000..3cced28 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_deleted.go @@ -0,0 +1,94 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGListDeletedDataSource flattens data source for rg list deleted. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGListDeletedDataSource(ctx context.Context, state *models.DataSourceRGListDeletedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGListDeletedDataSource") + + diags := diag.Diagnostics{} + + rgList, err := utilities.RGListDeletedCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group list deleted", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGListDeletedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGListDeletedModel{ + ById: state.ById, + Name: state.Name, + AccountId: state.AccountId, + AccountName: state.AccountName, + CreatedAfter: state.CreatedAfter, + CreatedBefore: state.CreatedBefore, + LockStatus: state.LockStatus, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGListDeletedModel, 0, len(rgList.Data)) + for _, rgItem := range rgList.Data { + item := models.ItemsRGListDeletedModel{ + AccountACL: flattenACL(ctx, &rgItem.ACL), + AccountID: types.Int64Value(int64(rgItem.AccountID)), + AccountName: types.StringValue(rgItem.AccountName), + ComputeFeatures: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ComputeFeatures), + CPUAllocationParameter: types.StringValue(rgItem.CPUAllocationParameter), + CPUAllocationRatio: types.Float64Value(rgItem.CPUAllocationRatio), + CreatedBy: types.StringValue(rgItem.CreatedBy), + CreatedTime: types.Int64Value(int64(rgItem.CreatedTime)), + DefNetID: types.Int64Value(rgItem.DefNetID), + DefNetType: types.StringValue(rgItem.DefNetType), + DeletedBy: types.StringValue(rgItem.DeletedBy), + DeletedTime: types.Int64Value(int64(rgItem.DeletedTime)), + Description: types.StringValue(rgItem.Description), + Dirty: types.BoolValue(rgItem.Dirty), + GID: types.Int64Value(int64(rgItem.GID)), + GUID: types.Int64Value(int64(rgItem.GUID)), + RGID: 
types.Int64Value(int64(rgItem.ID)), + LockStatus: types.StringValue(rgItem.LockStatus), + Milestones: types.Int64Value(int64(rgItem.Milestones)), + Name: types.StringValue(rgItem.Name), + ResTypes: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.ResTypes), + RegisterComputes: types.BoolValue(rgItem.RegisterComputes), + ResourceLimits: flattenResourceLimits(ctx, &rgItem.ResourceLimits), + Secret: types.StringValue(rgItem.Secret), + Status: types.StringValue(rgItem.Status), + UniqPools: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &rgItem.UniqPools), + UpdatedBy: types.StringValue(rgItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(rgItem.UpdatedTime)), + VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VINS), + VMS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &rgItem.VMs), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(rgList.EntryCount)) + + tflog.Info(ctx, "flattens.RGListDeletedDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGListDeletedDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_lb.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_lb.go new file mode 100644 index 0000000..11d8e9a --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_lb.go @@ -0,0 +1,172 @@ +package flattens + +import ( + "context" + "encoding/json" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGListLBDataSource flattens 
data source for rg list lb. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func RGListLBDataSource(ctx context.Context, state *models.DataSourceRGListLBModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGListLBDataSource") + + diags := diag.Diagnostics{} + + rgListLB, err := utilities.RGListLBCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group list lb", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGListLBDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGListLBModel{ + RGID: state.RGID, + ByID: state.ByID, + Name: state.Name, + TechStatus: state.TechStatus, + Status: state.Status, + FrontIP: state.FrontIP, + BackIP: state.BackIP, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGListLBModel, 0, len(rgListLB.Data)) + for _, lbItem := range rgListLB.Data { + acl, _ := json.Marshal(lbItem.ACL) + item := models.ItemsRGListLBModel{ + HAMode: types.BoolValue(lbItem.HAMode), + ACL: types.StringValue(string(acl)), + CreatedBy: types.StringValue(lbItem.CreatedBy), + CreatedTime: types.Int64Value(int64(lbItem.CreatedTime)), + DeletedBy: types.StringValue(lbItem.DeletedBy), + DeletedTime: types.Int64Value(int64(lbItem.DeletedTime)), + Description: types.StringValue(lbItem.Description), + DPAPIUser: types.StringValue(lbItem.DPAPIUser), + ExtNetID: types.Int64Value(int64(lbItem.ExtNetID)), + GID: types.Int64Value(int64(lbItem.GID)), + GUID: types.Int64Value(int64(lbItem.GUID)), + ID: types.Int64Value(int64(lbItem.ID)), + ImageID: types.Int64Value(int64(lbItem.ImageID)), + Milestones: types.Int64Value(int64(lbItem.Milestones)), + Name: types.StringValue(lbItem.Name), + PrimaryNode: models.RecordNodeModel{ + BackendIP: types.StringValue(lbItem.PrimaryNode.BackendIP), + 
ComputeID: types.Int64Value(int64(lbItem.PrimaryNode.ComputeID)), + FrontendIP: types.StringValue(lbItem.PrimaryNode.FrontendIP), + GUID: types.StringValue(lbItem.PrimaryNode.GUID), + MGMTIP: types.StringValue(lbItem.PrimaryNode.MGMTIP), + NetworkID: types.Int64Value(int64(lbItem.PrimaryNode.NetworkID)), + }, + RGName: types.StringValue(lbItem.RGName), + SecondaryNode: models.RecordNodeModel{ + BackendIP: types.StringValue(lbItem.SecondaryNode.BackendIP), + ComputeID: types.Int64Value(int64(lbItem.SecondaryNode.ComputeID)), + FrontendIP: types.StringValue(lbItem.SecondaryNode.FrontendIP), + GUID: types.StringValue(lbItem.SecondaryNode.GUID), + MGMTIP: types.StringValue(lbItem.SecondaryNode.MGMTIP), + NetworkID: types.Int64Value(int64(lbItem.SecondaryNode.NetworkID)), + }, + Status: types.StringValue(lbItem.Status), + TechStatus: types.StringValue(lbItem.TechStatus), + UpdatedBy: types.StringValue(lbItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(lbItem.UpdatedTime)), + VINSID: types.Int64Value(int64(lbItem.VINSID)), + } + + // flatten Backends + backends := make([]models.ItemBackendModel, 0, len(lbItem.Backends)) + for _, backendItem := range lbItem.Backends { + be := models.ItemBackendModel{ + Algorithm: types.StringValue(backendItem.Algorithm), + GUID: types.StringValue(backendItem.GUID), + Name: types.StringValue(backendItem.Name), + ServerDefaultSettings: models.RecordServerSettingsModel{ + Inter: types.Int64Value(int64(backendItem.ServerDefaultSettings.Inter)), + GUID: types.StringValue(backendItem.ServerDefaultSettings.GUID), + DownInter: types.Int64Value(int64(backendItem.ServerDefaultSettings.DownInter)), + Rise: types.Int64Value(int64(backendItem.ServerDefaultSettings.Rise)), + Fall: types.Int64Value(int64(backendItem.ServerDefaultSettings.Fall)), + SlowStart: types.Int64Value(int64(backendItem.ServerDefaultSettings.SlowStart)), + MaxConn: types.Int64Value(int64(backendItem.ServerDefaultSettings.MaxConn)), + MaxQueue: 
types.Int64Value(int64(backendItem.ServerDefaultSettings.MaxQueue)), + Weight: types.Int64Value(int64(backendItem.ServerDefaultSettings.Weight)), + }, + } + + servers := make([]models.ItemServerModel, 0, len(backendItem.Servers)) + for _, server := range backendItem.Servers { + s := models.ItemServerModel{ + Address: types.StringValue(server.Address), + Check: types.StringValue(server.Check), + GUID: types.StringValue(server.GUID), + Name: types.StringValue(server.Name), + Port: types.Int64Value(int64(server.Port)), + ServerSettings: models.RecordServerSettingsModel{ + Inter: types.Int64Value(int64(server.ServerSettings.Inter)), + GUID: types.StringValue(server.ServerSettings.GUID), + DownInter: types.Int64Value(int64(server.ServerSettings.DownInter)), + Rise: types.Int64Value(int64(server.ServerSettings.Rise)), + Fall: types.Int64Value(int64(server.ServerSettings.Fall)), + SlowStart: types.Int64Value(int64(server.ServerSettings.SlowStart)), + MaxConn: types.Int64Value(int64(server.ServerSettings.MaxConn)), + MaxQueue: types.Int64Value(int64(server.ServerSettings.MaxQueue)), + Weight: types.Int64Value(int64(server.ServerSettings.Weight)), + }, + } + servers = append(servers, s) + } + be.Servers = servers + backends = append(backends, be) + } + item.Backends = backends + + // flatten Frontends + frontends := make([]models.ItemFrontendModel, 0, len(lbItem.Frontends)) + for _, frontendItem := range lbItem.Frontends { + fr := models.ItemFrontendModel{ + Backend: types.StringValue(frontendItem.Backend), + GUID: types.StringValue(frontendItem.GUID), + Name: types.StringValue(frontendItem.Name), + } + bindings := make([]models.ItemBindingModel, 0, len(frontendItem.Bindings)) + for _, bingingItem := range frontendItem.Bindings { + b := models.ItemBindingModel{ + Address: types.StringValue(bingingItem.Address), + GUID: types.StringValue(bingingItem.GUID), + Name: types.StringValue(bingingItem.Name), + Port: types.Int64Value(int64(bingingItem.Port)), + } + bindings = 
append(bindings, b) + } + fr.Bindings = bindings + frontends = append(frontends, fr) + } + item.Frontends = frontends + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(rgListLB.EntryCount)) + + tflog.Info(ctx, "flattens.RGListLBDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGListLBDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_pfw.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_pfw.go new file mode 100644 index 0000000..3a16276 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_pfw.go @@ -0,0 +1,62 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGListPFWDataSource flattens data source for rg list pfw. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGListPFWDataSource(ctx context.Context, state *models.DataSourceRGListPFWModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGListPFWDataSource") + + diags := diag.Diagnostics{} + + rgListPFW, err := utilities.RGListPFWCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group list pfw", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGListPFWDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGListPFWModel{ + RGID: state.RGID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGListPFWModel, 0, len(rgListPFW.Data)) + for _, pfwItem := range rgListPFW.Data { + item := models.ItemsRGListPFWModel{ + PublicPortEnd: types.Int64Value(int64(pfwItem.PublicPortEnd)), + PublicPortStart: types.Int64Value(int64(pfwItem.PublicPortStart)), + VMID: types.Int64Value(int64(pfwItem.VMID)), + VMIP: types.StringValue(pfwItem.VMIP), + VMName: types.StringValue(pfwItem.VMName), + VMPort: types.Int64Value(int64(pfwItem.VMPort)), + VINSID: types.Int64Value(int64(pfwItem.VINSID)), + VINSName: types.StringValue(pfwItem.VINSName), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(rgListPFW.EntryCount)) + + tflog.Info(ctx, "flattens.RGListPFWDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGListPFWDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_vins.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_vins.go new file mode 100644 index 0000000..a6c8390 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_list_vins.go @@ -0,0 +1,80 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGListVinsDataSource flattens data source for rg list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func RGListVinsDataSource(ctx context.Context, state *models.DataSourceRGListVinsModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGListVinsDataSource") + + diags := diag.Diagnostics{} + + rgListVins, err := utilities.RGListVinsCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group list vins", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGListVinsDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGListVinsModel{ + RGID: state.RGID, + Name: state.Name, + AccountID: state.AccountID, + ExtIP: state.ExtIP, + VINSID: state.VINSID, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemsRGListVinsModel, 0, len(rgListVins.Data)) + for _, vinsItem := range rgListVins.Data { + item := models.ItemsRGListVinsModel{ + AccountID: types.Int64Value(int64(vinsItem.AccountID)), + AccountName: types.StringValue(vinsItem.AccountName), + Computes: types.Int64Value(int64(vinsItem.Computes)), + CreatedBy: types.StringValue(vinsItem.CreatedBy), + CreatedTime: types.Int64Value(int64(vinsItem.CreatedTime)), + DeletedBy: types.StringValue(vinsItem.DeletedBy), + DeletedTime: types.Int64Value(int64(vinsItem.DeletedTime)), + ExternalIP: types.StringValue(vinsItem.ExternalIP), + ExtnetID: types.Int64Value(int64(vinsItem.ExtnetId)), + FreeIPs: types.Int64Value(int64(vinsItem.FreeIPs)), 
+ ID: types.Int64Value(int64(vinsItem.ID)), + Name: types.StringValue(vinsItem.Name), + Network: types.StringValue(vinsItem.Network), + PriVNFDevID: types.Int64Value(int64(vinsItem.PriVNFDevID)), + RGID: types.Int64Value(int64(vinsItem.RGID)), + RGName: types.StringValue(vinsItem.RGName), + Status: types.StringValue(vinsItem.Status), + UpdatedBy: types.StringValue(vinsItem.UpdatedBy), + UpdatedTime: types.Int64Value(int64(vinsItem.UpdatedTime)), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(rgListVins.EntryCount)) + + tflog.Info(ctx, "flattens.RGListVinsDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGListVinsDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_resource_consumption_list.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_resource_consumption_list.go new file mode 100644 index 0000000..192cf7f --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_resource_consumption_list.go @@ -0,0 +1,56 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGResourceConsumptionListDataSource flattens data source for rg resource consumption list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func RGResourceConsumptionListDataSource(ctx context.Context, state *models.DataSourceRGResourceConsumptionListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGResourceConsumptionListDataSource") + + diags := diag.Diagnostics{} + + resConsList, err := utilities.RGResourceConsumptionListCheckPresence(ctx, c) + if err != nil { + diags.AddError("Cannot get info about resource group resource consumption list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGResourceConsumptionListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGResourceConsumptionListModel{ + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemResourceConsumptionModel, 0, len(resConsList.Data)) + for _, resConsItem := range resConsList.Data { + item := models.ItemResourceConsumptionModel{ + RGID: types.Int64Value(int64(resConsItem.RGID)), + Consumed: flattenResource(ctx, &resConsItem.Consumed), + Reserved: flattenResource(ctx, &resConsItem.Reserved), + ResourceLimits: flattenResourceLimits(ctx, &resConsItem.ResourceLimits), + } + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(resConsList.EntryCount)) + + tflog.Info(ctx, "flattens.RGResourceConsumptionListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGResourceConsumptionListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_usage.go b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_usage.go new file mode 100644 index 0000000..d716998 --- /dev/null +++ b/internal/service/cloudbroker/rg/flattens/flatten_data_source_rg_usage.go @@ -0,0 +1,52 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/utilities" +) + +// RGUsageDataSource flattens data source for rg usage. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func RGUsageDataSource(ctx context.Context, state *models.DataSourceRGUsageModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.RGUsageDataSource") + + diags := diag.Diagnostics{} + + usageInfo, err := utilities.RGUsageCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about resource group usage", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.RGUsageDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceRGUsageModel{ + RGID: state.RGID, + Reason: state.Reason, + Timeouts: state.Timeouts, + Id: types.StringValue(id.String()), + + CPU: types.Int64Value(int64(usageInfo.CPU)), + DiskSize: types.Int64Value(int64(usageInfo.DiskSize)), + DiskSizeMax: types.Int64Value(int64(usageInfo.DiskSizeMax)), + ExtIPs: types.Int64Value(int64(usageInfo.ExtIPs)), + ExtTraffic: types.Int64Value(int64(usageInfo.ExtTraffic)), + GPU: types.Int64Value(int64(usageInfo.GPU)), + RAM: types.Int64Value(int64(usageInfo.RAM)), + SEPs: flattenSEPs(ctx, usageInfo.SEPs), + } + + tflog.Info(ctx, "flattens.RGUsageDataSource: after flatten") + + tflog.Info(ctx, "End flattens.RGUsageDataSource") + return nil +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg.go b/internal/service/cloudbroker/rg/models/model_data_source_rg.go new file mode 100644 index 0000000..0024cc2 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg.go @@ -0,0 +1,84 @@ +package models + +import ( + 
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGModel struct { + // request fields + RGID types.Int64 `tfsdk:"rg_id"` + Reason types.String `tfsdk:"reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ACL types.List `tfsdk:"acl"` + ComputeFeatures types.List `tfsdk:"compute_features"` + CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"` + CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefNetID types.Int64 `tfsdk:"def_net_id"` + DefNetType types.String `tfsdk:"def_net_type"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + Dirty types.Bool `tfsdk:"dirty"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Id types.String `tfsdk:"id"` + LockStatus types.String `tfsdk:"lock_status"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + RegisterComputes types.Bool `tfsdk:"register_computes"` + ResourceLimits types.Object `tfsdk:"resource_limits"` + ResourceTypes types.List `tfsdk:"resource_types"` + Secret types.String `tfsdk:"secret"` + Status types.String `tfsdk:"status"` + UniqPools types.List `tfsdk:"uniq_pools"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VINS types.List `tfsdk:"vins"` + Computes types.List `tfsdk:"computes"` +} + +type ResourceLimitsModel struct { + CUC types.Float64 `tfsdk:"cu_c"` + CUD types.Float64 `tfsdk:"cu_d"` + CUDM types.Float64 `tfsdk:"cu_dm"` + CUI types.Float64 `tfsdk:"cu_i"` + CUM types.Float64 `tfsdk:"cu_m"` + CUNP types.Float64 
`tfsdk:"cu_np"` + GPUUnits types.Float64 `tfsdk:"gpu_units"` +} + +var ItemResourseModel = map[string]attr.Type{ + "cu_c": types.Float64Type, + "cu_d": types.Float64Type, + "cu_dm": types.Float64Type, + "cu_i": types.Float64Type, + "cu_m": types.Float64Type, + "cu_np": types.Float64Type, + "gpu_units": types.Float64Type, +} + +type ItemACLModel struct { + Explicit types.Bool `tfsdk:"explicit"` + GUID types.String `tfsdk:"guid"` + Right types.String `tfsdk:"right"` + Status types.String `tfsdk:"status"` + Type types.String `tfsdk:"type"` + UserGroupID types.String `tfsdk:"user_group_id"` +} + +var ItemACL = map[string]attr.Type{ + "explicit": types.BoolType, + "guid": types.StringType, + "right": types.StringType, + "status": types.StringType, + "type": types.StringType, + "user_group_id": types.StringType, +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..919d625 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_group_computes.go @@ -0,0 +1,29 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGAffinityGroupComputesModel struct { + // request required fields + RGID types.Int64 `tfsdk:"rg_id"` + AffinityGroup types.String `tfsdk:"affinity_group"` + + // request optional fields + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemAffinityGroupComputeModel `tfsdk:"items"` +} + +type ItemAffinityGroupComputeModel struct { + ComputeID types.Int64 `tfsdk:"compute_id"` + OtherNode types.List `tfsdk:"other_node"` + OtherNodeIndirect types.List `tfsdk:"other_node_indirect"` + OtherNodeIndirectSoft types.List `tfsdk:"other_node_indirect_soft"` + OtherNodeSoft 
types.List `tfsdk:"other_node_soft"` + SameNode types.List `tfsdk:"same_node"` + SameNodeSoft types.List `tfsdk:"same_node_soft"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..f25eaef --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_get.go @@ -0,0 +1,19 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGAffinityGroupsGetModel struct { + // request required fields + RGID types.Int64 `tfsdk:"rg_id"` + AffinityGroup types.String `tfsdk:"affinity_group"` + + // request optional fields + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Ids types.List `tfsdk:"ids"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..ca8b866 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_affinity_groups_list.go @@ -0,0 +1,31 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGAffinityGroupsListModel struct { + // request required fields + RGID types.Int64 `tfsdk:"rg_id"` + + // request optional fields + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + AffinityGroups []ItemAffinityGroupModel `tfsdk:"affinity_groups"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemAffinityGroupModel struct { + Label types.String 
`tfsdk:"label"` + Ids []ItemIDModel `tfsdk:"ids"` +} + +type ItemIDModel struct { + Id types.Int64 `tfsdk:"id"` + NodeId types.Int64 `tfsdk:"node_id"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_audits.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_audits.go new file mode 100644 index 0000000..efc789d --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_audits.go @@ -0,0 +1,24 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGAuditsModel struct { + // request field + RGID types.Int64 `tfsdk:"rg_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsRGAuditModel `tfsdk:"items"` +} + +type ItemsRGAuditModel struct { + Call types.String `tfsdk:"call"` + ResponseTime types.Float64 `tfsdk:"responsetime"` + StatusCode types.Int64 `tfsdk:"statuscode"` + Timestamp types.Float64 `tfsdk:"timestamp"` + User types.String `tfsdk:"user"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_get_resource_limits.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_get_resource_limits.go new file mode 100644 index 0000000..b2b3d5e --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_get_resource_limits.go @@ -0,0 +1,55 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGGetResourceConsumptionModel struct { + // request fields + RGID types.Int64 `tfsdk:"rg_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Consumed types.Object `tfsdk:"consumed"` + Reserved types.Object `tfsdk:"reserved"` + 
ResourceLimits types.Object `tfsdk:"resource_limits"` +} + +type ResourceModel struct { + CPU types.Int64 `tfsdk:"cpu"` + DiskSize types.Float64 `tfsdk:"disk_size"` + DiskSizeMax types.Float64 `tfsdk:"disk_size_max"` + ExtIPs types.Int64 `tfsdk:"extips"` + ExtTraffic types.Int64 `tfsdk:"exttraffic"` + GPU types.Int64 `tfsdk:"gpu"` + RAM types.Int64 `tfsdk:"ram"` + SEPs types.List `tfsdk:"seps"` +} + +type SEPsModel struct { + SepID types.String `tfsdk:"sep_id"` + DataName types.String `tfsdk:"data_name"` + DiskSize types.Float64 `tfsdk:"disk_size"` + DiskSizeMax types.Float64 `tfsdk:"disk_size_max"` +} + +var ItemSEPs = map[string]attr.Type{ + "sep_id": types.StringType, + "data_name": types.StringType, + "disk_size": types.Float64Type, + "disk_size_max": types.Float64Type, +} + +var ItemResource = map[string]attr.Type{ + "cpu": types.Int64Type, + "disk_size": types.Float64Type, + "disk_size_max": types.Float64Type, + "extips": types.Int64Type, + "exttraffic": types.Int64Type, + "gpu": types.Int64Type, + "ram": types.Int64Type, + "seps": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemSEPs}}, +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_list.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_list.go new file mode 100644 index 0000000..a3d1dc0 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_list.go @@ -0,0 +1,61 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGListModel struct { + // request optional fields + ById types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountId types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + CreatedAfter types.Int64 `tfsdk:"created_after"` + CreatedBefore types.Int64 `tfsdk:"created_before"` + Status types.String `tfsdk:"status"` + LockStatus types.String 
`tfsdk:"lock_status"` + IncludeDeleted types.Bool `tfsdk:"includedeleted"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsRGListModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsRGListModel struct { + AccountACL types.List `tfsdk:"acl"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ComputeFeatures types.List `tfsdk:"compute_features"` + CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"` + CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefNetID types.Int64 `tfsdk:"def_net_id"` + DefNetType types.String `tfsdk:"def_net_type"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + Dirty types.Bool `tfsdk:"dirty"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + RGID types.Int64 `tfsdk:"rg_id"` + LockStatus types.String `tfsdk:"lock_status"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + RegisterComputes types.Bool `tfsdk:"register_computes"` + ResourceLimits types.Object `tfsdk:"resource_limits"` + ResTypes types.List `tfsdk:"resource_types"` + Secret types.String `tfsdk:"secret"` + Status types.String `tfsdk:"status"` + UniqPools types.List `tfsdk:"uniq_pools"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VINS types.List `tfsdk:"vins"` + VMS types.List `tfsdk:"vms"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_list_computes.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_computes.go new file mode 100644 index 0000000..210293c --- /dev/null 
+++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_computes.go @@ -0,0 +1,76 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGListComputesModel struct { + // request required fields + RGID types.Int64 `tfsdk:"rg_id"` + + // request optional fields + ComputeID types.Int64 `tfsdk:"compute_id"` + Name types.String `tfsdk:"name"` + AccountID types.Int64 `tfsdk:"account_id"` + TechStatus types.String `tfsdk:"tech_status"` + Status types.String `tfsdk:"status"` + IPAddress types.String `tfsdk:"ip_address"` + ExtNetName types.String `tfsdk:"extnet_name"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsRGListComputeModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsRGListComputeModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + AffinityLabel types.String `tfsdk:"affinity_label"` + AffinityRules types.List `tfsdk:"affinity_rules"` + AffinityWeight types.Int64 `tfsdk:"affinity_weight"` + AntiAffinityRules types.List `tfsdk:"antiaffinity_rules"` + CPUs types.Int64 `tfsdk:"cpus"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + ID types.Int64 `tfsdk:"id"` + Name types.String `tfsdk:"name"` + RAM types.Int64 `tfsdk:"ram"` + Registered types.Bool `tfsdk:"registered"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + TechStatus types.String 
`tfsdk:"tech_status"` + TotalDisksSize types.Int64 `tfsdk:"total_disks_size"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VINSConnected types.Int64 `tfsdk:"vins_connected"` +} + +type AffinityRuleModel struct { + GUID types.String `tfsdk:"guid"` + Key types.String `tfsdk:"key"` + Mode types.String `tfsdk:"mode"` + Policy types.String `tfsdk:"policy"` + Topology types.String `tfsdk:"topology"` + Value types.String `tfsdk:"value"` +} + +var ItemAffinityRule = map[string]attr.Type{ + "guid": types.StringType, + "key": types.StringType, + "mode": types.StringType, + "policy": types.StringType, + "topology": types.StringType, + "value": types.StringType, +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_list_deleted.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_deleted.go new file mode 100644 index 0000000..84a7f16 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_deleted.go @@ -0,0 +1,59 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGListDeletedModel struct { + // request optional fields + ById types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountId types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + CreatedAfter types.Int64 `tfsdk:"created_after"` + CreatedBefore types.Int64 `tfsdk:"created_before"` + LockStatus types.String `tfsdk:"lock_status"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsRGListDeletedModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsRGListDeletedModel struct { + 
AccountACL types.List `tfsdk:"acl"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + ComputeFeatures types.List `tfsdk:"compute_features"` + CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"` + CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefNetID types.Int64 `tfsdk:"def_net_id"` + DefNetType types.String `tfsdk:"def_net_type"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + Dirty types.Bool `tfsdk:"dirty"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + RGID types.Int64 `tfsdk:"rg_id"` + LockStatus types.String `tfsdk:"lock_status"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + RegisterComputes types.Bool `tfsdk:"register_computes"` + ResourceLimits types.Object `tfsdk:"resource_limits"` + ResTypes types.List `tfsdk:"resource_types"` + Secret types.String `tfsdk:"secret"` + Status types.String `tfsdk:"status"` + UniqPools types.List `tfsdk:"uniq_pools"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VINS types.List `tfsdk:"vins"` + VMS types.List `tfsdk:"vms"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_list_lb.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_lb.go new file mode 100644 index 0000000..494be41 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_lb.go @@ -0,0 +1,108 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGListLBModel struct { + // request required fields + RGID types.Int64 `tfsdk:"rg_id"` + + // request optional fields + ByID types.Int64 
`tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + TechStatus types.String `tfsdk:"tech_status"` + Status types.String `tfsdk:"status"` + FrontIP types.String `tfsdk:"front_ip"` + BackIP types.String `tfsdk:"back_ip"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsRGListLBModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsRGListLBModel struct { + HAMode types.Bool `tfsdk:"ha_mode"` + ACL types.String `tfsdk:"acl"` + Backends []ItemBackendModel `tfsdk:"backends"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"desc"` + DPAPIUser types.String `tfsdk:"dp_api_user"` + ExtNetID types.Int64 `tfsdk:"extnet_id"` + Frontends []ItemFrontendModel `tfsdk:"frontends"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"id"` + ImageID types.Int64 `tfsdk:"image_id"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + PrimaryNode RecordNodeModel `tfsdk:"primary_node"` + RGName types.String `tfsdk:"rg_name"` + SecondaryNode RecordNodeModel `tfsdk:"secondary_node"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VINSID types.Int64 `tfsdk:"vins_id"` +} + +type ItemFrontendModel struct { + Backend types.String `tfsdk:"backend"` + Bindings []ItemBindingModel `tfsdk:"bindings"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` +} + +type ItemBindingModel struct { + Address types.String `tfsdk:"address"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + Port 
types.Int64 `tfsdk:"port"` +} + +type RecordNodeModel struct { + BackendIP types.String `tfsdk:"backend_ip"` + ComputeID types.Int64 `tfsdk:"compute_id"` + FrontendIP types.String `tfsdk:"frontend_ip"` + GUID types.String `tfsdk:"guid"` + MGMTIP types.String `tfsdk:"mgmt_ip"` + NetworkID types.Int64 `tfsdk:"network_id"` +} + +type ItemBackendModel struct { + Algorithm types.String `tfsdk:"algorithm"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + ServerDefaultSettings RecordServerSettingsModel `tfsdk:"server_default_settings"` + Servers []ItemServerModel `tfsdk:"servers"` +} + +type RecordServerSettingsModel struct { + Inter types.Int64 `tfsdk:"inter"` + GUID types.String `tfsdk:"guid"` + DownInter types.Int64 `tfsdk:"down_inter"` + Rise types.Int64 `tfsdk:"rise"` + Fall types.Int64 `tfsdk:"fall"` + SlowStart types.Int64 `tfsdk:"slow_start"` + MaxConn types.Int64 `tfsdk:"max_conn"` + MaxQueue types.Int64 `tfsdk:"max_queue"` + Weight types.Int64 `tfsdk:"weight"` +} + +type ItemServerModel struct { + Address types.String `tfsdk:"address"` + Check types.String `tfsdk:"check"` + GUID types.String `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + Port types.Int64 `tfsdk:"port"` + ServerSettings RecordServerSettingsModel `tfsdk:"server_settings"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_list_pfw.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_pfw.go new file mode 100644 index 0000000..6c5a203 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_pfw.go @@ -0,0 +1,28 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGListPFWModel struct { + // request fields + RGID types.Int64 `tfsdk:"rg_id"` // required + Timeouts timeouts.Value `tfsdk:"timeouts"` // optional + + // response fields + Id types.String `tfsdk:"id"` + 
Items []ItemsRGListPFWModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsRGListPFWModel struct { + PublicPortEnd types.Int64 `tfsdk:"public_port_end"` + PublicPortStart types.Int64 `tfsdk:"public_port_start"` + VMID types.Int64 `tfsdk:"vm_id"` + VMIP types.String `tfsdk:"vm_ip"` + VMName types.String `tfsdk:"vm_name"` + VMPort types.Int64 `tfsdk:"vm_port"` + VINSID types.Int64 `tfsdk:"vins_id"` + VINSName types.String `tfsdk:"vins_name"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_list_vins.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_vins.go new file mode 100644 index 0000000..f5b628c --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_list_vins.go @@ -0,0 +1,48 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGListVinsModel struct { + // request required fields + RGID types.Int64 `tfsdk:"rg_id"` + + // request optional fields + Name types.String `tfsdk:"name"` + AccountID types.Int64 `tfsdk:"account_id"` + ExtIP types.String `tfsdk:"ext_ip"` + VINSID types.Int64 `tfsdk:"vins_id"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemsRGListVinsModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemsRGListVinsModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes types.Int64 `tfsdk:"computes"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + ExternalIP types.String `tfsdk:"external_ip"` + ExtnetID types.Int64 
`tfsdk:"extnet_id"` + FreeIPs types.Int64 `tfsdk:"free_ips"` + ID types.Int64 `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Network types.String `tfsdk:"network"` + PriVNFDevID types.Int64 `tfsdk:"pri_vnf_dev_id"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_resource_limits_list.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_resource_limits_list.go new file mode 100644 index 0000000..d8521f5 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_resource_limits_list.go @@ -0,0 +1,23 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGResourceConsumptionListModel struct { + // request fields + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemResourceConsumptionModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemResourceConsumptionModel struct { + RGID types.Int64 `tfsdk:"rg_id"` + Consumed types.Object `tfsdk:"consumed"` + Reserved types.Object `tfsdk:"reserved"` + ResourceLimits types.Object `tfsdk:"resource_limits"` +} diff --git a/internal/service/cloudbroker/rg/models/model_data_source_rg_usage.go b/internal/service/cloudbroker/rg/models/model_data_source_rg_usage.go new file mode 100644 index 0000000..12b0446 --- /dev/null +++ b/internal/service/cloudbroker/rg/models/model_data_source_rg_usage.go @@ -0,0 +1,24 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceRGUsageModel struct { + // request fields + RGID 
types.Int64 `tfsdk:"rg_id"` + Reason types.String `tfsdk:"reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + CPU types.Int64 `tfsdk:"cpu"` + DiskSize types.Int64 `tfsdk:"disk_size"` + DiskSizeMax types.Int64 `tfsdk:"disk_size_max"` + ExtIPs types.Int64 `tfsdk:"extips"` + ExtTraffic types.Int64 `tfsdk:"exttraffic"` + GPU types.Int64 `tfsdk:"gpu"` + RAM types.Int64 `tfsdk:"ram"` + SEPs types.List `tfsdk:"seps"` +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg.go new file mode 100644 index 0000000..f4cd758 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg.go @@ -0,0 +1,164 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceRG() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "resource group id", + }, + + // optional attributes + "reason": schema.StringAttribute{ + Optional: true, + Description: "reason for request", + }, + + //computed attributes + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "explicit": schema.BoolAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "right": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "user_group_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "compute_features": schema.ListAttribute{ + Computed: true, 
+ ElementType: types.StringType, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "def_net_id": schema.Int64Attribute{ + Computed: true, + }, + "def_net_type": schema.StringAttribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dirty": schema.BoolAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "register_computes": schema.BoolAttribute{ + Computed: true, + }, + "resource_limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + "resource_types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "secret": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vins": 
schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "computes": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "uniq_pools": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..353c41a --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_group_computes.go @@ -0,0 +1,59 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceRGAffinityGroupComputes() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "find by rg id", + }, + "affinity_group": schema.StringAttribute{ + Required: true, + Description: "Affinity group label", + }, + + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "other_node": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "other_node_indirect": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "other_node_indirect_soft": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "other_node_soft": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "same_node": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "same_node_soft": schema.ListAttribute{ + Computed: true, + ElementType: 
types.Int64Type, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..5ca6974 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_get.go @@ -0,0 +1,29 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceRGAffinityGroupsGet() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "find by rg id", + }, + "affinity_group": schema.StringAttribute{ + Required: true, + Description: "Affinity group label", + }, + + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..e00681b --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_affinity_groups_list.go @@ -0,0 +1,57 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGAffinityGroupsList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "find by rg id", + }, + + // optional attributes + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + //computed attributes + "id": 
schema.StringAttribute{ + Computed: true, + }, + "affinity_groups": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "label": schema.StringAttribute{ + Computed: true, + }, + + "ids": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "node_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_audits.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_audits.go new file mode 100644 index 0000000..7b9a359 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_audits.go @@ -0,0 +1,41 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGAudits() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + }, + + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "call": schema.StringAttribute{ + Computed: true, + }, + "responsetime": schema.Float64Attribute{ + Computed: true, + }, + "statuscode": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Float64Attribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_get_resource_consumption.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_get_resource_consumption.go new file mode 100644 index 
0000000..757237a --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_get_resource_consumption.go @@ -0,0 +1,136 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGGetResourceConsumption() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "find by rg id", + }, + + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": 
schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "resource_limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list.go new file mode 100644 index 0000000..6197567 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list.go @@ -0,0 +1,215 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceRGList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "find by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "find by account id", + }, + "account_name": schema.StringAttribute{ + Optional: true, + Description: "find by account name", + }, + 
"created_after": schema.Int64Attribute{ + Optional: true, + Description: "find by created after time (unix timestamp)", + }, + "created_before": schema.Int64Attribute{ + Optional: true, + Description: "find by created before time (unix timestamp)", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "find by status", + }, + "lock_status": schema.StringAttribute{ + Optional: true, + Description: "find by lock status", + }, + "includedeleted": schema.BoolAttribute{ + Optional: true, + Description: "included deleted resource groups. If using field 'status', then includedeleted will be ignored", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "acl": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "explicit": schema.BoolAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "right": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "user_group_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "compute_features": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ 
+ Computed: true, + }, + "def_net_id": schema.Int64Attribute{ + Computed: true, + }, + "def_net_type": schema.StringAttribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dirty": schema.BoolAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "register_computes": schema.BoolAttribute{ + Computed: true, + }, + "resource_limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + "secret": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "vms": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "resource_types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: 
true, + }, + "uniq_pools": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_computes.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_computes.go new file mode 100644 index 0000000..e6a16d8 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_computes.go @@ -0,0 +1,192 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGListComputes() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the RG", + }, + + // optional attributes + "compute_id": schema.Int64Attribute{ + Optional: true, + Description: "find by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "find by account id", + }, + "tech_status": schema.StringAttribute{ + Optional: true, + Description: "find by tech status", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "find by status", + }, + "ip_address": schema.StringAttribute{ + Optional: true, + Description: "find by ip address", + }, + "extnet_name": schema.StringAttribute{ + Optional: true, + Description: "find by external network name", + }, + "extnet_id": schema.Int64Attribute{ + Optional: true, + Description: "find by external network id", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + //computed attributes + 
"id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "affinity_label": schema.StringAttribute{ + Computed: true, + }, + "affinity_rules": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "key": schema.StringAttribute{ + Computed: true, + }, + "mode": schema.StringAttribute{ + Computed: true, + }, + "policy": schema.StringAttribute{ + Computed: true, + }, + "topology": schema.StringAttribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "affinity_weight": schema.Int64Attribute{ + Computed: true, + }, + "antiaffinity_rules": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "key": schema.StringAttribute{ + Computed: true, + }, + "mode": schema.StringAttribute{ + Computed: true, + }, + "policy": schema.StringAttribute{ + Computed: true, + }, + "topology": schema.StringAttribute{ + Computed: true, + }, + "value": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "cpus": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "registered": 
schema.BoolAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "total_disks_size": schema.Int64Attribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vins_connected": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_deleted.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_deleted.go new file mode 100644 index 0000000..0aa030e --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_deleted.go @@ -0,0 +1,207 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceRGListDeleted() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "find by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "find by account id", + }, + "account_name": schema.StringAttribute{ + Optional: true, + Description: "find by account name", + }, + "created_after": schema.Int64Attribute{ + Optional: true, + Description: "find by created after time (unix timestamp)", + }, + "created_before": schema.Int64Attribute{ + Optional: true, + Description: "find by created before time (unix timestamp)", + }, + "lock_status": schema.StringAttribute{ + 
Optional: true, + Description: "find by lock status", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + //computed attributes + "id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "acl": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "explicit": schema.BoolAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "right": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "user_group_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "compute_features": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "def_net_id": schema.Int64Attribute{ + Computed: true, + }, + "def_net_type": schema.StringAttribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dirty": schema.BoolAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + 
"lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "register_computes": schema.BoolAttribute{ + Computed: true, + }, + "resource_limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + "secret": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "vms": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "resource_types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: true, + }, + "uniq_pools": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_lb.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_lb.go new file mode 100644 index 0000000..9147d0e --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_lb.go @@ -0,0 +1,314 @@ +package schemas + +import ( + 
"github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGListLB() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the RG", + }, + + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "find by id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "tech_status": schema.StringAttribute{ + Optional: true, + Description: "find by tech status", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "find by status", + }, + "front_ip": schema.StringAttribute{ + Optional: true, + Description: "find by frontend Ip", + }, + "back_ip": schema.StringAttribute{ + Optional: true, + Description: "find by backend Ip", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + // computed attributes + "id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "ha_mode": schema.BoolAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "backends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "algorithm": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "server_default_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "inter": 
schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "down_inter": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "slow_start": schema.Int64Attribute{ + Computed: true, + }, + "max_conn": schema.Int64Attribute{ + Computed: true, + }, + "max_queue": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "servers": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "check": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + "server_settings": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "inter": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "down_inter": schema.Int64Attribute{ + Computed: true, + }, + "rise": schema.Int64Attribute{ + Computed: true, + }, + "fall": schema.Int64Attribute{ + Computed: true, + }, + "slow_start": schema.Int64Attribute{ + Computed: true, + }, + "max_conn": schema.Int64Attribute{ + Computed: true, + }, + "max_queue": schema.Int64Attribute{ + Computed: true, + }, + "weight": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "dp_api_user": schema.StringAttribute{ + 
Computed: true, + }, + "extnet_id": schema.Int64Attribute{ + Computed: true, + }, + "frontends": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "backend": schema.StringAttribute{ + Computed: true, + }, + "bindings": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "address": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "port": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "primary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "secondary_node": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "backend_ip": schema.StringAttribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "frontend_ip": schema.StringAttribute{ + Computed: 
true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "mgmt_ip": schema.StringAttribute{ + Computed: true, + }, + "network_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_pfw.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_pfw.go new file mode 100644 index 0000000..e7609ef --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_pfw.go @@ -0,0 +1,54 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGListPFW() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the RG", + }, + + // computed attributes + "id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "public_port_end": schema.Int64Attribute{ + Computed: true, + }, + "public_port_start": schema.Int64Attribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + "vm_ip": schema.StringAttribute{ + Computed: true, + }, + "vm_name": schema.StringAttribute{ + Computed: true, + }, + "vm_port": schema.Int64Attribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + "vins_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": 
schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_vins.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_vins.go new file mode 100644 index 0000000..cb27e98 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_list_vins.go @@ -0,0 +1,117 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGListVins() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the RG", + }, + + // optional attributes + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "find by account id", + }, + "ext_ip": schema.StringAttribute{ + Optional: true, + Description: "find by external ip address", + }, + "vins_id": schema.Int64Attribute{ + Optional: true, + Description: "find by vins id", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "size number", + }, + + // computed attributes + "id": schema.StringAttribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + 
Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "external_ip": schema.StringAttribute{ + Computed: true, + }, + "extnet_id": schema.Int64Attribute{ + Computed: true, + }, + "free_ips": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "pri_vnf_dev_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_resource_consumption_list.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_resource_consumption_list.go new file mode 100644 index 0000000..7ea4ce3 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_resource_consumption_list.go @@ -0,0 +1,143 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGResourceConsumptionList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + 
"disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "resource_limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + 
}, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_usage.go b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_usage.go new file mode 100644 index 0000000..aee92b6 --- /dev/null +++ b/internal/service/cloudbroker/rg/schemas/schema_data_source_rg_usage.go @@ -0,0 +1,66 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceRGUsage() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "rg_id": schema.Int64Attribute{ + Required: true, + Description: "find by rg id", + }, + + // optional attributes + "reason": schema.StringAttribute{ + Optional: true, + Description: "reason for action", + }, + + //computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Int64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Int64Attribute{ + Computed: true, + }, + "extips": schema.Int64Attribute{ + Computed: true, + }, + "exttraffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git 
a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg.go new file mode 100644 index 0000000..175acbc --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg.go @@ -0,0 +1,23 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" +) + +func RGCheckPresence(ctx context.Context, rgId uint64, c *decort.DecortClient) (*rg.RecordRG, error) { + tflog.Info(ctx, fmt.Sprintf("Get info about resource group with ID - %v", rgId)) + + recordRG, err := c.CloudBroker().RG().Get(ctx, rg.GetRequest{RGID: rgId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group with error: %w", err) + } + + tflog.Info(ctx, "UtilityRGCheckPresence: response from CloudBroker().RG().Get", map[string]any{"rg_id": rgId, "response": recordRG}) + + return recordRG, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_group_computes.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_group_computes.go new file mode 100644 index 0000000..e98a383 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_group_computes.go @@ -0,0 +1,29 @@ +package utilities + +import ( + "context" + "fmt" + + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" +) + +func RGAffinityGroupComputesCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupComputesModel, c *decort.DecortClient) (*rg.ListAffinityGroupCompute, error) { + agCompsReq := 
rg.AffinityGroupComputesRequest{ + RGID: uint64(plan.RGID.ValueInt64()), + AffinityGroup: plan.AffinityGroup.ValueString(), + } + + tflog.Info(ctx, "RGAffinityGroupComputesCheckPresence: before call CloudBroker().RG().AffinityGroupComputes", map[string]any{"req": agCompsReq}) + agCompsList, err := c.CloudBroker().RG().AffinityGroupComputes(ctx, agCompsReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group affinity group computes with error: %w", err) + } + + tflog.Info(ctx, "RGAffinityGroupComputesCheckPresence: response from CloudBroker().RG().AffinityGroupComputes", map[string]any{"response": agCompsList}) + + return &agCompsList, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_get.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_get.go new file mode 100644 index 0000000..4c30592 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_get.go @@ -0,0 +1,29 @@ +package utilities + +import ( + "context" + "fmt" + + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" +) + +func RGAffinityGroupsGetCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupsGetModel, c *decort.DecortClient) ([]uint64, error) { + agReq := rg.AffinityGroupsGetRequest{ + RGID: uint64(plan.RGID.ValueInt64()), + AffinityGroup: plan.AffinityGroup.ValueString(), + } + + tflog.Info(ctx, "RGAffinityGroupsGetCheckPresence: before call CloudBroker().RG().AffinityGroupsGet", map[string]any{"req": agReq}) + agItem, err := c.CloudBroker().RG().AffinityGroupsGet(ctx, agReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group affinity groups get with error: %w", err) + } 
+ + tflog.Info(ctx, "RGAffinityGroupsGetCheckPresence: response from CloudBroker().RG().AffinityGroupsGet", map[string]any{"response": agItem}) + + return agItem, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_list.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_list.go new file mode 100644 index 0000000..8e6b72c --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_affinity_groups_list.go @@ -0,0 +1,33 @@ +package utilities + +import ( + "context" + "fmt" + + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" +) + +func RGAffinityGroupsListCheckPresence(ctx context.Context, plan *models.DataSourceRGAffinityGroupsListModel, c *decort.DecortClient) (*rg.ListAffinityGroup, error) { + agListReq := rg.AffinityGroupsListRequest{RGID: uint64(plan.RGID.ValueInt64())} + + if !plan.Page.IsNull() { + agListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + agListReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "RGAffinityGroupsListCheckPresence: before call CloudBroker().RG().AffinityGroupsList", map[string]any{"req": agListReq}) + agList, err := c.CloudBroker().RG().AffinityGroupsList(ctx, agListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group affinity groups list with error: %w", err) + } + + tflog.Info(ctx, "RGAffinityGroupsListCheckPresence: response from CloudBroker().RG().AffinityGroupsList", map[string]any{"response": agList}) + + return agList, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_audits.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_audits.go new file mode 100644 index 
0000000..6e7c58f --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_audits.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGAuditsCheckPresence(ctx context.Context, plan *models.DataSourceRGAuditsModel, c *decort.DecortClient) (*rg.ListAudits, error) { + auditsReq := rg.AuditsRequest{ + RGID: uint64(plan.RGID.ValueInt64()), + } + + tflog.Info(ctx, "RGAuditsCheckPresence: before call CloudBroker().RG().Audits", map[string]any{"req": auditsReq}) + rgAudits, err := c.CloudBroker().RG().Audits(ctx, auditsReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group audits with error: %w", err) + } + + tflog.Info(ctx, "RGAuditsCheckPresence: response from CloudBroker().RG().Audits", map[string]any{"response": rgAudits}) + + return &rgAudits, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_get_resource_consumption.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_get_resource_consumption.go new file mode 100644 index 0000000..5f6d8b4 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_get_resource_consumption.go @@ -0,0 +1,25 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGGetResourceConsumptionCheckPresence(ctx context.Context, plan *models.DataSourceRGGetResourceConsumptionModel, c *decort.DecortClient) 
(*rg.ItemResourceConsumption, error) { + resConsReq := rg.GetResourceConsumptionRequest{RGID: uint64(plan.RGID.ValueInt64())} + + tflog.Info(ctx, "RGGetResourceConsumptionCheckPresence: before call CloudBroker().RG().GetResourceConsumption", map[string]any{"req": resConsReq}) + resCons, err := c.CloudBroker().RG().GetResourceConsumption(ctx, resConsReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group get resource consumption with error: %w", err) + } + + tflog.Info(ctx, "RGGetResourceConsumptionCheckPresence: response from CloudBroker().RG().GetResourceConsumption", map[string]any{"response": resCons}) + + return resCons, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list.go new file mode 100644 index 0000000..ddf7575 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list.go @@ -0,0 +1,62 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGListCheckPresence(ctx context.Context, plan *models.DataSourceRGListModel, c *decort.DecortClient) (*rg.ListRG, error) { + listReq := rg.ListRequest{} + + if !plan.ById.IsNull() { + listReq.ByID = uint64(plan.ById.ValueInt64()) + } + if !plan.Name.IsNull() { + listReq.Name = plan.Name.ValueString() + } + if !plan.AccountId.IsNull() { + listReq.AccountID = uint64(plan.AccountId.ValueInt64()) + } + if !plan.AccountName.IsNull() { + listReq.AccountName = plan.AccountName.ValueString() + } + if !plan.CreatedAfter.IsNull() { + listReq.CreatedAfter = uint64(plan.CreatedAfter.ValueInt64()) + } + if !plan.CreatedBefore.IsNull() { + listReq.CreatedBefore = 
uint64(plan.CreatedBefore.ValueInt64()) + } + if !plan.Status.IsNull() { + listReq.Status = plan.Status.ValueString() + } + if !plan.LockStatus.IsNull() { + listReq.LockStatus = plan.LockStatus.ValueString() + } + if !plan.IncludeDeleted.IsNull() { + listReq.IncludeDeleted = plan.IncludeDeleted.ValueBool() + } + if !plan.SortBy.IsNull() { + listReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "RGListCheckPresence: before call CloudBroker().RG().List", map[string]any{"response": listReq}) + rgList, err := c.CloudBroker().RG().List(ctx, listReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group with error: %w", err) + } + + tflog.Info(ctx, "RGListCheckPresence: response from CloudBroker().RG().List", map[string]any{"response": rgList}) + + return rgList, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_computes.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_computes.go new file mode 100644 index 0000000..02e6577 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_computes.go @@ -0,0 +1,61 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGListComputesCheckPresence(ctx context.Context, plan *models.DataSourceRGListComputesModel, c *decort.DecortClient) (*rg.ListComputes, error) { + listCompReq := rg.ListComputesRequest{ + RGID: uint64(plan.RGID.ValueInt64()), + } + + if !plan.ComputeID.IsNull() { + listCompReq.ComputeID = uint64(plan.ComputeID.ValueInt64()) + } + if 
!plan.Name.IsNull() { + listCompReq.Name = plan.Name.ValueString() + } + if !plan.AccountID.IsNull() { + listCompReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.TechStatus.IsNull() { + listCompReq.TechStatus = plan.TechStatus.ValueString() + } + if !plan.Status.IsNull() { + listCompReq.Status = plan.Status.ValueString() + } + if !plan.IPAddress.IsNull() { + listCompReq.IPAddress = plan.IPAddress.ValueString() + } + if !plan.ExtNetName.IsNull() { + listCompReq.ExtNetName = plan.ExtNetName.ValueString() + } + if !plan.ExtNetID.IsNull() { + listCompReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64()) + } + if !plan.SortBy.IsNull() { + listCompReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listCompReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listCompReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "RGListComputesCheckPresence: before call CloudBroker().RG().ListComputes", map[string]any{"response": listCompReq}) + rgListComp, err := c.CloudBroker().RG().ListComputes(ctx, listCompReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group list computes with error: %w", err) + } + + tflog.Info(ctx, "RGListComputesCheckPresence: response from CloudBroker().RG().ListComputes", map[string]any{"response": rgListComp}) + + return rgListComp, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_deleted.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_deleted.go new file mode 100644 index 0000000..cafe0ef --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_deleted.go @@ -0,0 +1,56 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGListDeletedCheckPresence(ctx context.Context, plan *models.DataSourceRGListDeletedModel, c *decort.DecortClient) (*rg.ListRG, error) { + listDelReq := rg.ListDeletedRequest{} + + if !plan.ById.IsNull() { + listDelReq.ByID = uint64(plan.ById.ValueInt64()) + } + if !plan.Name.IsNull() { + listDelReq.Name = plan.Name.ValueString() + } + if !plan.AccountId.IsNull() { + listDelReq.AccountID = uint64(plan.AccountId.ValueInt64()) + } + if !plan.AccountName.IsNull() { + listDelReq.AccountName = plan.AccountName.ValueString() + } + if !plan.CreatedAfter.IsNull() { + listDelReq.CreatedAfter = uint64(plan.CreatedAfter.ValueInt64()) + } + if !plan.CreatedBefore.IsNull() { + listDelReq.CreatedBefore = uint64(plan.CreatedBefore.ValueInt64()) + } + if !plan.SortBy.IsNull() { + listDelReq.SortBy = plan.SortBy.ValueString() + } + if !plan.LockStatus.IsNull() { + listDelReq.LockStatus = plan.LockStatus.ValueString() + } + if !plan.Page.IsNull() { + listDelReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listDelReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "RGListDeletedCheckPresence: before call CloudBroker().RG().ListDeleted", map[string]any{"response": listDelReq}) + rgListDel, err := c.CloudBroker().RG().ListDeleted(ctx, listDelReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group list deleted with error: %w", err) + } + + tflog.Info(ctx, "RGListDeletedCheckPresence: response from CloudBroker().RG().ListDeleted", map[string]any{"response": rgListDel}) + + return rgListDel, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_lb.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_lb.go new file mode 100644 index 0000000..669565a --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_lb.go @@ -0,0 +1,55 
@@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGListLBCheckPresence(ctx context.Context, plan *models.DataSourceRGListLBModel, c *decort.DecortClient) (*rg.ListLB, error) { + listLBReq := rg.ListLBRequest{ + RGID: uint64(plan.RGID.ValueInt64()), + } + + if !plan.ByID.IsNull() { + listLBReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + listLBReq.Name = plan.Name.ValueString() + } + if !plan.TechStatus.IsNull() { + listLBReq.TechStatus = plan.TechStatus.ValueString() + } + if !plan.Status.IsNull() { + listLBReq.Status = plan.Status.ValueString() + } + if !plan.FrontIP.IsNull() { + listLBReq.FrontIP = plan.FrontIP.ValueString() + } + if !plan.BackIP.IsNull() { + listLBReq.BackIP = plan.BackIP.ValueString() + } + if !plan.SortBy.IsNull() { + listLBReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listLBReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listLBReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "RGListLBCheckPresence: before call CloudBroker().RG().ListLB", map[string]any{"response": listLBReq}) + rgListLB, err := c.CloudBroker().RG().ListLB(ctx, listLBReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group list lb with error: %w", err) + } + + tflog.Info(ctx, "RGListLBCheckPresence: response from CloudBroker().RG().ListLB", map[string]any{"response": rgListLB}) + + return rgListLB, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_pfw.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_pfw.go new file mode 100644 index 0000000..3f72509 --- /dev/null +++ 
b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_pfw.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGListPFWCheckPresence(ctx context.Context, plan *models.DataSourceRGListPFWModel, c *decort.DecortClient) (*rg.ListPFW, error) { + listPFWReq := rg.ListPFWRequest{ + RGID: uint64(plan.RGID.ValueInt64()), + } + + tflog.Info(ctx, "RGListPFWCheckPresence: before call CloudBroker().RG().ListPFW", map[string]any{"response": listPFWReq}) + rgListPFW, err := c.CloudBroker().RG().ListPFW(ctx, listPFWReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group list pfw with error: %w", err) + } + + tflog.Info(ctx, "RGListPFWCheckPresence: response from CloudBroker().RG().ListPFW", map[string]any{"response": rgListPFW}) + + return rgListPFW, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_vins.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_vins.go new file mode 100644 index 0000000..9742126 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_list_vins.go @@ -0,0 +1,49 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGListVinsCheckPresence(ctx context.Context, plan *models.DataSourceRGListVinsModel, c *decort.DecortClient) (*rg.ListVINS, error) { + listVinsReq := rg.ListVINSRequest{ + RGID: 
uint64(plan.RGID.ValueInt64()), + } + + if !plan.Name.IsNull() { + listVinsReq.Name = plan.Name.ValueString() + } + if !plan.AccountID.IsNull() { + listVinsReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.ExtIP.IsNull() { + listVinsReq.ExtIP = plan.ExtIP.ValueString() + } + if !plan.VINSID.IsNull() { + listVinsReq.VINSID = uint64(plan.VINSID.ValueInt64()) + } + if !plan.SortBy.IsNull() { + listVinsReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listVinsReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listVinsReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "RGListVinsCheckPresence: before call CloudBroker().RG().ListVINS", map[string]any{"response": listVinsReq}) + rgListVins, err := c.CloudBroker().RG().ListVINS(ctx, listVinsReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group list vins with error: %w", err) + } + + tflog.Info(ctx, "RGListVinsCheckPresence: response from CloudBroker().RG().ListVINS", map[string]any{"response": rgListVins}) + + return rgListVins, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_resource_consumption_list.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_resource_consumption_list.go new file mode 100644 index 0000000..1c3e733 --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_resource_consumption_list.go @@ -0,0 +1,22 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" +) + +func RGResourceConsumptionListCheckPresence(ctx context.Context, c *decort.DecortClient) (*rg.ListResourceConsumption, error) { + tflog.Info(ctx, "RGResourceConsumptionListCheckPresence: before call CloudBroker().RG().ListResourceConsumption") + resConsList, err := 
c.CloudBroker().RG().ListResourceConsumption(ctx) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group resource consumption list with error: %w", err) + } + + tflog.Info(ctx, "RGResourceConsumptionListCheckPresence: response from CloudBroker().RG().ListResourceConsumption", map[string]any{"response": resConsList}) + + return resConsList, err +} diff --git a/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_usage.go b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_usage.go new file mode 100644 index 0000000..d1ed0df --- /dev/null +++ b/internal/service/cloudbroker/rg/utilities/utility_data_source_rg_usage.go @@ -0,0 +1,29 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/rg/models" +) + +func RGUsageCheckPresence(ctx context.Context, plan *models.DataSourceRGUsageModel, c *decort.DecortClient) (*rg.Reservation, error) { + usageReq := rg.UsageRequest{RGID: uint64(plan.RGID.ValueInt64())} + + if !plan.Reason.IsNull() { + usageReq.Reason = plan.Reason.ValueString() + } + + tflog.Info(ctx, "RGUsageCheckPresence: before call CloudBroker().RG().Usage", map[string]any{"response": usageReq}) + usage, err := c.CloudBroker().RG().Usage(ctx, usageReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource group resource usage with error: %w", err) + } + + tflog.Info(ctx, "RGUsageCheckPresence: response from CloudBroker().RG().Usage", map[string]any{"response": usage}) + + return usage, err +} diff --git a/internal/service/cloudbroker/vfpool/data_source_cb_vfpool.go b/internal/service/cloudbroker/vfpool/data_source_cb_vfpool.go new file mode 100644 index 0000000..dbb6c04 --- /dev/null +++ 
b/internal/service/cloudbroker/vfpool/data_source_cb_vfpool.go @@ -0,0 +1,91 @@ +package cbVFpool + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVFPool{} +) + +func NewDataSourceVFPool() datasource.DataSource { + return &dataSourceVFPool{} +} + +// dataSourceVFPool is the data source implementation. +type dataSourceVFPool struct { + client *decort.DecortClient +} + +func (d *dataSourceVFPool) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.ItemVFPoolModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPool: Error get state") + return + } + id := uint64(state.VFPoolID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceVFPool: got state successfully", map[string]any{"vfpool_id": id}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPool: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVFPool: set timeouts successfully", map[string]any{ + "vfpool_id": id, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VFPoolDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPool: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPool: Error set state") + return + } + tflog.Info(ctx, "End read vfpool", map[string]any{"vfpool_id": id}) +} + +func (d *dataSourceVFPool) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVFPool(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVFPool) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vfpool" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceVFPool) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVFPool") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVFPool successfully") +} diff --git a/internal/service/cloudbroker/vfpool/data_source_cb_vfpool_list.go b/internal/service/cloudbroker/vfpool/data_source_cb_vfpool_list.go new file mode 100644 index 0000000..628fd5a --- /dev/null +++ b/internal/service/cloudbroker/vfpool/data_source_cb_vfpool_list.go @@ -0,0 +1,89 @@ +package cbVFpool + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVFPoolList{} +) + +func NewDataSourceVFPoolList() datasource.DataSource { + return &dataSourceVFPoolList{} +} + +// dataSourceVFPoolList is the data source implementation. 
+type dataSourceVFPoolList struct { + client *decort.DecortClient +} + +func (d *dataSourceVFPoolList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.ListVFPoolModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPoolList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceVFPoolList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPoolList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVFPoolList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VFPoolListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPoolList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVFPoolList: Error set state") + return + } + tflog.Info(ctx, "End read vfpool list") +} + +func (d *dataSourceVFPoolList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVFPoolList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVFPoolList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vfpool_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVFPoolList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVFPoolList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVFPoolList successfully") +} diff --git a/internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool.go b/internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool.go new file mode 100644 index 0000000..ea786ba --- /dev/null +++ b/internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool.go @@ -0,0 +1,92 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/utilities" +) + +// VFPoolDataSource flattens data source for vfpool. 
+// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VFPoolDataSource(ctx context.Context, state *models.ItemVFPoolModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VFPoolDataSource") + + diags := diag.Diagnostics{} + + vfPoolID := uint64(state.VFPoolID.ValueInt64()) + + record, err := utilities.VFPoolCheckPresence(ctx, vfPoolID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about VFpool with ID %v", vfPoolID), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.VFPoolDataSource: before flatten", map[string]any{"record": record}) + + *state = models.ItemVFPoolModel{ + //required fields + VFPoolID: state.VFPoolID, + + //optional fields + Timeouts: state.Timeouts, + + //compute fields + AccountAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, record.AccountAccess), + + CreatedTime: types.Int64Value(int64(record.CreatedTime)), + Description: types.StringValue(record.Description), + GID: types.Int64Value(int64(record.GID)), + GUID: types.Int64Value(int64(record.GUID)), + Name: types.StringValue(record.Name), + RGAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, record.RGAccess), + Status: types.StringValue(record.Status), + UpdatedTime: types.Int64Value(int64(record.UpdatedTime)), + } + + items := make([]models.VFSModel, 0, len(record.VFS)) + for _, vfsItem := range record.VFS { + i := models.VFSModel{ + NodeID: types.Int64Value(int64(vfsItem.NodeID)), + } + + vfList := make([]models.VFItemModel, 0, len(vfsItem.VFList)) + for _, vfItem := range vfsItem.VFList { + vfI := models.VFItemModel{ + NicName: types.StringValue(vfItem.NicName), + } + + vfInfoList := make([]models.VFSInfoItemModel, 0, len(vfItem.VFSInfo)) + for _, vfsInfoItem := range vfItem.VFSInfo { + vfsInfoI := models.VFSInfoItemModel{ + ID: types.Int64Value(int64(vfsInfoItem.ID)), + Claimed: types.BoolValue(vfsInfoItem.Claimed), + VMID: 
types.Int64Value(int64(vfsInfoItem.VMID)), + } + + vfInfoList = append(vfInfoList, vfsInfoI) + + } + + vfI.VFSInfo = vfInfoList + vfList = append(vfList, vfI) + + } + + i.VFList = vfList + items = append(items, i) + } + state.VFS = items + + tflog.Info(ctx, "flattens.VFPoolDataSource: after flatten") + + tflog.Info(ctx, "End flattens.VFPoolDataSource") + return nil +} diff --git a/internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool_list.go b/internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool_list.go new file mode 100644 index 0000000..f647bf3 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/flattens/flatten_data_source_cb_vfpool_list.go @@ -0,0 +1,104 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/utilities" +) + +// VFPoolListDataSource flattens data source for vfpool list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func VFPoolListDataSource(ctx context.Context, state *models.ListVFPoolModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VFPoolListDataSource") + + diags := diag.Diagnostics{} + + record, err := utilities.VFPoolListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about fvpool list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.VFPoolListDataSource: before flatten", map[string]any{"record": record}) + + *state = models.ListVFPoolModel{ + //optional fields + Timeouts: state.Timeouts, + ByID: state.ByID, + Name: state.Name, + Description: state.Description, + Status: state.Status, + AccountAccess: state.AccountAccess, + RgAccess: state.RgAccess, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + + //compute fields + EntryCount: types.Int64Value(int64(record.EntryCount)), + } + + data := make([]models.ItemVFPoolListModel, 0, len(record.Data)) + for _, item := range record.Data { + i := models.ItemVFPoolListModel{ + VFPoolID: types.Int64Value(int64(item.ID)), + AccountAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.AccountAccess), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + Description: types.StringValue(item.Description), + GID: types.Int64Value(int64(item.GID)), + GUID: types.Int64Value(int64(item.GUID)), + Name: types.StringValue(item.Name), + RGAccess: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, item.RGAccess), + Status: types.StringValue(item.Status), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + } + items := make([]models.VFSListModel, 0, len(item.VFS)) + for _, item := range item.VFS { + i := models.VFSListModel{ + NodeID: types.Int64Value(int64(item.NodeID)), + } + + vfList := make([]models.VFItemListModel, 0, len(item.VFList)) + for _, vfItem := range item.VFList { + vfI := models.VFItemListModel{ + NicName: types.StringValue(vfItem.NicName), + } + + vfInfoList := make([]models.VFSInfoItemListModel, 0, 
len(vfItem.VFSInfo)) + for _, vfInfoItem := range vfItem.VFSInfo { + vfInfoI := models.VFSInfoItemListModel{ + ID: types.Int64Value(int64(vfInfoItem.ID)), + Claimed: types.BoolValue(vfInfoItem.Claimed), + VMID: types.Int64Value(int64(vfInfoItem.VMID)), + } + + vfInfoList = append(vfInfoList, vfInfoI) + + } + + vfI.VFSInfo = vfInfoList + vfList = append(vfList, vfI) + + } + + i.VFList = vfList + items = append(items, i) + } + + i.VFS = items + data = append(data, i) + } + state.Items = data + + tflog.Info(ctx, "flattens.VFPoolListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.VFPoolListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/vfpool/flattens/flatten_resource_cb_vfpool.go b/internal/service/cloudbroker/vfpool/flattens/flatten_resource_cb_vfpool.go new file mode 100644 index 0000000..bc157b7 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/flattens/flatten_resource_cb_vfpool.go @@ -0,0 +1,146 @@ +package flattens + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/utilities" +) + +// VFPoolResource flattens data source for vfpool. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func VFPoolResource(ctx context.Context, state *models.ResourceItemVFPoolModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VFPoolResource") + + diags := diag.Diagnostics{} + + vfPoolID := uint64(state.VFPoolID.ValueInt64()) + + if vfPoolID == 0 { + id, err := strconv.Atoi(state.ID.ValueString()) + if err != nil { + diags.AddError( + "flattens.VFPoolResource: cannot parse resource ID from state", + err.Error()) + return diags + } + vfPoolID = uint64(id) + } + + record, err := utilities.ResourceVFPoolCheckPresence(ctx, vfPoolID, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about VFpool with ID %v", vfPoolID), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.VFPoolResource: before flatten", map[string]any{"record": record}) + + *state = models.ResourceItemVFPoolModel{ + //required fields + Name: state.Name, + + //optional fields + Description: state.Description, + AccountAccess: state.AccountAccess, + RGAccess: state.RGAccess, + Config: state.Config, + Enable: state.Enable, + Timeouts: state.Timeouts, + + //compute fields + VFPoolID: types.Int64Value(int64(record.ID)), + CreatedTime: types.Int64Value(int64(record.CreatedTime)), + GID: types.Int64Value(int64(record.GID)), + GUID: types.Int64Value(int64(record.GUID)), + Status: types.StringValue(record.Status), + UpdatedTime: types.Int64Value(int64(record.UpdatedTime)), + VFS: flattenVFS(ctx, record.VFS), + ID: types.StringValue(strconv.Itoa(int(vfPoolID))), + } + + tflog.Info(ctx, "flattens.VFPoolResource: after flatten") + + tflog.Info(ctx, "End flattens.VFPoolResource") + return nil +} + +func flattenVFS(ctx context.Context, items []vfpool.VFS) types.List { + tflog.Info(ctx, "Start flattenVFS") + tempSlice := make([]types.Object, 0, len(items)) + for _, v := range items { + temp := models.ResourceIVFSModel{ + NodeID: types.Int64Value(int64(v.NodeID)), + VFList: flattenVFList(ctx, v.VFList), + } + obj, diags := types.ObjectValueFrom(ctx, 
models.ResourceIVFS, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVFS struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceIVFS}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVFS", diags)) + } + + tflog.Info(ctx, "End flattenVFS") + return res +} + +func flattenVFList(ctx context.Context, items vfpool.VFList) types.List { + tflog.Info(ctx, "Start flattenVFList") + tempSlice := make([]types.Object, 0, len(items)) + for _, v := range items { + temp := models.ResourceIVFItemModel{ + NicName: types.StringValue(v.NicName), + VFSInfo: flattenVFSInfo(ctx, v.VFSInfo), + } + obj, diags := types.ObjectValueFrom(ctx, models.ResourceIVFItem, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVFList struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceIVFItem}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVFList", diags)) + } + + tflog.Info(ctx, "End flattenVFList") + return res +} + +func flattenVFSInfo(ctx context.Context, items vfpool.VFSInfoList) types.List { + tflog.Info(ctx, "Start flattenVFSInfo") + tempSlice := make([]types.Object, 0, len(items)) + for _, v := range items { + temp := models.ResourceIVFSInfoItemModel{ + ID: types.Int64Value(int64(v.ID)), + Claimed: types.BoolValue(v.Claimed), + VMID: types.Int64Value(int64(v.VMID)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ResourceIVFSInfoItem, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVFSInfo struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceIVFSInfoItem}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVFSInfo", diags)) + } + + tflog.Info(ctx, 
"End flattenVFSInfo") + return res +} diff --git a/internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool.go b/internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool.go new file mode 100644 index 0000000..81e0d2e --- /dev/null +++ b/internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool.go @@ -0,0 +1,40 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ItemVFPoolModel struct { + //required and optional fields + VFPoolID types.Int64 `tfsdk:"vfpool_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // computed fields + AccountAccess types.List `tfsdk:"account_access"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Description types.String `tfsdk:"description"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + RGAccess types.List `tfsdk:"rg_access"` + Status types.String `tfsdk:"status"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VFS []VFSModel `tfsdk:"vfs"` +} + +type VFSModel struct { + NodeID types.Int64 `tfsdk:"node_id"` + VFList []VFItemModel `tfsdk:"vf_list"` +} + +type VFItemModel struct { + NicName types.String `tfsdk:"nic_name"` + VFSInfo []VFSInfoItemModel `tfsdk:"vfs_info"` +} + +type VFSInfoItemModel struct { + ID types.Int64 `tfsdk:"id"` + Claimed types.Bool `tfsdk:"claimed"` + VMID types.Int64 `tfsdk:"vm_id"` +} diff --git a/internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool_list.go b/internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool_list.go new file mode 100644 index 0000000..8c60369 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/models/model_data_source_cb_vfpool_list.go @@ -0,0 +1,55 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + 
+type ListVFPoolModel struct { + // optional fields + ByID types.Int64 `tfsdk:"by_id"` + GID types.Int64 `tfsdk:"gid"` + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + Status types.String `tfsdk:"status"` + AccountAccess types.Int64 `tfsdk:"account_access"` + RgAccess types.Int64 `tfsdk:"rg_access"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // compute fields + Items []ItemVFPoolListModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemVFPoolListModel struct { + VFPoolID types.Int64 `tfsdk:"vfpool_id"` + AccountAccess types.List `tfsdk:"account_access"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Description types.String `tfsdk:"description"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Name types.String `tfsdk:"name"` + RGAccess types.List `tfsdk:"rg_access"` + Status types.String `tfsdk:"status"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VFS []VFSListModel `tfsdk:"vfs"` +} + +type VFSListModel struct { + NodeID types.Int64 `tfsdk:"node_id"` + VFList []VFItemListModel `tfsdk:"vf_list"` +} + +type VFItemListModel struct { + NicName types.String `tfsdk:"nic_name"` + VFSInfo []VFSInfoItemListModel `tfsdk:"vfs_info"` +} + +type VFSInfoItemListModel struct { + ID types.Int64 `tfsdk:"id"` + Claimed types.Bool `tfsdk:"claimed"` + VMID types.Int64 `tfsdk:"vm_id"` +} diff --git a/internal/service/cloudbroker/vfpool/models/model_resource_cb_vfpool.go b/internal/service/cloudbroker/vfpool/models/model_resource_cb_vfpool.go new file mode 100644 index 0000000..100867d --- /dev/null +++ b/internal/service/cloudbroker/vfpool/models/model_resource_cb_vfpool.go @@ -0,0 +1,72 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + 
"github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceItemVFPoolModel struct { + //required and optional fields + Name types.String `tfsdk:"name"` + Description types.String `tfsdk:"description"` + AccountAccess types.List `tfsdk:"account_access"` + RGAccess types.List `tfsdk:"rg_access"` + Config types.List `tfsdk:"config"` + Enable types.Bool `tfsdk:"enable"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // computed fields + ID types.String `tfsdk:"id"` + VFPoolID types.Int64 `tfsdk:"vfpool_id"` + CreatedTime types.Int64 `tfsdk:"created_time"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Status types.String `tfsdk:"status"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VFS types.List `tfsdk:"vfs"` +} + +type ResourceItemVFPoolConfigModel struct { + NodeID types.Int64 `tfsdk:"node_id"` + NicName types.String `tfsdk:"nic_name"` + VFIDs types.List `tfsdk:"vf_ids"` +} + +var ResourceItemVFPoolConfig = map[string]attr.Type{ + "node_id": types.Int64Type, + "nic_name": types.StringType, + "vf_ids": types.ListType{ElemType: types.Int64Type}, +} + +type ResourceIVFSModel struct { + NodeID types.Int64 `tfsdk:"node_id"` + VFList types.List `tfsdk:"vf_list"` +} + +var ResourceIVFS = map[string]attr.Type{ + "node_id": types.Int64Type, + "vf_list": types.ListType{ElemType: types.ObjectType{AttrTypes: ResourceIVFItem}}, +} + +type ResourceIVFItemModel struct { + NicName types.String `tfsdk:"nic_name"` + VFSInfo types.List `tfsdk:"vfs_info"` +} + +var ResourceIVFItem = map[string]attr.Type{ + "nic_name": types.StringType, + "vfs_info": types.ListType{ElemType: types.ObjectType{AttrTypes: ResourceIVFSInfoItem}}, +} + +type ResourceIVFSInfoItemModel struct { + ID types.Int64 `tfsdk:"id"` + Claimed types.Bool `tfsdk:"claimed"` + VMID types.Int64 `tfsdk:"vm_id"` +} + +var ResourceIVFSInfoItem = map[string]attr.Type{ + "id": types.Int64Type, + "claimed": types.BoolType, + "vm_id": types.Int64Type, +} diff --git 
a/internal/service/cloudbroker/vfpool/resource_cb_vfpool.go b/internal/service/cloudbroker/vfpool/resource_cb_vfpool.go new file mode 100644 index 0000000..e62abcf --- /dev/null +++ b/internal/service/cloudbroker/vfpool/resource_cb_vfpool.go @@ -0,0 +1,294 @@ +package cbVFpool + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceVFPool{} + _ resource.ResourceWithImportState = &resourceVFPool{} +) + +// NewresourceVFPool is a helper function to simplify the provider implementation. +func NewResourceVFPool() resource.Resource { + return &resourceVFPool{} +} + +// resourceVFPool is the resource implementation. +type resourceVFPool struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. 
+func (r *resourceVFPool) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create resource group + var plan models.ResourceItemVFPoolModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVFPool: Error receiving the plan") + return + } + + contextCreateMap := map[string]any{ + "name": plan.Name.ValueString(), + } + tflog.Info(ctx, "Create resourceVFPool: got plan successfully", contextCreateMap) + tflog.Info(ctx, "Create resourceVFPool: start creating", contextCreateMap) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVFPool: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceVFPool: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Make create request and get response + id, diags := utilities.VFpoolResourceCreate(ctx, &plan, r.client) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + plan.ID = types.StringValue(strconv.Itoa(int(*id))) + tflog.Info(ctx, "VFPoolResourceCreatee: VFPool created", map[string]any{"vfpool_id": id}) + + tflog.Info(ctx, "VFPoolResourceCreatee: resource creation is completed", map[string]any{"vfpool_id": id}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.VFPoolResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Read refreshes the Terraform state with the latest data. 
+func (r *resourceVFPool) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceItemVFPoolModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVFPool: Error get state") + return + } + tflog.Info(ctx, "Read resourceVFPool: got state successfully", map[string]any{"vfpool_id": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVFPool: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceVFPool: set timeouts successfully", map[string]any{ + "vfpool_id": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.VFPoolResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVFPool: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVFPool: Error set state") + return + } + tflog.Info(ctx, "End read resourceVFPool") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceVFPool) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceItemVFPoolModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVFPool: Error receiving the plan") + return + } + + logMap := map[string]any{"vfpool_id": plan.ID.ValueString()} + tflog.Info(ctx, "Update resourceVFPool: got plan successfully", logMap) + + // Retrieve values from state + var state models.ResourceItemVFPoolModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVFPool: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceVFPool: got state successfully", logMap) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVFPool: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceVFPool: set timeouts successfully", map[string]any{ + "vfpool_id": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + _, err := strconv.Atoi(state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceVFPool: Cannot parse ID from state", err.Error()) + return + } + + if !plan.Name.Equal(state.Name) || !plan.Description.Equal(state.Description) || !plan.AccountAccess.Equal(state.AccountAccess) || !plan.RGAccess.Equal(state.RGAccess) || !plan.Config.Equal(state.Config) { + resp.Diagnostics.Append(utilities.UpdateVFpool(ctx, &state, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVFPool: Error update VFPool") + return + } + } + + if !plan.Enable.Equal(state.Enable) { + resp.Diagnostics.Append(utilities.EnableDisableVFpool(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVFPool: Error enabling/disabling VFPool") + return + } + } + + tflog.Info(ctx, "Update resourceVFPool: VFPool update is completed", logMap) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.VFPoolResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceVFPool) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceItemVFPoolModel + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceVFPool: Error get state") + return + } + tflog.Info(ctx, "Delete resourceVFPool: got state successfully", map[string]any{"serice_id": state.ID.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceVFPool: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceVFPool: set timeouts successfully", map[string]any{ + "vfpool_id": state.ID.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + ID, err := strconv.Atoi(state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Delete resourceVFPool: Cannot parse ID from state", err.Error()) + return + } + + if state.Status.ValueString() == "ENABLED" || state.Status.ValueString() == "CREATED" { + reqDisable := vfpool.DisableRequest{ + VFPoolID: uint64(ID), + } + + tflog.Info(ctx, fmt.Sprintf("resourceVFPool Delete: need to disable vfPool with ID: %d, after delete", ID)) + _, err = r.client.CloudBroker().VFPool().Disable(ctx, reqDisable) + if err != nil { + resp.Diagnostics.AddError("Delete resourceVFPool: Error disable VFPool with error: ", err.Error()) + return + } + tflog.Info(ctx, fmt.Sprintf("resourceVFPoolDelete: disable vfPool with ID: %d, complete", ID)) + } + + delReq := vfpool.DeleteRequest{ + VFPoolID: uint64(ID), + } + + tflog.Info(ctx, "Delete resourceVFPool: before call cloudbroker().VFPool().Delete", map[string]any{"req": delReq}) + _, err = r.client.CloudBroker().VFPool().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceVFPool: Error deleting VFPool with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceVFPool", map[string]any{"vfpool_id": state.ID.ValueString()}) +} + +// Schema defines the schema for the resource. 
+func (r *resourceVFPool) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceVFPool(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceVFPool) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vfpool" +} + +// Configure adds the provider configured client to the resource. +func (r *resourceVFPool) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceVFPool") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceVFPool successfully") +} + +func (r *resourceVFPool) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool.go b/internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool.go new file mode 100644 index 0000000..a878495 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool.go @@ -0,0 +1,79 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceVFPool() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "vfpool_id": schema.Int64Attribute{ + Required: true, + }, + "account_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + 
"description": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "rg_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vfs": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node_id": schema.Int64Attribute{ + Computed: true, + }, + "vf_list": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "nic_name": schema.StringAttribute{ + Computed: true, + }, + "vfs_info": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "claimed": schema.BoolAttribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool_list.go b/internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool_list.go new file mode 100644 index 0000000..66b7b2b --- /dev/null +++ b/internal/service/cloudbroker/vfpool/schemas/schema_data_source_cb_vfpool_list.go @@ -0,0 +1,119 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceVFPoolList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "by_id": schema.Int64Attribute{ + Optional: true, + }, + "gid": schema.Int64Attribute{ + Optional: true, + }, + "name": schema.StringAttribute{ + Optional: true, 
+ }, + "description": schema.StringAttribute{ + Optional: true, + }, + "status": schema.StringAttribute{ + Optional: true, + }, + "account_access": schema.Int64Attribute{ + Optional: true, + }, + "rg_access": schema.Int64Attribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "vfpool_id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "rg_access": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vfs": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node_id": schema.Int64Attribute{ + Computed: true, + }, + "vf_list": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "nic_name": schema.StringAttribute{ + Computed: true, + }, + "vfs_info": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "claimed": schema.BoolAttribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + 
}, + }, + }, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vfpool/schemas/schema_resource_cb_vfpool.go b/internal/service/cloudbroker/vfpool/schemas/schema_resource_cb_vfpool.go new file mode 100644 index 0000000..57e2a52 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/schemas/schema_resource_cb_vfpool.go @@ -0,0 +1,107 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceVFPool() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Required: true, + }, + "description": schema.StringAttribute{ + Optional: true, + }, + "account_access": schema.ListAttribute{ + Optional: true, + ElementType: types.Int64Type, + }, + "rg_access": schema.ListAttribute{ + Optional: true, + ElementType: types.Int64Type, + }, + "config": schema.ListNestedAttribute{ + Optional: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node_id": schema.Int64Attribute{ + Required: true, + }, + "nic_name": schema.StringAttribute{ + Required: true, + }, + "vf_ids": schema.ListAttribute{ + Required: true, + ElementType: types.Int64Type, + }, + }, + }, + }, + "enable": schema.BoolAttribute{ + Optional: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "vfpool_id": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vfs": schema.ListNestedAttribute{ + Computed: true, 
+ NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "node_id": schema.Int64Attribute{ + Computed: true, + }, + "vf_list": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "nic_name": schema.StringAttribute{ + Computed: true, + }, + "vfs_info": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "claimed": schema.BoolAttribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } +} diff --git a/internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool.go b/internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool.go new file mode 100644 index 0000000..56d8086 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool.go @@ -0,0 +1,25 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool" +) + +func VFPoolCheckPresence(ctx context.Context, vfPoolID uint64, c *decort.DecortClient) (*vfpool.RecordVFPool, + error) { + req := vfpool.GetRequest{VFPoolID: vfPoolID} + + tflog.Info(ctx, "VFPoolCheckPresence: before call CloudBroker().VFPool().Get", map[string]any{"req": req}) + vfPool, err := c.CloudBroker().VFPool().Get(ctx, req) + if err != nil { + return nil, fmt.Errorf("VFPoolCheckPresence: cannot get info about vfpool") + } + + tflog.Info(ctx, "VFPoolCheckPresence: response from CloudBroker().VFPool().Get", map[string]any{"response": 
vfPool}) + + return vfPool, err +} diff --git a/internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool_list.go b/internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool_list.go new file mode 100644 index 0000000..9798cb9 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/utilities/utility_data_source_cb_vfpool_list.go @@ -0,0 +1,57 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" +) + +func VFPoolListCheckPresence(ctx context.Context, plan *models.ListVFPoolModel, c *decort.DecortClient) (*vfpool.ListVFPool, + error) { + req := vfpool.ListRequest{} + + if !plan.ByID.IsNull() { + req.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.GID.IsNull() { + req.GID = uint64(plan.GID.ValueInt64()) + } + if !plan.Name.IsNull() { + req.Name = plan.Name.ValueString() + } + if !plan.Description.IsNull() { + req.Description = plan.Description.ValueString() + } + if !plan.Status.IsNull() { + req.Status = plan.Status.ValueString() + } + if !plan.AccountAccess.IsNull() { + req.AccountAccess = uint64(plan.AccountAccess.ValueInt64()) + } + if !plan.RgAccess.IsNull() { + req.RGAccess = uint64(plan.RgAccess.ValueInt64()) + } + if !plan.SortBy.IsNull() { + req.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + req.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + req.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "VFPoolListCheckPresence: before call CloudBroker().VFPool().List", map[string]any{"req": req}) + vfPool, err := c.CloudBroker().VFPool().List(ctx, req) + if err != nil { + return nil, fmt.Errorf("VFPoolListCheckPresence: cannot get info about vfpool list") + } + + tflog.Info(ctx, 
"VFPoolListCheckPresence: response from CloudBroker().VFPool().Get", map[string]any{"response": vfPool}) + + return vfPool, err +} diff --git a/internal/service/cloudbroker/vfpool/utilities/utility_resource_cb_vfpool.go b/internal/service/cloudbroker/vfpool/utilities/utility_resource_cb_vfpool.go new file mode 100644 index 0000000..6725683 --- /dev/null +++ b/internal/service/cloudbroker/vfpool/utilities/utility_resource_cb_vfpool.go @@ -0,0 +1,294 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vfpool" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vfpool/models" +) + +func ResourceVFPoolCheckPresence(ctx context.Context, vfPoolID uint64, c *decort.DecortClient) (*vfpool.RecordVFPool, + error) { + req := vfpool.GetRequest{VFPoolID: vfPoolID} + + tflog.Info(ctx, "ResourceVFPoolCheckPresence: before call CloudBroker().VFPool().Get", map[string]any{"req": req}) + vfPool, err := c.CloudBroker().VFPool().Get(ctx, req) + if err != nil { + return nil, fmt.Errorf("VFPoolCheckPresence: cannot get info about vfpool") + } + + tflog.Info(ctx, "ResourceVFPoolCheckPresence: response from CloudBroker().VFPool().Get", map[string]any{"response": vfPool}) + + return vfPool, err +} + +func VFpoolResourceCreate(ctx context.Context, plan *models.ResourceItemVFPoolModel, c *decort.DecortClient) (*uint64, diag.Diagnostics) { + tflog.Info(ctx, "Start VFpoolResourceCreate", map[string]any{"name": plan.Name.ValueString()}) + + diags := diag.Diagnostics{} + + req := vfpool.CreateRequest{ + Name: plan.Name.ValueString(), + } + + if 
!plan.Description.IsNull() { + req.Description = plan.Description.ValueString() + } + + if !plan.AccountAccess.IsNull() { + + accountAccessList := make([]uint64, len(plan.AccountAccess.Elements())) + + diags.Append(plan.AccountAccess.ElementsAs(ctx, &accountAccessList, true)...) + if diags.HasError() { + tflog.Error(ctx, "VFpoolResourceCreate: cannot populate VFpoolResourceCreate with plan.AccountAccess object element") + return nil, diags + } + + req.AccountAccess = accountAccessList + } + + if !plan.RGAccess.IsNull() { + + RGAccessList := make([]uint64, len(plan.RGAccess.Elements())) + + diags.Append(plan.RGAccess.ElementsAs(ctx, &RGAccessList, true)...) + if diags.HasError() { + tflog.Error(ctx, "VFpoolResourceCreate: cannot populate VFpoolResourceCreate with plan.RGAccess object element") + return nil, diags + } + req.RGAccess = RGAccessList + } + + diags.Append(checkParamsExistence(ctx, req.AccountAccess, req.RGAccess, c)...) + if diags.HasError() { + tflog.Error(ctx, "VFpoolResourceCreate: RGAccess or AccountAccess does not exist") + return nil, diags + } + + if !plan.Config.IsNull() { + configList := make([]models.ResourceItemVFPoolConfigModel, 0, len(plan.Config.Elements())) + diags.Append(plan.Config.ElementsAs(ctx, &configList, true)...) + if diags.HasError() { + tflog.Error(ctx, "VFpoolResourceCreate: cannot populate VFpoolResourceCreate with v.Config object element") + return nil, diags + } + + config := make([]vfpool.Config, 0, len(configList)) + + for _, v := range configList { + vfIDs := make([]uint64, 0, len(v.VFIDs.Elements())) + diags.Append(v.VFIDs.ElementsAs(ctx, &vfIDs, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "VFpoolResourceCreate: cannot populate UpdateVFpool with vfIDs object element") + return nil, diags + } + item := vfpool.Config{ + NodeID: uint64(v.NodeID.ValueInt64()), + NicName: v.NicName.ValueString(), + VFIDs: vfIDs} + + config = append(config, item) + } + req.Config = config + + } + + vfPoolID, err := c.CloudBroker().VFPool().Create(ctx, req) + if err != nil { + tflog.Error(ctx, "Error response for create VFpool", map[string]any{"error": err.Error()}) + diags.AddError("Unable to Create VFpool", err.Error()) + return nil, diags + } + + plan.ID = types.StringValue(strconv.Itoa(int(vfPoolID))) + + if !plan.Enable.IsNull() { + EnableDisableVFpool(ctx, plan, c) + } + + tflog.Info(ctx, "End VFpoolResourceCreate", map[string]any{"vfpool_id": vfPoolID}) + return &vfPoolID, diags +} + +// EnableDisableVFpool performs VFpool Enable/Disable request. +// Returns error in case of failures. +func EnableDisableVFpool(ctx context.Context, plan *models.ResourceItemVFPoolModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start EnableDisableVFpool", map[string]any{"vfpool_id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + ID, err := strconv.Atoi(plan.ID.ValueString()) + if err != nil { + diags.AddError("EnableDisableVFpool: Cannot parse ID from state", err.Error()) + return diags + } + + if plan.Enable.ValueBool() { + tflog.Info(ctx, "EnableDisableVFpool: before calling CloudBroker().VFPool().Enable", map[string]any{"vfpool_id": ID}) + res, err := c.CloudBroker().VFPool().Enable(ctx, vfpool.EnableRequest{VFPoolID: uint64(ID)}) + if err != nil { + diags.AddError( + "EnableDisableVFpool: cannot enable VFpool", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableVFpool: response from CloudBroker().VFPool().Enable", map[string]any{"vfpool_id": ID, "response": res}) + return nil + } else { + tflog.Info(ctx, "EnableDisableVFpool: before calling CloudBroker().VFPool().Disable", 
map[string]any{"vfpool_id": ID}) + res, err := c.CloudBroker().VFPool().Disable(ctx, vfpool.DisableRequest{VFPoolID: uint64(ID)}) + if err != nil { + diags.AddError( + "EnableDisableVFpool: cannot disable VFPool", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableVFpool: response from CloudBroker().VFPool().Disable", map[string]any{"vfpool_id": ID, "response": res}) + } + + return nil +} + +func UpdateVFpool(ctx context.Context, state *models.ResourceItemVFPoolModel, plan *models.ResourceItemVFPoolModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start UpdateVFpool", map[string]any{"vfpool_id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + ID, err := strconv.Atoi(plan.ID.ValueString()) + if err != nil { + diags.AddError("UpdateVFpool: Cannot parse ID from state", err.Error()) + return diags + } + + req := vfpool.UpdateRequest{ + VFPoolID: uint64(ID), + } + + if !plan.Name.Equal(state.Name) { + req.Name = plan.Name.ValueString() + } + + if !plan.Description.Equal(state.Description) { + req.Description = plan.Description.ValueString() + } + + if !plan.Config.Equal(state.Config) && !plan.Config.IsNull() { + configList := make([]models.ResourceItemVFPoolConfigModel, 0, len(plan.Config.Elements())) + diags.Append(plan.Config.ElementsAs(ctx, &configList, true)...) + if diags.HasError() { + tflog.Error(ctx, "UpdateVFpool: cannot populate UpdateVFpool with plan.Config object element") + return diags + } + + config := make([]vfpool.Config, 0, len(configList)) + + for _, v := range configList { + vfIDs := make([]uint64, 0, len(v.VFIDs.Elements())) + diags.Append(v.VFIDs.ElementsAs(ctx, &vfIDs, true)...) 
+			if diags.HasError() {
+				tflog.Error(ctx, "UpdateVFpool: cannot populate UpdateVFpool with vfIDs object element")
+				return diags
+			}
+			item := vfpool.Config{
+				NodeID:  uint64(v.NodeID.ValueInt64()),
+				NicName: v.NicName.ValueString(),
+				VFIDs:   vfIDs}
+
+			config = append(config, item)
+		}
+		req.Config = config
+	}
+
+	if !plan.AccountAccess.Equal(state.AccountAccess) && !plan.AccountAccess.IsNull() {
+		aaList := make([]uint64, 0, len(plan.AccountAccess.Elements()))
+		diags.Append(plan.AccountAccess.ElementsAs(ctx, &aaList, true)...)
+		if diags.HasError() {
+			tflog.Error(ctx, "UpdateVFpool: cannot populate UpdateVFpool with plan.AccountAccess object element")
+			return diags
+		}
+
+		req.AccountAccess = aaList
+	}
+
+	if !plan.RGAccess.Equal(state.RGAccess) && !plan.RGAccess.IsNull() {
+		rgAccessList := make([]uint64, 0, len(plan.RGAccess.Elements()))
+		diags.Append(plan.RGAccess.ElementsAs(ctx, &rgAccessList, true)...)
+		if diags.HasError() {
+			tflog.Error(ctx, "UpdateVFpool: cannot populate UpdateVFpool with plan.RGAccess object element")
+			return diags
+		}
+
+		req.RGAccess = rgAccessList
+	}
+
+	if state.Status.ValueString() == "ENABLED" || state.Status.ValueString() == "CREATED" {
+		reqDisable := vfpool.DisableRequest{
+			VFPoolID: uint64(ID),
+		}
+		tflog.Info(ctx, fmt.Sprintf("UpdateVFpool: need to disable vfPool with ID: %d, after update", ID))
+		_, err = c.CloudBroker().VFPool().Disable(ctx, reqDisable)
+		if err != nil {
+			diags.AddWarning(
+				"UpdateVFpool: cannot disable VFPool",
+				err.Error(),
+			)
+			return diags
+		}
+		tflog.Info(ctx, "utilityVFPoolUpdate: disable VFPool with complete")
+	}
+
+	_, err = c.CloudBroker().VFPool().Update(ctx, req)
+	if err != nil {
+		diags.AddError("UpdateVFpool: Cannot update", err.Error())
+		return diags
+	}
+	tflog.Info(ctx, fmt.Sprintf("utilityVFPoolUpdate: update vfPool with ID: %d, complete with params=%v", ID, req))
+
+	if plan.Enable.ValueBool() {
+		reqEnable := vfpool.EnableRequest{
+			VFPoolID: uint64(ID),
+		}
+
tflog.Info(ctx, fmt.Sprintf("utilityVFPoolUpdate: start to enable vfPool with ID: %d, after update", ID)) + _, err = c.CloudBroker().VFPool().Enable(ctx, reqEnable) + if err != nil { + diags.AddWarning( + "UpdateVFpool: cannot enable VFPool", + err.Error(), + ) + return diags + } + tflog.Info(ctx, fmt.Sprintf("utilityVFPoolUpdate: enable vfPool with ID: %d, complete", ID)) + } else { + diags.AddWarning( + "UpdateVFpool: vfPool is not enabled", + "the vfPool is not enabled after update, you must provide configuration for this resource, after enabling it", + ) + return diags + } + + return nil +} + +func checkParamsExistence(ctx context.Context, accountIDs, rgIDs []uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + if err := ic.ExistAccounts(ctx, accountIDs, c); err != nil { + diags.AddError("Error check input values", err.Error()) + } + + if err := ic.ExistRGs(ctx, rgIDs, c); err != nil { + diags.AddError("Error check input values", err.Error()) + } + + return diags +} diff --git a/internal/service/cloudbroker/vins/data_source_vins.go b/internal/service/cloudbroker/vins/data_source_vins.go new file mode 100644 index 0000000..a026800 --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins.go @@ -0,0 +1,91 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINS{} +) + +func NewDataSourceVINS() datasource.DataSource { + return &dataSourceVINS{} +} + +// dataSourceVINS is the data source implementation. +type dataSourceVINS struct { + client *decort.DecortClient +} + +func (d *dataSourceVINS) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINS: Error get state") + return + } + vinsID := uint64(state.VinsID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceVINS: got state successfully", map[string]any{"vins_id": vinsID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINS: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINS: set timeouts successfully", map[string]any{ + "vins_id": vinsID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINS: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINS: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINS", map[string]any{"vins_id": vinsID}) +} + +func (d *dataSourceVINS) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINS(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINS) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINS) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINS") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINS successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_audits.go b/internal/service/cloudbroker/vins/data_source_vins_audits.go new file mode 100644 index 0000000..e5607fa --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_audits.go @@ -0,0 +1,91 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSAudits{} +) + +func NewDataSourceVINSAudits() datasource.DataSource { + return &dataSourceVINSAudits{} +} + +// dataSourceVINSAudits is the data source implementation. +type dataSourceVINSAudits struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSAudits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSAuditsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSAudits: Error get state") + return + } + vinsID := uint64(state.VinsID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceVINSAudits: got state successfully", map[string]any{"vins_id": vinsID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSAudits: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSAudits: set timeouts successfully", map[string]any{ + "vins_id": vinsID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSAuditsDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSAudits: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSAudits: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSAudits", map[string]any{"vins_id": vinsID}) +} + +func (d *dataSourceVINSAudits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSAudits(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSAudits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_audits" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSAudits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSAudits") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSAudits successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_ext_net_list.go b/internal/service/cloudbroker/vins/data_source_vins_ext_net_list.go new file mode 100644 index 0000000..780f3e1 --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_ext_net_list.go @@ -0,0 +1,91 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSExtNetList{} +) + +func NewDataSourceVINSExtNetList() datasource.DataSource { + return &dataSourceVINSExtNetList{} +} + +// dataSourceVINSExtNetList is the data source implementation. +type dataSourceVINSExtNetList struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSExtNetList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSExtNetListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSExtNetList: Error get state") + return + } + vinsID := uint64(state.VinsID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceVINSExtNetList: got state successfully", map[string]any{"vins_id": vinsID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSExtNetList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSExtNetList: set timeouts successfully", map[string]any{ + "vins_id": vinsID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSExtNetListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSExtNetList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSExtNetList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSExtNetList", map[string]any{"vins_id": vinsID}) +} + +func (d *dataSourceVINSExtNetList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSExtNetList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSExtNetList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_ext_net_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSExtNetList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSExtNetList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSExtNetList successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_ip_list.go b/internal/service/cloudbroker/vins/data_source_vins_ip_list.go new file mode 100644 index 0000000..a344a79 --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_ip_list.go @@ -0,0 +1,91 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSIPList{} +) + +func NewDataSourceVINSIPList() datasource.DataSource { + return &dataSourceVINSIPList{} +} + +// dataSourceVINSIPList is the data source implementation. +type dataSourceVINSIPList struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSIPList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSIPListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSIPList: Error get state") + return + } + vinsID := uint64(state.VinsID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceVINSIPList: got state successfully", map[string]any{"vins_id": vinsID}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSIPList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSIPList: set timeouts successfully", map[string]any{ + "vins_id": vinsID, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSIPListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSIPList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSIPList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSIPList", map[string]any{"vins_id": vinsID}) +} + +func (d *dataSourceVINSIPList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSIPList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSIPList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_ip_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSIPList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSIPList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSIPList successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_list.go b/internal/service/cloudbroker/vins/data_source_vins_list.go new file mode 100644 index 0000000..f7445de --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_list.go @@ -0,0 +1,88 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSList{} +) + +func NewDataSourceVINSList() datasource.DataSource { + return &dataSourceVINSList{} +} + +// dataSourceVINSList is the data source implementation. +type dataSourceVINSList struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceVINSList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSList") +} + +func (d *dataSourceVINSList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSList successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_list_deleted.go b/internal/service/cloudbroker/vins/data_source_vins_list_deleted.go new file mode 100644 index 0000000..52d0924 --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_list_deleted.go @@ -0,0 +1,88 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSListDeleted{} +) + +func NewDataSourceVINSListDeleted() datasource.DataSource { + return &dataSourceVINSListDeleted{} +} + +// dataSourceVINSListDeleted is the data source implementation. +type dataSourceVINSListDeleted struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSListDeletedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSListDeleted: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceVINSListDeleted: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSListDeletedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSListDeleted: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSListDeleted") +} + +func (d *dataSourceVINSListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_list_deleted" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSListDeleted successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_nat_rule_list.go b/internal/service/cloudbroker/vins/data_source_vins_nat_rule_list.go new file mode 100644 index 0000000..bbb2716 --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_nat_rule_list.go @@ -0,0 +1,89 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSNATRuleList{} +) + +func NewDataSourceVINSNATRuleList() datasource.DataSource { + return &dataSourceVINSNATRuleList{} +} + +// dataSourceVINSNATRuleList is the data source implementation. +type dataSourceVINSNATRuleList struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSNATRuleList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSNATRuleListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSNATRuleList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceVINSNATRuleList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSNATRuleList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSNATRuleList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSNATRuleListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSNATRuleList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+	if resp.Diagnostics.HasError() {
+		tflog.Error(ctx, "Read dataSourceVINSNATRuleList: Error set state")
+		return
+	}
+	tflog.Info(ctx, "End read dataSourceVINSNATRuleList")
+}
+
+func (d *dataSourceVINSNATRuleList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
+	resp.Schema = schema.Schema{
+		Attributes: schemas.MakeSchemaDataSourceVINSNATRuleList(),
+		Blocks: map[string]schema.Block{
+			"timeouts": timeouts.Block(ctx),
+		},
+	}
+}
+
+func (d *dataSourceVINSNATRuleList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
+	resp.TypeName = req.ProviderTypeName + "_cb_vins_nat_rule_list"
+}
+
+// Configure adds the provider configured client to the data source.
+func (d *dataSourceVINSNATRuleList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) {
+	tflog.Info(ctx, "Get Configure dataSourceVINSNATRuleList")
+	d.client = client.DataSource(ctx, &req, resp)
+	tflog.Info(ctx,
+		"Getting Configure dataSourceVINSNATRuleList successfully")
+}
diff --git a/internal/service/cloudbroker/vins/data_source_vins_static_route.go b/internal/service/cloudbroker/vins/data_source_vins_static_route.go
new file mode 100644
index 0000000..061f7a5
--- /dev/null
+++ b/internal/service/cloudbroker/vins/data_source_vins_static_route.go
@@ -0,0 +1,88 @@
+package vins
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSStaticRoute{} +) + +func NewDataSourceVINSStaticRoute() datasource.DataSource { + return &dataSourceVINSStaticRoute{} +} + +// dataSourceVINSStaticRoute is the data source implementation. +type dataSourceVINSStaticRoute struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSStaticRoute) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSStaticRouteModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRoute: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceVINSStaticRoute: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRoute: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSStaticRoute: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSStaticRouteDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRoute: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRoute: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSStaticRoute") +} + +func (d *dataSourceVINSStaticRoute) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSStaticRoute(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSStaticRoute) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_static_route" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSStaticRoute) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSStaticRoute") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSStaticRoute successfully") +} diff --git a/internal/service/cloudbroker/vins/data_source_vins_static_route_list.go b/internal/service/cloudbroker/vins/data_source_vins_static_route_list.go new file mode 100644 index 0000000..a58061f --- /dev/null +++ b/internal/service/cloudbroker/vins/data_source_vins_static_route_list.go @@ -0,0 +1,88 @@ +package vins + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceVINSStaticRouteList{} +) + +func NewDataSourceVINSStaticRouteList() datasource.DataSource { + return &dataSourceVINSStaticRouteList{} +} + +// dataSourceVINSStaticRouteList is the data source implementation. +type dataSourceVINSStaticRouteList struct { + client *decort.DecortClient +} + +func (d *dataSourceVINSStaticRouteList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceVINSStaticRouteListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRouteList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceVINSStaticRouteList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRouteList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceVINSStaticRouteList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.VINSStaticRouteListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRouteList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceVINSStaticRouteList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceVINSStaticRouteList") +} + +func (d *dataSourceVINSStaticRouteList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceVINSStaticRouteList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceVINSStaticRouteList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_static_route_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceVINSStaticRouteList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceVINSStaticRouteList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceVINSStaticRouteList successfully") +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins.go new file mode 100644 index 0000000..e17cf9d --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins.go @@ -0,0 +1,585 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSDataSource flattens data source for vins. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VINSDataSource(ctx context.Context, state *models.DataSourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSDataSource") + + diags := diag.Diagnostics{} + + vinsId := uint64(state.VinsID.ValueInt64()) + + recordVins, diags := utilities.VINSDataSourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSDataSource: before flatten", map[string]any{"vins_id": vinsId}) + + id := uuid.New() + *state = models.DataSourceVINSModel{ + VinsID: state.VinsID, + Reason: state.Reason, + Timeouts: state.Timeouts, + + VNFDev: flattenVNFDev(ctx, &recordVins.VNFDev), + AccountID: types.Int64Value(int64(recordVins.AccountID)), + AccountName: types.StringValue(recordVins.AccountName), + CreatedBy: types.StringValue(recordVins.CreatedBy), + CreatedTime: types.Int64Value(int64(recordVins.CreatedTime)), + DefaultGW: types.StringValue(recordVins.DefaultGW), + DefaultQOS: flattenQOS(ctx, &recordVins.DefaultQOS), + DeletedBy: types.StringValue(recordVins.DeletedBy), + DeletedTime: types.Int64Value(int64(recordVins.DeletedTime)), + Description: types.StringValue(recordVins.Description), + GID: types.Int64Value(int64(recordVins.GID)), + GUID: types.Int64Value(int64(recordVins.GUID)), + Id: types.StringValue(id.String()), + LockStatus: types.StringValue(recordVins.LockStatus), + ManagerID: types.Int64Value(int64(recordVins.ManagerID)), + ManagerType: types.StringValue(recordVins.ManagerType), + Milestones: types.Int64Value(int64(recordVins.Milestones)), + Name: types.StringValue(recordVins.Name), + NetMask: types.Int64Value(int64(recordVins.NetMask)), + Network: types.StringValue(recordVins.Network), + PreReservationsNum: 
types.Int64Value(int64(recordVins.PreReservationsNum)), + Redundant: types.BoolValue(recordVins.Redundant), + RGID: types.Int64Value(int64(recordVins.RGID)), + RGName: types.StringValue(recordVins.RGName), + SecVNFDevID: types.Int64Value(int64(recordVins.SecVNFDevID)), + Status: types.StringValue(recordVins.Status), + UpdatedBy: types.StringValue(recordVins.UpdatedBy), + UpdatedTime: types.Int64Value(int64(recordVins.UpdatedTime)), + UserManaged: types.BoolValue(recordVins.UserManaged), + VNFs: flattenVNFs(ctx, &recordVins.VNFs), + VXLANID: types.Int64Value(int64(recordVins.VXLANID)), + } + + tflog.Info(ctx, "flattens.VINSDataSource: after flatten", map[string]any{"vins_id": state.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.VINSDataSource", map[string]any{"vins_id": state.Id.ValueString()}) + return nil +} + +// flattenVNFDev flattens vnfdev. +// Flatten errors are added to tflog. +func flattenVNFDev(ctx context.Context, vnfdev *vins.VNFDev) types.Object { + tflog.Info(ctx, "Start flattenVNFDev") + + temp := models.RecordVNFDevModel{ + CKey: types.StringValue(vnfdev.CKey), + AccountID: types.Int64Value(int64(vnfdev.AccountID)), + Capabilities: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &vnfdev.Capabilities), + Config: flattenVNFDevConfig(ctx, &vnfdev.Config), + ConfigSaved: types.BoolValue(vnfdev.ConfigSaved), + CustomPreConfig: types.BoolValue(vnfdev.CustomPreConfig), + Description: types.StringValue(vnfdev.Description), + GID: types.Int64Value(int64(vnfdev.GID)), + GUID: types.Int64Value(int64(vnfdev.GUID)), + ID: types.Int64Value(int64(vnfdev.ID)), + Interfaces: flattenInterfaces(ctx, &vnfdev.Interfaces), + LockStatus: types.StringValue(vnfdev.LockStatus), + Meta: flattens.FlattenSimpleTypeToList(ctx, types.StringType, &vnfdev.Meta), + Milestones: types.Int64Value(int64(vnfdev.Milestones)), + Name: types.StringValue(vnfdev.Name), + Status: types.StringValue(vnfdev.Status), + TechStatus: types.StringValue(vnfdev.TechStatus), + Type: 
types.StringValue(vnfdev.Type), + VINS: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, &vnfdev.VINS), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemVNFDev, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVNFDev struct to obj", err)) + } + + tflog.Info(ctx, "End flattenVNFDev") + return res +} + +// flattenVNFDevConfig flattens config. +// Flatten errors are added to tflog. +func flattenVNFDevConfig(ctx context.Context, config *vins.Config) types.Object { + tflog.Info(ctx, "Start flattenVNFDevConfig") + + temp := models.RecordVNFConfigModel{ + MGMT: flattenMgmtVNFConfig(ctx, &config.MGMT), + Resources: flattenResourcesVNFConfig(ctx, &config.Resources), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemVNFConfig, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVNFDevConfig struct to obj", err)) + } + + tflog.Info(ctx, "End flattenVNFDevConfig") + return res +} + +// flattenMgmtVNFConfig flattens mgmt. +// Flatten errors are added to tflog. +func flattenMgmtVNFConfig(ctx context.Context, mgmt *vins.MGMT) types.Object { + tflog.Info(ctx, "Start flattenMgmtVNFConfig") + + temp := models.RecordMGMTModel{ + IPAddress: types.StringValue(mgmt.IPAddress), + Password: types.StringValue(mgmt.Password), + SSHKey: types.StringValue(mgmt.SSHKey), + User: types.StringValue(mgmt.User), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemMgmt, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenMgmtVNFConfig struct to obj", err)) + } + + tflog.Info(ctx, "End flattenMgmtVNFConfig") + return res +} + +// flattenResourcesVNFConfig flattens resources. +// Flatten errors are added to tflog. 
+func flattenResourcesVNFConfig(ctx context.Context, resources *vins.Resources) types.Object { + tflog.Info(ctx, "Start flattenResourcesVNFConfig") + + temp := models.RecordResourcesModel{ + CPU: types.Int64Value(int64(resources.CPU)), + RAM: types.Int64Value(int64(resources.RAM)), + StackID: types.Int64Value(int64(resources.StackID)), + UUID: types.StringValue(resources.UUID), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemResources, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenResourcesVNFConfig struct to obj", err)) + } + + tflog.Info(ctx, "End flattenResourcesVNFConfig") + return res +} + +// flattenInterfaces flattens interfaces. +// Flatten errors are added to tflog. +func flattenInterfaces(ctx context.Context, items *vins.ListInterfaces) types.List { + tflog.Info(ctx, "Start flattenInterfaces") + diags := diag.Diagnostics{} + + tempSlice := make([]types.Object, 0, len(*items)) + for _, item := range *items { + temp := models.VNFInterfaceModel{ + ConnID: types.Int64Value(int64(item.ConnID)), + ConnType: types.StringValue(item.ConnType), + DefGW: types.StringValue(item.DefGW), + Enabled: types.BoolValue(item.Enabled), + FLIPGroupID: types.Int64Value(int64(item.FLIPGroupID)), + GUID: types.StringValue(item.GUID), + IPAddress: types.StringValue(item.IPAddress), + ListenSSH: types.BoolValue(item.ListenSSH), + MAC: types.StringValue(item.MAC), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetMask: types.Int64Value(int64(item.NetMask)), + NetType: types.StringValue(item.NetType), + NodeID: types.Int64Value(int64(item.NodeID)), + PCISlot: types.Int64Value(int64(item.PCISlot)), + QOS: flattenQOS(ctx, &item.QOS), + Target: types.StringValue(item.Target), + Type: types.StringValue(item.Type), + } + + temp.VNFs, diags = types.ListValueFrom(ctx, types.Int64Type, item.VNFs) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.flattenInterfaces: cannot flatten item.VNFs to temp.VNFs", 
diags)) + } + + obj, err := types.ObjectValueFrom(ctx, models.ItemInterface, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenInterfaces struct to obj", err)) + } + tempSlice = append(tempSlice, obj) + } + + res, err := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemInterface}, tempSlice) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenInterfaces", err)) + } + + tflog.Info(ctx, "End flattenInterfaces") + return res +} + +// flattenQOS flattens QOS. +// Flatten errors are added to tflog. +func flattenQOS(ctx context.Context, qos *vins.QOS) types.Object { + tflog.Info(ctx, "Start flattenQOS") + + temp := models.QOSModel{ + ERate: types.Int64Value(int64(qos.ERate)), + GUID: types.StringValue(qos.GUID), + InBurst: types.Int64Value(int64(qos.InBurst)), + InRate: types.Int64Value(int64(qos.InRate)), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemQOS, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenQOS struct to obj", err)) + } + + tflog.Info(ctx, "End flattenQOS") + return res +} + +// flattenVNFs flattens vnfs. +func flattenVNFs(ctx context.Context, vnfs *vins.RecordVNFs) types.Object { + tflog.Info(ctx, "Start flattenVNFs") + + temp := models.RecordVNFsModel{ + DHCP: flattenDHCP(ctx, &vnfs.DHCP), + GW: flattenGW(ctx, &vnfs.GW), + NAT: flattenNAT(ctx, &vnfs.NAT), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemVNFs, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenVNFs struct to obj", err)) + } + + tflog.Info(ctx, "End flattenVNFs") + return res +} + +// flattenDHCP flattens dhcp. +// Flatten errors are added to tflog. 
+func flattenDHCP(ctx context.Context, dhcp *vins.RecordDHCP) types.Object {
+	tflog.Info(ctx, "Start flattenDHCP")
+
+	temp := models.RecordDHCPModel{
+		CKey:        types.StringValue(dhcp.CKey),
+		AccountID:   types.Int64Value(int64(dhcp.AccountID)),
+		Config:      flattenDHCPConfig(ctx, &dhcp.Config),
+		CreatedTime: types.Int64Value(int64(dhcp.CreatedTime)),
+		Devices:     flattenDevices(ctx, &dhcp.Devices),
+		GID:         types.Int64Value(int64(dhcp.GID)),
+		GUID:        types.Int64Value(int64(dhcp.GUID)),
+		DHCPID:      types.Int64Value(int64(dhcp.ID)),
+		LockStatus:  types.StringValue(dhcp.LockStatus),
+		Milestones:  types.Int64Value(int64(dhcp.Milestones)),
+		OwnerID:     types.Int64Value(int64(dhcp.OwnerID)),
+		OwnerType:   types.StringValue(dhcp.OwnerType),
+		PureVirtual: types.BoolValue(dhcp.PureVirtual),
+		Routes:      flattenRoutes(ctx, &dhcp.Routes),
+		Status:      types.StringValue(dhcp.Status),
+		TechStatus:  types.StringValue(dhcp.TechStatus),
+		Type:        types.StringValue(dhcp.Type),
+	}
+
+	res, diags := types.ObjectValueFrom(ctx, models.ItemDHCP, temp)
+	// HasError() rather than a nil check: a Diagnostics value can be
+	// non-nil while holding only warnings. This also matches the check
+	// used by the other flatten helpers in this file.
+	if diags.HasError() {
+		tflog.Error(ctx, fmt.Sprint("Error flattenDHCP struct to obj", diags))
+	}
+
+	tflog.Info(ctx, "End flattenDHCP")
+	return res
+}
+
+// flattenDHCPConfig flattens dhcp config.
+// Flatten errors are added to tflog.
+func flattenDHCPConfig(ctx context.Context, config *vins.VNFsConfig) types.Object { + tflog.Info(ctx, "Start flattenDHCPConfig") + diags := diag.Diagnostics{} + + temp := models.RecordDHCPConfigModel{ + DefaultGW: types.StringValue(config.DefaultGW), + IPEnd: types.StringValue(config.IPEnd), + IPStart: types.StringValue(config.IPStart), + Lease: types.Int64Value(int64(config.Lease)), + NetMask: types.Int64Value(int64(config.NetMask)), + Network: types.StringValue(config.Network), + Reservations: flattenReservations(ctx, &config.Reservations), + } + + temp.DNS, diags = types.ListValueFrom(ctx, types.StringType, config.DNS) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.flattenDHCPConfig: cannot flatten config.DNS to temp.DNS", diags)) + } + + res, diags := types.ObjectValueFrom(ctx, models.ItemDHCPConfig, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenDHCPConfig struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenDHCPConfig") + return res +} + +// flattenReservations flattens dhcp config reservations. +// Flatten errors are added to tflog. 
+func flattenReservations(ctx context.Context, items *vins.ListReservations) types.List { + tflog.Info(ctx, "Start flattenReservations") + + tempSlice := make([]types.Object, 0, len(*items)) + for _, item := range *items { + temp := models.ReservationModel{ + ClientType: types.StringValue(item.ClientType), + Description: types.StringValue(item.Description), + DomainName: types.StringValue(item.DomainName), + Hostname: types.StringValue(item.Hostname), + IP: types.StringValue(item.IP), + MAC: types.StringValue(item.MAC), + Type: types.StringValue(item.Type), + VMID: types.Int64Value(int64(item.VMID)), + } + + obj, diags := types.ObjectValueFrom(ctx, models.ItemReservations, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenReservations struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemReservations}, tempSlice) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenReservations", diags)) + } + + tflog.Info(ctx, "End flattenReservations") + return res +} + +// flattenDevices flattens devices. +// Flatten errors are added to tflog. +func flattenDevices(ctx context.Context, devices *vins.Devices) types.Object { + tflog.Info(ctx, "Start flattenDevices") + + temp := models.DevicesModel{ + Primary: flattenPrimaryDevices(ctx, &devices.Primary), + } + + res, diags := types.ObjectValueFrom(ctx, models.ItemDevices, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenDevices struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenDevices") + return res +} + +// flattenPrimaryDevices flattens primary devices. +// Flatten errors are added to tflog. 
+func flattenPrimaryDevices(ctx context.Context, primary *vins.Primary) types.Object { + tflog.Info(ctx, "Start flattenPrimaryDevices") + + temp := models.PrimaryDevicesModel{ + DevID: types.Int64Value(int64(primary.DevID)), + IFace01: types.StringValue(primary.IFace01), + IFace02: types.StringValue(primary.IFace02), + } + + res, diags := types.ObjectValueFrom(ctx, models.ItemPrimaryDevices, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenPrimaryDevices struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenPrimaryDevices") + return res +} + +// flattenRoutes flattens routes. +// Flatten errors are added to tflog. +func flattenRoutes(ctx context.Context, items *vins.ListRoutes) types.List { + tflog.Info(ctx, "Start flattenRoutes") + diags := diag.Diagnostics{} + + tempSlice := make([]types.Object, 0, len(*items)) + for _, item := range *items { + temp := models.RoutesModel{ + Destination: types.StringValue(item.Destination), + Gateway: types.StringValue(item.Gateway), + GUID: types.StringValue(item.GUID), + RouteID: types.Int64Value(int64(item.ID)), + Netmask: types.StringValue(item.Netmask), + } + + temp.ComputeIds, diags = types.ListValueFrom(ctx, types.Int64Type, item.ComputeIds) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.flattenRoutes: cannot flatten item.ComputeIds to temp.ComputeIds", diags)) + } + + obj, diags := types.ObjectValueFrom(ctx, models.ItemRoute, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenRoutes struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemRoute}, tempSlice) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenRoutes", diags)) + } + + tflog.Info(ctx, "End flattenRoutes") + return res +} + +// flattenGW flattens gw. +// Flatten errors are added to tflog. 
+func flattenGW(ctx context.Context, gw *vins.RecordGW) types.Object {
+	tflog.Info(ctx, "Start flattenGW")
+
+	temp := models.RecordGWModel{
+		CKey:        types.StringValue(gw.CKey),
+		AccountID:   types.Int64Value(int64(gw.AccountID)),
+		Config:      flattenGWConfig(ctx, &gw.Config),
+		CreatedTime: types.Int64Value(int64(gw.CreatedTime)),
+		Devices:     flattenDevices(ctx, &gw.Devices),
+		GID:         types.Int64Value(int64(gw.GID)),
+		GUID:        types.Int64Value(int64(gw.GUID)),
+		GWID:        types.Int64Value(int64(gw.ID)),
+		LockStatus:  types.StringValue(gw.LockStatus),
+		Milestones:  types.Int64Value(int64(gw.Milestones)),
+		OwnerID:     types.Int64Value(int64(gw.OwnerID)),
+		OwnerType:   types.StringValue(gw.OwnerType),
+		PureVirtual: types.BoolValue(gw.PureVirtual),
+		Routes:      flattenRoutes(ctx, &gw.Routes),
+		Status:      types.StringValue(gw.Status),
+		TechStatus:  types.StringValue(gw.TechStatus),
+		Type:        types.StringValue(gw.Type),
+	}
+
+	res, diags := types.ObjectValueFrom(ctx, models.ItemGW, temp)
+	// HasError() rather than a nil check: a Diagnostics value can be
+	// non-nil while holding only warnings. This also matches the check
+	// used by the other flatten helpers in this file.
+	if diags.HasError() {
+		tflog.Error(ctx, fmt.Sprint("Error flattenGW struct to obj", diags))
+	}
+
+	tflog.Info(ctx, "End flattenGW")
+	return res
+}
+
+// flattenGWConfig flattens gw config.
+// Flatten errors are added to tflog.
+func flattenGWConfig(ctx context.Context, config *vins.GWConfig) types.Object {
+	tflog.Info(ctx, "Start flattenGWConfig")
+
+	temp := models.RecordGWConfigModel{
+		DefaultGW:  types.StringValue(config.DefaultGW),
+		ExtNetID:   types.Int64Value(int64(config.ExtNetID)),
+		ExtNetIP:   types.StringValue(config.ExtNetIP),
+		ExtNetMask: types.Int64Value(int64(config.ExtNetMask)),
+		QOS:        flattenQOS(ctx, &config.QOS),
+	}
+
+	res, diags := types.ObjectValueFrom(ctx, models.ItemGWConfig, temp)
+	if diags.HasError() {
+		tflog.Error(ctx, fmt.Sprint("Error flattenGWConfig struct to obj", diags))
+	}
+
+	tflog.Info(ctx, "End flattenGWConfig")
+	return res
+}
+
+// flattenNAT flattens nat.
+// Flatten errors are added to tflog.
+func flattenNAT(ctx context.Context, nat *vins.RecordNAT) types.Object { + tflog.Info(ctx, "Start flattenNAT") + + temp := models.RecordNATModel{ + CKey: types.StringValue(nat.CKey), + AccountID: types.Int64Value(int64(nat.AccountID)), + Config: flattenNATConfig(ctx, &nat.Config), + CreatedTime: types.Int64Value(int64(nat.CreatedTime)), + Devices: flattenDevices(ctx, &nat.Devices), + GID: types.Int64Value(int64(nat.GID)), + GUID: types.Int64Value(int64(nat.GUID)), + NatID: types.Int64Value(int64(nat.ID)), + LockStatus: types.StringValue(nat.LockStatus), + Milestones: types.Int64Value(int64(nat.Milestones)), + OwnerID: types.Int64Value(int64(nat.OwnerID)), + OwnerType: types.StringValue(nat.OwnerType), + PureVirtual: types.BoolValue(nat.PureVirtual), + Routes: flattenRoutes(ctx, &nat.Routes), + Status: types.StringValue(nat.Status), + TechStatus: types.StringValue(nat.TechStatus), + Type: types.StringValue(nat.Type), + } + + res, diags := types.ObjectValueFrom(ctx, models.ItemNAT, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenNAT struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenNAT") + return res +} + +// flattenRules flattens nat rules. +// Flatten errors are added to tflog. 
+func flattenRules(ctx context.Context, items *vins.ListNATRule) types.List { + tflog.Info(ctx, "Start flattenRules") + + tempSlice := make([]types.Object, 0, len(*items)) + for _, item := range *items { + temp := models.NATRuleModel{ + RuleID: types.Int64Value(int64(item.ID)), + LocalIP: types.StringValue(item.LocalIP), + LocalPort: types.Int64Value(int64(item.LocalPort)), + Protocol: types.StringValue(item.Protocol), + PublicPortEnd: types.Int64Value(int64(item.PublicPortEnd)), + PublicPortStart: types.Int64Value(int64(item.PublicPortStart)), + VMID: types.Int64Value(int64(item.VMID)), + VMName: types.StringValue(item.VMName), + } + + obj, diags := types.ObjectValueFrom(ctx, models.ItemNATRule, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenRules struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemNATRule}, tempSlice) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenRules", diags)) + } + + tflog.Info(ctx, "End flattenRules") + return res +} + +// flattenNATConfig flattens nat config. +// Flatten errors are added to tflog. 
+func flattenNATConfig(ctx context.Context, config *vins.NATConfig) types.Object { + tflog.Info(ctx, "Start flattenNATConfig") + diags := diag.Diagnostics{} + + temp := models.NATConfigModel{ + NetMask: types.Int64Value(int64(config.NetMask)), + Network: types.StringValue(config.Network), + Rules: flattenRules(ctx, &config.Rules), + } + + res, diags := types.ObjectValueFrom(ctx, models.ItemNATConfig, temp) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenNATConfig struct to obj", diags)) + } + + tflog.Info(ctx, "End flattenNATConfig") + return res +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_audits.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_audits.go new file mode 100644 index 0000000..15e23a5 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_audits.go @@ -0,0 +1,55 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSAuditsDataSource flattens data source for vins audits. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func VINSAuditsDataSource(ctx context.Context, state *models.DataSourceVINSAuditsModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSAuditsDataSource") + + diags := diag.Diagnostics{} + + vinsId := uint64(state.VinsID.ValueInt64()) + + audits, diags := utilities.VINSAuditsDataSourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSAuditsDataSource: before flatten", map[string]any{"vins_id": vinsId}) + + id := uuid.New() + *state = models.DataSourceVINSAuditsModel{ + VinsID: state.VinsID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemAuditModel, 0, len(*audits)) + for _, item := range *audits { + a := models.ItemAuditModel{ + Call: types.StringValue(item.Call), + ResponseTime: types.Float64Value(item.ResponseTime), + StatusCode: types.Int64Value(int64(item.StatusCode)), + Timestamp: types.Float64Value(item.Timestamp), + User: types.StringValue(item.User), + } + items = append(items, a) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSAuditsDataSource", map[string]any{"vins_id": state.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ext_net_list.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ext_net_list.go new file mode 100644 index 0000000..e34fb6f --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ext_net_list.go @@ -0,0 +1,57 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSExtNetListDataSource flattens data source for vins ext net list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VINSExtNetListDataSource(ctx context.Context, state *models.DataSourceVINSExtNetListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSExtNetListDataSource") + + diags := diag.Diagnostics{} + + vinsId := uint64(state.VinsID.ValueInt64()) + + extnetList, diags := utilities.VINSExtNetListDataSourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSExtNetListDataSource: before flatten", map[string]any{"vins_id": vinsId}) + + id := uuid.New() + *state = models.DataSourceVINSExtNetListModel{ + VinsID: state.VinsID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(extnetList.EntryCount)), + } + + items := make([]models.ItemExtNetVinsModel, 0, len(extnetList.Data)) + for _, item := range extnetList.Data { + en := models.ItemExtNetVinsModel{ + DefaultGW: types.StringValue(item.DefaultGW), + ExtNetID: types.Int64Value(int64(item.ExtNetID)), + IP: types.StringValue(item.IP), + PrefixLen: types.Int64Value(int64(item.PrefixLen)), + Status: types.StringValue(item.Status), + TechStatus: types.StringValue(item.TechStatus), + } + items = append(items, en) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSExtNetListDataSource", map[string]any{"vins_id": state.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ip_list.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ip_list.go new file mode 100644 index 0000000..0d2e7e3 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_ip_list.go @@ -0,0 +1,58 @@ +package 
flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSIPListDataSource flattens data source for vins ip list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VINSIPListDataSource(ctx context.Context, state *models.DataSourceVINSIPListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSIPListDataSource") + + diags := diag.Diagnostics{} + + vinsId := uint64(state.VinsID.ValueInt64()) + + ipList, diags := utilities.VINSIPListDataSourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSIPListDataSource: before flatten", map[string]any{"vins_id": vinsId}) + + id := uuid.New() + *state = models.DataSourceVINSIPListModel{ + VinsID: state.VinsID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(ipList.EntryCount)), + } + + items := make([]models.ItemIPVinsModel, 0, len(ipList.Data)) + for _, item := range ipList.Data { + ip := models.ItemIPVinsModel{ + ClientType: types.StringValue(item.ClientType), + DomainName: types.StringValue(item.DomainName), + Hostname: types.StringValue(item.Hostname), + IP: types.StringValue(item.IP), + MAC: types.StringValue(item.MAC), + Type: types.StringValue(item.Type), + VMID: types.Int64Value(int64(item.VMID)), + } + items = append(items, ip) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSIPListDataSource", map[string]any{"vins_id": state.Id.ValueString()}) + return nil 
+} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list.go new file mode 100644 index 0000000..8db3feb --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list.go @@ -0,0 +1,117 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSListDataSource flattens data source for vins list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func VINSListDataSource(ctx context.Context, state *models.DataSourceVINSListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSListDataSource") + + diags := diag.Diagnostics{} + + vinsList, diags := utilities.VINSListDataSourceCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceVINSListModel{ + ByID: state.ByID, + Name: state.Name, + AccountID: state.AccountID, + RGID: state.RGID, + ExtIP: state.ExtIP, + VNFDevID: state.VNFDevID, + IncludeDeleted: state.IncludeDeleted, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(vinsList.EntryCount)), + } + + items := make([]models.ItemVinsModel, 0, len(vinsList.Data)) + for _, item := range vinsList.Data { + v := models.ItemVinsModel{ + AccountID: types.Int64Value(int64(item.AccountID)), + AccountName: types.StringValue(item.AccountName), + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DefaultGW: types.StringValue(item.DefaultGW), + DefaultQOS: flattenQOS(ctx, &item.DefaultQOS), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + Description: types.StringValue(item.Description), + ExternalIP: types.StringValue(item.ExternalIP), + ExtnetID: types.Int64Value(int64(item.ExtnetId)), + FreeIPs: types.Int64Value(int64(item.FreeIPs)), + GID: types.Int64Value(int64(item.GID)), + GUID: types.Int64Value(int64(item.GUID)), + ID: types.Int64Value(int64(item.ID)), + LockStatus: types.StringValue(item.LockStatus), + ManagerID: types.Int64Value(int64(item.ManagerID)), + ManagerType: types.StringValue(item.ManagerType), + Milestones: types.Int64Value(int64(item.Milestones)), + Name: types.StringValue(item.Name), + Netmask: 
types.Int64Value(int64(item.NetMask)), + Network: types.StringValue(item.Network), + PreReservationsNum: types.Int64Value(int64(item.PreReservationsNum)), + PriVNFDevID: types.Int64Value(int64(item.PriVNFDevID)), + Redundant: types.BoolValue(item.Redundant), + RGID: types.Int64Value(int64(item.RGID)), + RGName: types.StringValue(item.RGName), + SecVNFDefID: types.Int64Value(int64(item.SecVNFDevID)), + Status: types.StringValue(item.Status), + UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + UserManaged: types.BoolValue(item.UserManaged), + VNFs: flattenListVNFs(ctx, &item.VNFs), + VXLANID: types.Int64Value(int64(item.VXLANID)), + } + items = append(items, v) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSListDataSource") + return nil +} + +// flattenListVNFs flattens ItemListVNFs. +// Flatten errors are added to tflog. +func flattenListVNFs(ctx context.Context, iv *vins.ItemVNFs) types.Object { + tflog.Info(ctx, "Start flattenListVNFs") + + temp := models.ListVNFsModel{ + DHCP: types.Int64Value(int64(iv.DHCP)), + DNS: types.Int64Value(int64(iv.DNS)), + FW: types.Int64Value(int64(iv.FW)), + GW: types.Int64Value(int64(iv.GW)), + NAT: types.Int64Value(int64(iv.NAT)), + VPN: types.Int64Value(int64(iv.VPN)), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemListVNFs, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenListVNFs struct to obj", err)) + } + + tflog.Info(ctx, "End flattenListVNFs") + return res +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list_deleted.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list_deleted.go new file mode 100644 index 0000000..34740b9 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_list_deleted.go @@ -0,0 +1,88 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSListDeletedDataSource flattens data source for vins list deleted. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VINSListDeletedDataSource(ctx context.Context, state *models.DataSourceVINSListDeletedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSListDeletedDataSource") + + diags := diag.Diagnostics{} + + vinsList, diags := utilities.VINSListDeletedDataSourceCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSListDeletedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceVINSListDeletedModel{ + ByID: state.ByID, + Name: state.Name, + AccountID: state.AccountID, + RGID: state.RGID, + ExtIP: state.ExtIP, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(vinsList.EntryCount)), + } + + items := make([]models.ItemVinsDeletedModel, 0, len(vinsList.Data)) + for _, item := range vinsList.Data { + v := models.ItemVinsDeletedModel{ + AccountID: types.Int64Value(int64(item.AccountID)), + AccountName: types.StringValue(item.AccountName), + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DefaultGW: types.StringValue(item.DefaultGW), + DefaultQOS: flattenQOS(ctx, &item.DefaultQOS), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + 
Description: types.StringValue(item.Description), + ExternalIP: types.StringValue(item.ExternalIP), + GID: types.Int64Value(int64(item.GID)), + GUID: types.Int64Value(int64(item.GUID)), + ID: types.Int64Value(int64(item.ID)), + LockStatus: types.StringValue(item.LockStatus), + ManagerID: types.Int64Value(int64(item.ManagerID)), + ManagerType: types.StringValue(item.ManagerType), + Milestones: types.Int64Value(int64(item.Milestones)), + Name: types.StringValue(item.Name), + Netmask: types.Int64Value(int64(item.NetMask)), + Network: types.StringValue(item.Network), + PreReservationsNum: types.Int64Value(int64(item.PreReservationsNum)), + PriVNFDevID: types.Int64Value(int64(item.PriVNFDevID)), + Redundant: types.BoolValue(item.Redundant), + RGID: types.Int64Value(int64(item.RGID)), + RGName: types.StringValue(item.RGName), + SecVNFDefID: types.Int64Value(int64(item.SecVNFDevID)), + Status: types.StringValue(item.Status), + UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + UserManaged: types.BoolValue(item.UserManaged), + VNFs: flattenListVNFs(ctx, &item.VNFs), + VXLANID: types.Int64Value(int64(item.VXLANID)), + } + items = append(items, v) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSListDeletedDataSource") + return nil +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_nat_rule_list.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_nat_rule_list.go new file mode 100644 index 0000000..ef60d56 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_nat_rule_list.go @@ -0,0 +1,58 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSNATRuleListDataSource flattens data source for vins nat rule list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VINSNATRuleListDataSource(ctx context.Context, state *models.DataSourceVINSNATRuleListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSNATRuleListDataSource") + + diags := diag.Diagnostics{} + + natRulesList, diags := utilities.VINSNATRuleListDataSourceCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSNATRuleListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceVINSNATRuleListModel{ + VinsID: state.VinsID, + Reason: state.Reason, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(natRulesList.EntryCount)), + } + + items := make([]models.ItemVINSNATRuleModel, 0, len(natRulesList.Data)) + for _, item := range natRulesList.Data { + v := models.ItemVINSNATRuleModel{ + ID: types.Int64Value(int64(item.ID)), + LocalIP: types.StringValue(item.LocalIP), + LocalPort: types.Int64Value(int64(item.LocalPort)), + Protocol: types.StringValue(item.Protocol), + PublicPortEnd: types.Int64Value(int64(item.PublicPortEnd)), + PublicPortStart: types.Int64Value(int64(item.PublicPortStart)), + VMID: types.Int64Value(int64(item.VMID)), + VMName: types.StringValue(item.VMName), + } + items = append(items, v) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSNATRuleListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route.go new file mode 100644 index 
0000000..259e83b --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route.go @@ -0,0 +1,51 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSStaticRouteDataSource flattens data source for vins static route. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func VINSStaticRouteDataSource(ctx context.Context, state *models.DataSourceVINSStaticRouteModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSStaticRouteDataSource") + + diags := diag.Diagnostics{} + + route, diags := utilities.VINSStaticRouteDataSourceCheckPresence(ctx, uint64(state.VinsID.ValueInt64()), uint64(state.RouteID.ValueInt64()), c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSStaticRouteDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceVINSStaticRouteModel{ + VinsID: state.VinsID, + RouteID: state.RouteID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + Destination: types.StringValue(route.Destination), + Gateway: types.StringValue(route.Gateway), + GUID: types.StringValue(route.GUID), + Netmask: types.StringValue(route.Netmask), + } + + state.ComputeIds, diags = types.ListValueFrom(ctx, types.Int64Type, route.ComputeIds) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattens.VINSStaticRouteDataSource: cannot flatten route.ComputeIds to state.ComputeIds", diags)) + } + + tflog.Info(ctx, "End 
flattens.VINSStaticRouteDataSource") + return nil +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route_list.go b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route_list.go new file mode 100644 index 0000000..7b93620 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_data_source_vins_static_route_list.go @@ -0,0 +1,63 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSStaticRouteListDataSource flattens data source for vins static route list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func VINSStaticRouteListDataSource(ctx context.Context, state *models.DataSourceVINSStaticRouteListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSStaticRouteListDataSource") + + diags := diag.Diagnostics{} + + vinsId := uint64(state.VinsID.ValueInt64()) + + routesList, diags := utilities.VINSStaticRouteListDataSourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSStaticRouteListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceVINSStaticRouteListModel{ + VinsID: state.VinsID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(routesList.EntryCount)), + } + + items := make([]models.ItemVinsStaticRouteModel, 0, len(routesList.Data)) + for _, item := range routesList.Data { + v := models.ItemVinsStaticRouteModel{ + Destination: types.StringValue(item.Destination), + Gateway: types.StringValue(item.Gateway), + GUID: types.StringValue(item.GUID), + ID: types.Int64Value(int64(item.ID)), + Netmask: types.StringValue(item.Netmask), + } + + v.ComputeIds, diags = types.ListValueFrom(ctx, types.Int64Type, item.ComputeIds) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattens.VINSStaticRouteListDataSource: cannot flatten item.ComputeIds to v.ComputeIds", diags)) + } + + items = append(items, v) + } + state.Items = items + + tflog.Info(ctx, "End flattens.VINSStaticRouteListDataSource") + return nil +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_resource_vins.go b/internal/service/cloudbroker/vins/flattens/flatten_resource_vins.go new file mode 100644 index 0000000..21edc75 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_resource_vins.go @@ -0,0 +1,161 @@ +package flattens + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSResource flattens resource for vins. +// Return error in case resource is not found on the platform. +// Flatten errors are added to tflog. +func VINSResource(ctx context.Context, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSResource") + + diags := diag.Diagnostics{} + + vinsId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64) + if err != nil { + diags.AddError("Cannot parse vins ID from state", err.Error()) + return diags + } + + recordVins, diags := utilities.VINSResourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSResource: before flatten", map[string]any{"vins_id": vinsId}) + + *plan = models.ResourceVINSModel{ + Name: types.StringValue(recordVins.Name), + RGID: plan.RGID, + AccountID: plan.AccountID, + + IPCIDR: plan.IPCIDR, + PreReservationsNum: plan.PreReservationsNum, + Description: plan.Description, + GID: plan.GID, + DNS: plan.DNS, + + Enable: plan.Enable, + Permanently: plan.Permanently, + Force: plan.Force, + Restore: plan.Restore, + VnfdevRestart: plan.VnfdevRestart, + VnfdevRedeploy: plan.VnfdevRedeploy, + + ExtNet: plan.ExtNet, + IP: plan.IP, + NatRule: plan.NatRule, + Timeouts: plan.Timeouts, + + VinsID: types.Int64Value(int64(vinsId)), + Id: types.StringValue(strconv.Itoa(int(recordVins.ID))), + LastUpdated: plan.LastUpdated, + VNFDev: flattenVNFDev(ctx, &recordVins.VNFDev), + AccountName: types.StringValue(recordVins.AccountName), + CreatedBy: types.StringValue(recordVins.CreatedBy), + CreatedTime: 
types.Int64Value(int64(recordVins.CreatedTime)), + DefaultGW: types.StringValue(recordVins.DefaultGW), + DefaultQOS: flattenQOS(ctx, &recordVins.DefaultQOS), + DeletedBy: types.StringValue(recordVins.DeletedBy), + DeletedTime: types.Int64Value(int64(recordVins.DeletedTime)), + GUID: types.Int64Value(int64(recordVins.GUID)), + LockStatus: types.StringValue(recordVins.LockStatus), + ManagerID: types.Int64Value(int64(recordVins.ManagerID)), + ManagerType: types.StringValue(recordVins.ManagerType), + Milestones: types.Int64Value(int64(recordVins.Milestones)), + NetMask: types.Int64Value(int64(recordVins.NetMask)), + Network: types.StringValue(recordVins.Network), + Redundant: types.BoolValue(recordVins.Redundant), + RGName: types.StringValue(recordVins.RGName), + SecVNFDevID: types.Int64Value(int64(recordVins.SecVNFDevID)), + Status: types.StringValue(recordVins.Status), + UpdatedBy: types.StringValue(recordVins.UpdatedBy), + UpdatedTime: types.Int64Value(int64(recordVins.UpdatedTime)), + UserManaged: types.BoolValue(recordVins.UserManaged), + VNFs: flattenVNFs(ctx, &recordVins.VNFs), + VXLANID: types.Int64Value(int64(recordVins.VXLANID)), + } + + if plan.RGID.IsUnknown() { + plan.RGID = types.Int64Value(int64(recordVins.RGID)) + } + if plan.AccountID.IsUnknown() { + plan.AccountID = types.Int64Value(int64(recordVins.AccountID)) + } + if plan.GID.IsUnknown() { + plan.GID = types.Int64Value(int64(recordVins.GID)) + } + if plan.PreReservationsNum.IsUnknown() { + plan.PreReservationsNum = types.Int64Value(int64(recordVins.PreReservationsNum)) + } + if plan.Description.IsUnknown() { + plan.Description = types.StringValue(recordVins.Description) + } + if plan.DNS.IsNull() { + plan.DNS = types.SetNull(types.StringType) + } + + if !plan.NatRule.IsNull() { + plan.NatRule = flattenNatRule(ctx, plan, &recordVins.VNFs.NAT.Config.Rules) + } + + tflog.Info(ctx, "flattens.VINSResource: after flatten", map[string]any{"vins_id": plan.Id.ValueString()}) + + tflog.Info(ctx, "End 
flattens.VINSResource", map[string]any{"vins_id": plan.Id.ValueString()}) + return nil +} + +// flattenNatRule flattens nat rule parameters: +// - rule_id (computed), +// - int_port, ext_port_end, proto (optional & computed). +// Flatten errors are added to tflog. +func flattenNatRule(ctx context.Context, plan *models.ResourceVINSModel, rules *vins.ListNATRule) types.List { + tflog.Info(ctx, "Start flattenRuleIdInNatRule") + diags := diag.Diagnostics{} + + itemsNatRulePlan := make([]models.NatRuleResourceModel, 0, len(plan.NatRule.Elements())) + diags.Append(plan.NatRule.ElementsAs(ctx, &itemsNatRulePlan, false)...) + if diags.HasError() { + tflog.Error(ctx, "flattenRuleIdInNatRule: cannot populate itemsNatRulePlan with plan.NatRule list elements") + } + + for i, natRule := range itemsNatRulePlan { + ruleFromPlatform := natRule.GetNatRule(*rules) + + if ruleFromPlatform == nil { + tflog.Error(ctx, fmt.Sprintf("flattenRuleIdInNatRule: rule_id can not be flatten for natRule %v because such nat_rule does not exist", natRule)) + continue + } + + itemsNatRulePlan[i].RuleID = types.Int64Value(int64(ruleFromPlatform.ID)) + + if itemsNatRulePlan[i].IntPort.ValueInt64() == 0 { + itemsNatRulePlan[i].IntPort = types.Int64Value(int64(ruleFromPlatform.LocalPort)) + } + if itemsNatRulePlan[i].ExtPortEnd.ValueInt64() == 0 { + itemsNatRulePlan[i].ExtPortEnd = types.Int64Value(int64(ruleFromPlatform.PublicPortEnd)) + } + if itemsNatRulePlan[i].Proto.ValueString() == "" { + itemsNatRulePlan[i].Proto = types.StringValue(ruleFromPlatform.Protocol) + } + } + + res, err := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemNatRuleResource}, itemsNatRulePlan) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenRuleIdInNatRule", err)) + } + + tflog.Info(ctx, "End flattenRuleIdInNatRule") + return res +} diff --git a/internal/service/cloudbroker/vins/flattens/flatten_resource_vins_static_route.go 
b/internal/service/cloudbroker/vins/flattens/flatten_resource_vins_static_route.go new file mode 100644 index 0000000..b4fb9a8 --- /dev/null +++ b/internal/service/cloudbroker/vins/flattens/flatten_resource_vins_static_route.go @@ -0,0 +1,68 @@ +package flattens + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// VINSStaticRouteResource flattens resource for vins static route. +// Return error in case resource is not found on the platform. +// Flatten errors are added to tflog. +func VINSStaticRouteResource(ctx context.Context, plan *models.ResourceVINSStaticRouteModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.VINSStaticRouteResource") + + vinsId, routeId, diags := utilities.GetVinsIDAndRouteID(ctx, plan) + if diags.HasError() { + return diags + } + + recordRoute, diags := utilities.VINSStaticRouteResourceCheckPresence(ctx, vinsId, routeId, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.VINSStaticRouteResource: before flatten", map[string]any{"id": plan.Id.ValueString()}) + + *plan = models.ResourceVINSStaticRouteModel{ + VinsID: types.Int64Value(int64(vinsId)), + Destination: types.StringValue(recordRoute.Destination), + Netmask: types.StringValue(recordRoute.Netmask), + Gateway: types.StringValue(recordRoute.Gateway), + Timeouts: plan.Timeouts, + + RouteID: types.Int64Value(int64(routeId)), + ComputeIDs: plan.ComputeIDs, + + Id: plan.Id, + LastUpdated: plan.LastUpdated, + GUID: types.StringValue(recordRoute.GUID), + } + + if plan.ComputeIDs.IsUnknown() { + plan.ComputeIDs, 
diags = types.ListValueFrom(ctx, types.Int64Type, recordRoute.ComputeIds) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error VINSStaticRouteResource: cannot flatten recordRoute.ComputeIds to plan.ComputeIDs", diags)) + } + } + + if plan.Id.IsUnknown() { + plan.Id = types.StringValue(fmt.Sprintf("%d#%d", vinsId, routeId)) + } + + if plan.LastUpdated.IsUnknown() { + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + } + + tflog.Info(ctx, "flattens.VINSStaticRouteResource: after flatten", map[string]any{"id": plan.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.VINSStaticRouteResource", map[string]any{"id": plan.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudbroker/vins/input_checks.go b/internal/service/cloudbroker/vins/input_checks.go new file mode 100644 index 0000000..79a0b53 --- /dev/null +++ b/internal/service/cloudbroker/vins/input_checks.go @@ -0,0 +1,91 @@ +package vins + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/ic" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" +) + +// resourceVINSInputChecks checks if user provided rg_id, account_id, ext_net_id and gid are valid. +// It also checks that either rg_id or account_id is specified. 
+func resourceVINSInputChecks(ctx context.Context, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + if plan.RGID.IsUnknown() && plan.AccountID.IsUnknown() { + tflog.Error(ctx, "resourceVINSInputChecks: Error providing rg_id or account_id") + diags.AddError(fmt.Sprintf("Unable to validate rg_id and account_id"), "Either accountId or resource group ID should be specified") + return diags + } + + if !plan.RGID.IsUnknown() && !plan.AccountID.IsUnknown() { + tflog.Error(ctx, "resourceVINSInputChecks: Error providing rg_id and account_id at the same time") + diags.AddError(fmt.Sprintf("Unable to validate rg_id and account_id"), "Either rg_id or account_id must be provided") + return diags + } + + if !plan.RGID.IsUnknown() { // RGID is optional & computed + rgId := uint64(plan.RGID.ValueInt64()) + tflog.Info(ctx, "resourceVINSInputChecks: exist resource group", map[string]any{"rg_id": rgId}) + err := ic.ExistRG(ctx, rgId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about rg with ID %v", rgId), err.Error()) + } + } + + if !plan.ExtNet.IsNull() { // ExtNet is optional + var extnetPlan models.ExtNetModel + tflog.Info(ctx, "resourceVINSInputChecks: new extnet specified", map[string]any{"name": plan.Name.ValueString()}) + diags.Append(plan.ExtNet.As(ctx, &extnetPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true})...) 
+ if diags.HasError() { + tflog.Error(ctx, "resourceVINSInputChecks: cannot populate extnet with plan.ExtNet object element") + return diags + } + + extnetId := int(extnetPlan.ExtNetID.ValueInt64()) + tflog.Info(ctx, "resourceVINSInputChecks: exist ext_net check", map[string]any{"ext_net_id": extnetId}) + err := ic.ExistExtNetInVins(ctx, extnetId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about ext net with ID %v", extnetId), err.Error()) + } + } + + if !plan.AccountID.IsUnknown() { // AccountID is optional & computed + accountId := uint64(plan.AccountID.ValueInt64()) + tflog.Info(ctx, "resourceVINSInputChecks: exist account check", map[string]any{"account_id": accountId}) + err := ic.ExistAccount(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + } + } + + if !plan.GID.IsUnknown() { // GID is optional & computed + gid := uint64(plan.GID.ValueInt64()) + tflog.Info(ctx, "resourceVINSInputChecks: exist gid check", map[string]any{"gid": gid}) + err := ic.ExistGID(ctx, gid, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about gid with ID %v", gid), err.Error()) + } + } + + return diags +} + +// resourceVINSStaticRouteInputChecks checks if user provided vins_id is valid. 
+func resourceVINSStaticRouteInputChecks(ctx context.Context, plan *models.ResourceVINSStaticRouteModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + vinsId := uint64(plan.VinsID.ValueInt64()) + tflog.Info(ctx, "resourceVINSStaticRouteInputChecks: exist vins", map[string]any{"vins_id": vinsId}) + err := ic.ExistVins(ctx, vinsId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about vins with ID %v", vinsId), err.Error()) + } + + return diags +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins.go b/internal/service/cloudbroker/vins/models/model_data_source_vins.go new file mode 100644 index 0000000..9d531ca --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins.go @@ -0,0 +1,454 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// models + +type DataSourceVINSModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + Reason types.String `tfsdk:"reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + VNFDev types.Object `tfsdk:"vnf_dev"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefaultGW types.String `tfsdk:"default_gw"` + DefaultQOS types.Object `tfsdk:"default_qos"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"description"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + LockStatus types.String `tfsdk:"lock_status"` + ManagerID types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Milestones types.Int64 `tfsdk:"milestones"` + 
Name types.String `tfsdk:"name"` + NetMask types.Int64 `tfsdk:"netmask"` + Network types.String `tfsdk:"network"` + PreReservationsNum types.Int64 `tfsdk:"pre_reservations_num"` + Redundant types.Bool `tfsdk:"redundant"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + SecVNFDevID types.Int64 `tfsdk:"sec_vnf_dev_id"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VNFs types.Object `tfsdk:"vnfs"` + VXLANID types.Int64 `tfsdk:"vxlan_id"` +} + +type RecordVNFDevModel struct { + CKey types.String `tfsdk:"ckey"` + AccountID types.Int64 `tfsdk:"account_id"` + Capabilities types.List `tfsdk:"capabilities"` + Config types.Object `tfsdk:"config"` + ConfigSaved types.Bool `tfsdk:"config_saved"` + CustomPreConfig types.Bool `tfsdk:"custom_precfg"` + Description types.String `tfsdk:"description"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"id"` + Interfaces types.List `tfsdk:"interfaces"` + LockStatus types.String `tfsdk:"lock_status"` + Meta types.List `tfsdk:"meta"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VINS types.List `tfsdk:"vins"` +} + +type RecordVNFConfigModel struct { + MGMT types.Object `tfsdk:"mgmt"` + Resources types.Object `tfsdk:"resources"` +} + +type RecordMGMTModel struct { + IPAddress types.String `tfsdk:"ip_addr"` + Password types.String `tfsdk:"password"` + SSHKey types.String `tfsdk:"ssh_key"` + User types.String `tfsdk:"user"` +} + +type RecordResourcesModel struct { + CPU types.Int64 `tfsdk:"cpu"` + RAM types.Int64 `tfsdk:"ram"` + StackID types.Int64 `tfsdk:"stack_id"` + UUID types.String `tfsdk:"uuid"` +} + +type VNFInterfaceModel struct { + ConnID types.Int64 `tfsdk:"conn_id"` + 
ConnType types.String `tfsdk:"conn_type"` + DefGW types.String `tfsdk:"def_gw"` + Enabled types.Bool `tfsdk:"enabled"` + FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"` + GUID types.String `tfsdk:"guid"` + IPAddress types.String `tfsdk:"ip_address"` + ListenSSH types.Bool `tfsdk:"listen_ssh"` + MAC types.String `tfsdk:"mac"` + Name types.String `tfsdk:"name"` + NetID types.Int64 `tfsdk:"net_id"` + NetMask types.Int64 `tfsdk:"net_mask"` + NetType types.String `tfsdk:"net_type"` + NodeID types.Int64 `tfsdk:"node_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + QOS types.Object `tfsdk:"qos"` + Target types.String `tfsdk:"target"` + Type types.String `tfsdk:"type"` + VNFs types.List `tfsdk:"vnfs"` +} + +type QOSModel struct { + ERate types.Int64 `tfsdk:"e_rate"` + GUID types.String `tfsdk:"guid"` + InBurst types.Int64 `tfsdk:"in_burst"` + InRate types.Int64 `tfsdk:"in_rate"` +} + +type RecordVNFsModel struct { + DHCP types.Object `tfsdk:"dhcp"` + GW types.Object `tfsdk:"gw"` + NAT types.Object `tfsdk:"nat"` +} + +type RecordDHCPModel struct { + CKey types.String `tfsdk:"ckey"` + AccountID types.Int64 `tfsdk:"account_id"` + Config types.Object `tfsdk:"config"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Devices types.Object `tfsdk:"devices"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + DHCPID types.Int64 `tfsdk:"dhcp_id"` + LockStatus types.String `tfsdk:"lock_status"` + Milestones types.Int64 `tfsdk:"milestones"` + OwnerID types.Int64 `tfsdk:"owner_id"` + OwnerType types.String `tfsdk:"owner_type"` + PureVirtual types.Bool `tfsdk:"pure_virtual"` + Routes types.List `tfsdk:"routes"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` +} + +type RoutesModel struct { + ComputeIds types.List `tfsdk:"compute_ids"` + Destination types.String `tfsdk:"destination"` + Gateway types.String `tfsdk:"gateway"` + GUID types.String `tfsdk:"guid"` + RouteID types.Int64 `tfsdk:"route_id"` 
+ Netmask types.String `tfsdk:"netmask"` +} + +type DevicesModel struct { + Primary types.Object `tfsdk:"primary"` +} + +type PrimaryDevicesModel struct { + DevID types.Int64 `tfsdk:"dev_id"` + IFace01 types.String `tfsdk:"iface01"` + IFace02 types.String `tfsdk:"iface02"` +} + +type RecordDHCPConfigModel struct { + DefaultGW types.String `tfsdk:"default_gw"` + DNS types.List `tfsdk:"dns"` + IPEnd types.String `tfsdk:"ip_end"` + IPStart types.String `tfsdk:"ip_start"` + Lease types.Int64 `tfsdk:"lease"` + NetMask types.Int64 `tfsdk:"netmask"` + Network types.String `tfsdk:"network"` + Reservations types.List `tfsdk:"reservations"` +} + +type ReservationModel struct { + ClientType types.String `tfsdk:"client_type"` + Description types.String `tfsdk:"desc"` + DomainName types.String `tfsdk:"domainname"` + Hostname types.String `tfsdk:"hostname"` + IP types.String `tfsdk:"ip"` + MAC types.String `tfsdk:"mac"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vm_id"` +} + +type RecordNATModel struct { + CKey types.String `tfsdk:"ckey"` + AccountID types.Int64 `tfsdk:"account_id"` + Config types.Object `tfsdk:"config"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Devices types.Object `tfsdk:"devices"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + NatID types.Int64 `tfsdk:"nat_id"` + LockStatus types.String `tfsdk:"lock_status"` + Milestones types.Int64 `tfsdk:"milestones"` + OwnerID types.Int64 `tfsdk:"owner_id"` + OwnerType types.String `tfsdk:"owner_type"` + PureVirtual types.Bool `tfsdk:"pure_virtual"` + Routes types.List `tfsdk:"routes"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` +} + +type NATConfigModel struct { + NetMask types.Int64 `tfsdk:"net_mask"` + Network types.String `tfsdk:"network"` + Rules types.List `tfsdk:"rules"` +} + +type NATRuleModel struct { + RuleID types.Int64 `tfsdk:"rule_id"` + LocalIP types.String `tfsdk:"local_ip"` + 
LocalPort types.Int64 `tfsdk:"local_port"` + Protocol types.String `tfsdk:"protocol"` + PublicPortEnd types.Int64 `tfsdk:"public_port_end"` + PublicPortStart types.Int64 `tfsdk:"public_port_start"` + VMID types.Int64 `tfsdk:"vm_id"` + VMName types.String `tfsdk:"vm_name"` +} + +type RecordGWModel struct { + CKey types.String `tfsdk:"ckey"` + AccountID types.Int64 `tfsdk:"account_id"` + Config types.Object `tfsdk:"config"` + CreatedTime types.Int64 `tfsdk:"created_time"` + Devices types.Object `tfsdk:"devices"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + GWID types.Int64 `tfsdk:"gw_id"` + LockStatus types.String `tfsdk:"lock_status"` + Milestones types.Int64 `tfsdk:"milestones"` + OwnerID types.Int64 `tfsdk:"owner_id"` + OwnerType types.String `tfsdk:"owner_type"` + PureVirtual types.Bool `tfsdk:"pure_virtual"` + Routes types.List `tfsdk:"routes"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` +} + +type RecordGWConfigModel struct { + DefaultGW types.String `tfsdk:"default_gw"` + ExtNetID types.Int64 `tfsdk:"ext_net_id"` + ExtNetIP types.String `tfsdk:"ext_net_ip"` + ExtNetMask types.Int64 `tfsdk:"ext_netmask"` + QOS types.Object `tfsdk:"qos"` +} + +// variables for models + +var ItemVNFDev = map[string]attr.Type{ + "ckey": types.StringType, + "account_id": types.Int64Type, + "capabilities": types.ListType{ElemType: types.StringType}, + "config": types.ObjectType{AttrTypes: ItemVNFConfig}, + "config_saved": types.BoolType, + "custom_precfg": types.BoolType, + "description": types.StringType, + "gid": types.Int64Type, + "guid": types.Int64Type, + "id": types.Int64Type, + "interfaces": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemInterface}}, + "lock_status": types.StringType, + "meta": types.ListType{ElemType: types.StringType}, + "milestones": types.Int64Type, + "name": types.StringType, + "status": types.StringType, + "tech_status": types.StringType, + 
"type": types.StringType, + "vins": types.ListType{ElemType: types.Int64Type}, +} + +var ItemInterface = map[string]attr.Type{ + "conn_id": types.Int64Type, + "conn_type": types.StringType, + "def_gw": types.StringType, + "enabled": types.BoolType, + "flipgroup_id": types.Int64Type, + "guid": types.StringType, + "ip_address": types.StringType, + "listen_ssh": types.BoolType, + "mac": types.StringType, + "name": types.StringType, + "net_id": types.Int64Type, + "net_mask": types.Int64Type, + "net_type": types.StringType, + "node_id": types.Int64Type, + "pci_slot": types.Int64Type, + "qos": types.ObjectType{AttrTypes: ItemQOS}, + "target": types.StringType, + "type": types.StringType, + "vnfs": types.ListType{ElemType: types.Int64Type}, +} + +var ItemQOS = map[string]attr.Type{ + "e_rate": types.Int64Type, + "guid": types.StringType, + "in_burst": types.Int64Type, + "in_rate": types.Int64Type, +} + +var ItemVNFConfig = map[string]attr.Type{ + "mgmt": types.ObjectType{AttrTypes: ItemMgmt}, + "resources": types.ObjectType{AttrTypes: ItemResources}, +} + +var ItemMgmt = map[string]attr.Type{ + "ip_addr": types.StringType, + "password": types.StringType, + "ssh_key": types.StringType, + "user": types.StringType, +} + +var ItemResources = map[string]attr.Type{ + "cpu": types.Int64Type, + "ram": types.Int64Type, + "stack_id": types.Int64Type, + "uuid": types.StringType, +} + +var ItemVNFs = map[string]attr.Type{ + "dhcp": types.ObjectType{AttrTypes: ItemDHCP}, + "gw": types.ObjectType{AttrTypes: ItemGW}, + "nat": types.ObjectType{AttrTypes: ItemNAT}, +} + +var ItemDHCP = map[string]attr.Type{ + "ckey": types.StringType, + "account_id": types.Int64Type, + "config": types.ObjectType{AttrTypes: ItemDHCPConfig}, + "created_time": types.Int64Type, + "devices": types.ObjectType{AttrTypes: ItemDevices}, + "gid": types.Int64Type, + "guid": types.Int64Type, + "dhcp_id": types.Int64Type, + "lock_status": types.StringType, + "milestones": types.Int64Type, + "owner_id": 
types.Int64Type, + "owner_type": types.StringType, + "pure_virtual": types.BoolType, + "routes": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemRoute}}, + "status": types.StringType, + "tech_status": types.StringType, + "type": types.StringType, +} + +var ItemRoute = map[string]attr.Type{ + "compute_ids": types.ListType{ElemType: types.Int64Type}, + "destination": types.StringType, + "gateway": types.StringType, + "guid": types.StringType, + "route_id": types.Int64Type, + "netmask": types.StringType, +} + +var ItemDevices = map[string]attr.Type{ + "primary": types.ObjectType{AttrTypes: ItemPrimaryDevices}, +} +var ItemPrimaryDevices = map[string]attr.Type{ + "dev_id": types.Int64Type, + "iface01": types.StringType, + "iface02": types.StringType, +} + +var ItemDHCPConfig = map[string]attr.Type{ + "default_gw": types.StringType, + "dns": types.ListType{ElemType: types.StringType}, + "ip_end": types.StringType, + "ip_start": types.StringType, + "lease": types.Int64Type, + "netmask": types.Int64Type, + "network": types.StringType, + "reservations": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemReservations}}, +} + +var ItemReservations = map[string]attr.Type{ + "client_type": types.StringType, + "desc": types.StringType, + "domainname": types.StringType, + "hostname": types.StringType, + "ip": types.StringType, + "mac": types.StringType, + "type": types.StringType, + "vm_id": types.Int64Type, +} + +var ItemGW = map[string]attr.Type{ + "ckey": types.StringType, + "account_id": types.Int64Type, + "config": types.ObjectType{AttrTypes: ItemGWConfig}, + "created_time": types.Int64Type, + "devices": types.ObjectType{AttrTypes: ItemDevices}, + "gid": types.Int64Type, + "guid": types.Int64Type, + "gw_id": types.Int64Type, + "lock_status": types.StringType, + "milestones": types.Int64Type, + "owner_id": types.Int64Type, + "owner_type": types.StringType, + "pure_virtual": types.BoolType, + "routes": types.ListType{ElemType: types.ObjectType{AttrTypes: 
ItemRoute}}, + "status": types.StringType, + "tech_status": types.StringType, + "type": types.StringType, +} + +var ItemGWConfig = map[string]attr.Type{ + "default_gw": types.StringType, + "ext_net_id": types.Int64Type, + "ext_net_ip": types.StringType, + "ext_netmask": types.Int64Type, + "qos": types.ObjectType{AttrTypes: ItemQOS}, +} + +var ItemNAT = map[string]attr.Type{ + "ckey": types.StringType, + "account_id": types.Int64Type, + "config": types.ObjectType{AttrTypes: ItemNATConfig}, + "created_time": types.Int64Type, + "devices": types.ObjectType{AttrTypes: ItemDevices}, + "gid": types.Int64Type, + "guid": types.Int64Type, + "nat_id": types.Int64Type, + "lock_status": types.StringType, + "milestones": types.Int64Type, + "owner_id": types.Int64Type, + "owner_type": types.StringType, + "pure_virtual": types.BoolType, + "routes": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemRoute}}, + "status": types.StringType, + "tech_status": types.StringType, + "type": types.StringType, +} + +var ItemNATConfig = map[string]attr.Type{ + "net_mask": types.Int64Type, + "network": types.StringType, + "rules": types.ListType{ElemType: types.ObjectType{AttrTypes: ItemNATRule}}, +} + +var ItemNATRule = map[string]attr.Type{ + "rule_id": types.Int64Type, + "local_ip": types.StringType, + "local_port": types.Int64Type, + "protocol": types.StringType, + "public_port_end": types.Int64Type, + "public_port_start": types.Int64Type, + "vm_id": types.Int64Type, + "vm_name": types.StringType, +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_audits.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_audits.go new file mode 100644 index 0000000..3934f62 --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_audits.go @@ -0,0 +1,24 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + 
+type DataSourceVINSAuditsModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemAuditModel `tfsdk:"items"` +} + +type ItemAuditModel struct { + Call types.String `tfsdk:"call"` + ResponseTime types.Float64 `tfsdk:"response_time"` + StatusCode types.Int64 `tfsdk:"statuscode"` + Timestamp types.Float64 `tfsdk:"timestamp"` + User types.String `tfsdk:"user"` +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_ext_net_list.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_ext_net_list.go new file mode 100644 index 0000000..c663173 --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_ext_net_list.go @@ -0,0 +1,26 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSExtNetListModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemExtNetVinsModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemExtNetVinsModel struct { + DefaultGW types.String `tfsdk:"default_gw"` + ExtNetID types.Int64 `tfsdk:"ext_net_id"` + IP types.String `tfsdk:"ip"` + PrefixLen types.Int64 `tfsdk:"prefix_len"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_ip_list.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_ip_list.go new file mode 100644 index 0000000..bc6c0af --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_ip_list.go @@ -0,0 +1,27 @@ +package models + +import ( + 
"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSIPListModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemIPVinsModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemIPVinsModel struct { + ClientType types.String `tfsdk:"client_type"` + DomainName types.String `tfsdk:"domain_name"` + Hostname types.String `tfsdk:"host_name"` + IP types.String `tfsdk:"ip"` + MAC types.String `tfsdk:"mac"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vm_id"` +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_list.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_list.go new file mode 100644 index 0000000..7063433 --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_list.go @@ -0,0 +1,82 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSListModel struct { + // request fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountID types.Int64 `tfsdk:"account_id"` + RGID types.Int64 `tfsdk:"rg_id"` + ExtIP types.String `tfsdk:"ext_ip"` + VNFDevID types.Int64 `tfsdk:"vnfdev_id"` + IncludeDeleted types.Bool `tfsdk:"include_deleted"` + Page types.Int64 `tfsdk:"page"` + SortBy types.String `tfsdk:"sort_by"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemVinsModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemVinsModel struct { + AccountID types.Int64 
`tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefaultGW types.String `tfsdk:"default_gw"` + DefaultQOS types.Object `tfsdk:"default_qos"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"description"` + ExternalIP types.String `tfsdk:"external_ip"` + ExtnetID types.Int64 `tfsdk:"extnet_id"` + FreeIPs types.Int64 `tfsdk:"free_ips"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"vins_id"` + LockStatus types.String `tfsdk:"lock_status"` + ManagerID types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + Netmask types.Int64 `tfsdk:"netmask"` + Network types.String `tfsdk:"network"` + PreReservationsNum types.Int64 `tfsdk:"pre_reservations_num"` + PriVNFDevID types.Int64 `tfsdk:"pri_vnf_dev_id"` + Redundant types.Bool `tfsdk:"redundant"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + SecVNFDefID types.Int64 `tfsdk:"sec_vnf_dev_id"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VNFs types.Object `tfsdk:"vnfs"` + VXLANID types.Int64 `tfsdk:"vxlan_id"` +} + +type ListVNFsModel struct { + DHCP types.Int64 `tfsdk:"dhcp"` + DNS types.Int64 `tfsdk:"dns"` + FW types.Int64 `tfsdk:"fw"` + GW types.Int64 `tfsdk:"gw"` + NAT types.Int64 `tfsdk:"nat"` + VPN types.Int64 `tfsdk:"vpn"` +} + +var ItemListVNFs = map[string]attr.Type{ + "dhcp": types.Int64Type, + "dns": types.Int64Type, + "fw": types.Int64Type, + "gw": types.Int64Type, + "nat": types.Int64Type, + "vpn": types.Int64Type, +} diff --git 
a/internal/service/cloudbroker/vins/models/model_data_source_vins_list_deleted.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_list_deleted.go new file mode 100644 index 0000000..7e16b3b --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_list_deleted.go @@ -0,0 +1,59 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSListDeletedModel struct { + // request fields + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + AccountID types.Int64 `tfsdk:"account_id"` + RGID types.Int64 `tfsdk:"rg_id"` + ExtIP types.String `tfsdk:"ext_ip"` + Page types.Int64 `tfsdk:"page"` + SortBy types.String `tfsdk:"sort_by"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemVinsDeletedModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemVinsDeletedModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefaultGW types.String `tfsdk:"default_gw"` + DefaultQOS types.Object `tfsdk:"default_qos"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Description types.String `tfsdk:"description"` + ExternalIP types.String `tfsdk:"external_ip"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"vins_id"` + LockStatus types.String `tfsdk:"lock_status"` + ManagerID types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"name"` + Netmask types.Int64 `tfsdk:"netmask"` + Network types.String `tfsdk:"network"` + PreReservationsNum 
types.Int64 `tfsdk:"pre_reservations_num"` + PriVNFDevID types.Int64 `tfsdk:"pri_vnf_dev_id"` + Redundant types.Bool `tfsdk:"redundant"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + SecVNFDefID types.Int64 `tfsdk:"sec_vnf_dev_id"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VNFs types.Object `tfsdk:"vnfs"` + VXLANID types.Int64 `tfsdk:"vxlan_id"` +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_nat_rule_list.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_nat_rule_list.go new file mode 100644 index 0000000..3eebd6e --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_nat_rule_list.go @@ -0,0 +1,29 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSNATRuleListModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + Reason types.String `tfsdk:"reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemVINSNATRuleModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemVINSNATRuleModel struct { + ID types.Int64 `tfsdk:"id"` + LocalIP types.String `tfsdk:"local_ip"` + LocalPort types.Int64 `tfsdk:"local_port"` + Protocol types.String `tfsdk:"protocol"` + PublicPortEnd types.Int64 `tfsdk:"public_port_end"` + PublicPortStart types.Int64 `tfsdk:"public_port_start"` + VMID types.Int64 `tfsdk:"vm_id"` + VMName types.String `tfsdk:"vm_name"` +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_static_route.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_static_route.go new file mode 100644 index 0000000..e5d2353 --- /dev/null +++ 
b/internal/service/cloudbroker/vins/models/model_data_source_vins_static_route.go @@ -0,0 +1,21 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSStaticRouteModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + RouteID types.Int64 `tfsdk:"route_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + ComputeIds types.List `tfsdk:"compute_ids"` + Destination types.String `tfsdk:"destination"` + Gateway types.String `tfsdk:"gateway"` + GUID types.String `tfsdk:"guid"` + Netmask types.String `tfsdk:"netmask"` +} diff --git a/internal/service/cloudbroker/vins/models/model_data_source_vins_static_route_list.go b/internal/service/cloudbroker/vins/models/model_data_source_vins_static_route_list.go new file mode 100644 index 0000000..1a294f3 --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_data_source_vins_static_route_list.go @@ -0,0 +1,26 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceVINSStaticRouteListModel struct { + // request fields + VinsID types.Int64 `tfsdk:"vins_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + Items []ItemVinsStaticRouteModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemVinsStaticRouteModel struct { + ComputeIds types.List `tfsdk:"compute_ids"` + Destination types.String `tfsdk:"destination"` + Gateway types.String `tfsdk:"gateway"` + GUID types.String `tfsdk:"guid"` + ID types.Int64 `tfsdk:"route_id"` + Netmask types.String `tfsdk:"netmask"` +} diff --git a/internal/service/cloudbroker/vins/models/model_resource_vins.go b/internal/service/cloudbroker/vins/models/model_resource_vins.go 
new file mode 100644 index 0000000..718ab73 --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_resource_vins.go @@ -0,0 +1,131 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +type ResourceVINSModel struct { + // required fields + Name types.String `tfsdk:"name"` + RGID types.Int64 `tfsdk:"rg_id"` + AccountID types.Int64 `tfsdk:"account_id"` + + // optional fields + IPCIDR types.String `tfsdk:"ipcidr"` + PreReservationsNum types.Int64 `tfsdk:"pre_reservations_num"` + Description types.String `tfsdk:"description"` + GID types.Int64 `tfsdk:"gid"` + DNS types.Set `tfsdk:"dns"` + + Enable types.Bool `tfsdk:"enable"` + Permanently types.Bool `tfsdk:"permanently"` + Force types.Bool `tfsdk:"force"` + Restore types.Bool `tfsdk:"restore"` + VnfdevStart types.Bool `tfsdk:"vnfdev_start"` + VnfdevReset types.Bool `tfsdk:"vnfdev_reset"` + VnfdevRestart types.Bool `tfsdk:"vnfdev_restart"` + VnfdevRedeploy types.Bool `tfsdk:"vnfdev_redeploy"` + DefaultQOS types.Object `tfsdk:"default_qos"` + + ExtNet types.Object `tfsdk:"ext_net"` + IP types.List `tfsdk:"ip"` + NatRule types.List `tfsdk:"nat_rule"` + Reason types.String `tfsdk:"reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + VinsID types.Int64 `tfsdk:"vins_id"` + Id types.String `tfsdk:"id"` + LastUpdated types.String `tfsdk:"last_updated"` + VNFDev types.Object `tfsdk:"vnf_dev"` + AccountName types.String `tfsdk:"account_name"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DefaultGW types.String `tfsdk:"default_gw"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + GUID types.Int64 `tfsdk:"guid"` + LockStatus types.String 
`tfsdk:"lock_status"` + ManagerID types.Int64 `tfsdk:"manager_id"` + ManagerType types.String `tfsdk:"manager_type"` + Milestones types.Int64 `tfsdk:"milestones"` + NetMask types.Int64 `tfsdk:"net_mask"` + Network types.String `tfsdk:"network"` + Redundant types.Bool `tfsdk:"redundant"` + RGName types.String `tfsdk:"rg_name"` + SecVNFDevID types.Int64 `tfsdk:"sec_vnf_dev_id"` + Status types.String `tfsdk:"status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + VNFs types.Object `tfsdk:"vnfs"` + VXLANID types.Int64 `tfsdk:"vxlan_id"` +} + +type ExtNetModel struct { + ExtNetID types.Int64 `tfsdk:"ext_net_id"` + ExtNetIP types.String `tfsdk:"ext_net_ip"` +} + +type IPModel struct { + Type types.String `tfsdk:"type"` + IPAddr types.String `tfsdk:"ip_addr"` + MacAddr types.String `tfsdk:"mac_addr"` + ComputeID types.Int64 `tfsdk:"compute_id"` + Reason types.String `tfsdk:"reason"` +} + +type NatRuleResourceModel struct { + IntIP types.String `tfsdk:"int_ip"` + IntPort types.Int64 `tfsdk:"int_port"` + ExtPortStart types.Int64 `tfsdk:"ext_port_start"` + ExtPortEnd types.Int64 `tfsdk:"ext_port_end"` + Proto types.String `tfsdk:"proto"` + RuleID types.Int64 `tfsdk:"rule_id"` + Reason types.String `tfsdk:"reason"` +} + +var ItemNatRuleResource = map[string]attr.Type{ + "int_ip": types.StringType, + "int_port": types.Int64Type, + "ext_port_start": types.Int64Type, + "ext_port_end": types.Int64Type, + "proto": types.StringType, + "rule_id": types.Int64Type, +} + +// Contains returns true if NatRuleResourceModel contains n as an element. Otherwise it returns false. 
+func (n *NatRuleResourceModel) Contains(natRuleList []NatRuleResourceModel) bool { + for _, natRuleElem := range natRuleList { + if n.IntIP.Equal(natRuleElem.IntIP) && + n.IntPort.Equal(natRuleElem.IntPort) && + n.ExtPortStart.Equal(natRuleElem.ExtPortStart) { + return true + } + } + return false +} + +// GetNatRule returns nat_rule from the platform equivalent to NatRuleResourceModel. If the rule doesn't exist it returns nil. +func (n *NatRuleResourceModel) GetNatRule(rules vins.ListNATRule) *vins.ItemNATRule { + for _, rule := range rules { + if n.IntIP.Equal(types.StringValue(rule.LocalIP)) && + n.ExtPortStart.Equal(types.Int64Value(int64(rule.PublicPortStart))) { + return &rule + } + } + return nil +} + +// Contains returns true if IPModel contains i as an element. Otherwise it returns false. +func (i *IPModel) Contains(ipList []IPModel) bool { + for _, ipElem := range ipList { + if i.IPAddr.Equal(ipElem.IPAddr) { + return true + } + } + return false +} diff --git a/internal/service/cloudbroker/vins/models/model_resource_vins_static_route.go b/internal/service/cloudbroker/vins/models/model_resource_vins_static_route.go new file mode 100644 index 0000000..f235de5 --- /dev/null +++ b/internal/service/cloudbroker/vins/models/model_resource_vins_static_route.go @@ -0,0 +1,24 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceVINSStaticRouteModel struct { + // required fields + VinsID types.Int64 `tfsdk:"vins_id"` + Destination types.String `tfsdk:"destination"` + Netmask types.String `tfsdk:"netmask"` + Gateway types.String `tfsdk:"gateway"` + + // optional fields + RouteID types.Int64 `tfsdk:"route_id"` + ComputeIDs types.List `tfsdk:"compute_ids"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + LastUpdated types.String `tfsdk:"last_updated"` + GUID types.String `tfsdk:"guid"` 
+} diff --git a/internal/service/cloudbroker/vins/resource_vins.go b/internal/service/cloudbroker/vins/resource_vins.go new file mode 100644 index 0000000..1807027 --- /dev/null +++ b/internal/service/cloudbroker/vins/resource_vins.go @@ -0,0 +1,424 @@ +package vins + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceVINS{} + _ resource.ResourceWithImportState = &resourceVINS{} +) + +// NewResourceVINS is a helper function to simplify the provider implementation. +func NewResourceVINS() resource.Resource { + return &resourceVINS{} +} + +// resourceVINS is the resource implementation. +type resourceVINS struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. 
+func (r *resourceVINS) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create vins + var plan models.ResourceVINSModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVINS: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceVINS: got plan successfully", map[string]any{"name": plan.Name.ValueString()}) + tflog.Info(ctx, "Create resourceVINS: start creating", map[string]any{"name": plan.Name.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout20m) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVINS: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceVINS: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceVINS: starting input checks", map[string]any{"name": plan.Name.ValueString()}) + resp.Diagnostics.Append(resourceVINSInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVINS: Error input checks") + return + } + tflog.Info(ctx, "Create resourceVINS: input checks successful", map[string]any{"name": plan.Name.ValueString()}) + + var vinsId uint64 + + // Make create request and get response for creation in RG + if !plan.RGID.IsUnknown() { + vinsId, diags = utilities.CreateInRGResourceVINS(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ tflog.Error(ctx, "Create resourceVINS: Error response for create in RG of resource vins") + return + } + } + + // Make create request and get response for creation in account + if !plan.AccountID.IsUnknown() { + vinsId, diags = utilities.CreateInAccountResourceVINS(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceVINS: Error response for create in account of resource vins") + return + } + } + + plan.Id = types.StringValue(strconv.Itoa(int(vinsId))) + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + tflog.Info(ctx, "Create resourceVINS: vins created", map[string]any{"vins_id": vinsId, "name": plan.Name.ValueString()}) + + // additional settings after vins creation: in case of failures, warnings are added to resp.Diagnostics, + // because additional settings failure is not critical. If errors were added instead of warnings, terraform + // framework would mark resource as tainted and delete it, which would be unwanted behaviour. + + // reserve ip for vins after creation, warnings added to resp.Diagnostics in case of failure. + if !plan.IP.IsNull() { // IP is optional + resp.Diagnostics.Append(utilities.IPCreateVINS(ctx, vinsId, &plan, r.client)...) + } + + // add nat rules for vins after creation, warnings added to resp.Diagnostics in case of failure. + if !plan.NatRule.IsNull() { // NatRule is optional + resp.Diagnostics.Append(utilities.NATRuleCreateVINS(ctx, vinsId, &plan, r.client)...) + } + + // update default qos for vins after creation, warnings added to resp.Diagnostics in case of failure. + if !plan.DefaultQOS.IsUnknown() { // DefaultQOS is optional && computed + resp.Diagnostics.Append(utilities.DefaultQosCreateVINS(ctx, vinsId, &plan, r.client)...) 
+ } + + tflog.Info(ctx, "Create resourceVINS: resource creation is completed", map[string]any{"vins_id": vinsId}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.VINSResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceVINS) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceVINSModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINS: Error get state") + return + } + tflog.Info(ctx, "Read resourceVINS: got state successfully", map[string]any{"vins_id": state.Id.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINS: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceVINS: set timeouts successfully", map[string]any{ + "vins_id": state.Id.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + tflog.Info(ctx, "Read resourceVINS: before VINSReadStatus", map[string]any{"vins_id": state.Id.ValueString()}) + resp.Diagnostics.Append(utilities.VINSReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINS: Error reading status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.VINSResource(ctx, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINS: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINS: Error set state") + return + } + tflog.Info(ctx, "End read resourceVINS") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceVINS) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceVINSModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceVINS: got plan successfully", map[string]any{"vins_id": plan.Id.ValueString()}) + + // Retrieve values from state + var state models.ResourceVINSModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceVINS: got state successfully", map[string]any{"vins_id": state.Id.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout20m) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error set timeout") + return + } + tflog.Info(ctx, "Update resourceVINS: set timeouts successfully", map[string]any{ + "vins_id": state.Id.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking for values in the platform + tflog.Info(ctx, "Update resourceVINS: starting input checks", map[string]any{"vins_id": plan.Id.ValueString()}) + resp.Diagnostics.Append(resourceVINSInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error input checks") + return + } + tflog.Info(ctx, "Update resourceVINS: input checks successful", map[string]any{"vins_id": state.Id.ValueString()}) + + vinsId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64) + if err != nil { + resp.Diagnostics.AddError(fmt.Sprintf("Cannot parse vins ID %s from state", state.Id.ValueString()), err.Error()) + return + } + + // enable/disable vins if needed + if !plan.Enable.Equal(state.Enable) && !plan.Enable.IsNull() { + resp.Diagnostics.Append(utilities.EnableDisableUpdateVINS(ctx, vinsId, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error enabling/disabling vins") + return + } + } + + // connect/disconnect extnet for vins if needed + if !plan.ExtNet.Equal(state.ExtNet) { + resp.Diagnostics.Append(utilities.ExtNetUpdateVINS(ctx, vinsId, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error updating vins extnet") + return + } + } + + // reserve/release ip for vins if needed + if !plan.IP.Equal(state.IP) { + resp.Diagnostics.Append(utilities.IPUpdateVINS(ctx, vinsId, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error releasing/reserving vins ip") + return + } + } + + // add/delete nat rules for vins if needed + if !plan.NatRule.Equal(state.NatRule) { + resp.Diagnostics.Append(utilities.NATRuleUpdateVINS(ctx, vinsId, &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error adding/deleting nat rules for vins") + return + } + } + + // add/delete dns for vins if needed. Empty "dns" is allowed, it will update vnfs.dhcp.config.dns from current values to empty list + if !plan.DNS.IsNull() && !plan.DNS.Equal(state.DNS) { + resp.Diagnostics.Append(utilities.UpdateDNSlistVINS(ctx, vinsId, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error updating DNSList") + return + } + } + + if !plan.DefaultQOS.IsUnknown() && !plan.DefaultQOS.Equal(state.DefaultQOS) { + resp.Diagnostics.Append(utilities.UpdateDefaultQosVINS(ctx, vinsId, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error updating DefaultQos") + return + } + } + + // restart vnf_dev for vins if needed + if !plan.VnfdevRestart.Equal(state.VnfdevRestart) && !plan.VnfdevRestart.IsNull() { + resp.Diagnostics.Append(utilities.VnfdevRestartUpdateVINS(ctx, vinsId, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Unable to restart vnf_def for VINS") + return + } + } + + // redeploy vnf_dev for vins if needed + if !plan.VnfdevRedeploy.Equal(state.VnfdevRedeploy) && !plan.VnfdevRedeploy.IsNull() { + resp.Diagnostics.Append(utilities.VnfdevRedeployUpdateVINS(ctx, vinsId, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Unable to redeploy vnf_def for VINS") + return + } + } + + // reset vnf_dev for vins if needed + if !plan.VnfdevReset.Equal(state.VnfdevReset) && !plan.VnfdevReset.IsNull() { + resp.Diagnostics.Append(utilities.VnfdevResetUpdateVINS(ctx, vinsId, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Unable to reset vnf_def for VINS") + return + } + } + + // start/stop vnf_dev for vins if needed + if !plan.VnfdevStart.Equal(state.VnfdevStart) && !plan.VnfdevStart.IsNull() { + resp.Diagnostics.Append(utilities.VnfdevStartStopUpdateVINS(ctx, vinsId, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Unable to start/stop vnf_def for VINS") + return + } + } + + tflog.Info(ctx, "Update resourceVINS: resource update is completed", map[string]any{"vins_id": plan.Id.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.VINSResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceVINS) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceVINSModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceVINS: Error get state") + return + } + tflog.Info(ctx, "Delete resourceVINS: got state successfully", map[string]any{"vins_id": state.Id.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceVINS: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceVINS: set timeouts successfully", map[string]any{ + "vins_id": state.Id.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + // Delete existing vins + delReq := vins.DeleteRequest{ + VINSID: uint64(state.VinsID.ValueInt64()), + } + + if state.Force.IsNull() { + delReq.Force = true // default value + } else { + delReq.Force = state.Force.ValueBool() + } + if state.Permanently.IsNull() { + delReq.Permanently = true // default value + } else { + delReq.Permanently = state.Permanently.ValueBool() + } + + tflog.Info(ctx, "Delete resourceVINS: calling cloudbroker().VINS().Delete", map[string]any{ + "vins_id": state.Id.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().VINS().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceVINS: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resource vins", map[string]any{"vins_id": state.Id.ValueString()}) +} + +// Schema defines the schema for the resource. +func (r *resourceVINS) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceVINS(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceVINS) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceVINS) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceVINS") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceVINS successfully") +} + +func (r *resourceVINS) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/vins/resource_vins_static_route.go b/internal/service/cloudbroker/vins/resource_vins_static_route.go new file mode 100644 index 0000000..baf9fce --- /dev/null +++ b/internal/service/cloudbroker/vins/resource_vins_static_route.go @@ -0,0 +1,312 @@ +package vins + +import ( + "context" + "fmt" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/utilities" +) + +// Ensure the implementation 
satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceVINSStaticRoute{} + _ resource.ResourceWithImportState = &resourceVINSStaticRoute{} +) + +// NewResourceVINSStaticRoute is a helper function to simplify the provider implementation. +func NewResourceVINSStaticRoute() resource.Resource { + return &resourceVINSStaticRoute{} +} + +// resourceVINSStaticRoute is the resource implementation. +type resourceVINSStaticRoute struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceVINSStaticRoute) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create vins + var plan models.ResourceVINSStaticRouteModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVINSStaticRoute: Error receiving the plan") + return + } + tflog.Info(ctx, "Create resourceVINSStaticRoute: got plan successfully", map[string]any{"id": plan.Id.ValueString()}) + tflog.Info(ctx, "Create resourceVINSStaticRoute: start creating", map[string]any{"id": plan.Id.ValueString()}) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout20m) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVINSStaticRoute: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceVINSStaticRoute: set timeouts successfully", map[string]any{ + "id": plan.Id.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceVINSStaticRoute: starting input checks", map[string]any{"id": plan.Id.ValueString()}) + resp.Diagnostics.Append(resourceVINSStaticRouteInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceVINSStaticRoute: Error input checks") + return + } + tflog.Info(ctx, "Create resourceVINSStaticRoute: input checks successful", map[string]any{"id": plan.Id.ValueString()}) + + // Make create request and get response for vins static route creation + staticReq := vins.StaticRouteAddRequest{ + VINSID: uint64(plan.VinsID.ValueInt64()), + Destination: plan.Destination.ValueString(), + Netmask: plan.Netmask.ValueString(), + Gateway: plan.Gateway.ValueString(), + } + + if !plan.ComputeIDs.IsUnknown() { + computes := make([]uint64, 0, len(plan.ComputeIDs.Elements())) + diags = plan.ComputeIDs.ElementsAs(ctx, &computes, false) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceVINSStaticRoute: cannot populate computes with plan.ComputeIDs List elements") + return + } + staticReq.ComputeIds = computes + } + + _, err := r.client.CloudBroker().VINS().StaticRouteAdd(ctx, staticReq) + if err != nil { + resp.Diagnostics.AddError("Create resourceVINSStaticRoute: Error adding static route to vins", err.Error()) + return + } + + routeId, diags := utilities.GetStaticRouteID(ctx, &plan, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + tflog.Error(ctx, "Create resourceVINSStaticRoute: cannot get route id") + return + } + + plan.Id = types.StringValue(fmt.Sprintf("%d#%d", plan.VinsID.ValueInt64(), routeId)) + + tflog.Info(ctx, "Create resourceVINSStaticRoute: resource creation is completed", map[string]any{"id": plan.Id.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.VINSStaticRouteResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceVINSStaticRoute) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceVINSStaticRouteModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINSStaticRoute: Error get state") + return + } + tflog.Info(ctx, "Read resourceVINSStaticRoute: got state successfully", map[string]any{"vins_id": state.Id.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINSStaticRoute: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceVINSStaticRoute: set timeouts successfully", map[string]any{ + "vins_id": state.Id.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.VINSStaticRouteResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINSStaticRoute: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceVINSStaticRoute: Error set state") + return + } + tflog.Info(ctx, "End read resourceVINSStaticRoute") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceVINSStaticRoute) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceVINSStaticRouteModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINSStaticRoute: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceVINSStaticRoute: got plan successfully", map[string]any{"id": plan.Id.ValueString()}) + + // Retrieve values from state + var state models.ResourceVINSStaticRouteModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINSStaticRoute: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceVINSStaticRoute: got state successfully", map[string]any{"id": state.Id.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout20m) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Error set timeout") + return + } + tflog.Info(ctx, "Update resourceVINSStaticRoute: set timeouts successfully", map[string]any{ + "id": state.Id.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking for values in the platform + tflog.Info(ctx, "Update resourceVINSStaticRoute: starting input checks", map[string]any{"id": plan.Id.ValueString()}) + resp.Diagnostics.Append(resourceVINSStaticRouteInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINS: Error input checks") + return + } + tflog.Info(ctx, "Update resourceVINSStaticRoute: input checks successful", map[string]any{"id": state.Id.ValueString()}) + + // change compute_ids, if needed + if !plan.ComputeIDs.Equal(state.ComputeIDs) { + resp.Diagnostics.Append(utilities.UpdateComputeIDsVINSStaticRoute(ctx, &plan, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceVINSStaticRoute: Error updating compute_ids") + return + } + } + + tflog.Info(ctx, "Update resourceVINSStaticRoute: resource update is completed", map[string]any{"id": plan.Id.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.VINSStaticRouteResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceVINSStaticRoute) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceVINSStaticRouteModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceVINSStaticRoute: Error get state") + return + } + tflog.Info(ctx, "Delete resourceVINSStaticRoute: got state successfully", map[string]any{"id": state.Id.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceVINSStaticRoute: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceVINSStaticRoute: set timeouts successfully", map[string]any{ + "id": state.Id.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + vinsId, routeId, diags := utilities.GetVinsIDAndRouteID(ctx, &state) + if diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return + } + + // Delete static route + delReq := vins.StaticRouteDelRequest{ + VINSID: vinsId, + RouteId: routeId, + } + + tflog.Info(ctx, "Delete resourceVINSStaticRoute: calling cloudbroker().VINS().StaticRouteDel", map[string]any{ + "id": state.Id.ValueString(), + "req": delReq, + }) + _, err := r.client.CloudBroker().VINS().StaticRouteDel(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceVINSStaticRoute: Error deleting", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceVINSStaticRoute", map[string]any{"id": state.Id.ValueString()}) +} + +// Schema defines the schema for the resource. +func (r *resourceVINSStaticRoute) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceVINSStaticRoute(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceVINSStaticRoute) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_cb_vins_static_route" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceVINSStaticRoute) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceVINSStaticRoute") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceVINSStaticRoute successfully") +} + +func (r *resourceVINSStaticRoute) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins.go new file mode 100644 index 0000000..644f65f --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins.go @@ -0,0 +1,717 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceVINS() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "Unique ID of the ViNS.", + }, + + // optional attributes + "reason": schema.StringAttribute{ + Optional: true, + Description: "reason for action", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "vnf_dev": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "capabilities": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "mgmt": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + 
"ip_addr": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + }, + "ssh_key": schema.StringAttribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "resources": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "stack_id": schema.Int64Attribute{ + Computed: true, + }, + "uuid": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "config_saved": schema.BoolAttribute{ + Computed: true, + }, + "custom_precfg": schema.BoolAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "interfaces": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "conn_id": schema.Int64Attribute{ + Computed: true, + }, + "conn_type": schema.StringAttribute{ + Computed: true, + }, + "def_gw": schema.StringAttribute{ + Computed: true, + }, + "enabled": schema.BoolAttribute{ + Computed: true, + }, + "flipgroup_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "ip_address": schema.StringAttribute{ + Computed: true, + }, + "listen_ssh": schema.BoolAttribute{ + Computed: true, + }, + "mac": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "net_id": schema.Int64Attribute{ + Computed: true, + }, + "net_mask": schema.Int64Attribute{ + Computed: true, + }, + "net_type": schema.StringAttribute{ + Computed: true, + }, + "node_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, 
+ "qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "target": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vnfs": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + }, + }, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + }, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "default_qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "gid": 
schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "pre_reservations_num": schema.Int64Attribute{ + Computed: true, + }, + "redundant": schema.BoolAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "sec_vnf_dev_id": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vnfs": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dhcp": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "dns": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "ip_end": schema.StringAttribute{ + Computed: true, + }, + "ip_start": schema.StringAttribute{ + Computed: true, + }, + "lease": schema.Int64Attribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "reservations": 
schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "client_type": schema.StringAttribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "domainname": schema.StringAttribute{ + Computed: true, + }, + "hostname": schema.StringAttribute{ + Computed: true, + }, + "ip": schema.StringAttribute{ + Computed: true, + }, + "mac": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "devices": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "primary": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dev_id": schema.Int64Attribute{ + Computed: true, + }, + "iface01": schema.StringAttribute{ + Computed: true, + }, + "iface02": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "dhcp_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "owner_id": schema.Int64Attribute{ + Computed: true, + }, + "owner_type": schema.StringAttribute{ + Computed: true, + }, + "pure_virtual": schema.BoolAttribute{ + Computed: true, + }, + "routes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "route_id": schema.Int64Attribute{ + Computed: true, + }, + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: 
true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "gw": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "ext_net_id": schema.Int64Attribute{ + Computed: true, + }, + "ext_net_ip": schema.StringAttribute{ + Computed: true, + }, + "ext_netmask": schema.Int64Attribute{ + Computed: true, + }, + "qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "devices": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "primary": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dev_id": schema.Int64Attribute{ + Computed: true, + }, + "iface01": schema.StringAttribute{ + Computed: true, + }, + "iface02": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "gw_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ 
+ Computed: true, + }, + "owner_id": schema.Int64Attribute{ + Computed: true, + }, + "owner_type": schema.StringAttribute{ + Computed: true, + }, + "pure_virtual": schema.BoolAttribute{ + Computed: true, + }, + "routes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "route_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "nat": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "net_mask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "rules": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "rule_id": schema.Int64Attribute{ + Computed: true, + }, + "local_ip": schema.StringAttribute{ + Computed: true, + }, + "local_port": schema.Int64Attribute{ + Computed: true, + }, + "protocol": schema.StringAttribute{ + Computed: true, + }, + "public_port_end": schema.Int64Attribute{ + Computed: true, + }, + "public_port_start": schema.Int64Attribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + "vm_name": 
schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "devices": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "primary": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dev_id": schema.Int64Attribute{ + Computed: true, + }, + "iface01": schema.StringAttribute{ + Computed: true, + }, + "iface02": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "nat_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "owner_id": schema.Int64Attribute{ + Computed: true, + }, + "owner_type": schema.StringAttribute{ + Computed: true, + }, + "pure_virtual": schema.BoolAttribute{ + Computed: true, + }, + "routes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "route_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "vxlan_id": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_audits.go 
b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_audits.go new file mode 100644 index 0000000..0f76be6 --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_audits.go @@ -0,0 +1,42 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceVINSAudits() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "Unique ID of the ViNS.", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "call": schema.StringAttribute{ + Computed: true, + }, + "response_time": schema.Float64Attribute{ + Computed: true, + }, + "statuscode": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Float64Attribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ext_net_list.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ext_net_list.go new file mode 100644 index 0000000..6dfbc9e --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ext_net_list.go @@ -0,0 +1,48 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceVINSExtNetList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "Unique ID of the ViNS.", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + 
Attributes: map[string]schema.Attribute{ + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "ext_net_id": schema.Int64Attribute{ + Computed: true, + }, + "ip": schema.StringAttribute{ + Computed: true, + }, + "prefix_len": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ip_list.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ip_list.go new file mode 100644 index 0000000..97d2473 --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_ip_list.go @@ -0,0 +1,51 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceVINSIPList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "Unique ID of the ViNS", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "client_type": schema.StringAttribute{ + Computed: true, + }, + "domain_name": schema.StringAttribute{ + Computed: true, + }, + "host_name": schema.StringAttribute{ + Computed: true, + }, + "ip": schema.StringAttribute{ + Computed: true, + }, + "mac": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list.go 
b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list.go new file mode 100644 index 0000000..155654b --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list.go @@ -0,0 +1,202 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceVINSList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by ID", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "Filter by Name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by Account ID", + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by RG ID", + }, + "ext_ip": schema.StringAttribute{ + Optional: true, + Description: "Filter by external IP address", + }, + "vnfdev_id": schema.Int64Attribute{ + Optional: true, + Description: "find by VNF Device id", + }, + "include_deleted": schema.BoolAttribute{ + Optional: true, + Description: "Include deleted computes", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "default_gw": schema.StringAttribute{ + Computed: true, + }, + 
"default_qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "external_ip": schema.StringAttribute{ + Computed: true, + }, + "extnet_id": schema.Int64Attribute{ + Computed: true, + }, + "free_ips": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "pre_reservations_num": schema.Int64Attribute{ + Computed: true, + }, + "pri_vnf_dev_id": schema.Int64Attribute{ + Computed: true, + }, + "redundant": schema.BoolAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "sec_vnf_dev_id": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vnfs": schema.SingleNestedAttribute{ + 
Computed: true, + Attributes: map[string]schema.Attribute{ + "dhcp": schema.Int64Attribute{ + Computed: true, + }, + "dns": schema.Int64Attribute{ + Computed: true, + }, + "fw": schema.Int64Attribute{ + Computed: true, + }, + "gw": schema.Int64Attribute{ + Computed: true, + }, + "nat": schema.Int64Attribute{ + Computed: true, + }, + "vpn": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "vxlan_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list_deleted.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list_deleted.go new file mode 100644 index 0000000..784cff9 --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_list_deleted.go @@ -0,0 +1,188 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceVINSListDeleted() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by ID", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "Filter by Name", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by Account ID", + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + Description: "Filter by RG ID", + }, + "ext_ip": schema.StringAttribute{ + Optional: true, + Description: "Filter by external IP address", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": 
schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "default_qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "external_ip": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "pre_reservations_num": schema.Int64Attribute{ + Computed: true, + }, + "pri_vnf_dev_id": schema.Int64Attribute{ + Computed: true, + }, + "redundant": schema.BoolAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "sec_vnf_dev_id": 
schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vnfs": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dhcp": schema.Int64Attribute{ + Computed: true, + }, + "dns": schema.Int64Attribute{ + Computed: true, + }, + "fw": schema.Int64Attribute{ + Computed: true, + }, + "gw": schema.Int64Attribute{ + Computed: true, + }, + "nat": schema.Int64Attribute{ + Computed: true, + }, + "vpn": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "vxlan_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_nat_rule_list.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_nat_rule_list.go new file mode 100644 index 0000000..f688c6a --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_nat_rule_list.go @@ -0,0 +1,59 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceVINSNATRuleList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "VINS id", + }, + // optional attributes + "reason": schema.StringAttribute{ + Optional: true, + Description: "reason for action", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "local_ip": schema.StringAttribute{ + 
Computed: true, + }, + "local_port": schema.Int64Attribute{ + Computed: true, + }, + "protocol": schema.StringAttribute{ + Computed: true, + }, + "public_port_end": schema.Int64Attribute{ + Computed: true, + }, + "public_port_start": schema.Int64Attribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + "vm_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route.go new file mode 100644 index 0000000..c7b3ee2 --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route.go @@ -0,0 +1,41 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceVINSStaticRoute() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "vins ID", + }, + "route_id": schema.Int64Attribute{ + Required: true, + Description: "static route ID", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route_list.go b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route_list.go new file mode 100644 index 0000000..be22557 --- /dev/null +++ 
b/internal/service/cloudbroker/vins/schemas/schema_data_source_vins_static_route_list.go @@ -0,0 +1,50 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceVINSStaticRouteList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "vins ID", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + "route_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_resource_vins.go b/internal/service/cloudbroker/vins/schemas/schema_resource_vins.go new file mode 100644 index 0000000..70ceefb --- /dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_resource_vins.go @@ -0,0 +1,884 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func 
MakeSchemaResourceVINS() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "name": schema.StringAttribute{ + Required: true, + Description: "vins name", + }, + + // optional attributes + "rg_id": schema.Int64Attribute{ + Optional: true, // either rg_id or account_id must be specified + Computed: true, + Description: "resource group id, used for creating vins in resource group", + }, + "account_id": schema.Int64Attribute{ + Optional: true, // either rg_id or account_id must be specified + Computed: true, + Description: "account id, used for creating vins in account", + }, + + "ipcidr": schema.StringAttribute{ + Optional: true, + Description: "private network IP CIDR, used for creating vins either in resource group or in account", + }, + "pre_reservations_num": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "number of pre created reservations, used for creating vins either in resource group or in account", + // default is 32 + }, + "description": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "description, used for creating vins either in resource group or in account", + // default is "" + }, + "dns": schema.SetAttribute{ + Optional: true, + ElementType: types.StringType, + Description: "list of DNS ip address", + }, + + "gid": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "grid (platform) ID, used for creating vins in account", + }, + + "enable": schema.BoolAttribute{ + Optional: true, + Description: "flag to enable/disable vins", + // default is true + }, + "permanently": schema.BoolAttribute{ + Optional: true, + Description: "flag to delete vins permanently", + // default is false + }, + "force": schema.BoolAttribute{ + Optional: true, + Description: "flag to force delete of non-empty vins", + // default is false + }, + "restore": schema.BoolAttribute{ + Optional: true, + Description: "flag to restore vins", + // default is false + }, + 
"vnfdev_start": schema.BoolAttribute{ + Optional: true, + Description: "true to start vnfdev, false to stop vnfdev", + // default is false + }, + "vnfdev_reset": schema.BoolAttribute{ + Optional: true, + Description: "reset ViNS's primary vnf device", + // default is false + }, + "vnfdev_restart": schema.BoolAttribute{ + Optional: true, + Description: "flag to restart vnfdev", + // default is false + }, + "vnfdev_redeploy": schema.BoolAttribute{ + Optional: true, + Description: "flag to redeploy vnfdev", + // default is false + }, + "reason": schema.StringAttribute{ + Optional: true, + Description: "reason for action", + }, + + "ext_net": schema.SingleNestedAttribute{ + Optional: true, + Description: "connect/disconnect vins to/from external network", + Attributes: map[string]schema.Attribute{ + "ext_net_id": schema.Int64Attribute{ + Optional: true, + Description: "external network ID", + // default is -1 + }, + "ext_net_ip": schema.StringAttribute{ + Optional: true, + Description: "directly set IP address", + // default is "" + }, + }, + }, + "ip": schema.ListNestedAttribute{ + Optional: true, + Description: "ip reserve/release", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "type": schema.StringAttribute{ + Required: true, + Description: "type of the reservation", + Validators: []validator.String{ + stringvalidator.OneOfCaseInsensitive("DHCP", "VIP", "EXCLUDED"), // case is ignored + }, + }, + "ip_addr": schema.StringAttribute{ + Optional: true, + Description: "IP address to use. Non-empty string is required for type EXCLUDE. Ignored for types DHCP and VIP.", + }, + "mac_addr": schema.StringAttribute{ + Optional: true, + Description: "MAC address to associate with IP reservation. Ignored for type EXCLUDE, non-empty string is required for DHCP and VIP.", + }, + "compute_id": schema.Int64Attribute{ + Optional: true, + Description: "ID of the compute, associated with this reservation of type DHCP. 
Ignored for other types.", + }, + "reason": schema.StringAttribute{ + Optional: true, + }, + }, + }, + }, + "nat_rule": schema.ListNestedAttribute{ + Optional: true, + Description: "create/delete NAT (port forwarding rule) on vins", + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "int_ip": schema.StringAttribute{ + Required: true, + Description: "internal IP address to apply this rule to", + }, + "int_port": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "internal IP port number to use for this rule", + }, + "ext_port_start": schema.Int64Attribute{ + Required: true, + Description: "external IP start port to use for this rule", + }, + "ext_port_end": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "external IP end port to use for this rule", + }, + "proto": schema.StringAttribute{ + Optional: true, + Computed: true, + Description: "IP protocol type (tcp or udp)", + Validators: []validator.String{ + stringvalidator.OneOf("tcp", "udp"), // case is not ignored + }, + }, + "rule_id": schema.Int64Attribute{ + Computed: true, + Description: "rule id", + }, + "reason": schema.StringAttribute{ + Optional: true, + }, + }, + }, + }, + "default_qos": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + }, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "vins_id": schema.Int64Attribute{ + Computed: true, + Description: "Unique ID of the ViNS. 
If ViNS ID is specified, then ViNS name, rg_id and account_id are ignored.", + }, + "last_updated": schema.StringAttribute{ + Computed: true, + }, + "vnf_dev": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "capabilities": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "mgmt": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ip_addr": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + }, + "ssh_key": schema.StringAttribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "resources": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "stack_id": schema.Int64Attribute{ + Computed: true, + }, + "uuid": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "config_saved": schema.BoolAttribute{ + Computed: true, + }, + "custom_precfg": schema.BoolAttribute{ + Computed: true, + }, + "description": schema.StringAttribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "interfaces": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "conn_id": schema.Int64Attribute{ + Computed: true, + }, + "conn_type": schema.StringAttribute{ + Computed: true, + }, + "def_gw": schema.StringAttribute{ + Computed: true, + }, + "enabled": schema.BoolAttribute{ + 
Computed: true, + }, + "flipgroup_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "ip_address": schema.StringAttribute{ + Computed: true, + }, + "listen_ssh": schema.BoolAttribute{ + Computed: true, + }, + "mac": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "net_id": schema.Int64Attribute{ + Computed: true, + }, + "net_mask": schema.Int64Attribute{ + Computed: true, + }, + "net_type": schema.StringAttribute{ + Computed: true, + }, + "node_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "target": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vnfs": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + }, + }, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + }, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "default_gw": 
schema.StringAttribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "manager_id": schema.Int64Attribute{ + Computed: true, + }, + "manager_type": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "net_mask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "redundant": schema.BoolAttribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "sec_vnf_dev_id": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vnfs": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dhcp": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "dns": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "ip_end": schema.StringAttribute{ + Computed: true, + }, + "ip_start": schema.StringAttribute{ + Computed: true, + }, + "lease": schema.Int64Attribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "reservations": schema.ListNestedAttribute{ + Computed: true, + NestedObject: 
schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "client_type": schema.StringAttribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "domainname": schema.StringAttribute{ + Computed: true, + }, + "hostname": schema.StringAttribute{ + Computed: true, + }, + "ip": schema.StringAttribute{ + Computed: true, + }, + "mac": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "devices": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "primary": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dev_id": schema.Int64Attribute{ + Computed: true, + }, + "iface01": schema.StringAttribute{ + Computed: true, + }, + "iface02": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "dhcp_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "owner_id": schema.Int64Attribute{ + Computed: true, + }, + "owner_type": schema.StringAttribute{ + Computed: true, + }, + "pure_virtual": schema.BoolAttribute{ + Computed: true, + }, + "routes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "route_id": schema.Int64Attribute{ + Computed: true, + }, + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + 
}, + "netmask": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "gw": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "ext_net_id": schema.Int64Attribute{ + Computed: true, + }, + "ext_net_ip": schema.StringAttribute{ + Computed: true, + }, + "ext_netmask": schema.Int64Attribute{ + Computed: true, + }, + "qos": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "e_rate": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "in_burst": schema.Int64Attribute{ + Computed: true, + }, + "in_rate": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "devices": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "primary": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dev_id": schema.Int64Attribute{ + Computed: true, + }, + "iface01": schema.StringAttribute{ + Computed: true, + }, + "iface02": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "gw_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "owner_id": schema.Int64Attribute{ + 
Computed: true, + }, + "owner_type": schema.StringAttribute{ + Computed: true, + }, + "pure_virtual": schema.BoolAttribute{ + Computed: true, + }, + "routes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "route_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "nat": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "ckey": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "config": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "net_mask": schema.Int64Attribute{ + Computed: true, + }, + "network": schema.StringAttribute{ + Computed: true, + }, + "rules": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "rule_id": schema.Int64Attribute{ + Computed: true, + }, + "local_ip": schema.StringAttribute{ + Computed: true, + }, + "local_port": schema.Int64Attribute{ + Computed: true, + }, + "protocol": schema.StringAttribute{ + Computed: true, + }, + "public_port_end": schema.Int64Attribute{ + Computed: true, + }, + "public_port_start": schema.Int64Attribute{ + Computed: true, + }, + "vm_id": schema.Int64Attribute{ + Computed: true, + }, + "vm_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + 
}, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "devices": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "primary": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "dev_id": schema.Int64Attribute{ + Computed: true, + }, + "iface01": schema.StringAttribute{ + Computed: true, + }, + "iface02": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "nat_id": schema.Int64Attribute{ + Computed: true, + }, + "lock_status": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "owner_id": schema.Int64Attribute{ + Computed: true, + }, + "owner_type": schema.StringAttribute{ + Computed: true, + }, + "pure_virtual": schema.BoolAttribute{ + Computed: true, + }, + "routes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_ids": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "route_id": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "destination": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.StringAttribute{ + Computed: true, + }, + "gateway": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + "vxlan_id": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/schemas/schema_resource_vins_static_route.go b/internal/service/cloudbroker/vins/schemas/schema_resource_vins_static_route.go new file mode 100644 index 0000000..5d26853 --- 
/dev/null +++ b/internal/service/cloudbroker/vins/schemas/schema_resource_vins_static_route.go @@ -0,0 +1,51 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceVINSStaticRoute() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "vins_id": schema.Int64Attribute{ + Required: true, + Description: "Unique ID of the ViNS.", + }, + "destination": schema.StringAttribute{ + Required: true, + }, + "netmask": schema.StringAttribute{ + Required: true, + }, + "gateway": schema.StringAttribute{ + Required: true, + }, + + // optional attributes + "compute_ids": schema.ListAttribute{ + Computed: true, + Optional: true, + ElementType: types.Int64Type, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "last_updated": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "route_id": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins.go new file mode 100644 index 0000000..6baeef3 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func 
VINSDataSourceCheckPresence(ctx context.Context, vinsId uint64, c *decort.DecortClient) (*vins.RecordVINS, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("VINSDataSourceCheckPresence: Get info about vins with ID - %v", vinsId)) + + diags := diag.Diagnostics{} + + recordVINS, err := c.CloudBroker().VINS().Get(ctx, vins.GetRequest{VINSID: vinsId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about vins with ID %v", vinsId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "VINSDataSourceCheckPresence: response from CloudBroker().VINS().Get", map[string]any{"vins_id": vinsId, "response": recordVINS}) + + return recordVINS, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_audits.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_audits.go new file mode 100644 index 0000000..804760d --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_audits.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func VINSAuditsDataSourceCheckPresence(ctx context.Context, vinsId uint64, c *decort.DecortClient) (*vins.ListAudits, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("VINSAuditsDataSourceCheckPresence: Get info about vins audit with vins ID - %v", vinsId)) + + diags := diag.Diagnostics{} + + audits, err := c.CloudBroker().VINS().Audits(ctx, vins.AuditsRequest{VINSID: vinsId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about audits for vins with ID %v", vinsId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "VINSAuditsDataSourceCheckPresence: successful response from CloudBroker().VINS().Audits", map[string]any{"vins_id": vinsId}) + + return &audits, nil +} 
diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ext_net_list.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ext_net_list.go new file mode 100644 index 0000000..b1a8ae5 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ext_net_list.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func VINSExtNetListDataSourceCheckPresence(ctx context.Context, vinsId uint64, c *decort.DecortClient) (*vins.ListExtNets, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("VINSExtNetListDataSourceCheckPresence: Get info about vins audit with vins ID - %v", vinsId)) + + diags := diag.Diagnostics{} + + extnetList, err := c.CloudBroker().VINS().ExtNetList(ctx, vins.ExtNetListRequest{VINSID: vinsId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about extnet list for vins with ID %v", vinsId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "VINSExtNetListDataSourceCheckPresence: successful response from CloudBroker().VINS().ExtNetList", map[string]any{"vins_id": vinsId}) + + return extnetList, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ip_list.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ip_list.go new file mode 100644 index 0000000..3e8c6e1 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_ip_list.go @@ -0,0 +1,27 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func VINSIPListDataSourceCheckPresence(ctx context.Context, vinsId uint64, c *decort.DecortClient) (*vins.ListIPs, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("VINSIPListDataSourceCheckPresence: Get info about vins audit with vins ID - %v", vinsId)) + + diags := diag.Diagnostics{} + + ipList, err := c.CloudBroker().VINS().IPList(ctx, vins.IPListRequest{VINSID: vinsId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about ip list for vins with ID %v", vinsId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "VINSIPListDataSourceCheckPresence: successful response from CloudBroker().VINS().IPList", map[string]any{"vins_id": vinsId}) + + return ipList, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list.go new file mode 100644 index 0000000..b7b3734 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list.go @@ -0,0 +1,59 @@ +package utilities + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" +) + +func VINSListDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceVINSListModel, c *decort.DecortClient) (*vins.ListVINS, diag.Diagnostics) { + tflog.Info(ctx, "VINSListDataSourceCheckPresence: Get info about vins list") + + diags := diag.Diagnostics{} + + listReq := vins.ListRequest{} + if !plan.ByID.IsNull() { + listReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + listReq.Name = plan.Name.ValueString() + } + if !plan.AccountID.IsNull() { + listReq.AccountID 
= uint64(plan.AccountID.ValueInt64()) + } + if !plan.RGID.IsNull() { + listReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.ExtIP.IsNull() { + listReq.ExtIP = plan.ExtIP.ValueString() + } + if !plan.VNFDevID.IsNull() { + listReq.VNFDevID = uint64(plan.VNFDevID.ValueInt64()) + } + if !plan.IncludeDeleted.IsNull() { + listReq.IncludeDeleted = plan.IncludeDeleted.ValueBool() + } + if !plan.SortBy.IsNull() { + listReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "VINSListDataSourceCheckPresence: before call CloudBroker().VINS().List", map[string]any{"req": listReq}) + list, err := c.CloudBroker().VINS().List(ctx, listReq) + if err != nil { + diags.AddError("Cannot get info about vins list", err.Error()) + return nil, diags + } + tflog.Info(ctx, "VINSListDataSourceCheckPresence: successful response from CloudBroker().VINS().List") + + return list, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list_deleted.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list_deleted.go new file mode 100644 index 0000000..5383168 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_list_deleted.go @@ -0,0 +1,53 @@ +package utilities + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" +) + +func VINSListDeletedDataSourceCheckPresence(ctx context.Context, plan *models.DataSourceVINSListDeletedModel, c *decort.DecortClient) (*vins.ListVINS, diag.Diagnostics) { + tflog.Info(ctx, 
"VINSListDeletedDataSourceCheckPresence: Get info about vins list") + + diags := diag.Diagnostics{} + + listReq := vins.ListDeletedRequest{} + if !plan.ByID.IsNull() { + listReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + listReq.Name = plan.Name.ValueString() + } + if !plan.AccountID.IsNull() { + listReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.RGID.IsNull() { + listReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.ExtIP.IsNull() { + listReq.ExtIP = plan.ExtIP.ValueString() + } + if !plan.SortBy.IsNull() { + listReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + listReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + listReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "VINSListDeletedDataSourceCheckPresence: before call CloudBroker().VINS().ListDeleted", map[string]any{"req": listReq}) + list, err := c.CloudBroker().VINS().ListDeleted(ctx, listReq) + if err != nil { + diags.AddError("Cannot get info about vins deleted list", err.Error()) + return nil, diags + } + tflog.Info(ctx, "VINSListDeletedDataSourceCheckPresence: successful response from CloudBroker().VINS().ListDeleted") + + return list, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_nat_rule_list.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_nat_rule_list.go new file mode 100644 index 0000000..59f8650 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_nat_rule_list.go @@ -0,0 +1,34 @@ +package utilities + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" +) + +func 
VINSNATRuleListDataSourceCheckPresence(ctx context.Context, state *models.DataSourceVINSNATRuleListModel, c *decort.DecortClient) (*vins.ListNATRules, diag.Diagnostics) { + tflog.Info(ctx, "VINSNATRuleListDataSourceCheckPresence: Get info about vins nat rule list") + + diags := diag.Diagnostics{} + + req := vins.NATRuleListRequest{ + VINSID: uint64(state.VinsID.ValueInt64()), + } + + if !state.Reason.IsNull() { + req.Reason = state.Reason.ValueString() + } + + natRuleList, err := c.CloudBroker().VINS().NATRuleList(ctx, req) + if err != nil { + diags.AddError("Cannot get info about vins nat rule list", err.Error()) + return nil, diags + } + tflog.Info(ctx, "VINSNATRuleListDataSourceCheckPresence: successful response from CloudBroker().VINS().NATRuleList") + + return natRuleList, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route.go new file mode 100644 index 0000000..f2c383d --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route.go @@ -0,0 +1,39 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func VINSStaticRouteDataSourceCheckPresence(ctx context.Context, vinsId, routeId uint64, c *decort.DecortClient) (*vins.ItemRoutes, diag.Diagnostics) { + tflog.Info(ctx, "VINSStaticRouteDataSourceCheckPresence: Get info about vins static route", map[string]any{ + "vins_id": vinsId, + "route_id": routeId, + }) + + diags := diag.Diagnostics{} + + routesList, err := c.CloudBroker().VINS().StaticRouteList(ctx, vins.StaticRouteListRequest{VINSID: vinsId}) + if err != nil { + diags.AddError("Cannot get info about vins static route", err.Error()) + 
return nil, diags + } + tflog.Info(ctx, "VINSStaticRouteDataSourceCheckPresence: successful response from CloudBroker().VINS().StaticRouteList") + + staticRoute := &vins.ItemRoutes{} + for _, route := range routesList.Data { + if routeId == route.ID { + staticRoute = &route + return staticRoute, nil + } + } + + diags.AddError("Static route not found", + fmt.Sprintf("Static route with id %d not found for vins with id %d", routeId, vinsId)) + return nil, diags +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route_list.go b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route_list.go new file mode 100644 index 0000000..92217a1 --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_data_source_vins_static_route_list.go @@ -0,0 +1,25 @@ +package utilities + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" +) + +func VINSStaticRouteListDataSourceCheckPresence(ctx context.Context, vinsId uint64, c *decort.DecortClient) (*vins.ListStaticRoutes, diag.Diagnostics) { + tflog.Info(ctx, "VINSStaticRouteListDataSourceCheckPresence: Get info about vins static route list") + + diags := diag.Diagnostics{} + + routesList, err := c.CloudBroker().VINS().StaticRouteList(ctx, vins.StaticRouteListRequest{VINSID: vinsId}) + if err != nil { + diags.AddError("Cannot get info about vins static route list", err.Error()) + return nil, diags + } + tflog.Info(ctx, "VINSStaticRouteListDataSourceCheckPresence: successful response from CloudBroker().VINS().StaticRouteList") + + return routesList, nil +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_resource_vins.go b/internal/service/cloudbroker/vins/utilities/utility_resource_vins.go new file mode 100644 index 0000000..1e3d692 
--- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_resource_vins.go @@ -0,0 +1,948 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" +) + +func VINSResourceCheckPresence(ctx context.Context, vinsId uint64, c *decort.DecortClient) (*vins.RecordVINS, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("VINSResourceCheckPresence: Get info about vins with ID - %v", vinsId)) + + diags := diag.Diagnostics{} + + recordVINS, err := c.CloudBroker().VINS().Get(ctx, vins.GetRequest{VINSID: vinsId}) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about vins with ID %v", vinsId), err.Error()) + return nil, diags + } + + tflog.Info(ctx, "VINSResourceCheckPresence: response from CloudBroker().VINS().Get", map[string]any{"vins_id": vinsId, "response": recordVINS}) + + return recordVINS, nil +} + +// CreateInRGResourceVINS creates vins in resource group based on plan. +// Returns vins_id for created vins and errors in case of failures. 
+func CreateInRGResourceVINS(ctx context.Context, plan *models.ResourceVINSModel, c *decort.DecortClient) (uint64, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("Start CreateInRGResourceVINS: vins_name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := vins.CreateInRGRequest{ + Name: plan.Name.ValueString(), + RGID: uint64(plan.RGID.ValueInt64()), + } + + if !plan.IPCIDR.IsNull() { // IPCIDR is optional + createReq.IPCIDR = plan.IPCIDR.ValueString() + } + + if !plan.ExtNet.IsNull() { // ExtNet is optional + var extnetPlan models.ExtNetModel + tflog.Info(ctx, "CreateInRGResourceVINS: extnet specified", map[string]any{"name": plan.Name.ValueString()}) + diags.Append(plan.ExtNet.As(ctx, &extnetPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true})...) + if diags.HasError() { + tflog.Error(ctx, "CreateInRGResourceVINS: cannot populate extnet with plan.ExtNet object element") + return 0, diags + } + + if extnetPlan.ExtNetID.IsNull() { + createReq.ExtNetID = -1 // default value + } else { + createReq.ExtNetID = extnetPlan.ExtNetID.ValueInt64() + } + + if !extnetPlan.ExtNetIP.IsNull() { + createReq.ExtIP = extnetPlan.ExtNetIP.ValueString() + } + } else { + createReq.ExtNetID = -1 // default value + } + + if !plan.Description.IsNull() { // Description is optional + createReq.Description = plan.Description.ValueString() + } + if plan.PreReservationsNum.IsUnknown() { // PreReservationsNum is optional & computed + createReq.PreReservationsNum = uint64(32) // default value + } else { + createReq.PreReservationsNum = uint64(plan.PreReservationsNum.ValueInt64()) + } + if !plan.Reason.IsNull() { + createReq.Reason = plan.Reason.ValueString() + } + if !plan.DNS.IsNull() { + result := make([]string, 0, len(plan.DNS.Elements())) + for _, val := range plan.DNS.Elements() { + result = append(result, strings.Trim(val.String(), "\"")) + } + createReq.DNSList = result + } + + tflog.Info(ctx, "CreateInRGResourceVINS: before call 
CloudBroker().VINS().CreateInRG", map[string]any{"req": createReq}) + + vinsId, err := c.CloudBroker().VINS().CreateInRG(ctx, createReq) + if err != nil { + diags.AddError( + "Create resourceVINS: unable to Create VINS in RG", + err.Error(), + ) + return 0, diags + } + tflog.Info(ctx, "CreateInRGResourceVINS: vins created", map[string]any{"vins_id": vinsId, "vins_name": plan.Name.ValueString()}) + + return vinsId, nil +} + +// CreateInAccountResourceVINS creates vins in account based on plan. +// Returns vins_id for created vins and errors in case of failures. +func CreateInAccountResourceVINS(ctx context.Context, plan *models.ResourceVINSModel, c *decort.DecortClient) (uint64, diag.Diagnostics) { + tflog.Info(ctx, fmt.Sprintf("Start CreateInAccountResourceVINS: vins_name %s", plan.Name.ValueString())) + + diags := diag.Diagnostics{} + + createReq := vins.CreateInAccountRequest{ + Name: plan.Name.ValueString(), + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + if !plan.GID.IsUnknown() { // IPCIDR is optional & computed + createReq.GID = uint64(plan.GID.ValueInt64()) + } + + if !plan.IPCIDR.IsNull() { // IPCIDR is optional + createReq.IPCIDR = plan.IPCIDR.ValueString() + } + + if !plan.Description.IsNull() { // Description is optional + createReq.Description = plan.Description.ValueString() + } + + if plan.PreReservationsNum.IsNull() { // PreReservationsNum is optional + createReq.PreReservationsNum = uint64(32) // default value + } else { + createReq.PreReservationsNum = uint64(plan.PreReservationsNum.ValueInt64()) + } + + if !plan.Reason.IsNull() { + createReq.Reason = plan.Reason.ValueString() + } + + if !plan.DNS.IsNull() { + result := make([]string, 0, len(plan.DNS.Elements())) + for _, val := range plan.DNS.Elements() { + result = append(result, strings.Trim(val.String(), "\"")) + } + createReq.DNSList = result + } + + tflog.Info(ctx, "CreateInAccountResourceVINS: before call CloudBroker().VINS().CreateInAccount", map[string]any{"req": createReq}) + + 
vinsId, err := c.CloudBroker().VINS().CreateInAccount(ctx, createReq) + if err != nil { + diags.AddError( + "Create resourceVINS: unable to Create VINS in Account", + err.Error(), + ) + return 0, diags + } + tflog.Info(ctx, "CreateInAccountResourceVINS: vins created", map[string]any{"vins_id": vinsId, "vins_name": plan.Name.ValueString()}) + + return vinsId, nil +} + +// IPCreateVINS reserves ips that user specified in ip field for created resource. +// In case of failure returns warnings. +func IPCreateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + // plan.IP is not null as it was checked before call + ipPlan := make([]models.IPModel, 0, len(plan.IP.Elements())) + tflog.Info(ctx, "IPCreateVINS: new ip specified", map[string]any{"vins_id": vinsId}) + diagsItem := plan.IP.ElementsAs(ctx, &ipPlan, true) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprintf("IPCreateVINS: cannot populate ipPlan with plan.IP list elements: %v", diagsItem)) + diags.AddWarning("IPCreateVINS: Unable to read ip for vins", + fmt.Sprintf("%v", diagsItem)) + return diags + } + + for _, ip := range ipPlan { + ipReserveReq := vins.IPReserveRequest{ + VINSID: vinsId, + Type: ip.Type.ValueString(), + } + + if ip.IPAddr.ValueString() != "" { + ipReserveReq.IPAddr = ip.IPAddr.ValueString() + } + if ip.MacAddr.ValueString() != "" { + ipReserveReq.MAC = ip.MacAddr.ValueString() + } + if ip.ComputeID.ValueInt64() != 0 { + ipReserveReq.ComputeID = uint64(ip.ComputeID.ValueInt64()) + } + if ip.Reason.ValueString() != "" { + ipReserveReq.Reason = ip.Reason.ValueString() + } + + tflog.Info(ctx, "IPCreateVINS: before calling CloudBroker().VINS().IPReserve", map[string]any{ + "vins_id": vinsId, + "ipReserveReq": ipReserveReq}) + res, err := c.CloudBroker().VINS().IPReserve(ctx, ipReserveReq) + if err != nil { + diags.AddWarning("IPCreateVINS: Unable to reserve ip for vins", + err.Error()) + } + 
tflog.Info(ctx, "IPCreateVINS: response from CloudBroker().VINS().IPReserve", map[string]any{ + "vins_id": vinsId, + "response": res}) + } + + return diags +} + +// IPUpdateVINS reserves/releases ips that user specified in ip field for updated resource. +// In case of failure returns errors. +func IPUpdateVINS(ctx context.Context, vinsId uint64, plan, state *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start IPUpdateVINS: new ip specified", map[string]any{"vins_id": vinsId}) + diags := diag.Diagnostics{} + + ipPlan := make([]models.IPModel, 0, len(plan.IP.Elements())) + tflog.Info(ctx, "IPUpdateVINS: new ip specified", map[string]any{"vins_id": vinsId}) + diags.Append(plan.IP.ElementsAs(ctx, &ipPlan, true)...) + if diags.HasError() { + tflog.Error(ctx, "IPUpdateVINS: cannot populate ipPlan with plan.IP list elements") + return diags + } + + ipState := make([]models.IPModel, 0, len(state.IP.Elements())) + tflog.Info(ctx, "IPUpdateVINS: new ip specified", map[string]any{"vins_id": vinsId}) + diags.Append(state.IP.ElementsAs(ctx, &ipState, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "IPUpdateVINS: cannot populate ipState with state.IP list elements") + return diags + } + + // define ip to be released and release them + var deletedIP []models.IPModel + for _, ipStateElem := range ipState { + if !ipStateElem.Contains(ipPlan) { + deletedIP = append(deletedIP, ipStateElem) + } + } + if len(deletedIP) == 0 { + tflog.Info(ctx, "IPUpdateVINS: no ip needs to be release", map[string]any{"vins_id": plan.Id.ValueString()}) + } + if len(deletedIP) > 0 { + tflog.Info(ctx, "IPUpdateVINS: ip needs to be released", map[string]any{ + "vins_id": plan.Id.ValueString(), + "deletedIP": deletedIP}) + + for _, deletedIPItem := range deletedIP { + releaseIPReq := vins.IPReleaseRequest{ + VINSID: vinsId, + IPAddr: deletedIPItem.IPAddr.ValueString(), + MAC: deletedIPItem.MacAddr.ValueString(), + } + + tflog.Info(ctx, "IPUpdateVINS: before calling CloudBroker().VINS().IPRelese", map[string]any{"vins_id": plan.Id.ValueString(), "req": releaseIPReq}) + res, err := c.CloudBroker().VINS().IPRelease(ctx, releaseIPReq) + tflog.Info(ctx, "IPUpdateVINS: response from CloudBroker().VINS().IPRelese", map[string]any{"vins_id": plan.Id.ValueString(), "response": res}) + if err != nil { + diags.AddError( + "IPUpdateVINS: can not release ip for VINS", + err.Error()) + } + } + } + + // define ips to be reserved and reserve them + var addedIP []models.IPModel + for _, ipPlanElem := range ipPlan { + if !ipPlanElem.Contains(ipState) { + addedIP = append(addedIP, ipPlanElem) + } + } + + if len(addedIP) == 0 { + tflog.Info(ctx, "IPUpdateVINS: no ip needs to be reserved", map[string]any{"vins_id": plan.Id.ValueString()}) + } + if len(addedIP) > 0 { + tflog.Info(ctx, "IPUpdateVINS: ip needs to be reserved", map[string]any{ + "vins_id": plan.Id.ValueString(), + "addedIP": addedIP}) + + for _, addedIPItem := range addedIP { + ipReserveReq := vins.IPReserveRequest{ + VINSID: vinsId, + Type: addedIPItem.Type.ValueString(), + } + + if 
addedIPItem.IPAddr.ValueString() != "" { + ipReserveReq.IPAddr = addedIPItem.IPAddr.ValueString() + } + if addedIPItem.MacAddr.ValueString() != "" { + ipReserveReq.MAC = addedIPItem.MacAddr.ValueString() + } + if addedIPItem.ComputeID.ValueInt64() != 0 { + ipReserveReq.ComputeID = uint64(addedIPItem.ComputeID.ValueInt64()) + } + if addedIPItem.Reason.ValueString() != "" { + ipReserveReq.Reason = addedIPItem.Reason.ValueString() + } + + tflog.Info(ctx, "IPUpdateVINS: before calling CloudBroker().VINS().IPReserve", map[string]any{ + "vins_id": vinsId, + "ipReserveReq": ipReserveReq}) + res, err := c.CloudBroker().VINS().IPReserve(ctx, ipReserveReq) + if err != nil { + diags.AddError("IPUpdateVINS: Unable to reserve ip for vins", + err.Error()) + } + tflog.Info(ctx, "IPUpdateVINS: response from CloudBroker().VINS().IPReserve", map[string]any{ + "vins_id": vinsId, + "response": res}) + } + } + + return diags +} + +// ExtNetUpdateVINS updates ext_net_id and/or ext_net_ip that user specified in ext_net block for updated resource. +// In case of failure returns errors. 
+func ExtNetUpdateVINS(ctx context.Context, vinsId uint64, plan, state *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start ExtNetUpdateVINS: new ext_net specified", map[string]any{ + "vins_id": vinsId, + }) + diags := diag.Diagnostics{} + + if !state.ExtNet.IsNull() { + disconReq := vins.ExtNetDisconnectRequest{ + VINSID: vinsId, + } + tflog.Info(ctx, "ExtNetUpdateVINS: before calling CloudBroker().VINS().ExtNetDisconnect", map[string]any{"vins_id": plan.Id.ValueString(), "req": disconReq}) + res, err := c.CloudBroker().VINS().ExtNetDisconnect(ctx, disconReq) + tflog.Info(ctx, "ExtNetUpdateVINS: response from CloudBroker().VINS().ExtNetDisconnect", map[string]any{"vins_id": plan.Id.ValueString(), "response": res}) + if err != nil { + diags.AddError( + "ExtNetUpdateVINS: can not disconnect extnet for VINS", + err.Error()) + } + } + + if !plan.ExtNet.IsNull() { + var extnetPlan models.ExtNetModel + tflog.Info(ctx, "ExtNetUpdateVINS: new extnet specified", map[string]any{"name": plan.Name.ValueString()}) + diags.Append(plan.ExtNet.As(ctx, &extnetPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true})...) 
+ if diags.HasError() { + tflog.Error(ctx, "ExtNetUpdateVINS: cannot populate extnet with plan.ExtNet object element") + return diags + } + + conReq := vins.ExtNetConnectRequest{ + VINSID: vinsId, + } + if !extnetPlan.ExtNetID.IsNull() { + conReq.NetID = uint64(extnetPlan.ExtNetID.ValueInt64()) + } + if !extnetPlan.ExtNetIP.IsNull() { + conReq.IP = extnetPlan.ExtNetIP.ValueString() + } + + tflog.Info(ctx, "ExtNetUpdateVINS: before calling CloudBroker().VINS().ExtNetConnect", map[string]any{"vins_id": plan.Id.ValueString(), "req": conReq}) + res, err := c.CloudBroker().VINS().ExtNetConnect(ctx, conReq) + tflog.Info(ctx, "ExtNetUpdateVINS: response from CloudBroker().VINS().ExtNetConnect", map[string]any{"vins_id": plan.Id.ValueString(), "response": res}) + if err != nil { + diags.AddError( + "ExtNetUpdateVINS: can not connect extnet to VINS", + err.Error()) + } + } + + return diags +} + +// NATRuleCreateVINS adds nat rules that user specified in nat_rule field for created resource. +// In case of failure returns warnings. 
+func NATRuleCreateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + // plan.NatRule is not null as it was checked before call + natRulePlan := make([]models.NatRuleResourceModel, 0, len(plan.NatRule.Elements())) + tflog.Info(ctx, "NATRuleCreateVINS: new natRule specified", map[string]any{"vins_id": vinsId}) + diagsItem := plan.NatRule.ElementsAs(ctx, &natRulePlan, false) + if diagsItem.HasError() { + tflog.Error(ctx, fmt.Sprintf("NATRuleCreateVINS: cannot populate natRulePlan with plan.NatRule list elements: %v", diagsItem)) + diags.AddWarning("NATRuleCreateVINS: Unable to add nat rule for vins", + fmt.Sprintf("%v", diagsItem)) + return diags + } + + for _, nat := range natRulePlan { + natAddReq := vins.NATRuleAddRequest{ + VINSID: vinsId, + IntIP: nat.IntIP.ValueString(), + IntPort: uint64(nat.IntPort.ValueInt64()), + ExtPortStart: uint64(nat.ExtPortStart.ValueInt64()), + } + + if !nat.ExtPortEnd.IsUnknown() { + natAddReq.ExtPortEnd = uint64(nat.ExtPortEnd.ValueInt64()) + } + if !nat.Proto.IsUnknown() { + natAddReq.Proto = nat.Proto.ValueString() + } + if !nat.Reason.IsNull() { + natAddReq.Reason = nat.Reason.ValueString() + } + + tflog.Info(ctx, "NATRuleCreateVINS: before calling CloudBroker().VINS().NATRuleAdd", map[string]any{ + "vins_id": vinsId, + "natAddReq": natAddReq}) + res, err := c.CloudBroker().VINS().NATRuleAdd(ctx, natAddReq) + if err != nil { + diags.AddWarning("NATRuleCreateVINS: Unable to add nat rule for vins", + err.Error()) + } + tflog.Info(ctx, "NATRuleCreateVINS: response from CloudBroker().VINS().NATRuleAdd", map[string]any{ + "vins_id": vinsId, + "response": res}) + } + + return diags +} + +// DefaultQosCreateVINS update qos that user specified in defaultQos field for created resource. +// In case of failure returns warnings. 
+func DefaultQosCreateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + // plan.DefaultQOS is not null as it was checked before call + var defaultQosPlan models.QOSModel + tflog.Info(ctx, "DefaultQosCreateVINS: defaultQos specified", map[string]any{"name": plan.Name.ValueString()}) + diags.Append(plan.DefaultQOS.As(ctx, &defaultQosPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true})...) + if diags.HasError() { + tflog.Error(ctx, "DefaultQosCreateVINS: cannot populate defaultQosPlan with plan.DefaultQOS object") + diags.AddWarning("DefaultQosCreateVINS: Unable to update defaultQos for vins", + "cannot populate defaultQosPlan with plan.DefaultQOS object") + return diags + } + + qosReq := vins.DefaultQOSUpdateRequest{ + VINSID: vinsId, + } + + if !defaultQosPlan.InRate.IsUnknown() { + qosReq.IngressRate = uint64(defaultQosPlan.InRate.ValueInt64()) + } + if !defaultQosPlan.InBurst.IsUnknown() { + qosReq.IngressBirst = uint64(defaultQosPlan.InBurst.ValueInt64()) + } + if !defaultQosPlan.ERate.IsUnknown() { + qosReq.EgressRate = uint64(defaultQosPlan.ERate.ValueInt64()) + } + + tflog.Info(ctx, "DefaultQosCreateVINS: before calling CloudBroker().VINS().DefaultQOSUpdate", map[string]any{ + "vins_id": vinsId, + "qosReq": qosReq}) + res, err := c.CloudBroker().VINS().DefaultQOSUpdate(ctx, qosReq) + if err != nil { + diags.AddWarning("DefaultQosCreateVINS: Unable to update defaultQos for vins", + err.Error()) + } + tflog.Info(ctx, "DefaultQosCreateVINS: response from CloudBroker().VINS().DefaultQOSUpdate", map[string]any{ + "vins_id": vinsId, + "response": res}) + + return diags +} + +// NATRuleUpdateVINS adds/deleted nat rules that user specified in nat_rule field for updated resource. +// In case of failure returns errors. 
+func NATRuleUpdateVINS(ctx context.Context, vinsId uint64, plan, state *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start NATRuleUpdateVINS: new natRule specified", map[string]any{"vins_id": vinsId}) + diags := diag.Diagnostics{} + + itemsNatRulePlan := make([]models.NatRuleResourceModel, 0, len(plan.NatRule.Elements())) + diags.Append(plan.NatRule.ElementsAs(ctx, &itemsNatRulePlan, false)...) + if diags.HasError() { + tflog.Error(ctx, "NATRuleUpdateVINS: cannot populate natRulePlan with plan.NatRule list elements") + return diags + } + + itemsNatRuleState := make([]models.NatRuleResourceModel, 0, len(state.NatRule.Elements())) + diags.Append(state.NatRule.ElementsAs(ctx, &itemsNatRuleState, false)...) + if diags.HasError() { + tflog.Error(ctx, "NATRuleUpdateVINS: cannot populate natRuleState with state.NatRule list elements") + return diags + } + + // define nat rules to be deleted and delete them + var deletedNatRule []models.NatRuleResourceModel + for _, natRuleStateElem := range itemsNatRuleState { + if !natRuleStateElem.Contains(itemsNatRulePlan) { + deletedNatRule = append(deletedNatRule, natRuleStateElem) + } + } + + if len(deletedNatRule) == 0 { + tflog.Info(ctx, "NATRuleUpdateVINS: no natRule needs to be deleted", map[string]any{"vins_id": plan.Id.ValueString()}) + } + if len(deletedNatRule) > 0 { + tflog.Info(ctx, "NATRuleUpdateVINS: natRule needs to be deleted", map[string]any{ + "vins_id": plan.Id.ValueString(), + "deletedNatRule": deletedNatRule}) + + for _, deletedNatRuleItem := range deletedNatRule { + deleteNATReq := vins.NATRuleDelRequest{ + VINSID: vinsId, + RuleID: deletedNatRuleItem.RuleID.ValueInt64(), + } + + if deletedNatRuleItem.Reason.ValueString() != "" { + deleteNATReq.Reason = deletedNatRuleItem.Reason.ValueString() + } + + tflog.Info(ctx, "NATRuleUpdateVINS: before calling CloudBroker().VINS().NATRuleDel", map[string]any{"vins_id": plan.Id.ValueString(), "req": deleteNATReq}) + res, err := 
c.CloudBroker().VINS().NATRuleDel(ctx, deleteNATReq) + tflog.Info(ctx, "NATRuleUpdateVINS: response from CloudBroker().VINS().NATRuleDel", map[string]any{"vins_id": plan.Id.ValueString(), "response": res}) + if err != nil { + diags.AddError( + "NATRuleUpdateVINS: can not delete nat rule for VINS", + err.Error()) + } + } + } + + // define nat rules to be added and add them + var addedNatRules []models.NatRuleResourceModel + for _, natRulePlanElem := range itemsNatRulePlan { + if !natRulePlanElem.Contains(itemsNatRuleState) { + addedNatRules = append(addedNatRules, natRulePlanElem) + } + } + + if len(addedNatRules) == 0 { + tflog.Info(ctx, "NATRuleUpdateVINS: no nat rule needs to be added", map[string]any{"vins_id": plan.Id.ValueString()}) + } + if len(addedNatRules) > 0 { + tflog.Info(ctx, "NATRuleUpdateVINS: nat rule needs to be added", map[string]any{ + "vins_id": plan.Id.ValueString(), + "addedNatRules": addedNatRules}) + + for _, addedNatRuleItem := range addedNatRules { + natAddReq := vins.NATRuleAddRequest{ + VINSID: vinsId, + IntIP: addedNatRuleItem.IntIP.ValueString(), + IntPort: uint64(addedNatRuleItem.IntPort.ValueInt64()), + ExtPortStart: uint64(addedNatRuleItem.ExtPortStart.ValueInt64()), + } + if !addedNatRuleItem.ExtPortEnd.IsUnknown() { + natAddReq.ExtPortEnd = uint64(addedNatRuleItem.ExtPortEnd.ValueInt64()) + } + if !addedNatRuleItem.Proto.IsUnknown() { + natAddReq.Proto = addedNatRuleItem.Proto.ValueString() + } + if !addedNatRuleItem.Reason.IsUnknown() { + natAddReq.Reason = addedNatRuleItem.Reason.ValueString() + } + + tflog.Info(ctx, "NATRuleUpdateVINS: before calling CloudBroker().VINS().NATRuleAdd", map[string]any{ + "vins_id": vinsId, + "natAddReq": natAddReq}) + res, err := c.CloudBroker().VINS().NATRuleAdd(ctx, natAddReq) + if err != nil { + diags.AddError("NATRuleUpdateVINS: Unable to add nat rule for vins", + err.Error()) + } + tflog.Info(ctx, "NATRuleUpdateVINS: response from CloudBroker().VINS().NATRuleAdd", map[string]any{ + "vins_id": 
vinsId, + "response": res}) + } + } + + return diags +} + +// VINSReadStatus loads vins resource by ids id, gets it current status. Performs restore and enable if needed for +// Deleted status. +// In case of failure returns errors. +func VINSReadStatus(ctx context.Context, state *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "VINSReadStatus: Read status vins with ID", map[string]any{"vins_id": state.Id.ValueString()}) + + diags := diag.Diagnostics{} + + vinsId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64) + if err != nil { + diags.AddError("VINSReadStatus: Cannot parse vins ID from state", err.Error()) + return diags + } + + recordVINS, diags := VINSResourceCheckPresence(ctx, vinsId, c) + if diags.HasError() { + tflog.Error(ctx, "VINSReadStatus: Unable to Read/Update VINS before status check") + return diags + } + + // check resource status + switch recordVINS.Status { + case status.Modeled: + diags.AddError( + "VINS is in status Modeled", + "please, contact support for more information", + ) + return diags + case status.Deleted: + // attempt to restore vins + tflog.Info(ctx, "VINSReadStatus: vins with status.Deleted is being read or updated, attempt to restore it", map[string]any{ + "vins_id": recordVINS.ID, + "status": recordVINS.Status}) + if state.Restore.IsNull() || state.Restore.ValueBool() { // default true or user set-up true + diags.Append(RestoreVINS(ctx, vinsId, c)...) + if diags.HasError() { + tflog.Error(ctx, "VINSReadStatus: cannot restore vins") + return diags + } + tflog.Info(ctx, "VINSReadStatus: vins restored successfully", map[string]any{"vins_id": vinsId}) + + if state.Enable.IsNull() || state.Enable.ValueBool() { + diags.Append(EnableVINS(ctx, vinsId, c)...) 
+ if diags.HasError() { + tflog.Error(ctx, "VINSReadStatus: cannot enable vins") + return diags + } + tflog.Info(ctx, "VINSReadStatus: vins enabled successfully", map[string]any{"vins_id": vinsId}) + } + + state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + } + case status.Enabled: + if !state.Enable.ValueBool() && !state.Enable.IsNull() { + tflog.Info(ctx, "VINSReadStatus: vins with status.Enabled is being read or updated but should not be according to configuration (enable=false), attempt to disable it", map[string]any{ + "vins_id": recordVINS.ID, + "status": recordVINS.Status}) + diags.Append(DisableVINS(ctx, vinsId, c)...) + if diags.HasError() { + tflog.Error(ctx, "VINSReadStatus: cannot disable vins") + return diags + } + tflog.Info(ctx, "VINSReadStatus: vins disabled successfully", map[string]any{"vins_id": vinsId}) + state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + } + case status.Disabled: + if state.Enable.ValueBool() { + tflog.Info(ctx, "VINSReadStatus: vins with status.Disabled is being read or updated but should not be according to configuration (enable=true), attempt to enable it", map[string]any{ + "vins_id": recordVINS.ID, + "status": recordVINS.Status}) + diags.Append(EnableVINS(ctx, vinsId, c)...) + if diags.HasError() { + tflog.Error(ctx, "VINSReadStatus: cannot enable vins") + return diags + } + tflog.Info(ctx, "VINSReadStatus: vins enabled successfully", map[string]any{"vins_id": vinsId}) + state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + } + case status.Destroyed: + diags.AddError( + "VINSReadStatus: vins is in status Destroyed", + fmt.Sprintf("the resource with vins_id %d cannot be read or updated because it has been destroyed", vinsId), + ) + return diags + } + + return nil +} + +// RestoreVINS performs vins Restore request. +// Returns error in case of failures. 
+func RestoreVINS(ctx context.Context, vinsId uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + tflog.Info(ctx, "RestoreVINS: before calling CloudBroker().VINS().Restore", map[string]any{"vinsId": vinsId, "req": vins.RestoreRequest{VINSID: vinsId}}) + + res, err := c.CloudBroker().VINS().Restore(ctx, vins.RestoreRequest{VINSID: vinsId}) + if err != nil { + diags.AddError( + "RestoreVINS: cannot restore vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "RestoreVINS: response from CloudBroker().VINS().Restore", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// DisableVINS performs vins Disable request. +// Returns error in case of failures. +func DisableVINS(ctx context.Context, vinsId uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + tflog.Info(ctx, "DisableVINS: before calling CloudBroker().VINS().Disable", map[string]any{"vinsId": vinsId}) + + res, err := c.CloudBroker().VINS().Disable(ctx, vins.DisableRequest{VINSID: vinsId}) + if err != nil { + diags.AddError( + "DisableVINS: cannot disable vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "DisableVINS: response from CloudBroker().VINS().Disable", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// EnableVINS performs vins Enable request. +// Returns error in case of failures. 
+func EnableVINS(ctx context.Context, vinsId uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + tflog.Info(ctx, "EnableVINS: before calling CloudBroker().VINS().Enable", map[string]any{"vinsId": vinsId}) + + res, err := c.CloudBroker().VINS().Enable(ctx, vins.EnableRequest{VINSID: vinsId}) + if err != nil { + diags.AddError( + "EnableVINS: cannot enable vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableVINS: response from CloudBroker().VINS().Enable", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// EnableDisableUpdateVINS performs vins Enable/disable request. +// Returns errors in case of failures. +func EnableDisableUpdateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + enable := plan.Enable.ValueBool() + tflog.Info(ctx, "Start EnableDisableUpdateVINS", map[string]any{"vinsId": vinsId, "enable": enable}) + + if enable { + diags.Append(EnableVINS(ctx, vinsId, c)...) + return diags + } + + if !enable { + diags.Append(DisableVINS(ctx, vinsId, c)...) + return diags + } + + return nil +} + +// VnfdevRestartUpdateVINS restarts vnf_dev for vins. +// Returns error in case of failures. 
+func VnfdevRestartUpdateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + tflog.Info(ctx, "VnfdevRestartUpdateVINS: before calling CloudBroker().VINS().VNFDevRestart", map[string]any{"vinsId": vinsId}) + + req := vins.VNFDevRestartRequest{ + VINSID: vinsId, + } + + if !plan.Reason.IsNull() { + req.Reason = plan.Reason.ValueString() + } + + res, err := c.CloudBroker().VINS().VNFDevRestart(ctx, req) + if err != nil { + diags.AddError( + "VnfdevRestartUpdateVINS: cannot restart vnf_dev for vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "VnfdevRestartUpdateVINS: response from CloudBroker().VINS().VNFDevRestart", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// VnfdevRedeployUpdateVINS redeploys vnf_dev for vins. +// Returns error in case of failures. +func VnfdevRedeployUpdateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + tflog.Info(ctx, "VnfdevRedeployUpdateVINS: before calling CloudBroker().VINS().VNFDevRedeploy", map[string]any{"vinsId": vinsId}) + + req := vins.VNFDevRedeployRequest{ + VINSID: vinsId, + } + + if !plan.Reason.IsNull() { + req.Reason = plan.Reason.ValueString() + } + + res, err := c.CloudBroker().VINS().VNFDevRedeploy(ctx, req) + if err != nil { + diags.AddError( + "VnfdevRedeployUpdateVINS: cannot redeploy vnf_dev for vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "VnfdevRedeployUpdateVINS: response from CloudBroker().VINS().VNFDevRedeploy", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// VnfdevResetUpdateVINS reset vnf_dev for vins. +// Returns error in case of failures. 
+func VnfdevResetUpdateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + tflog.Info(ctx, "VnfdevResetUpdateVINS: before calling CloudBroker().VINS().VNFDevReset", map[string]any{"vinsId": vinsId}) + + req := vins.VNFDevResetRequest{ + VINSID: vinsId, + } + + if !plan.Reason.IsNull() { + req.Reason = plan.Reason.ValueString() + } + + res, err := c.CloudBroker().VINS().VNFDevReset(ctx, req) + if err != nil { + diags.AddError( + "VnfdevResetUpdateVINS: cannot reset vnf_dev for vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "VnfdevResetUpdateVINS: response from CloudBroker().VINS().VNFDevReset", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// VnfdevStartStopUpdateVINS start/stop vnf_dev for vins. +// Returns error in case of failures. +func VnfdevStartStopUpdateVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + if plan.VnfdevStart.ValueBool() { + req := vins.VNFDevStartRequest{ + VINSID: vinsId, + } + if !plan.Reason.IsNull() { + req.Reason = plan.Reason.ValueString() + } + tflog.Info(ctx, "VnfdevStartStopUpdateVINS: before calling CloudBroker().VINS().VNFDevStart", map[string]any{"vinsId": vinsId}) + res, err := c.CloudBroker().VINS().VNFDevStart(ctx, req) + if err != nil { + diags.AddError( + "VnfdevStartStopUpdateVINS: cannot start vnf_dev for vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "VnfdevStartStopUpdateVINS: response from CloudBroker().VINS().VNFDevStart", map[string]any{"vinsId": vinsId, "response": res}) + return nil + } + + req := vins.VNFDevStopRequest{ + VINSID: vinsId, + } + if !plan.Reason.IsNull() { + req.Reason = plan.Reason.ValueString() + } + tflog.Info(ctx, "VnfdevStartStopUpdateVINS: before calling CloudBroker().VINS().VNFDevStop", map[string]any{"vinsId": vinsId}) + res, err := 
c.CloudBroker().VINS().VNFDevStop(ctx, req) + if err != nil { + diags.AddError( + "VnfdevStartStopUpdateVINS: cannot stop vnf_dev for vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "VnfdevStartStopUpdateVINS: response from CloudBroker().VINS().VNFDevStop", map[string]any{"vinsId": vinsId, "response": res}) + + return nil +} + +// UpdateDNSlistVINS apply new DNS list in VINS +// Returns error in case of failures. +func UpdateDNSlistVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + req := vins.DNSApplyRequest{ + VINSID: vinsId, + } + dnsList := make([]string, 0, len(plan.DNS.Elements())) + for _, val := range plan.DNS.Elements() { + dnsList = append(dnsList, strings.Trim(val.String(), "\"")) + } + req.DNSList = dnsList + + tflog.Info(ctx, "UpdateDNSListVINS: before calling CloudBroker().VINS().DNSApply", map[string]any{"vinsId": vinsId}) + + res, err := c.CloudBroker().VINS().DNSApply(ctx, req) + if err != nil { + diags.AddError( + "UpdateDNSListVINS: cannot apply DNSList for vins", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "UpdateDNSListVINS: after calling CloudBroker().VINS().DNSApply", map[string]any{"vinsId": vinsId, "response": res}) + + return nil + +} + +// UpdateDefaultQosVINS update qos that user specified in defaultQos field for update resource. +// In case of failure returns error. +func UpdateDefaultQosVINS(ctx context.Context, vinsId uint64, plan *models.ResourceVINSModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + // plan.DefaultQOS is not null as it was checked before call + var defaultQosPlan models.QOSModel + tflog.Info(ctx, "DefaultQosUpdateVINS: defaultQos specified", map[string]any{"name": plan.Name.ValueString()}) + diags.Append(plan.DefaultQOS.As(ctx, &defaultQosPlan, basetypes.ObjectAsOptions{UnhandledNullAsEmpty: true})...) 
+ if diags.HasError() { + tflog.Error(ctx, "DefaultQosUpdateVINS: cannot populate defaultQosPlan with plan.DefaultQOS object") + return diags + } + + qosReq := vins.DefaultQOSUpdateRequest{ + VINSID: vinsId, + } + + if !defaultQosPlan.InRate.IsUnknown() { + qosReq.IngressRate = uint64(defaultQosPlan.InRate.ValueInt64()) + } + if !defaultQosPlan.InBurst.IsUnknown() { + qosReq.IngressBirst = uint64(defaultQosPlan.InBurst.ValueInt64()) + } + if !defaultQosPlan.ERate.IsUnknown() { + qosReq.EgressRate = uint64(defaultQosPlan.ERate.ValueInt64()) + } + + tflog.Info(ctx, "DefaultQosUpdateVINS: before calling CloudBroker().VINS().DefaultQOSUpdate", map[string]any{ + "vins_id": vinsId, + "natAddReq": qosReq}) + res, err := c.CloudBroker().VINS().DefaultQOSUpdate(ctx, qosReq) + if err != nil { + diags.AddError("DefaultQosUpdateVINS: Unable to update defaultQos for vins", + err.Error()) + } + tflog.Info(ctx, "DefaultQosUpdateVINS: response from CloudBroker().VINS().DefaultQOSUpdate", map[string]any{ + "vins_id": vinsId, + "response": res}) + + return diags +} diff --git a/internal/service/cloudbroker/vins/utilities/utility_resource_vins_static_route.go b/internal/service/cloudbroker/vins/utilities/utility_resource_vins_static_route.go new file mode 100644 index 0000000..8b22dad --- /dev/null +++ b/internal/service/cloudbroker/vins/utilities/utility_resource_vins_static_route.go @@ -0,0 +1,195 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudbroker/vins" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/vins/models" +) + +func VINSStaticRouteResourceCheckPresence(ctx context.Context, vinsId, routeId uint64, c *decort.DecortClient) (*vins.ItemRoutes, diag.Diagnostics) { + 
tflog.Info(ctx, "VINSStaticRouteResourceCheckPresence: Get info about vins static route") + + diags := diag.Diagnostics{} + + tflog.Info(ctx, "VINSStaticRouteResourceCheckPresence: before call to CloudBroker().VINS().StaticRouteList", map[string]any{"vins_id": vinsId, "route_id": routeId}) + staticRouteList, err := c.CloudBroker().VINS().StaticRouteList(ctx, vins.StaticRouteListRequest{VINSID: vinsId}) + if err != nil { + diags.AddError( + fmt.Sprintf("VINSStaticRouteResourceCheckPresence: Cannot get info about vins static route %v", vinsId), + err.Error()) + return nil, diags + } + + tflog.Info(ctx, "VINSStaticRouteResourceCheckPresence: response from CloudBroker().VINS().StaticRouteList", map[string]any{"vins_id": vinsId, "response": staticRouteList}) + + staticRoute := &vins.ItemRoutes{} + for _, route := range staticRouteList.Data { + if routeId == route.ID { + staticRoute = &route + return staticRoute, nil + } + } + + diags.AddError( + "VINSStaticRouteResourceCheckPresence: static route not found", + fmt.Sprintf("static route not found for route_id=%d and vins_id=%d", routeId, vinsId)) + + return nil, diags + +} + +func GetVinsIDAndRouteID(ctx context.Context, plan *models.ResourceVINSStaticRouteModel) (uint64, uint64, diag.Diagnostics) { + tflog.Info(ctx, "Start GetVinsIDAndRouteID") + + var err error + diags := diag.Diagnostics{} + + vinsId := uint64(plan.VinsID.ValueInt64()) + routeId := uint64(plan.RouteID.ValueInt64()) + + if plan.Id.ValueString() != "" { + vals := strings.Split(plan.Id.ValueString(), "#") + if len(vals) != 2 { + diags.AddError( + "GetVinsIDAndRouteID: broken state id", + fmt.Sprintf("state id expected: #, got: %v", plan.Id.ValueString())) + return 0, 0, diags + } + + vinsId, err = strconv.ParseUint(vals[0], 10, 64) + if err != nil { + diags.AddError("GetVinsIDAndRouteID: can not parse vinsId from state", err.Error()) + return 0, 0, diags + } + + routeId, err = strconv.ParseUint(vals[1], 10, 64) + if err != nil { + 
diags.AddError("GetVinsIDAndRouteID: can not parse routeId from state", err.Error()) + return 0, 0, diags + } + } + + return vinsId, routeId, nil +} + +func GetStaticRouteID(ctx context.Context, plan *models.ResourceVINSStaticRouteModel, c *decort.DecortClient) (uint64, diag.Diagnostics) { + tflog.Info(ctx, "Start GetStaticRouteID") + + diags := diag.Diagnostics{} + + vinsId := uint64(plan.VinsID.ValueInt64()) + + tflog.Info(ctx, "GetStaticRouteID: before call to CloudBroker().VINS().StaticRouteList", map[string]any{"vins_id": vinsId}) + staticRouteList, err := c.CloudBroker().VINS().StaticRouteList(ctx, vins.StaticRouteListRequest{VINSID: vinsId}) + if err != nil { + diags.AddError( + fmt.Sprintf("GetStaticRouteID: Cannot get info about vins static routes %v", vinsId), + err.Error()) + return 0, diags + } + + tflog.Info(ctx, "GetStaticRouteID: response from CloudBroker().VINS().StaticRouteList", map[string]any{"vins_id": vinsId, "response": staticRouteList}) + + destination := plan.Destination.ValueString() + gateway := plan.Gateway.ValueString() + + staticRoute := &vins.ItemRoutes{} + for _, route := range staticRouteList.Data { + if destination == route.Destination && gateway == route.Gateway { + staticRoute = &route + return staticRoute.ID, nil + } + } + + diags.AddError( + "GetStaticRouteID: static route not found", + fmt.Sprintf("Static route (destination=%s, gateway=%s) not found for vins with vins_id=%d", destination, gateway, vinsId)) + + return 0, diags +} + +func UpdateComputeIDsVINSStaticRoute(ctx context.Context, plan, state *models.ResourceVINSStaticRouteModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start UpdateComputeIDsVINSStaticRoute", map[string]any{"id": plan.Id.ValueString()}) + + vinsId, routeId, diags := GetVinsIDAndRouteID(ctx, plan) + if diags.HasError() { + return diags + } + + computesPlan := make([]uint64, 0, len(plan.ComputeIDs.Elements())) + diags = plan.ComputeIDs.ElementsAs(ctx, &computesPlan, false) + if 
diags.HasError() { + tflog.Error(ctx, "UpdateComputeIDsVINSStaticRoute: cannot populate computes with plan.ComputeIDs List elements") + return diags + } + + computesState := make([]uint64, 0, len(state.ComputeIDs.Elements())) + diags = state.ComputeIDs.ElementsAs(ctx, &computesState, false) + if diags.HasError() { + tflog.Error(ctx, "UpdateComputeIDsVINSStaticRoute: cannot populate computes with state.ComputeIDs List elements") + return diags + } + + var deletedComputes []uint64 + for _, comp := range computesState { + if !contains(comp, computesPlan) { + deletedComputes = append(deletedComputes, comp) + } + } + if len(deletedComputes) != 0 { + revokeReq := vins.StaticRouteAccessRevokeRequest{ + VINSID: vinsId, + RouteId: routeId, + ComputeIds: deletedComputes, + } + + tflog.Info(ctx, "UpdateComputeIDsVINSStaticRoute: before call to CloudBroker().VINS().StaticRouteAccessRevoke", map[string]any{"revokeReq": revokeReq}) + _, err := c.CloudBroker().VINS().StaticRouteAccessRevoke(ctx, revokeReq) + if err != nil { + diags.AddError( + fmt.Sprintf("UpdateComputeIDsVINSStaticRoute: Cannot revoke static routes for vins with id %v", vinsId), + err.Error()) + } + } + + var addedComputes []uint64 + for _, comp := range computesPlan { + if !contains(comp, computesState) { + addedComputes = append(addedComputes, comp) + } + } + if len(addedComputes) != 0 { + grantReq := vins.StaticRouteAccessGrantRequest{ + VINSID: vinsId, + RouteId: routeId, + ComputeIds: addedComputes, + } + + tflog.Info(ctx, "UpdateComputeIDsVINSStaticRoute: before call to CloudBroker().VINS().StaticRouteAccessGrant", map[string]any{"grantReq": grantReq}) + _, err := c.CloudBroker().VINS().StaticRouteAccessGrant(ctx, grantReq) + if err != nil { + diags.AddError( + fmt.Sprintf("UpdateComputeIDsVINSStaticRoute: Cannot grant static routes for vins with id %v", vinsId), + err.Error()) + } + } + + return diags +} + +// contains returns true if slice contains element. Otherwise it returns false. 
+func contains(element uint64, slice []uint64) bool { + for _, s := range slice { + if s == element { + return true + } + } + return false +} diff --git a/samples/cloudapi/account/data_account/main.tf b/samples/cloudapi/account/data_account/main.tf index 94d0190..cacad2b 100644 --- a/samples/cloudapi/account/data_account/main.tf +++ b/samples/cloudapi/account/data_account/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_audits/main.tf b/samples/cloudapi/account/data_account_audits/main.tf index acc72eb..1ca8e2c 100644 --- a/samples/cloudapi/account/data_account_audits/main.tf +++ b/samples/cloudapi/account/data_account_audits/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_computes_list/main.tf b/samples/cloudapi/account/data_account_computes_list/main.tf index 41384fe..5520933 100644 --- a/samples/cloudapi/account/data_account_computes_list/main.tf +++ b/samples/cloudapi/account/data_account_computes_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_consumed_units/main.tf b/samples/cloudapi/account/data_account_consumed_units/main.tf index 548e60e..7557d4d 100644 --- 
a/samples/cloudapi/account/data_account_consumed_units/main.tf +++ b/samples/cloudapi/account/data_account_consumed_units/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_consumed_units_by_type/main.tf b/samples/cloudapi/account/data_account_consumed_units_by_type/main.tf index 630fe07..30e882f 100644 --- a/samples/cloudapi/account/data_account_consumed_units_by_type/main.tf +++ b/samples/cloudapi/account/data_account_consumed_units_by_type/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_deleted_list/main.tf b/samples/cloudapi/account/data_account_deleted_list/main.tf index 8001a85..4717819 100644 --- a/samples/cloudapi/account/data_account_deleted_list/main.tf +++ b/samples/cloudapi/account/data_account_deleted_list/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_disks_list/main.tf b/samples/cloudapi/account/data_account_disks_list/main.tf index 6ff391b..7856039 100644 --- a/samples/cloudapi/account/data_account_disks_list/main.tf +++ b/samples/cloudapi/account/data_account_disks_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "oauth2" - #controller_url = + #controller_url = 
controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_flipgroups_list/main.tf b/samples/cloudapi/account/data_account_flipgroups_list/main.tf index f90e672..450b0ee 100644 --- a/samples/cloudapi/account/data_account_flipgroups_list/main.tf +++ b/samples/cloudapi/account/data_account_flipgroups_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_get_resource_consumption/main.tf b/samples/cloudapi/account/data_account_get_resource_consumption/main.tf index 96092b3..06f84b0 100644 --- a/samples/cloudapi/account/data_account_get_resource_consumption/main.tf +++ b/samples/cloudapi/account/data_account_get_resource_consumption/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_list/main.tf b/samples/cloudapi/account/data_account_list/main.tf index 73ce20a..1b57fbb 100644 --- a/samples/cloudapi/account/data_account_list/main.tf +++ b/samples/cloudapi/account/data_account_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_reserved_units/main.tf 
b/samples/cloudapi/account/data_account_reserved_units/main.tf index bd718e6..164d3bc 100644 --- a/samples/cloudapi/account/data_account_reserved_units/main.tf +++ b/samples/cloudapi/account/data_account_reserved_units/main.tf @@ -16,9 +16,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_resource_consumption_list/main.tf b/samples/cloudapi/account/data_account_resource_consumption_list/main.tf index 57d805e..1cefaa8 100644 --- a/samples/cloudapi/account/data_account_resource_consumption_list/main.tf +++ b/samples/cloudapi/account/data_account_resource_consumption_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_rg_list/main.tf b/samples/cloudapi/account/data_account_rg_list/main.tf index 2f337ac..a63fdd2 100644 --- a/samples/cloudapi/account/data_account_rg_list/main.tf +++ b/samples/cloudapi/account/data_account_rg_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_templates_list/main.tf b/samples/cloudapi/account/data_account_templates_list/main.tf index 7a5ac8d..3b90936 100644 --- a/samples/cloudapi/account/data_account_templates_list/main.tf +++ b/samples/cloudapi/account/data_account_templates_list/main.tf @@ -19,9 +19,9 @@ 
terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/data_account_vins_list/main.tf b/samples/cloudapi/account/data_account_vins_list/main.tf index ebfe1c9..c161b2e 100644 --- a/samples/cloudapi/account/data_account_vins_list/main.tf +++ b/samples/cloudapi/account/data_account_vins_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/account/resource_account/main.tf b/samples/cloudapi/account/resource_account/main.tf index 5d8e38f..ceb4b33 100644 --- a/samples/cloudapi/account/resource_account/main.tf +++ b/samples/cloudapi/account/resource_account/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/data_bservice/main.tf b/samples/cloudapi/bservice/data_bservice/main.tf index cfc3d99..94c8f5b 100644 --- a/samples/cloudapi/bservice/data_bservice/main.tf +++ b/samples/cloudapi/bservice/data_bservice/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/data_bservice_deleted_list/main.tf 
b/samples/cloudapi/bservice/data_bservice_deleted_list/main.tf index a4f8420..86e787b 100644 --- a/samples/cloudapi/bservice/data_bservice_deleted_list/main.tf +++ b/samples/cloudapi/bservice/data_bservice_deleted_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/data_bservice_group/main.tf b/samples/cloudapi/bservice/data_bservice_group/main.tf index c2c8c14..b682526 100644 --- a/samples/cloudapi/bservice/data_bservice_group/main.tf +++ b/samples/cloudapi/bservice/data_bservice_group/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/data_bservice_list/main.tf b/samples/cloudapi/bservice/data_bservice_list/main.tf index 6e4736a..a827739 100644 --- a/samples/cloudapi/bservice/data_bservice_list/main.tf +++ b/samples/cloudapi/bservice/data_bservice_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/data_bservice_snapshot_list/main.tf b/samples/cloudapi/bservice/data_bservice_snapshot_list/main.tf index ec05fc9..ebdcdc3 100644 --- a/samples/cloudapi/bservice/data_bservice_snapshot_list/main.tf +++ b/samples/cloudapi/bservice/data_bservice_snapshot_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - 
#controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/resource_bservice/main.tf b/samples/cloudapi/bservice/resource_bservice/main.tf index 5f5264b..d82bb62 100644 --- a/samples/cloudapi/bservice/resource_bservice/main.tf +++ b/samples/cloudapi/bservice/resource_bservice/main.tf @@ -26,9 +26,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/bservice/resource_bservice_group/main.tf b/samples/cloudapi/bservice/resource_bservice_group/main.tf index a8aeb25..85915d7 100644 --- a/samples/cloudapi/bservice/resource_bservice_group/main.tf +++ b/samples/cloudapi/bservice/resource_bservice_group/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk/main.tf b/samples/cloudapi/disk/data_disk/main.tf index dc5611d..e93dfc1 100644 --- a/samples/cloudapi/disk/data_disk/main.tf +++ b/samples/cloudapi/disk/data_disk/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_list/main.tf b/samples/cloudapi/disk/data_disk_list/main.tf index 682543f..84d293b 100644 --- a/samples/cloudapi/disk/data_disk_list/main.tf +++ 
b/samples/cloudapi/disk/data_disk_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_list_deleted/main.tf b/samples/cloudapi/disk/data_disk_list_deleted/main.tf index 82568f1..87b7b4c 100644 --- a/samples/cloudapi/disk/data_disk_list_deleted/main.tf +++ b/samples/cloudapi/disk/data_disk_list_deleted/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_list_types/main.tf b/samples/cloudapi/disk/data_disk_list_types/main.tf index 81f7fa1..02e7775 100644 --- a/samples/cloudapi/disk/data_disk_list_types/main.tf +++ b/samples/cloudapi/disk/data_disk_list_types/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_list_types_detailed/main.tf b/samples/cloudapi/disk/data_disk_list_types_detailed/main.tf index 166507e..b7e746f 100644 --- a/samples/cloudapi/disk/data_disk_list_types_detailed/main.tf +++ b/samples/cloudapi/disk/data_disk_list_types_detailed/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git 
a/samples/cloudapi/disk/data_disk_list_unattached/main.tf b/samples/cloudapi/disk/data_disk_list_unattached/main.tf index 8250e09..96dbe1d 100644 --- a/samples/cloudapi/disk/data_disk_list_unattached/main.tf +++ b/samples/cloudapi/disk/data_disk_list_unattached/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_replication/main.tf b/samples/cloudapi/disk/data_disk_replication/main.tf index 29fc0f9..029a479 100644 --- a/samples/cloudapi/disk/data_disk_replication/main.tf +++ b/samples/cloudapi/disk/data_disk_replication/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_snapshot/main.tf b/samples/cloudapi/disk/data_disk_snapshot/main.tf index 4023a49..31b59ab 100644 --- a/samples/cloudapi/disk/data_disk_snapshot/main.tf +++ b/samples/cloudapi/disk/data_disk_snapshot/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/data_disk_snapshot_list/main.tf b/samples/cloudapi/disk/data_disk_snapshot_list/main.tf index 7d2957e..7c7496e 100644 --- a/samples/cloudapi/disk/data_disk_snapshot_list/main.tf +++ b/samples/cloudapi/disk/data_disk_snapshot_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + 
#controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/resource_disk/main.tf b/samples/cloudapi/disk/resource_disk/main.tf index 3c585bb..0bbf3d1 100644 --- a/samples/cloudapi/disk/resource_disk/main.tf +++ b/samples/cloudapi/disk/resource_disk/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/resource_disk_replication/main.tf b/samples/cloudapi/disk/resource_disk_replication/main.tf index a608214..776023d 100644 --- a/samples/cloudapi/disk/resource_disk_replication/main.tf +++ b/samples/cloudapi/disk/resource_disk_replication/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/disk/resource_disk_snapshot/main.tf b/samples/cloudapi/disk/resource_disk_snapshot/main.tf index 117ccc7..6302c1d 100644 --- a/samples/cloudapi/disk/resource_disk_snapshot/main.tf +++ b/samples/cloudapi/disk/resource_disk_snapshot/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/extnet/data_extnet/main.tf b/samples/cloudapi/extnet/data_extnet/main.tf index f556f6e..121f4d6 100644 --- a/samples/cloudapi/extnet/data_extnet/main.tf +++ 
b/samples/cloudapi/extnet/data_extnet/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/extnet/data_extnet_computes_list/main.tf b/samples/cloudapi/extnet/data_extnet_computes_list/main.tf index 07a571e..b0ddc48 100644 --- a/samples/cloudapi/extnet/data_extnet_computes_list/main.tf +++ b/samples/cloudapi/extnet/data_extnet_computes_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/extnet/data_extnet_default/main.tf b/samples/cloudapi/extnet/data_extnet_default/main.tf index c796216..8ff24d1 100644 --- a/samples/cloudapi/extnet/data_extnet_default/main.tf +++ b/samples/cloudapi/extnet/data_extnet_default/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/extnet/data_extnet_list/main.tf b/samples/cloudapi/extnet/data_extnet_list/main.tf index 881f0d9..4ddee0b 100644 --- a/samples/cloudapi/extnet/data_extnet_list/main.tf +++ b/samples/cloudapi/extnet/data_extnet_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git 
a/samples/cloudapi/flipgroup/data_flipgroup/main.tf b/samples/cloudapi/flipgroup/data_flipgroup/main.tf index 574055d..5fa9123 100644 --- a/samples/cloudapi/flipgroup/data_flipgroup/main.tf +++ b/samples/cloudapi/flipgroup/data_flipgroup/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/flipgroup/data_flipgroup_list/maint.tf b/samples/cloudapi/flipgroup/data_flipgroup_list/maint.tf index ca86ae7..c2cb6ce 100644 --- a/samples/cloudapi/flipgroup/data_flipgroup_list/maint.tf +++ b/samples/cloudapi/flipgroup/data_flipgroup_list/maint.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/flipgroup/resource_flipgroup/main.tf b/samples/cloudapi/flipgroup/resource_flipgroup/main.tf index 173c8a2..c70fb73 100644 --- a/samples/cloudapi/flipgroup/resource_flipgroup/main.tf +++ b/samples/cloudapi/flipgroup/resource_flipgroup/main.tf @@ -24,9 +24,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/image/data_image/main.tf b/samples/cloudapi/image/data_image/main.tf index 8d8b4fb..0a6ab0b 100644 --- a/samples/cloudapi/image/data_image/main.tf +++ b/samples/cloudapi/image/data_image/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = 
"https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/image/data_image_list/main.tf b/samples/cloudapi/image/data_image_list/main.tf index bb82228..fce34df 100644 --- a/samples/cloudapi/image/data_image_list/main.tf +++ b/samples/cloudapi/image/data_image_list/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/image/resource_image/main.tf b/samples/cloudapi/image/resource_image/main.tf index 31a3882..633b876 100644 --- a/samples/cloudapi/image/resource_image/main.tf +++ b/samples/cloudapi/image/resource_image/main.tf @@ -15,9 +15,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/image/resource_image_virtual/main.tf b/samples/cloudapi/image/resource_image_virtual/main.tf index 0ba98d9..4976da1 100644 --- a/samples/cloudapi/image/resource_image_virtual/main.tf +++ b/samples/cloudapi/image/resource_image_virtual/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8ci_list/main.tf b/samples/cloudapi/k8s/data_k8ci_list/main.tf index 29110ae..2f9f695 100644 --- a/samples/cloudapi/k8s/data_k8ci_list/main.tf +++ b/samples/cloudapi/k8s/data_k8ci_list/main.tf @@ -20,9 +20,9 @@ terraform 
{ provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s/main.tf b/samples/cloudapi/k8s/data_k8s/main.tf index cffed6c..32363e8 100644 --- a/samples/cloudapi/k8s/data_k8s/main.tf +++ b/samples/cloudapi/k8s/data_k8s/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s_computes/main.tf b/samples/cloudapi/k8s/data_k8s_computes/main.tf index 07fc244..8c100c9 100644 --- a/samples/cloudapi/k8s/data_k8s_computes/main.tf +++ b/samples/cloudapi/k8s/data_k8s_computes/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s_list/main.tf b/samples/cloudapi/k8s/data_k8s_list/main.tf index d4ebf79..32017a4 100644 --- a/samples/cloudapi/k8s/data_k8s_list/main.tf +++ b/samples/cloudapi/k8s/data_k8s_list/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s_list_deleted/main.tf b/samples/cloudapi/k8s/data_k8s_list_deleted/main.tf index cc13465..7ac2daf 100644 --- a/samples/cloudapi/k8s/data_k8s_list_deleted/main.tf +++ 
b/samples/cloudapi/k8s/data_k8s_list_deleted/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s_wg/main.tf b/samples/cloudapi/k8s/data_k8s_wg/main.tf index 308bccc..042f12d 100644 --- a/samples/cloudapi/k8s/data_k8s_wg/main.tf +++ b/samples/cloudapi/k8s/data_k8s_wg/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s_wg_cloud_init/main.tf b/samples/cloudapi/k8s/data_k8s_wg_cloud_init/main.tf index 17976fa..2146887 100644 --- a/samples/cloudapi/k8s/data_k8s_wg_cloud_init/main.tf +++ b/samples/cloudapi/k8s/data_k8s_wg_cloud_init/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/data_k8s_wg_list/main.tf b/samples/cloudapi/k8s/data_k8s_wg_list/main.tf index 3d7eadc..b3ed7ed 100644 --- a/samples/cloudapi/k8s/data_k8s_wg_list/main.tf +++ b/samples/cloudapi/k8s/data_k8s_wg_list/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/resource_k8s_cp/main.tf b/samples/cloudapi/k8s/resource_k8s_cp/main.tf 
index dc0a354..74afbfa 100644 --- a/samples/cloudapi/k8s/resource_k8s_cp/main.tf +++ b/samples/cloudapi/k8s/resource_k8s_cp/main.tf @@ -25,9 +25,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/k8s/resource_k8s_wg/main.tf b/samples/cloudapi/k8s/resource_k8s_wg/main.tf index 4c06f26..2f532e4 100644 --- a/samples/cloudapi/k8s/resource_k8s_wg/main.tf +++ b/samples/cloudapi/k8s/resource_k8s_wg/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm/main.tf b/samples/cloudapi/kvmvm/data_kvmvm/main.tf index fc43f39..79071e1 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_audits/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_audits/main.tf index 3ce9536..2c621a9 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_audits/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_audits/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git 
a/samples/cloudapi/kvmvm/data_kvmvm_get_audits/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_get_audits/main.tf index b630f34..696a6c8 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_get_audits/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_get_audits/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_get_console_url/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_get_console_url/main.tf index e64f79f..b9e168a 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_get_console_url/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_get_console_url/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_get_log/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_get_log/main.tf index 2d08235..55716ae 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_get_log/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_get_log/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_list/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_list/main.tf index 466f2bb..8353590 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_list/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = 
controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_list_deleted/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_list_deleted/main.tf index 1f4414d..7df9c7b 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_list_deleted/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_list_deleted/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_pci_device_list/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_pci_device_list/main.tf index 948cb20..fa07bd7 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_pci_device_list/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_pci_device_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_pfw_list/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_pfw_list/main.tf index 4cdb21b..4b15a6b 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_pfw_list/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_pfw_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_snapshot_usage/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_snapshot_usage/main.tf index cbe0c41..9628d65 100644 --- 
a/samples/cloudapi/kvmvm/data_kvmvm_snapshot_usage/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_snapshot_usage/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_user_list/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_user_list/main.tf index 3dcda1b..997771a 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_user_list/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_user_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/data_kvmvm_vgpu_list/main.tf b/samples/cloudapi/kvmvm/data_kvmvm_vgpu_list/main.tf index 92d5750..12c6e0f 100644 --- a/samples/cloudapi/kvmvm/data_kvmvm_vgpu_list/main.tf +++ b/samples/cloudapi/kvmvm/data_kvmvm_vgpu_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/kvmvm/resource_kvmvm/main.tf b/samples/cloudapi/kvmvm/resource_kvmvm/main.tf index d50efe9..76afba5 100644 --- a/samples/cloudapi/kvmvm/resource_kvmvm/main.tf +++ b/samples/cloudapi/kvmvm/resource_kvmvm/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = 
true } diff --git a/samples/cloudapi/lb/data_lb/main.tf b/samples/cloudapi/lb/data_lb/main.tf index 4cbe54e..ce9e49c 100644 --- a/samples/cloudapi/lb/data_lb/main.tf +++ b/samples/cloudapi/lb/data_lb/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } @@ -34,5 +34,5 @@ data "dynamix_lb" "lb" { } output "test" { - value = data.dynamix_lb + value = data.dynamix_lb.lb } diff --git a/samples/cloudapi/lb/data_lb_list/main.tf b/samples/cloudapi/lb/data_lb_list/main.tf index e500932..ce45af3 100644 --- a/samples/cloudapi/lb/data_lb_list/main.tf +++ b/samples/cloudapi/lb/data_lb_list/main.tf @@ -20,9 +20,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/lb/data_lb_list_deleted/main.tf b/samples/cloudapi/lb/data_lb_list_deleted/main.tf index 08495b6..2386ff2 100644 --- a/samples/cloudapi/lb/data_lb_list_deleted/main.tf +++ b/samples/cloudapi/lb/data_lb_list_deleted/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/lb/resource_lb/main.tf b/samples/cloudapi/lb/resource_lb/main.tf index e997088..af570d8 100644 --- a/samples/cloudapi/lb/resource_lb/main.tf +++ b/samples/cloudapi/lb/resource_lb/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = 
"https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/lb/resource_lb_backend/main.tf b/samples/cloudapi/lb/resource_lb_backend/main.tf index 4c1d354..e42c7d1 100644 --- a/samples/cloudapi/lb/resource_lb_backend/main.tf +++ b/samples/cloudapi/lb/resource_lb_backend/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/lb/resource_lb_backend_server/main.tf b/samples/cloudapi/lb/resource_lb_backend_server/main.tf index b3e6b3c..4d10c1b 100644 --- a/samples/cloudapi/lb/resource_lb_backend_server/main.tf +++ b/samples/cloudapi/lb/resource_lb_backend_server/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/lb/resource_lb_frontend/main.tf b/samples/cloudapi/lb/resource_lb_frontend/main.tf index b2ce07d..3438701 100644 --- a/samples/cloudapi/lb/resource_lb_frontend/main.tf +++ b/samples/cloudapi/lb/resource_lb_frontend/main.tf @@ -22,9 +22,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/lb/resource_lb_frontend_bind/main.tf b/samples/cloudapi/lb/resource_lb_frontend_bind/main.tf index 557fe98..c816224 100644 --- a/samples/cloudapi/lb/resource_lb_frontend_bind/main.tf +++ 
b/samples/cloudapi/lb/resource_lb_frontend_bind/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg/main.tf b/samples/cloudapi/rg/data_rg/main.tf index da8917a..4d4cbae 100644 --- a/samples/cloudapi/rg/data_rg/main.tf +++ b/samples/cloudapi/rg/data_rg/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_affinity_group_computes/main.tf b/samples/cloudapi/rg/data_rg_affinity_group_computes/main.tf index 20e13bf..d7be957 100644 --- a/samples/cloudapi/rg/data_rg_affinity_group_computes/main.tf +++ b/samples/cloudapi/rg/data_rg_affinity_group_computes/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_affinity_groups_get/main.tf b/samples/cloudapi/rg/data_rg_affinity_groups_get/main.tf index 0965271..f7db98f 100644 --- a/samples/cloudapi/rg/data_rg_affinity_groups_get/main.tf +++ b/samples/cloudapi/rg/data_rg_affinity_groups_get/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git 
a/samples/cloudapi/rg/data_rg_affinity_groups_list/main.tf b/samples/cloudapi/rg/data_rg_affinity_groups_list/main.tf index cb3d7f8..20ea61a 100644 --- a/samples/cloudapi/rg/data_rg_affinity_groups_list/main.tf +++ b/samples/cloudapi/rg/data_rg_affinity_groups_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_audits/main.tf b/samples/cloudapi/rg/data_rg_audits/main.tf index 325df21..b1abe78 100644 --- a/samples/cloudapi/rg/data_rg_audits/main.tf +++ b/samples/cloudapi/rg/data_rg_audits/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_list/main.tf b/samples/cloudapi/rg/data_rg_list/main.tf index 0e05e26..de9f9b1 100644 --- a/samples/cloudapi/rg/data_rg_list/main.tf +++ b/samples/cloudapi/rg/data_rg_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_list_computes/main.tf b/samples/cloudapi/rg/data_rg_list_computes/main.tf index 7aa2e38..277095a 100644 --- a/samples/cloudapi/rg/data_rg_list_computes/main.tf +++ b/samples/cloudapi/rg/data_rg_list_computes/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = 
+ #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_list_deleted/main.tf b/samples/cloudapi/rg/data_rg_list_deleted/main.tf index 6a922df..e925be7 100644 --- a/samples/cloudapi/rg/data_rg_list_deleted/main.tf +++ b/samples/cloudapi/rg/data_rg_list_deleted/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_list_lb/main.tf b/samples/cloudapi/rg/data_rg_list_lb/main.tf index 868c27e..7cf4272 100644 --- a/samples/cloudapi/rg/data_rg_list_lb/main.tf +++ b/samples/cloudapi/rg/data_rg_list_lb/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_list_pfw/main.tf b/samples/cloudapi/rg/data_rg_list_pfw/main.tf index a3c3525..237ff9c 100644 --- a/samples/cloudapi/rg/data_rg_list_pfw/main.tf +++ b/samples/cloudapi/rg/data_rg_list_pfw/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_list_vins/main.tf b/samples/cloudapi/rg/data_rg_list_vins/main.tf index ef3f0fa..0f52c81 100644 --- a/samples/cloudapi/rg/data_rg_list_vins/main.tf +++ b/samples/cloudapi/rg/data_rg_list_vins/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + 
#controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_resource_consumption_get/main.tf b/samples/cloudapi/rg/data_rg_resource_consumption_get/main.tf index 69f29e8..f5182d2 100644 --- a/samples/cloudapi/rg/data_rg_resource_consumption_get/main.tf +++ b/samples/cloudapi/rg/data_rg_resource_consumption_get/main.tf @@ -18,9 +18,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_resource_consumption_list/main.tf b/samples/cloudapi/rg/data_rg_resource_consumption_list/main.tf index 7c2eb01..2f23f63 100644 --- a/samples/cloudapi/rg/data_rg_resource_consumption_list/main.tf +++ b/samples/cloudapi/rg/data_rg_resource_consumption_list/main.tf @@ -18,9 +18,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/data_rg_usage/main.tf b/samples/cloudapi/rg/data_rg_usage/main.tf index f764e55..acdb5f8 100644 --- a/samples/cloudapi/rg/data_rg_usage/main.tf +++ b/samples/cloudapi/rg/data_rg_usage/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/rg/resource_rg/main.tf b/samples/cloudapi/rg/resource_rg/main.tf index 0dbb428..bc54399 100644 --- 
a/samples/cloudapi/rg/resource_rg/main.tf +++ b/samples/cloudapi/rg/resource_rg/main.tf @@ -23,9 +23,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/stack/data_stack/main.tf b/samples/cloudapi/stack/data_stack/main.tf index 6e80f5d..e474f6b 100644 --- a/samples/cloudapi/stack/data_stack/main.tf +++ b/samples/cloudapi/stack/data_stack/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/stack/data_stack_list/main.tf b/samples/cloudapi/stack/data_stack_list/main.tf index 0ba4569..c1930ad 100644 --- a/samples/cloudapi/stack/data_stack_list/main.tf +++ b/samples/cloudapi/stack/data_stack_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins/main.tf b/samples/cloudapi/vins/data_vins/main.tf index 9609e47..fe9514b 100644 --- a/samples/cloudapi/vins/data_vins/main.tf +++ b/samples/cloudapi/vins/data_vins/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_audits/main.tf b/samples/cloudapi/vins/data_vins_audits/main.tf 
index 2efb8d0..c3351f2 100644 --- a/samples/cloudapi/vins/data_vins_audits/main.tf +++ b/samples/cloudapi/vins/data_vins_audits/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_ext_net_list/main.tf b/samples/cloudapi/vins/data_vins_ext_net_list/main.tf index dc3c0a4..7dd783e 100644 --- a/samples/cloudapi/vins/data_vins_ext_net_list/main.tf +++ b/samples/cloudapi/vins/data_vins_ext_net_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_ip_list/main.tf b/samples/cloudapi/vins/data_vins_ip_list/main.tf index ec87bd7..d1aac4e 100644 --- a/samples/cloudapi/vins/data_vins_ip_list/main.tf +++ b/samples/cloudapi/vins/data_vins_ip_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_list/main.tf b/samples/cloudapi/vins/data_vins_list/main.tf index 70bd2b1..e20107d 100644 --- a/samples/cloudapi/vins/data_vins_list/main.tf +++ b/samples/cloudapi/vins/data_vins_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = 
true } diff --git a/samples/cloudapi/vins/data_vins_list_deleted/main.tf b/samples/cloudapi/vins/data_vins_list_deleted/main.tf index d3fd50b..0bd486c 100644 --- a/samples/cloudapi/vins/data_vins_list_deleted/main.tf +++ b/samples/cloudapi/vins/data_vins_list_deleted/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_nat_rule_list/main.tf b/samples/cloudapi/vins/data_vins_nat_rule_list/main.tf index 92f6cbf..0a9cf0e 100644 --- a/samples/cloudapi/vins/data_vins_nat_rule_list/main.tf +++ b/samples/cloudapi/vins/data_vins_nat_rule_list/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_static_route/main.tf b/samples/cloudapi/vins/data_vins_static_route/main.tf index 5480525..268a7ef 100644 --- a/samples/cloudapi/vins/data_vins_static_route/main.tf +++ b/samples/cloudapi/vins/data_vins_static_route/main.tf @@ -19,9 +19,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/data_vins_static_route_list/main.tf b/samples/cloudapi/vins/data_vins_static_route_list/main.tf index c2e8a15..324250c 100644 --- a/samples/cloudapi/vins/data_vins_static_route_list/main.tf +++ b/samples/cloudapi/vins/data_vins_static_route_list/main.tf @@ -18,9 +18,9 @@ terraform { provider "dynamix" { 
authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/samples/cloudapi/vins/resource_vins/main.tf b/samples/cloudapi/vins/resource_vins/main.tf index 8a4486b..ab49c74 100644 --- a/samples/cloudapi/vins/resource_vins/main.tf +++ b/samples/cloudapi/vins/resource_vins/main.tf @@ -28,9 +28,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } @@ -51,15 +51,20 @@ resource "dynamix_vins" "vins" { #тип - целое число account_id = 2023 - #id внешней сети для подключения к ней ресурса + #блок правил extnet #опциональный параметр - #тип - целое число - ext_net_id = 2222 + #тип - блок extnet + ext_net = { + #id внешней сети для подключения к ней ресурса + #опциональный параметр + #тип - целое число + ext_net_id = 2222 - #ip внешней сети для подключения к нему ресурса - #опциональный параметр - #тип - строка - ext_ip_addr = "1.1.1.1" + #ip внешней сети для подключения к нему ресурса + #опциональный параметр + #тип - строка + ext_ip_addr = "1.1.1.1" + } #private network IP CIDR #опциональный параметр @@ -99,10 +104,10 @@ resource "dynamix_vins" "vins" { #тип - булев force = true - #блок для резервирования ip + #список для резервирования ip #опциональный параметр - #тип - блок ip - ip { + #тип - массив объектов ip + ip = [{ #тип подключения #обязательный параметр #тип - строка @@ -117,12 +122,12 @@ resource "dynamix_vins" "vins" { #опциональный параметр #тип - строка mac_addr = "ff:ff:ff:ff:ff:ff" - } + }] - #блок для добавления natRule + #список для резервирования ip #опциональный параметр - #тип - блок правил nat - nat_rule { + #тип - массив объектов ip + nat_rule = [{ #ip внутренний 
#опциональный параметр #тип - строка @@ -147,7 +152,7 @@ resource "dynamix_vins" "vins" { #опциональный параметр #тип - строка proto = "tcp" - } + }] #восстановление ресурса #опциональный параметр diff --git a/samples/cloudapi/vins/resource_vins_static_route/main.tf b/samples/cloudapi/vins/resource_vins_static_route/main.tf index 66ae0e0..f2a69a6 100644 --- a/samples/cloudapi/vins/resource_vins_static_route/main.tf +++ b/samples/cloudapi/vins/resource_vins_static_route/main.tf @@ -24,9 +24,9 @@ terraform { provider "dynamix" { authenticator = "decs3o" - #controller_url = + #controller_url = controller_url = "https://ds1.digitalenergy.online" - #oauth2_url = + #oauth2_url = oauth2_url = "https://sso.digitalenergy.online" allow_unverified_ssl = true } diff --git a/wiki/1.0.0/08.-Полезные-советы.md b/wiki/1.0.0/08.-Полезные-советы.md index e408946..d98dc69 100644 --- a/wiki/1.0.0/08.-Полезные-советы.md +++ b/wiki/1.0.0/08.-Полезные-советы.md @@ -40,3 +40,13 @@ terraform { } } ``` +## 8.4 OpenTofu +`Terraform-provider-dynamix` полностью совместим с OpenTofu версии 1.8. OpenTofu - это форк Terraform, созданный в ответ на переход HashiCorp с лицензии с открытым исходным кодом на BUSL. Инструкция по его установке находится на официальном сайте https://opentofu.org/docs/intro/install/. Все основные команды terraform можно использовать и в OpenTofu. +Основные используемые команды: + +- `tofu init` — инициализирует рабочий каталог OpenTofu. +- `tofu validate` — подтверждает правильность синтаксиса файла. +- `tofu plan` — генерирует файл изменений и показывает, что изменится при запуске. +- `tofu apply` — строит или изменяет инфраструктуру. Команда покажет план выполнения и потребует ответа «да» или «нет». +- `tofu refresh` — обновляет локальный файл состояния относительно реальных ресурсов. +- `tofu destroy` — удаляет инфраструктуру, управляемую OpenTofu. 
\ No newline at end of file diff --git a/wiki/1.0.0/Home.md b/wiki/1.0.0/Home.md index 5291010..0633daf 100644 --- a/wiki/1.0.0/Home.md +++ b/wiki/1.0.0/Home.md @@ -292,3 +292,4 @@ Terraform провайдер версии 1.0.0, для платформы DYNAM - [Как хранить авторизационную информацию отдельно от tf-файла](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.0/08.-Полезные-советы.md#81-Как-хранить-авторизационную-информацию-отдельно-от-tf-файла) - [Если Terraform провайдер DYNAMIX жалуется на SSL сертификаты.](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.0/08.-Полезные-советы.md#82-Если-terraform-провайдер-жалуется-на-ssl-сертификаты) - [Настройка локального провайдера для работы с новыми версиями Terraform.](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.0/08.-Полезные-советы.md#82-Если-terraform-провайдер-жалуется-на-ssl-сертификаты) + - [OpenTofu](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.0/08.-Полезные-советы.md#8.4-OpenTofu) diff --git a/wiki/1.0.1/01.-Введение.md b/wiki/1.0.1/01.-Введение.md new file mode 100644 index 0000000..4cbac9e --- /dev/null +++ b/wiki/1.0.1/01.-Введение.md @@ -0,0 +1,7 @@ +DYNAMIX Terraform Provider позволяет управлять облачными ресурсами на платформе DYNAMIX версии 4.0.0 и выше посредством Terraform. + +С помощью данного провайдера можно организовать программное управление вычислительными ресурсами (_compute_), ресурсными группами, сетевыми и дисковыми ресурсами, образами дисков, кластером, а также другими параметрами облачной платформы DYNAMIX. 
+ +Если вы хорошо знакомы с инструментом Terraform и хотите максимально быстро начать использовать платформу DYNAMIX в своих Terraform-проектах, то можете сразу перейти к разделу [Пример работы](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/02.-Пример-работы.md), где приведён подробно откомментированный пример работы с основными видами ресурсов платформы. Если у вас всё же возникнут вопросы по облачной платформе DYNAMIX и порядку авторизации в ней, то обратитесь к главе [«Обзор облачной платформы DYNAMIX»](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md). Также может оказаться полезной глава [«Инициализация Terraform провайдера DYNAMIX»](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/04.02-Инициализация-Terraform-провайдера-DYNAMIX.md). + +Если вы только начинаете использовать инструмент Terraform и облачную платформу DYNAMIX, то рекомендуем вам начать с главы [«Обзор облачной платформы DYNAMIX»](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md), после чего изучить главы [«_Data source_ функции Terraform провайдера DYNAMIX»](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/06.-Data-source-функции-Terraform-провайдера-DYNAMIX.md) и [«_Resource_ функции Terraform провайдера DYNAMIX»](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/07.-Resource-функции-Terraform-провайдера-DYNAMIX.md). Примеры, приведенные в этих разделах, помогут вам быстро освоить базовые приёмы работы с инструментом Terraform и провайдером DYNAMIX. 
diff --git a/wiki/1.0.1/02.-Пример-работы.md b/wiki/1.0.1/02.-Пример-работы.md new file mode 100644 index 0000000..65c070f --- /dev/null +++ b/wiki/1.0.1/02.-Пример-работы.md @@ -0,0 +1,92 @@ +Данный раздел предназначен для тех, кто хорошо знаком с инструментом Terraform, а также имеет представление об основных понятиях и способах авторизации в облачной платформе DYNAMIX. + +Ниже приведён подробно откомментированный пример, показывающий, как создать виртуальный сервер (aka _compute_ на базе системы виртуализации KVM x86) в облачной платформе DYNAMIX с помощью соответствующего Terraform провайдера. Сервер будет создан в новой ресурсной группе, к нему будет подключён один предварительно созданный диск, у сервера будет прямое сетевое подключение во внешнюю сеть. + +Идентификатор образа операционной системы, на базе которого должен быть создан виртуальный сервер, считывается из облачной платформы с помощью _data source_ функции `dynamix_image`. + +Далее мы с помощью _resource_ функции `dynamix_resgroup` создаём новую ресурсную группу, в которую будет помещён этот виртуальный сервер. В качестве альтернативы, для получения информации об уже имеющейся ресурсной группе можно использовать _data source_ функцию с таким же названием. + +Затем с помощью _resource_ функции `dynamix_disk` создаётся диск, который будет подключён к виртуальному серверу в качестве дополнительного. Помимо этого дополнительного диска у сервера будет также и загрузочный диск, на который в процессе создания сервера клонируется выбранный образ операционной системы. + +Виртуальный сервер - в данном примере на базе системы виртуализации KVM x86 - создаётся посредством _resource_ функции `dynamix_kvmvm`. + +Только авторизованные в контроллере облачной платформы пользователи могут управлять облачными ресурсами. Подробнее о способах авторизации см. 
[Обзор облачной платформы DYNAMIX](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md). + +```terraform +# 1. Initialize DYNAMIX plugin and connection to DYNAMIX cloud controller +# NOTE: in this example credentials are expected to come from +# DYNAMIX_APP_ID and DYNAMIX_APP_SECRET environmental variables - set them +# in the shell before calling terraform. +# Alternatively you may define plugin parameters app_id and app_secret in +# the TF file, however, this may not be secure if you plan to share this TF +# file with others. + +provider "dynamix" { + authenticator = "decs3o" + controller_url = "<>" # specify correct DYNAMIX controller URL, e.g. "https://ds1.digitalenergy.online" + oauth2_url = "<>" # specify corresponding DYNAMIX OAUTH2 URL, e.g. "https://sso.digitalenergy.online" + app_id = "<>" # application ID to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode, e.g. "ewqfrvea7s890avw804389qwguf234h0otfi3w4eiu" + app_secret = "<>" # application secret to access DYNAMIX cloud API in 'decs3o' and 'bvs' authentication mode, e.g. "ewqfrvea7s890avw804389qwguf234h0otfi3w4eiu" + # allow_unverified_ssl = true +} + +# 2. Load account to use - new VM will belong to this account +data "dynamix_account" "my_account" { + account_id = # Specify account ID +} + +# 3. Load OS image to use for VM deployment +data "dynamix_image" "os_image" { + image_id = # Specify OS image id, e.g. 1234. You can get accessible image id from data source "dynamix_image_list" +} + +# 4. 
Create new Resource Group in the selected account, new VM will be created in this RG +resource "dynamix_resgroup" "my_rg" { + name = "NewRgByTF" + account_id = data.dynamix_account.my_account.account_id + gid = # Grid (platform) ID + # if you want to set resource quota on this Resource Group, uncomment + # the following code fragment + # quota { + # cpu = 8 # CPU limit + # ram = 8912 # RAM limit in MB + # disk = 96 # disk volume limit in GB + #} +} + +# 5. Create extra disk, which will be attached to the new VM. +# This step is optional - if you do not want extra disks on your VM, skip it +# and comment out extra_disks parameter when creating VM below. +resource "dynamix_disk" "extra_disk" { + disk_name = "extra-disk-for-vm" + account_id = data.dynamix_account.my_account.account_id + gid = # Grid (platform) ID + size_max = 5 # disk size in GB + type = "D" # disk type, always use "D" for extra disks + sep_id = data.dynamix_image.os_image.sep_id # use the same SEP ID as the OS image + pool = "<>" # consult your DYNAMIX platform admin for configured storage pool names +} + +# 6. Create virtual machine (a compute of type KVM VM x86 in this example) +# Now that we have all necessary components at hand, we may create a virtual machine. +# This VM will be based on the previously obtained OS image, located in the specified +# Resource Group, directly connected to an external network, have a boot disk of +# specified size and one extra disk attached. 
+resource "dynamix_kvmvm" "my_new_vm" { + name = "tf-managed-vm" + driver = "KVM_X86" # Compute virtualization driver + rg_id = dynamix_resgroup.my_rg.id + cpu = 1 # CPU count + ram = 1024 # RAM size in MB, must be even number, ideally a power of 2 + boot_disk_size = 10 # Boot disk size in GB + image_id = data.dynamix_image.os_image.image_id + description = "Test KVM VM Compute managed by Terraform" + extra_disks = [ dynamix_disk.extra_disk.id ] + + network { + net_type = "EXTNET" + net_id = <> # specify external network ID to use, consult your DYNAMIX platform admin for correct IDs + # ip_address = "<>" # you may optionally request a specific IP address + } +} +``` diff --git a/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md b/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md new file mode 100644 index 0000000..ab02490 --- /dev/null +++ b/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX.md @@ -0,0 +1,32 @@ +## Основные понятия +Ниже перечислены основные понятия с указанием соответствующих им аргументов в Terraform провайдере DYNAMIX. +1. **Контроллер облачной платформы DYNAMIX** – управляющее приложение, которое обеспечивает авторизацию пользователей и оркестрацию облачных ресурсов. + - Адрес контроллера задается в обязательном аргументе `controller_url` на стадии инициализации Terraform провайдера DYNAMIX. Например, `controller_url= "https://ds1.digitalenergy.online"` +2. **Авторизационный провайдер** – приложение, работающее по протоколу Oauth2, предназначенное для выпуска и валидации токенов доступа к контроллеру облачной платформы в соответствующих режимах авторизации. Все действия в платформе должны выполняться авторизованными пользователями, и авторизационное приложение позволяет получить токен доступа, действующий некоторое ограниченное время, наличие которого подтверждает успешную авторизацию. + - Адрес авторизационного провайдера задается в аргументе`oauth2_url` на стадии инициализации Terraform провайдера DYNAMIX. 
Например `oauth2_url= "https://sso.digitalenergy.online"` +3. **Подписчик** (_account_) – сущность, которая используется для группирования облачных ресурсов по принадлежности к определенному клиенту для целей учета потребления и биллинга. + - Имя подписчика задается аргументом `account_name` при вызове _resource_ или _data_ функций провайдера. Альтернативой является задание численного идентификатора подписчика в аргументе `account_id`. +4. **Пользователь** (_user_) – пользователь облачной инфраструктуры, представленный учетной записью. Чтобы получить возможность управлять облачными ресурсами (например, создавать виртуальные серверы или диски) пользователь должен быть ассоциирован с одним или несколькими подписчиками и иметь соответствующие права, определяемые ролевой моделью, принятой в облачной платформе DYNAMIX. Для доступа к платформе пользователь должен авторизоваться одним из способов, описанных ниже в разделе «Способы авторизации». +5. **Ресурсная группа** (_resource group_) – способ группирования вычислительных ресурсов (например, виртуальных серверов по функциональному признаку или принадлежности к одному и тому же проекту). Ресурсную группу можно рассматривать как небольшой персональный дата-центр, в котором размещаются один или несколько серверов и виртуальных сетевых сегментов. Ресурсная группа идентифицируется по комбинации параметров `account` и `name`. Обратите внимание, что имя ресурсной группы уникально только в рамках одного и того же `account`. +6. **Вычислительный ресурс** (_compute_) - универсальная абстракция пользовательского сервера в платформе DYNAMIX. Благодаря использованию такой абстракции можно, например, создать одну виртуальную машину на базе KVM Intel x86, а другую - на базе KVM IBM Power, а потом управлять ими - изменять количество CPU/RAM, подключать/отключать диски и т.п. - одинаковым образом, не задумываясь об их архитектурных различиях. 
В то же время, так как типизация ресурсов в Terraform не поддерживает наследование, различные типы вычислительных ресурсов, доступных на платформе DYNAMIX и абстрагируемых через понятие унифицированный _compute_, в Terraform представлены разными типами (напр., свой тип для виртуальных серверов на базе KVM и свой тип для перспективных x86-совместимых bare metal серверов). +7. **Ресурс хранения** (_disk_) - универсальная абстракция дискового ресурса в платформе DYNAMIX. Платформа поддерживает различные типы систем хранения данных, но при этом управление созданными на разных системах хранения дисками осуществляется посредством унифицированного набора действий, например, "подключить диск к _compute_", "увеличить размер диска", "сделать мгновенный снимок диска", "настроить параметры быстродействия диска". +8. **Виртуальный сервер** – экземпляр _compute_, в основе технической реализации которого лежит виртуальная машина, работающая в облаке и доступна по сети. Виртуальный сервер характеризуется количеством выделенных ему CPU (аргумент`cpu`), объемом ОЗУ (`ram`), размером загрузочного диска (`boot_disk size`). При создании виртуального сервера на загрузочный диск устанавливается образ операционной системы, заданный в аргументе `image_id`. Помимо загрузочного диска к виртуальному серверу можно подключить несколько дисков для хранения прикладных данных, список которых задается аргументами `extra_disks`. Виртуальный сервер идентифицируется по комбинации аргументов `name` (имя сервера) и `rgid` (идентификатор ресурсной группы). Обратите внимание, что имя виртуального сервера `name` уникально только в рамках одной и той же ресурсной группы. +9. **Виртуальный сетевой сегмент** (_Virtual Network Segment_ или _ViNS_) - сетевой сегмент и обеспечивающая его функционирование виртуальная инфраструктура, которые пользователь может создавать для своих нужд на уровне ресурсной группы или подписчика (_account_). ViNS можно создать полностью изолированным от внешних сетей (см. 
ниже _External Network_) или с подключением во внешнюю сеть. Внутри ViNS работает DHCP-сервис, обеспечивающий управление IP адресами экземпляров _compute_, подключённых в этот ViNS. +10. **Внешняя сеть** (_External Network_) - сетевой сегмент, через который платформа DYNAMIX взаимодействует с внешними по отношению к ней сетевыми ресурсами. Например, в случае с публичным облаком на базе DYNAMIX в качестве внешней сети выступает сеть Интернет. В отличие от ViNS платформа не управляет внешней сетью, а лишь пользуется её ресурсами. В платформе может быть настроено несколько внешних сетей с различными диапазонами IP адресов, и существует механизм управления доступом пользователей к внешним сетям. +11. Сетевой доступ к экземпляру _compute_ (виртуальному серверу) реализуется через его подключение к ViNS и/или прямое подключение во внешнюю сеть (External Network). Один и тот же экземпляр _compute_ может одновременно иметь несколько подключений в разные ViNS и/или различные внешние сети. + +## Способы авторизации +Облачная платформа DYNAMIX поддерживает три базовых типа авторизации: +1. С использованием авторизационного провайдера, работающего по протоколу _Oauth2_. Данный способ является предпочтительным, так как обеспечивает бОльшую гибкость и безопасность. Для авторизации в этом режиме при инициализации Terraform провайдера DYNAMIX необходимо указать параметры `oauth2_url` и `controller_url`, а также предоставить одно из нижеперечисленного: + - Комбинация Application ID & Application secret, соответствующих пользователю, от имени которого будет осуществляться управление облачными ресурсами в текущей сессии. В процессе проверки предоставленных Application ID & Application secret модуль получает от авторизационного провайдера токен (JSON Web Token, JWT), который затем используется для доступа к указанному контроллеру DYNAMIX. 
Для авторизации по данному варианту, при инициализации Terraform провайдера DYNAMIX следует установить аргумент `authenticator=decs3o` и задать аргументы `app_id` и `app_secret` (или определить соответствующие переменные окружения `DYNAMIX_APP_ID` и `DYNAMIX_APP_SECRET`). + - JSON Web Token – заранее полученный от авторизационного провайдера токен доступа, ассоциированный с определенным пользователем, от имени которого будет осуществляться управление облачными ресурсами в текущей сессии. Для авторизации по данному варианту, при инициализации Terraform провайдера DYNAMIX следует установить аргумент `authenticator=jwt` и задать аргумент `jwt` (или определить переменную окружения `DYNAMIX_JWT`). +2. С использованием комбинации _имя пользователя : пароль_. Данный режим не использует внешних авторизационных провайдеров и подразумевает, что пользователь с такой комбинацией зарегистрирован непосредственно на указанном в параметре `controller_url` контроллере облачной платформы DYNAMIX. + - Чтобы провайдер авторизовался по данному варианту, при его инициализации следует установить аргумент `authenticator=legacy` и задать аргументы `user` и `password` (или определить соответствующие переменные окружения `DYNAMIX_USER` и `DYNAMIX_PASSWORD`). +3. С использованием авторизационного провайдера, работающего по протоколу _Oauth2_oidc_. Для авторизации в этом режиме при инициализации Terraform провайдера DYNAMIX необходимо указать параметры `oauth2_url` и `controller_url`, а также Application ID & Application secret, _имя пользователя и пароль_, соответствующих пользователю, от имени которого будет осуществляться управление облачными ресурсами в текущей сессии, и _имя домена_. В процессе проверки предоставленных Application ID & Application secret и пары _имя пользователя-пароль_ модуль получает от авторизационного провайдера токен (JSON Web Token, JWT), который затем используется для доступа к указанному контроллеру DYNAMIX. 
Для авторизации по данному варианту, при инициализации Terraform провайдера DYNAMIX следует установить аргумент `authenticator=bvs`, задать аргументы `app_id` и `app_secret` (или определить соответствующие переменные окружения `DYNAMIX_APP_ID` и `DYNAMIX_APP_SECRET`), `bvs_user` и `bvs_password` (или определить соответствующие переменные окружения `DYNAMIX_BVS_USER` и `DYNAMIX_BVS_PASSWORD`), а также указать `domain` (или определить соответствующие переменные окружения `DYNAMIX_DOMAIN`). + +После успешной авторизации пользователь (или приложение-клиент) получает доступ к ресурсам, находящимся под управлением соответствующего DYNAMIX контроллера. Доступ предоставляется в рамках подписчиков (_account_), с которыми ассоциирован данный пользователь (_user_), и в соответствии с присвоенными ему ролями. + +## Пользовательская и административная группы API +Пользовательская группа API - группа API платформы DYNAMIX, которая позволяет выполнять операции с платформой с правами обычного пользователя. Покрывает большую часть задач. +Административная группа API - группа API платформы DYNAMIX, которая позволяет выполнять операции с платформой с расширенными правами. Данные права подразумевают расширенный перечень операций над ресурсами, расширенный перечень ресурсов, расширенную информацию. Требуются права администратора для взаимодействия с этой группой API. +В Terraform провайдере DYNAMIX версии 1.0.1 реализованы функции только пользовательской группы API. 
diff --git a/wiki/1.0.1/04.-Начало-работы-с-terraform-провайдером-DYNAMIX.md b/wiki/1.0.1/04.-Начало-работы-с-terraform-провайдером-DYNAMIX.md new file mode 100644 index 0000000..edaa664 --- /dev/null +++ b/wiki/1.0.1/04.-Начало-работы-с-terraform-провайдером-DYNAMIX.md @@ -0,0 +1,6 @@ +Данный раздел описывает: +- Системные требования +- Установку провайдера +- Инициализацию провайдера +- Переключение режима работы между разными группами API +- Получение gid/grid_id площадки diff --git a/wiki/1.0.1/04.01-Установка-Terraform-провайдера-DYNAMIX.md b/wiki/1.0.1/04.01-Установка-Terraform-провайдера-DYNAMIX.md new file mode 100644 index 0000000..07be906 --- /dev/null +++ b/wiki/1.0.1/04.01-Установка-Terraform-провайдера-DYNAMIX.md @@ -0,0 +1,147 @@ +## Системные требования + +Для запуска провайдера вам потребуется машина, на которой установлен Terraform. + +Кроме того, в связи с тем, что начиная с версии 0.12 Terraform изменил алгоритм поиска и инициализации локальных провайдеров, настройка данного провайдера для работы с Terraform 0.12 или более новыми версиями потребует выполнения ряда дополнительных действий. Подробнее см. [8.3 Настройка локального провайдера для работы с новыми версиями Terraform](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/08.-Полезные-советы#user-content-8-3-настройка-локального-провайдера-для-работы-с-новыми-версиями-terraform.md). + +## Установка +В релизном архиве находятся скрипты-инсталляторы. +Чтобы выполнить установку, необходимо: +1. Перейти по адресу: https://repository.basistech.ru/BASIS/terraform-provider-dynamix/releases +2. Выбрать необходимую версию провайдера подходящую под операционную систему. +3. Скачать архив. +4. Распаковать архив. +5. Выполнить скрипт установщика, `install.sh` или `install.bat` для Windows.
+*Для запуска `install.sh` не забудьте изменить права доступа к файлу* +```bash +chmod u+x install.sh +``` +6. Дождаться сообщения об успешной установке. Установщик выведет актуальный блок конфигурации провайдера, скопируйте его +```bash +terraform { + required_providers { + dynamix = { + version = "1.0.1" + source = "basistech.ru/tf/dynamix" + } + } +} +``` +7. После этого, создайте файл `main.tf` в рабочей директории, которая может находится в любом удобном для пользователя месте. +В данном примере, рабочая директория с файлом main.tf находится по пути: +```bash +~/work/tfdir/main.tf +``` +8. Вставьте в `main.tf` блок конфигурации провайдера, который был выведен на экран установщиком: +```terraform +terraform { + required_providers { + dynamix = { + version = "1.0.1" + source = "basistech.ru/tf/dynamix" + } + } +} +``` +9. Добавьте в файл блок с инициализацией провайдера. +```terraform +provider "dynamix" { + authenticator = "decs3o" + controller_url = "https://mr4.digitalenergy.online" + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} +``` + +10. В консоли выполните команду +```bash +terraform init +``` + +11. В случае успешной установки, Terraform инициализирует провайдер и будет готов к дальнейшей работе. + +## Установка из релизов +Terraform провайдер DYNAMIX имеет скомпилированные релизные версии, которые расположены по адресу: [Релизы](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/releases). +Чтобы выполнить установку из релиза, необходимо: +1. Перейти по адресу: https://repository.basistech.ru/BASIS/terraform-provider-dynamix/releases +2. Выбрать необходимую версию провайдера подходящую под операционную систему. +3. Скачать архив. +4. Распаковать архив. +5. 
Полученный файл (в директории `bin/`) необходимо поместить: +Linux: +```bash +~/.terraform.d/plugins/${host_name}/${namespace}/${type}/${version}/${target} +``` +Windows: +```powershell +%APPDATA%\terraform.d\plugins\${host_name}\${namespace}\${type}\${version}\${target} +``` +Где: +- host_name - имя хоста, держателя провайдера, например, basis +- namespace - пространство имен хоста, например decort +- type - тип провайдера, может совпадать с пространством имен, например, decort +- version - версия провайдера, например 4.3.0 +- target - архитектура операционной системы, например windows_amd64 + +В примере ниже используется путь до провайдера на машине с ОС Linux: + +```bash +~/.terraform.d/plugins/basis/basistech.ru/tf/dynamix/1.0.1/linux_amd64/tf-provider + ^ ^ ^ ^ ^ ^ + host_name | | | | | | + | | | | | + namespace | | | | | + | | | | + type | | | | + | | | + version | | | + | | + target | | + | + исполняемый файл | +``` + +6. После этого, создайте файл `main.tf` в рабочей директории, которая может находится в любом удобном для пользователя месте. +В данном примере, рабочая директория с файлом main.tf находится по пути: +```bash +~/work/tfdir/main.tf +``` +7. Добавьте в `main.tf` следующий блок +```terraform +terraform { + required_providers { + dynamix = { + source = "basistech.ru/tf/dynamix" + version = "1.0.1" + } + } +} +``` +В поле `version` указывается версия провайдера. +
+**ВНИМАНИЕ: Версии в блоке и в пути к исполняемому файлу провайдера должны совпадать!** + +В поле `source` помещается путь до репозитория с версией вида: + +```bash +${host_name}/${namespace}/${type} +``` + +**ВНИМАНИЕ: Версии в блоке и в пути к исполняемому файлу провайдера должны совпадать!** + +8. Добавьте в файл блок с инициализацией провайдера. +```terraform +provider "dynamix" { + authenticator = "decs3o" + controller_url = "https://mr4.digitalenergy.online" + oauth2_url = "https://sso.digitalenergy.online" + allow_unverified_ssl = true +} +``` + +9. В консоли выполните команду +```bash +terraform init +``` + +10. В случае успешной установки, Terraform инициализирует провайдер и будет готов к дальнейшей работе. diff --git a/wiki/1.0.1/04.02-Инициализация-Terraform-провайдера-DYNAMIX.md b/wiki/1.0.1/04.02-Инициализация-Terraform-провайдера-DYNAMIX.md new file mode 100644 index 0000000..c96120b --- /dev/null +++ b/wiki/1.0.1/04.02-Инициализация-Terraform-провайдера-DYNAMIX.md @@ -0,0 +1,64 @@ +## Список аргументов для инициализации +Перед началом использования любой Terraform провайдер должен быть инициализирован. + +В процессе инициализации Terraform провайдера DYNAMIX проверяется корректность переданных аргументов и выполняется авторизация в указанном контроллере облачной инфраструктуры. Подробнее о способах авторизации в платформе DYNAMIX смотри соответствующий [раздел](https://repository.basistech.ru/BASIS/terraform-provider-dynamix/src/branch/main/wiki/1.0.1/03.-Обзор-облачной-платформы-DYNAMIX#user-content-способы-авторизации.md). + +При инициализации Terraform провайдера DYNAMIX используются следующие аргументы: + +| Аргумент | Переменная окружения | Описание | +| --- | --- | --- | +| allow_unverified_ssl | - | Если данный аргумент явно установлен в `true`, то провайдер **не будет** проверять SSL сертификаты при взаимодействии с авторизационным сервисом OAuth2 и контроллером облачной платформы.
Отключение проверок может быть полезным при работе в доверенной среде, использующей самоподписанные SSL сертификаты. Однако, так как отключение проверок несёт потенциальные риски безопасности, данную настройку следует использовать с осторожностью.
Разрешённые значения: `false` (значение по умолчанию) и `true`. | +| app_id | DYNAMIX_APP_ID | Идентификатор приложения (клиента) для авторизации в контроллере облачной платформы в режиме `decs3o` или `bvs`.
Аргументы `app_id` и `app_secret` являются обязательными для режимов авторизации `authenticator=decs3o` и `authenticator=bvs`.
Если `app_id` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_APP_ID`. | +| app_secret | DYNAMIX_APP_SECRET | Секретный код приложения (клиента) для авторизации в контроллере облачной платформы в режиме `decs3o` или `bvs`.
Аргументы `app_id` и `app_secret` являются обязательными для режимов авторизации `authenticator=decs3o` и `authenticator=bvs`.
Если `app_secret` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_APP_SECRET`. | +| authenticator | - | Режим авторизации при подключении к контроллеру облачной платформы.
Доступные режимы: `decs3o`, `legacy`, `jwt` или `bvs`.
Данный аргумент является обязательным. | +| bvs_user | DYNAMIX_BVS_USER | Имя пользователя для авторизации в контроллере облачной платформы в режиме `bvs`.
Аргументы `bvs_password` и `bvs_user` являются обязательными для режима авторизации `authenticator=bvs`.
Если `bvs_user` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_BVS_USER`. | +| bvs_password | DYNAMIX_BVS_PASSWORD | Пароль пользователя для авторизации в контроллере облачной платформы в режиме `bvs`.
Аргументы `bvs_user` и `bvs_password` являются обязательными для режима авторизации `authenticator=bvs`.
Если `bvs_password` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_BVS_PASSWORD`. | +| domain | DYNAMIX_DOMAIN | Имя домена в контроллере облачной платформы в режиме `bvs`.
Данный аргумент является обязательным.
Если `domain` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_DOMAIN`. | +| controller_url | DYNAMIX_CONTROLLER_URL | URL контроллера облачной платформы, через который будет осуществляться управление облачными ресурсами.
Данный аргумент является обязательным. | +| jwt | DYNAMIX_JWT | JSON Web Token (JWT), который используется для авторизации в контроллере облачной платформы в режиме `jwt`.
Данный аргумент является обязательным для режима авторизации `authenticator=jwt`.
Если `jwt` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_JWT` | +| oauth2_url | DYNAMIX_OAUTH2_URL | URL авторизационного сервиса OAuth2, который используется для управления доступом пользователей (или программных клиентов) к контроллеру облачной платформы.
Данный аргумент является обязательным для режимов авторизации `authenticator=decs3o`, `authenticator=bvs` и `authenticator=jwt`.
Если `oauth2_url` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_OAUTH2_URL` | +| password | DYNAMIX_PASSWORD | Пароль для авторизации в контроллере облачной платформы в режиме `legacy`.
Аргументы `password` и `user` являются обязательными для режима авторизации `authenticator=legacy`.
Если `password` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_PASSWORD`. | +| user | DYNAMIX_USER | Имя пользователя для авторизации в контроллере облачной платформы в режиме `legacy`.
Аргументы `user` и `password` являются обязательными для режима авторизации `authenticator=legacy`.
Если `user` не задан в tf-файле, то провайдер будет использовать значение из переменной окружения `DYNAMIX_USER`. | + +## Пример инициализации в режиме авторизации `decs3o` +Пример инициализации Terraform провайдера DYNAMIX: +```terraform +provider "dynamix" { + authenticator = "decs3o" + controller_url = "https://ctrl.decort.online" + oauth2_url = "https://oauth2.decort.online:7777" +} +``` + +В данном примере используется режим авторизации `decs3o`. + +Как отмечено выше, в данном режиме требуется указать аргументы `app_id` и `app_secret`, идентифицирующие пользователя (или приложение-клиент), от лица которого будут выполняться дальнейшие действия. Однако, так как данная информация является конфиденциальной (по сути, она эквивалентна паре _имя пользователя : пароль_), то в общем случае заносить такого рода данные в tf-файл не следует. Рекомендуется определять в среде запуска Terraform переменные окружения `DYNAMIX_APP_ID` и `DYNAMIX_APP_SECRET`, из которых провайдер извлечёт нужные данные. Приведенный пример подразумевает, что нужная информация будет получена из этих переменных окружения. + +Пользователь, от лица которого Terrafrom будет выполнять действия в облачной платформе, должен заранее создать пару _Application ID_ и _Application Secret_ в авторизационном приложении OAuth2. Именно эти значения, а также URL авторизационного приложения Oauth2, должны присваиваться аргументам `app_id`, `app_secret` и `oauth2_url` соответственно для успешной инициализации провайдера. + +Также обратите внимание на формат задания аргументов `controller_url` и `oauth2_url`. В общем случае они должны содержать идентификатор протокола (_https://_) и сетевой порт, если он отличается от порта по умолчанию (в примере для авторизационного сервиса OAuth2 указан порт _7777_). Эту информацию вы можете узнать у администратора вашей облачной инфраструктуры. 
+ +## Пример инициализации в режиме авторизации `bvs` +Пример инициализации Terraform провайдера DYNAMIX: +```terraform +provider "dynamix" { + authenticator = "bvs" + controller_url = "https://delta.qa.loc" + oauth2_url = "https://bvs-delta.qa.loc:8443" + app_id = "delta" + app_secret = "" + bvs_password = "" + bvs_user = "" + domain = "dynamix" +} +``` + +В данном примере используется режим авторизации `bvs`. + +Как отмечено выше, в данном режиме требуется указать аргументы `app_id` - идентификатор площадки - delta, alpha, poc, etc. Можно найти на странице администратора по следующему пути: вкладка безопасность - клиентские сервисы - наименование площадки. `app_secret` - пароль площадки. Можно найти на странице администратора по следующему пути: вкладка безопасность - клиентские сервисы - наименование площадки (символ i) - поле "Пароль". Однако, так как данная информация является конфиденциальной, то в общем случае заносить такого рода данные в tf-файл не следует. Рекомендуется определять в среде запуска Terraform переменные окружения `DYNAMIX_APP_ID` и `DYNAMIX_APP_SECRET`, из которых провайдер извлечёт нужные данные. Приведенный пример подразумевает, что нужная информация будет получена из этих переменных окружения. +Также обязательными аргументами являются: `bvs_user` - имя пользователя, `bvs_password` - пароль пользователя. Рекомендуется не заносить их в tf-файл, а определять в среде запуска Terraform переменные окружения `DYNAMIX_BVS_USER` и `DYNAMIX_BVS_PASSWORD`, из которых провайдер извлечёт нужные данные. Приведенный пример подразумевает, что нужная информация будет получена из этих переменных окружения. +Домен для подключения `domain` - указывается наименование площадки. Данный аргумент является обязательным. Рекомендуется не заносить его в tf-файл, а определять в среде запуска Terraform переменную окружения `DYNAMIX_DOMAIN`, из которой провайдер извлечёт нужные данные. 
Приведенный пример подразумевает, что нужная информация будет получена из этих переменных окружения. + +Пользователь, от лица которого Terraform будет выполнять действия в облачной платформе, должен заранее получить от администратора _Application ID_ и _Application Secret_, _bvs user_ и _bvs password_, а также _domain_. А также осуществить первичный вход на платформу посредством браузера. + +Также обратите внимание на формат задания аргументов `controller_url` и `oauth2_url`. В общем случае они должны содержать идентификатор протокола (_https://_) и сетевой порт, если он отличается от порта по умолчанию (в примере для авторизационного сервиса OAuth2 указан порт _8443_). Эту информацию вы можете узнать у администратора вашей облачной инфраструктуры. diff --git a/wiki/1.0.1/04.03-Переключение-между-группами-API.md b/wiki/1.0.1/04.03-Переключение-между-группами-API.md new file mode 100644 index 0000000..79557f2 --- /dev/null +++ b/wiki/1.0.1/04.03-Переключение-между-группами-API.md @@ -0,0 +1,38 @@ +Так как платформа DYNAMIX предоставляет для работы две группы API, то terraform провайдер позволяет свободно переключать режимы работы между этими группами. +По умолчанию стоит пользовательская группа API. Ее можно сменить на административную группу. +Если прав у пользователя будет достаточно, то запрос будет выполнен, если нет, то будет ошибка: +```bash +Permission denied +``` +Которая говорит о том, что прав недостаточно. Тогда для выполнения операции обратитесь к администратору платформы. +Установка режима взаимодействия с группами API осуществляется через установку переменной окружения _DECORT_ADMIN_MODE_. +Для более подробного описания возможностей каждой группы API см. соответствующий раздел. + +## Переключение режима работы в Windows +Используйте сл. команду: +```Powershell +$Env:DECORT_ADMIN_MODE=1 +``` +Для отключения: +```Powershell +$Env:DECORT_ADMIN_MODE=0 +``` +## Переключение режима работы в Linux +Используйте сл. 
команду: +```bash +DECORT_ADMIN_MODE=1 +``` +или +```bash +export DECORT_ADMIN_MODE=1 +``` +Для отключения: +```bash +DECORT_ADMIN_MODE=0 +``` +или +```bash +export DECORT_ADMIN_MODE=0 +``` +**ОБРАТИТЕ ВНИМАНИЕ** +Переменные окружения создаются для терминальной сессии. В сл. раз их придется задавать еще раз, если требуется режим, отличный от пользовательского. diff --git a/wiki/1.0.1/04.04-Получение-gid-или-grid_id.md b/wiki/1.0.1/04.04-Получение-gid-или-grid_id.md new file mode 100644 index 0000000..1161149 --- /dev/null +++ b/wiki/1.0.1/04.04-Получение-gid-или-grid_id.md @@ -0,0 +1,17 @@ +Платформа может располагаться на нескольких площадках(grid). +Такие площадки имеют свой id. +Для создания некоторых ресурсов требуется ввести grid_id или gid площадки. +Получение gid различается для пользовательского и административного API. + +## Получение gid для пользовательского API +Для получения gid с помощью пользовательского API, необходимо получить информацию из _data_source_ функции _dynamix_locations_list_, как указано ниже: +```terraform +data "dynamix_locations_list" "ll" { + +} + +output "test" { + value = data.dynamix_locations_list.ll +} +``` +В файл состояния будет сохранен результат, где можно посмотреть доступные для работы площадки. diff --git a/wiki/1.0.1/04.05-Сборка-terraform-провайдера-в-образ.md b/wiki/1.0.1/04.05-Сборка-terraform-провайдера-в-образ.md new file mode 100644 index 0000000..c0a96bf --- /dev/null +++ b/wiki/1.0.1/04.05-Сборка-terraform-провайдера-в-образ.md @@ -0,0 +1,43 @@ +Образ приложения - современный способ запуска приложений. Образ приложения представляет собой контейнер, в который входит ОС и необходимые для работы приложения пакеты. +Способов создать образ приложения довольно много, для этого существуют программы контейнеризации: +- Docker +- Podman +- и другие +Образ представляет собой "зафиксированную" версию приложения, что означает, что никакие изменения в приложения внесены быть не могут. 
Так же означает то, что приложение не может создавать побочные файлы при работе. +Контейнер - это запущенный экземпляр образа. То есть, один образ может порождать множество контейнеров, каждый из которых будет включать в себя отдельный экземпляр приложения. +Одно из преимуществ работы приложения в контейнере - кроссплатформенность. Это преимущество обуславливается тем, что образ приложения уже включает в себя все необходимое для успешной работы приложения, в том числе операционную систему. Поэтому, пользователю достаточно установить на вычислительной машине программу, которая обеспечивает работу с образами приложений. + +## Docker +Docker является одной из самых популярных программ для работы с образами. +Docker позволяет: +- Создавать образы +- Запускать контейнеры +- Управлять контейнерами +- Управлять образами +Скачать и установить Docker можно по ссылке https://docs.docker.com/get-docker/ + +## Сборка terraform провайдера +### Требования: +- Docker +- git +- Компилятор языка GO += make +### Установка необходимых программ +1. Компилятор языка GO можно скачать и установить по ссылке: https://go.dev/dl/ +2. Docker можно скачать и установить по ссылке выше. +3. Git можно скачать и установить по ссылке: https://git-scm.com/ +4. Программа make входит в пакет установленных программ для ОС Linux. Для Windows можно воспользоваться инструкцией со stack overflow: https://stackoverflow.com/questions/32127524/how-to-install-and-use-make-in-windows +### Порядок действий +1. Склонировать репозиторий с провайдером: +```bash +git clone https://repository.basistech.ru/BASIS/terraform-provider-dynamix +``` +2. Перейти в директорию со скачанным кодом: +```bash +cd terraform-provider-dynamix +``` +3. Выполнить команду: +```bash +make image +``` +В результате выполнения данной последовательности, будет создан docker образ, который содержит в себе приложение terraform, terraform провайдер. 
diff --git a/wiki/1.0.1/05.-Работа-с-terraform.md b/wiki/1.0.1/05.-Работа-с-terraform.md new file mode 100644 index 0000000..675f2ea --- /dev/null +++ b/wiki/1.0.1/05.-Работа-с-terraform.md @@ -0,0 +1,4 @@ +Раздел описывает некоторые практики работы с terraform, которые могут быть полезны пользователю. +Раздел включает в себя следующие статьи: +- Импортирование ресурсов +- Работа с таймаутами diff --git a/wiki/1.0.1/05.01-Импортирование-ресурсов.md b/wiki/1.0.1/05.01-Импортирование-ресурсов.md new file mode 100644 index 0000000..6983e22 --- /dev/null +++ b/wiki/1.0.1/05.01-Импортирование-ресурсов.md @@ -0,0 +1,75 @@ +Импортирование ресурсов в terraform позволяет привести в соответствие состояние terraform (.tfstate) к состоянию ресурса в платформе. +Необходимость такого приведения возникает в нескольких случаях: +- Ресурс был создан через портал платформы, работа продолжается через terraform провайдер, +- Ресурс был создан через terraform провайдер, однако был изменен через портал платформы, +- Ресурс был создан через terraform провайдер, однако был изменен другим пользователем через terraform провайдер, +- И так далее + +Такие расхождения в состоянии ресурсов нередки, путей их решения несколько: +- Использовать импортирование ресурсов, +- Использовать общие файлы состояний ресурсов, к которым будут иметь доступ все участники, занятые в работе с платформой. +В текущем разделе рассматривается первый вариант. + +## Импортирование ресурсов +Импортирование ресурсов позволяет совершить запрос к платформе, чтобы сформировать файл состояния. +Чтобы совершить импортирование ресурсов необходимо ввести сл. команду: +```bash +terraform import . +``` +## Пример +Предположим, что у нас ресурс, описывающий диск: +```terraform +resource "dynamix_disk" "disk" { + account_id = 121212 + gid = 3333 + disk_name = "mySuperDisk" + size_max = 100500 +} +``` +Если запустить команду: +```bash +terraform apply +``` +То у нас будет создан новый диск. 
+Но, такой диск уже есть на площадке и мы хотели бы сформировать .tfstate для этого ресурса. +Поэтому, для начала, необходимо получить список дисков: +```terraform +data "dynamix_disk_list" "dl"{ + +} +output "test" { + value = data.dynamix_disk_list.dl +} +``` +В полученных данных необходимо найти требуемый диск, получить его id - параметр disk_id. Пусть это будет - 777777 +Теперь можно выполнить импортирование: +```bash +terraform import dynamix_disk.disk 777777 +``` +Команда должна успешно завершиться, появиться файл состояний, который позволит манипулировать ресурсом. + +## Ошибки при импортировании +При импортировании ресурса может возникнуть сл. ошибка: +```bash +Error: : required field is not set +``` +Где - наименование поля. +Ошибка возникает в том случае, если в описании ресурса отсутствует обязательное поле. +Например: +```terraform +resource "dynamix_disk" "disk" { + account_id = 121212 + gid = 3333 + size_max = 100500 +} +``` +В приведенном выше описании отсутствует поле disk_name, поэтому, при попытке импортирования возникнет ошибка. +Для ее устранения, необходимо выполнить запрос на получение списка дисков, найти недостающее поле, после чего добавить его в описание ресурса. +После этого повторить попытку импортирования. + +## Общий алгоритм устранения ошибок +1. Выполнить запрос импортирования +2. В случае ошибки - внести недостающие поля. +3. Повторить п.1. + + diff --git a/wiki/1.0.1/05.02-Работа-с-таймаутами.md b/wiki/1.0.1/05.02-Работа-с-таймаутами.md new file mode 100644 index 0000000..ecffe69 --- /dev/null +++ b/wiki/1.0.1/05.02-Работа-с-таймаутами.md @@ -0,0 +1,100 @@ +Terraform провайдер DYNAMIX поддерживает тонкую настройку таймаутов выполнения запросов к платформе. Таймауты необходимы для определения максимального времени выполнения запроса. При превышении этого времени соединение рвется и запрос считается невыполненным. +Таймауты применяются при работе с _resource_ функциями провайдера. 
_Data source_ функции по умолчанию имеют таймаут в 20 минут, который не может быть изменён. + +## Стандартные таймауты terraform +| Операция | Время | Описание | +| --- | --- | --- | +| create | 20 минут | Создание ресурса | +| read | 20 минут | Чтение ресурса | +| update | 20 минут | Обновление ресурса | +| delete | 20 минут | Удаление ресурса | +| default | 20 минут | Значение по умолчанию. Устанавливает значение для всех операций | + +## Стандартные таймауты провайдера DYNAMIX +В провайдере DYNAMIX таймауты переопределены для того, чтобы уменьшить нагрузку на платформу. +| Операция | Время | Описание | +| --- | --- | --- | +| create | 10 минут | Создание ресурса | +| read | 5 минут | Чтение ресурса | +| update | 5 минут | Обновление ресурса | +| delete | 5 минут | Удаление ресурса | +| default | 5 минут | Значение по умолчанию. Устанавливает значение для всех операций | + +## Установка таймаутов +Все таймауты можно установить самостоятельно для каждого ресурса. +Для этого используется блок _timeouts_, который имеется в каждом ресурсе провайдера. +Пример: +```terraform +resource "dynamix_res" "res_name" { + timeouts { + create = "10m" + update = "1m" + delete = "2m" + read = "7m" + #default = "15m" + } +} +``` +Где: +- create - операция создания ресурса +- read - операция чтения ресурса +- update - операция обновления ресурса +- delete - операция удаления ресурса +- default - установит заданное время для всех операций +## Формат установления времени +Как видно из примера выше, провайдер принимает на вход строку вида: +``` +"<time-num><time-val>" +``` +Где: +- time-num - число +- time-val - сокращенная запись значения временного отрезка. 
+ +Таблица с временными отрезками: + +| Отрезок | Значение | +| --- | --- | +| ns | наносекунда | +| ms | миллисекунда | +| s | секунда | +| m | минута | +| h | час | + +Примеры: +``` +"10m" +"1s" +"1h10m" +``` +И так далее + +## Работа с таймером через .tf-файл +В .tf-файле, в блоке ресурса можно задавать таймауты для операций над ресурсом, однако, при работе с таймаутом, следует помнить о правиле: +__В случае изменения таймаутов в .tf-файле, операции с новыми таймаутами будут производиться только после apply/plan/destroy__ +То есть, если изменить таймауты и выполнить операцию, то она выполнится со старыми таймаутами, а сл. операция уже будет выполнена с новыми таймаутами. +Это объясняется тем, что значения таймаутов считываются из файла состояний .tfstate при выполнении операции, и новые значения таймаутов попадут туда только при успешно выполненной операции. + +## Ошибки при работе с таймаутом +### context deadline exceeded +Если время таймаута слишком короткое, то можно получить сл. ошибку: +``` +context deadline exceeded +``` +Которая говорит, что было выполнено прерывание работы программы из-за истечения времени на операцию. +Для исправления можно увеличить размер окна таймаута и выполнить успешный запрос (например, с помощью терминала), чтобы новые значения таймаутов были добавлены в .tfstate. В противном случае, файл состояния придется править вручную, либо удалить его и импортировать ресурс для формирования .tfstate. + +### 504 ошибка +Данная ошибка говорит о том, что сервер принудительно разорвал соединение из-за истечения времени на ответ. +В случае получения данной ошибки, обратитесь в службу технической поддержки. + +## Работа с таймаутами через терминал +Сл. команда выполнит операцию terraform с заданным таймаутом: +```bash +timeout