From 6f40af6a5f83e489adacbdccb4ac91be2a6c08db Mon Sep 17 00:00:00 2001 From: asteam Date: Thu, 25 Jul 2024 14:33:38 +0300 Subject: [PATCH] 1.0.0 --- .gitignore | 12 + CHANGELOG.md | 136 ++ LICENSE | 201 +++ Makefile | 86 ++ README.md | 143 ++ cmd/dynamix/main.go | 24 + docs/data-sources/account.md | 106 ++ docs/data-sources/account_audits_list.md | 48 + docs/data-sources/account_computes_list.md | 75 ++ docs/data-sources/account_consumed_units.md | 42 + .../account_consumed_units_by_type.md | 37 + docs/data-sources/account_deleted_list.md | 66 + docs/data-sources/account_disks_list.md | 58 + docs/data-sources/account_flipgroups_list.md | 73 ++ docs/data-sources/account_list.md | 67 + docs/data-sources/account_reserved_units.md | 41 + .../account_resource_consumption_get.md | 102 ++ .../account_resource_consumption_list.md | 95 ++ docs/data-sources/account_rg_list.md | 148 +++ docs/data-sources/account_templates_list.md | 61 + docs/data-sources/account_vins_list.md | 69 + docs/data-sources/bservice.md | 104 ++ docs/data-sources/bservice_deleted_list.md | 66 + docs/data-sources/bservice_group.md | 84 ++ docs/data-sources/bservice_list.md | 72 + docs/data-sources/bservice_snapshot_list.md | 47 + docs/data-sources/disk.md | 111 ++ docs/data-sources/disk_list.md | 131 ++ docs/data-sources/disk_list_deleted.md | 128 ++ docs/data-sources/disk_list_types.md | 36 + docs/data-sources/disk_list_types_detailed.md | 55 + docs/data-sources/disk_list_unattached.md | 126 ++ docs/data-sources/disk_replication.md | 126 ++ docs/data-sources/disk_snapshot.md | 41 + docs/data-sources/disk_snapshot_list.md | 49 + docs/data-sources/extnet.md | 107 ++ docs/data-sources/extnet_computes_list.md | 66 + docs/data-sources/extnet_default.md | 32 + docs/data-sources/extnet_list.md | 54 + docs/data-sources/flipgroup.md | 60 + docs/data-sources/flipgroup_list.md | 73 ++ docs/data-sources/image.md | 83 ++ docs/data-sources/image_list.md | 74 ++ docs/data-sources/k8ci_list.md | 58 + 
docs/data-sources/k8s.md | 185 +++ docs/data-sources/k8s_computes.md | 61 + docs/data-sources/k8s_list.md | 117 ++ docs/data-sources/k8s_list_deleted.md | 115 ++ docs/data-sources/k8s_wg.md | 68 + docs/data-sources/k8s_wg_cloud_init.md | 37 + docs/data-sources/k8s_wg_list.md | 75 ++ docs/data-sources/kvmvm.md | 331 +++++ docs/data-sources/kvmvm_audits.md | 48 + docs/data-sources/kvmvm_get_audits.md | 45 + docs/data-sources/kvmvm_get_console_url.md | 36 + docs/data-sources/kvmvm_get_log.md | 37 + docs/data-sources/kvmvm_list.md | 211 +++ docs/data-sources/kvmvm_list_deleted.md | 209 +++ docs/data-sources/kvmvm_pci_device_list.md | 61 + docs/data-sources/kvmvm_pfw_list.md | 51 + docs/data-sources/kvmvm_snapshot_usage.md | 48 + docs/data-sources/kvmvm_user_list.md | 85 ++ docs/data-sources/kvmvm_vgpu_list.md | 69 + docs/data-sources/lb.md | 171 +++ docs/data-sources/lb_list.md | 188 +++ docs/data-sources/lb_list_deleted.md | 186 +++ docs/data-sources/resgroup.md | 92 ++ .../rg_affinity_group_computes.md | 51 + docs/data-sources/rg_affinity_groups_get.md | 37 + docs/data-sources/rg_affinity_groups_list.md | 56 + docs/data-sources/rg_audits.md | 48 + docs/data-sources/rg_list.md | 108 ++ docs/data-sources/rg_list_computes.md | 103 ++ docs/data-sources/rg_list_deleted.md | 106 ++ docs/data-sources/rg_list_lb.md | 183 +++ docs/data-sources/rg_list_pfw.md | 52 + docs/data-sources/rg_list_vins.md | 69 + .../rg_resource_consumption_get.md | 104 ++ .../rg_resource_consumption_list.md | 109 ++ docs/data-sources/rg_usage.md | 55 + docs/data-sources/stack.md | 41 + docs/data-sources/stack_list.md | 50 + docs/data-sources/vfpool.md | 70 + docs/data-sources/vfpool_list.md | 85 ++ docs/data-sources/vins.md | 430 ++++++ docs/data-sources/vins_audits.md | 48 + docs/data-sources/vins_ext_net_list.md | 50 + docs/data-sources/vins_ip_list.md | 51 + docs/data-sources/vins_list.md | 67 + docs/data-sources/vins_list_deleted.md | 65 + docs/data-sources/vins_nat_rule_list.md | 52 + 
docs/data-sources/vins_static_route.md | 41 + docs/data-sources/vins_static_route_list.md | 50 + docs/index.md | 37 + docs/resources/account.md | 128 ++ docs/resources/bservice.md | 116 ++ docs/resources/bservice_group.md | 97 ++ docs/resources/disk.md | 118 ++ docs/resources/disk_replication.md | 137 ++ docs/resources/disk_snapshot.md | 46 + docs/resources/flipgroup.md | 63 + docs/resources/image.md | 90 ++ docs/resources/image_virtual.md | 85 ++ docs/resources/k8s_cp.md | 167 +++ docs/resources/k8s_wg.md | 75 ++ docs/resources/kvmvm.md | 534 ++++++++ docs/resources/lb.md | 191 +++ docs/resources/lb_backend.md | 78 ++ docs/resources/lb_backend_server.md | 52 + docs/resources/lb_frontend.md | 53 + docs/resources/lb_frontend_bind.md | 43 + docs/resources/resgroup.md | 133 ++ docs/resources/vins.md | 487 +++++++ docs/resources/vins_static_route.md | 45 + go.mod | 81 ++ go.sum | 221 ++++ internal/client/client.go | 43 + internal/constants/constants.go | 22 + internal/constants/timeouts.go | 12 + internal/flattens/flatten_simple_type.go | 19 + internal/flattens/meta.go | 38 + internal/provider/data_sources_map.go | 131 ++ internal/provider/provider.go | 228 ++++ internal/provider/provider_validation.go | 178 +++ internal/provider/resources_map.go | 40 + .../cloudapi/account/data_source_account.go | 91 ++ .../data_source_account_audits_list.go | 91 ++ .../data_source_account_computes_list.go | 88 ++ ...ta_source_account_consumed_unit_by_type.go | 88 ++ .../data_source_account_consumed_units.go | 91 ++ .../account/data_source_account_disks_list.go | 88 ++ .../data_source_account_flipgroups_list.go | 88 ++ ...source_account_get_resource_consumption.go | 91 ++ ...e_account_get_resource_consumption_list.go | 89 ++ .../account/data_source_account_list.go | 88 ++ .../data_source_account_list_deleted.go | 88 ++ .../data_source_account_reserved_units.go | 91 ++ .../account/data_source_account_rg_list.go | 88 ++ .../data_source_account_templates_list.go | 88 ++ 
.../account/data_source_account_vins_list.go | 91 ++ .../flattens/flatten_account_list_deleted.go | 84 ++ .../flattens/flatten_data_source_account.go | 162 +++ ...flatten_data_source_account_audits_list.go | 59 + ...atten_data_source_account_computes_list.go | 83 ++ ...tten_data_source_account_consumed_units.go | 49 + ...a_source_account_consumed_units_by_type.go | 47 + .../flatten_data_source_account_disks_list.go | 66 + ...ten_data_source_account_flipgroups_list.go | 80 ++ ...source_account_get_resource_consumption.go | 91 ++ ...e_account_get_resource_consumption_list.go | 97 ++ .../flatten_data_source_account_list.go | 83 ++ ...tten_data_source_account_reserved_units.go | 50 + .../flatten_data_source_account_rg_list.go | 124 ++ ...tten_data_source_account_templates_list.go | 68 + .../flatten_data_source_account_vins_list.go | 76 ++ .../flattens/flatten_resource_account.go | 139 ++ .../models/model_data_source_account.go | 101 ++ .../model_data_source_account_audits_list.go | 24 + ...model_data_source_account_computes_list.go | 53 + ...odel_data_source_account_consumed_units.go | 22 + ...a_source_account_consumed_units_by_type.go | 17 + .../model_data_source_account_disks_list.go | 36 + ...del_data_source_account_flipgroups_list.go | 49 + ...source_account_get_resource_consumption.go | 45 + ...e_account_get_resource_consumption_list.go | 40 + .../models/model_data_source_account_list.go | 43 + .../model_data_source_account_list_deleted.go | 33 + ...odel_data_source_account_reserved_units.go | 21 + .../model_data_source_account_rg_list.go | 81 ++ ...odel_data_source_account_templates_list.go | 39 + .../model_data_source_account_vins_list.go | 45 + .../account/models/model_resource_account.go | 74 ++ .../cloudapi/account/resource_account.go | 253 ++++ .../schemas/schema_data_source_account.go | 162 +++ .../schema_data_source_account_audits.go | 42 + ...chema_data_source_account_computes_list.go | 136 ++ ...hema_data_source_account_consumed_units.go | 41 + 
...a_source_account_consumed_units_by_type.go | 27 + .../schema_data_source_account_disks_list.go | 81 ++ ...ema_data_source_account_flipgroups_list.go | 127 ++ ...source_account_get_resource_consumption.go | 131 ++ ...e_account_get_resource_consumption_list.go | 113 ++ .../schema_data_source_account_list.go | 102 ++ ...schema_data_source_account_list_deleted.go | 98 ++ ...hema_data_source_account_reserved_units.go | 37 + .../schema_data_source_account_rg_list.go | 232 ++++ ...hema_data_source_account_templates_list.go | 90 ++ .../schema_data_source_account_vins_list.go | 102 ++ .../schemas/schema_resource_account.go | 216 +++ .../utilities/utility_data_source_account.go | 24 + ...utility_data_source_account_audits_list.go | 21 + ...ility_data_source_account_computes_list.go | 63 + ...lity_data_source_account_consumed_units.go | 24 + ...a_source_account_consumed_units_by_type.go | 32 + .../utility_data_source_account_disks_list.go | 51 + ...ity_data_source_account_flipgroups_list.go | 52 + ...source_account_get_resource_consumption.go | 24 + ...e_account_get_resource_consumption_list.go | 24 + .../utility_data_source_account_list.go | 49 + ...tility_data_source_account_list_deleted.go | 46 + ...lity_data_source_account_reserved_units.go | 29 + .../utility_data_source_account_rg_list.go | 52 + ...lity_data_source_account_templates_list.go | 51 + .../utility_data_source_account_vins_list.go | 52 + .../utilities/utility_resource_account.go | 392 ++++++ .../cloudapi/bservice/data_source_bservice.go | 91 ++ .../data_source_bservice__deleted_list.go | 89 ++ .../bservice/data_source_bservice_group.go | 91 ++ .../bservice/data_source_bservice_list.go | 89 ++ .../data_source_bservice_snapshot_list.go | 91 ++ .../flattens/flatten_data_source_bservice.go | 116 ++ ...atten_data_source_bservice_deleted_list.go | 75 ++ .../flatten_data_source_bservice_group.go | 118 ++ .../flatten_data_source_bservice_list.go | 75 ++ ...tten_data_source_bservice_snapshot_list.go | 56 + 
.../flattens/flatten_resource_bservice.go | 144 ++ .../flatten_resource_bservice_group.go | 151 +++ .../models/model_data_source_bservice.go | 69 + ...model_data_source_bservice_deleted_list.go | 45 + .../model_data_source_bservice_group.go | 57 + .../models/model_data_source_bservice_list.go | 48 + ...odel_data_source_bservice_snapshot_list.go | 23 + .../models/model_resource_bservice.go | 112 ++ .../models/model_resource_bservice_group.go | 82 ++ .../cloudapi/bservice/resource_bservice.go | 316 +++++ .../bservice/resource_bservice_group.go | 347 +++++ .../schemas/schema_data_source_bservice.go | 172 +++ ...chema_data_source_bservice_deleted_list.go | 103 ++ .../schema_data_source_bservice_group.go | 137 ++ .../schema_data_source_bservice_list.go | 121 ++ ...hema_data_source_bservice_snapshot_list.go | 35 + .../schemas/schema_resource_bservice.go | 199 +++ .../schemas/schema_resource_bservice_group.go | 196 +++ .../utilities/utility_data_source_bservice.go | 22 + ...ility_data_source_bservice_deleted_list.go | 41 + .../utility_data_source_bservice_group.go | 25 + .../utility_data_source_bservice_list.go | 59 + ...lity_data_source_bservice_snapshot_list.go | 24 + .../utilities/utility_resource_bservice.go | 386 ++++++ .../utility_resource_bservice_group.go | 403 ++++++ .../cloudapi/disks/data_source_disk.go | 91 ++ .../cloudapi/disks/data_source_disk_list.go | 88 ++ .../disks/data_source_disk_list_deleted.go | 88 ++ .../disks/data_source_disk_list_types.go | 89 ++ .../data_source_disk_list_types_detailed.go | 88 ++ .../disks/data_source_disk_list_unattached.go | 88 ++ .../disks/data_source_disk_replication.go | 91 ++ .../disks/data_source_disk_snapshot.go | 96 ++ .../disks/data_source_disk_snapshot_list.go | 91 ++ .../flattens/flatten_data_source_disk.go | 108 ++ .../flattens/flatten_data_source_disk_list.go | 128 ++ .../flatten_data_source_disk_list_deleted.go | 125 ++ .../flatten_data_source_disk_list_types.go | 50 + ...en_data_source_disk_list_types_detailed.go 
| 82 ++ ...latten_data_source_disk_list_unattached.go | 132 ++ .../flatten_data_source_disk_replication.go | 130 ++ .../flatten_data_source_disk_snapshot.go | 50 + .../flatten_data_source_disk_snapshot_list.go | 46 + .../disks/flattens/flatten_resource_disk.go | 189 +++ .../flatten_resource_disk_replication.go | 146 +++ .../flatten_resource_disk_snapshot.go | 65 + .../service/cloudapi/disks/input_checks.go | 99 ++ .../disks/models/model_data_source_disk.go | 49 + .../models/model_data_source_disk_list.go | 69 + .../model_data_source_disk_list_deleted.go | 26 + .../model_data_source_disk_list_types.go | 19 + ...el_data_source_disk_list_types_detailed.go | 31 + .../model_data_source_disk_list_unattached.go | 73 ++ .../model_data_source_disk_replication.go | 86 ++ .../models/model_data_source_disk_snapshot.go | 21 + .../model_data_source_disk_snapshot_list.go | 16 + .../disks/models/model_resource_disk.go | 116 ++ .../models/model_resource_disk_replication.go | 110 ++ .../models/model_resource_disk_snapshot.go | 25 + .../service/cloudapi/disks/resource_disk.go | 354 +++++ .../disks/resource_disk_replication.go | 326 +++++ .../cloudapi/disks/resource_disk_snapshot.go | 295 +++++ .../disks/schemas/schema_data_source_disk.go | 197 +++ .../schemas/schema_data_source_disk_list.go | 265 ++++ .../schema_data_source_disk_list_deleted.go | 253 ++++ .../schema_data_source_disk_list_types.go | 36 + ...ma_data_source_disk_list_types_detailed.go | 62 + ...schema_data_source_disk_list_unattached.go | 265 ++++ .../schema_data_source_disk_replication.go | 225 ++++ .../schema_data_source_disk_snapshot.go | 39 + .../schema_data_source_disk_snapshot_list.go | 45 + .../disks/schemas/schema_resource_disk.go | 269 ++++ .../schema_resource_disk_replication.go | 253 ++++ .../schemas/schema_resource_disk_snapshot.go | 58 + .../utilities/utility_data_source_disk.go | 25 + .../utility_data_source_disk_list.go | 72 + .../utility_data_source_disk_list_deleted.go | 63 + 
.../utility_data_source_disk_list_types.go | 42 + ...ty_data_source_disk_list_types_detailed.go | 42 + ...tility_data_source_disk_list_unattached.go | 66 + .../utility_data_source_disk_replication.go | 34 + .../utility_data_source_disk_snapshot.go | 43 + .../utility_data_source_disk_snapshot_list.go | 25 + .../disks/utilities/utility_resource_disk.go | 373 ++++++ .../utility_resource_disk_replication.go | 172 +++ .../utility_resource_disk_snapshot.go | 94 ++ .../cloudapi/extnet/data_source_extnet.go | 91 ++ .../data_source_extnet_computes_list.go | 90 ++ .../extnet/data_source_extnet_default.go | 89 ++ .../extnet/data_source_extnet_list.go | 88 ++ .../flattens/flatten_data_source_extnet.go | 120 ++ ...latten_data_source_extnet_computes_list.go | 76 ++ .../flatten_data_source_extnet_default.go | 43 + .../flatten_data_source_extnet_list.go | 65 + .../extnet/models/model_data_source_extnet.go | 82 ++ .../model_data_source_extnet_computes_list.go | 41 + .../model_data_source_extnet_default.go | 15 + .../models/model_data_source_extnet_list.go | 33 + .../schemas/schema_data_source_extnet.go | 169 +++ ...schema_data_source_extnet_computes_list.go | 89 ++ .../schema_data_source_extnet_default.go | 17 + .../schemas/schema_data_source_extnet_list.go | 78 ++ .../utilities/utility_data_source_extnet.go | 23 + ...tility_data_source_extnet_computes_list.go | 46 + .../utility_data_source_extnet_default.go | 22 + .../utility_data_source_extnet_list.go | 59 + .../flipgroup/data_source_flipgroup.go | 91 ++ .../flipgroup/data_source_flipgroup_list.go | 91 ++ .../flattens/flatten_data_source_flipgroup.go | 70 + .../flatten_data_source_flipgroup_list.go | 80 ++ .../flipgroup/flattens/flatten_resource.go | 78 ++ .../service/cloudapi/flipgroup/input_check.go | 45 + .../models/model_data_source_flipgroup.go | 41 + .../model_data_source_flipgroup_list.go | 53 + .../models/model_resource_flipgroup.go | 42 + .../cloudapi/flipgroup/resource_flipgroup.go | 300 +++++ 
.../schemas/schema_data_source_flipgroup.go | 93 ++ .../schema_data_source_flipgroup_list.go | 140 ++ .../schemas/schema_resource_flipgroup.go | 116 ++ .../utility_data_source_flipgroup.go | 27 + .../utility_data_source_flipgroup_list.go | 90 ++ .../utilities/utility_resource_flipgroup.go | 227 ++++ internal/service/cloudapi/ic/input_checks.go | 500 +++++++ .../cloudapi/image/data_source_image.go | 88 ++ .../cloudapi/image/data_source_image_list.go | 87 ++ .../flattens/flatten_data_source_image.go | 108 ++ .../flatten_data_source_image_list.go | 91 ++ .../image/flattens/flatten_resource_image.go | 127 ++ .../flatten_resource_image_virtual.go | 91 ++ .../image/models/model_data_source_image.go | 58 + .../models/model_data_source_image_list.go | 53 + .../image/models/model_resource_image.go | 69 + .../models/model_resource_image_virtual.go | 57 + .../service/cloudapi/image/resource_image.go | 280 ++++ .../cloudapi/image/resource_image_virtual.go | 275 ++++ .../image/schemas/schema_data_source_image.go | 151 +++ .../schemas/schema_data_source_image_list.go | 130 ++ .../image/schemas/schema_resource_image.go | 200 +++ .../schemas/schema_resource_image_virtual.go | 156 +++ .../utilities/resource_check_input_vales.go | 26 + .../utilities/utility_data_source_image.go | 32 + .../utility_data_source_image_list.go | 70 + .../image/utilities/utility_resource_image.go | 143 ++ .../utility_resource_image_virtual.go | 62 + .../cloudapi/k8s/data_source_k8ci_list.go | 88 ++ .../service/cloudapi/k8s/data_source_k8s.go | 88 ++ .../cloudapi/k8s/data_source_k8s_computes.go | 88 ++ .../cloudapi/k8s/data_source_k8s_list.go | 88 ++ .../k8s/data_source_k8s_list_deleted.go | 88 ++ .../cloudapi/k8s/data_source_k8s_wg.go | 88 ++ .../k8s/data_source_k8s_wg_cloud_init.go | 88 ++ .../cloudapi/k8s/data_source_k8s_wg_list.go | 88 ++ .../flattens/flatten_data_source_k8ci_list.go | 77 ++ .../k8s/flattens/flatten_data_source_k8s.go | 235 ++++ .../flatten_data_source_k8s_computes.go | 81 ++ 
.../flattens/flatten_data_source_k8s_list.go | 162 +++ .../flatten_data_source_k8s_list_deleted.go | 160 +++ .../flattens/flatten_data_source_k8s_wg.go | 128 ++ .../flatten_data_source_k8s_wg_cloud_init.go | 38 + .../flatten_data_source_k8s_wg_list.go | 135 ++ .../k8s/flattens/flatten_resource_k8s_cp.go | 231 ++++ .../k8s/flattens/flatten_resource_k8s_wg.go | 138 ++ .../k8s/models/model_data_source_k8ci_list.go | 36 + .../k8s/models/model_data_source_k8s.go | 93 ++ .../models/model_data_source_k8s_computes.go | 24 + .../k8s/models/model_data_source_k8s_list.go | 85 ++ .../model_data_source_k8s_list_deleted.go | 83 ++ .../k8s/models/model_data_source_k8s_wg.go | 40 + .../model_data_source_k8s_wg_cloud_init.go | 16 + .../models/model_data_source_k8s_wg_list.go | 45 + .../k8s/models/model_resource_k8s_cp.go | 124 ++ .../k8s/models/model_resource_k8s_wg.go | 60 + .../service/cloudapi/k8s/resource_k8s_cp.go | 319 +++++ .../service/cloudapi/k8s/resource_k8s_wg.go | 270 ++++ .../schemas/schema_data_source_k8ci_list.go | 82 ++ .../k8s/schemas/schema_data_source_k8s.go | 304 +++++ .../schema_data_source_k8s_computes.go | 62 + .../schemas/schema_data_source_k8s_list.go | 215 +++ .../schema_data_source_k8s_list_deleted.go | 209 +++ .../k8s/schemas/schema_data_source_k8s_wg.go | 90 ++ .../schema_data_source_k8s_wg_cloud_init.go | 24 + .../schemas/schema_data_source_k8s_wg_list.go | 97 ++ .../k8s/schemas/schema_resource_k8s_cp.go | 354 +++++ .../k8s/schemas/schema_resource_k8s_wg.go | 132 ++ .../utilities/resource_check_input_vales.go | 42 + .../utility_data_source_k8ci_list.go | 55 + .../utilities/utility_data_source_k8s_list.go | 61 + .../utility_data_source_k8s_list_deleted.go | 55 + .../k8s/utilities/utility_resource_k8s_cp.go | 488 +++++++ .../k8s/utilities/utility_resource_k8s_wg.go | 220 ++++ .../cloudapi/kvmvm/data_source_compute.go | 87 ++ .../kvmvm/data_source_compute_audits.go | 87 ++ .../kvmvm/data_source_compute_get_audits.go | 87 ++ 
.../data_source_compute_get_console_url.go | 87 ++ .../kvmvm/data_source_compute_get_log.go | 87 ++ .../kvmvm/data_source_compute_list.go | 87 ++ .../kvmvm/data_source_compute_list_deleted.go | 87 ++ .../data_source_compute_pci_device_list.go | 87 ++ .../kvmvm/data_source_compute_pfw_list.go | 87 ++ .../data_source_compute_snapshot_usage.go | 87 ++ .../kvmvm/data_source_compute_user_list.go | 87 ++ .../kvmvm/data_source_compute_vgpu_list.go | 87 ++ .../flattens/flatten_data_source_compute.go | 367 ++++++ .../flatten_data_source_compute_audits.go | 57 + .../flatten_data_source_compute_get_audits.go | 54 + ...ten_data_source_compute_get_console_url.go | 37 + .../flatten_data_source_compute_get_log.go | 38 + .../flatten_data_source_compute_list.go | 272 ++++ ...latten_data_source_compute_list_deleted.go | 270 ++++ ...ten_data_source_compute_pci_device_list.go | 71 + .../flatten_data_source_compute_pfw_list.go | 60 + ...tten_data_source_compute_snapshot_usage.go | 57 + .../flatten_data_source_compute_user_list.go | 72 + .../flatten_data_source_compute_vgpu_list.go | 78 ++ .../flattens/flatten_resource_compute.go | 494 +++++++ .../service/cloudapi/kvmvm/input_check.go | 83 ++ .../kvmvm/models/model_data_source_compute.go | 227 ++++ .../model_data_source_compute_audits.go | 23 + .../model_data_source_compute_get_audits.go | 20 + ...del_data_source_compute_get_console_url.go | 15 + .../model_data_source_compute_get_log.go | 16 + .../models/model_data_source_compute_list.go | 153 +++ .../model_data_source_compute_list_deleted.go | 151 +++ ...del_data_source_compute_pci_device_list.go | 36 + .../model_data_source_compute_pfw_list.go | 26 + ...odel_data_source_compute_snapshot_usage.go | 23 + .../model_data_source_compute_user_list.go | 31 + .../model_data_source_compute_vgpu_list.go | 44 + .../kvmvm/models/model_resource_compute.go | 336 +++++ .../cloudapi/kvmvm/resource_compute.go | 554 ++++++++ .../schemas/schema_data_source_compute.go | 670 ++++++++++ 
.../schema_data_source_compute_audits.go | 40 + .../schema_data_source_compute_get_audits.go | 31 + ...ema_data_source_compute_get_console_url.go | 21 + .../schema_data_source_compute_get_log.go | 24 + .../schema_data_source_compute_list.go | 443 +++++++ ...schema_data_source_compute_list_deleted.go | 435 +++++++ ...ema_data_source_compute_pci_device_list.go | 81 ++ .../schema_data_source_compute_pfw_list.go | 49 + ...hema_data_source_compute_snapshot_usage.go | 41 + .../schema_data_source_compute_user_list.go | 101 ++ .../schema_data_source_compute_vgpu_list.go | 105 ++ .../kvmvm/schemas/schema_resource_compute.go | 952 ++++++++++++++ .../utilities/utility_data_source_compute.go | 28 + .../utility_data_source_compute_audits.go | 28 + .../utility_data_source_compute_get_audits.go | 28 + ...ity_data_source_compute_get_console_url.go | 32 + .../utility_data_source_compute_get_log.go | 29 + .../utility_data_source_compute_list.go | 83 ++ ...tility_data_source_compute_list_deleted.go | 66 + ...ity_data_source_compute_pci_device_list.go | 50 + ...tility_data_source_compute_pci_pfw_list.go | 28 + ...lity_data_source_compute_snapshot_usage.go | 31 + .../utility_data_source_compute_user_list.go | 28 + .../utility_data_source_compute_vgpu_list.go | 50 + .../utility_resource_compute_create.go | 807 ++++++++++++ .../utility_resource_compute_difference.go | 232 ++++ .../utility_resource_compute_update.go | 1155 +++++++++++++++++ .../service/cloudapi/lb/data_source_lb.go | 91 ++ .../cloudapi/lb/data_source_lb_list.go | 88 ++ .../lb/data_source_lb_list_deleted.go | 89 ++ .../lb/flattens/flatten_data_source_lb.go | 74 ++ .../flattens/flatten_data_source_lb_list.go | 93 ++ .../flatten_data_source_lb_list_deleted.go | 91 ++ .../lb/flattens/flatten_resource_lb.go | 148 +++ .../flattens/flatten_resource_lb_backend.go | 95 ++ .../flatten_resource_lb_backend_server.go | 45 + .../flattens/flatten_resoursce_lb_frontend.go | 64 + .../flatten_resoursce_lb_frontend_bind.go | 37 + 
internal/service/cloudapi/lb/input_check.go | 118 ++ .../lb/models/model_data_source_lb.go | 74 ++ .../lb/models/model_data_source_lb_list.go | 64 + .../model_data_source_lb_list_deleted.go | 62 + .../cloudapi/lb/models/model_resource_lb.go | 96 ++ .../lb/models/model_resource_lb_backend.go | 55 + .../model_resource_lb_backend_server.go | 26 + .../lb/models/model_resource_lb_frontend.go | 24 + .../models/model_resource_lb_frontend_bind.go | 17 + internal/service/cloudapi/lb/resource_lb.go | 359 +++++ .../cloudapi/lb/resource_lb_backend.go | 267 ++++ .../cloudapi/lb/resource_lb_backend_server.go | 268 ++++ .../cloudapi/lb/resource_lb_frontend.go | 208 +++ .../cloudapi/lb/resource_lb_frontend_bind.go | 268 ++++ .../lb/schemas/schema_data_source_lb.go | 278 ++++ .../lb/schemas/schema_data_soursce_lb_list.go | 341 +++++ .../schema_data_soursce_lb_list_deleted.go | 333 +++++ .../cloudapi/lb/schemas/schema_resource_lb.go | 326 +++++ .../lb/schemas/schema_resource_lb_backend.go | 122 ++ .../schema_resource_lb_backend_server.go | 83 ++ .../lb/schemas/schema_resource_lb_frontend.go | 49 + .../schema_resource_lb_frontend_bind.go | 36 + .../lb/utilities/utility_data_source_lb.go | 27 + .../utilities/utility_data_source_lb_list.go | 63 + .../utility_data_source_lb_list_deleted.go | 57 + .../lb/utilities/utility_resource_lb.go | 355 +++++ .../utilities/utility_resource_lb_backend.go | 167 +++ .../utility_resource_lb_backend_server.go | 187 +++ .../utilities/utility_resource_lb_frontend.go | 75 ++ .../utility_resource_lb_frontend_bind.go | 115 ++ .../service/cloudapi/rg/data_source_rg.go | 91 ++ .../data_source_rg_affinity_group_computes.go | 89 ++ .../rg/data_source_rg_affinity_groups_get.go | 89 ++ .../rg/data_source_rg_affinity_groups_list.go | 89 ++ .../cloudapi/rg/data_source_rg_audits.go | 89 ++ ...data_source_rg_get_resource_consumption.go | 89 ++ .../cloudapi/rg/data_source_rg_list.go | 89 ++ .../rg/data_source_rg_list_computes.go | 89 ++ 
.../rg/data_source_rg_list_deleted.go | 89 ++ .../cloudapi/rg/data_source_rg_list_lb.go | 89 ++ .../cloudapi/rg/data_source_rg_list_pfw.go | 89 ++ .../cloudapi/rg/data_source_rg_list_vins.go | 89 ++ ...ata_source_rg_resource_consumption_list.go | 89 ++ .../cloudapi/rg/data_source_rg_usage.go | 89 ++ .../rg/flattens/flatten_data_source_rg.go | 101 ++ ..._data_source_rg_affinity_group_computes.go | 82 ++ ...tten_data_source_rg_affinity_groups_get.go | 51 + ...ten_data_source_rg_affinity_groups_list.go | 68 + .../flattens/flatten_data_source_rg_audits.go | 56 + ...data_source_rg_get_resource_consumption.go | 98 ++ .../flattens/flatten_data_source_rg_list.go | 120 ++ .../flatten_data_source_rg_list_computes.go | 120 ++ .../flatten_data_source_rg_list_deleted.go | 119 ++ .../flatten_data_source_rg_list_lb.go | 172 +++ .../flatten_data_source_rg_list_pfw.go | 62 + .../flatten_data_source_rg_list_vins.go | 79 ++ ...ata_source_rg_resource_consumption_list.go | 108 ++ .../flattens/flatten_data_source_rg_usage.go | 65 + .../rg/flattens/flatten_resource_rg.go | 153 +++ internal/service/cloudapi/rg/input_checks.go | 41 + .../rg/models/model_data_source_rg.go | 55 + ..._data_source_rg_affinity_group_computes.go | 29 + ...odel_data_source_rg_affinity_groups_get.go | 19 + ...del_data_source_rg_affinity_groups_list.go | 31 + .../rg/models/model_data_source_rg_audits.go | 24 + ...odel_data_source_rg_get_resource_limits.go | 36 + .../rg/models/model_data_source_rg_list.go | 70 + .../model_data_source_rg_list_computes.go | 75 ++ .../model_data_source_rg_list_deleted.go | 59 + .../rg/models/model_data_source_rg_list_lb.go | 109 ++ .../models/model_data_source_rg_list_pfw.go | 28 + .../models/model_data_source_rg_list_vins.go | 47 + ...del_data_source_rg_resource_limits_list.go | 23 + .../rg/models/model_data_source_rg_usage.go | 24 + .../cloudapi/rg/models/model_resource_rg.go | 129 ++ internal/service/cloudapi/rg/resource_rg.go | 410 ++++++ .../rg/schemas/schema_data_source_rg.go | 
164 +++ ..._data_source_rg_affinity_group_computes.go | 59 + ...hema_data_source_rg_affinity_groups_get.go | 29 + ...ema_data_source_rg_affinity_groups_list.go | 56 + .../schemas/schema_data_source_rg_audits.go | 41 + ...data_source_rg_get_resource_consumption.go | 136 ++ .../rg/schemas/schema_data_source_rg_list.go | 215 +++ .../schema_data_source_rg_list_computes.go | 189 +++ .../schema_data_source_rg_list_deleted.go | 207 +++ .../schemas/schema_data_source_rg_list_lb.go | 343 +++++ .../schemas/schema_data_source_rg_list_pfw.go | 54 + .../schema_data_source_rg_list_vins.go | 114 ++ ...ata_source_rg_resource_consumption_list.go | 143 ++ .../rg/schemas/schema_data_source_rg_usage.go | 66 + .../cloudapi/rg/schemas/schema_resource_rg.go | 275 ++++ ..._data_source_rg_affinity_group_computes.go | 29 + ...lity_data_source_rg_affinity_groups_get.go | 29 + ...ity_data_source_rg_affinity_groups_list.go | 33 + .../utility_data_source_rg_audits.go | 27 + ...data_source_rg_get_resource_consumption.go | 25 + .../utilities/utility_data_source_rg_list.go | 62 + .../utility_data_source_rg_list_computes.go | 61 + .../utility_data_source_rg_list_deleted.go | 56 + .../utility_data_source_rg_list_lb.go | 55 + .../utility_data_source_rg_list_pfw.go | 27 + .../utility_data_source_rg_list_vins.go | 49 + ...ata_source_rg_resource_consumption_list.go | 22 + .../utilities/utility_data_source_rg_usage.go | 29 + .../rg/utilities/utility_resource_rg.go | 731 +++++++++++ .../cloudapi/stack/data_source_stack.go | 91 ++ .../cloudapi/stack/data_source_stack_list.go | 89 ++ .../flatten_schema_data_source_stack.go | 51 + .../flattens/flatten_utility_stack_list.go | 61 + .../stack/models/model_data_source_stack.go | 23 + .../models/model_data_source_stack_list.go | 29 + .../stack/schemas/schema_data_source_stack.go | 36 + .../schemas/schema_data_source_stack_list.go | 53 + .../utilities/utility_data_source_stack.go | 27 + .../utilities/utility_utility_stack_list.go | 48 + 
.../cloudapi/vfpool/data_source_vfpool.go | 91 ++ .../vfpool/data_source_vfpool_list.go | 89 ++ .../flattens/flatten_data_source_vfpool.go | 92 ++ .../flatten_data_source_vfpool_list.go | 104 ++ .../vfpool/models/model_data_source_vfpool.go | 40 + .../models/model_data_source_vfpool_list.go | 55 + .../schemas/schema_data_source_vfpool.go | 79 ++ .../schemas/schema_data_source_vfpool_list.go | 119 ++ .../utilities/utility_data_source_vfpool.go | 25 + .../utility_data_source_vfpool_list.go | 57 + .../service/cloudapi/vins/data_source_vins.go | 91 ++ .../cloudapi/vins/data_source_vins_audits.go | 91 ++ .../vins/data_source_vins_ext_net_list.go | 91 ++ .../cloudapi/vins/data_source_vins_ip_list.go | 91 ++ .../cloudapi/vins/data_source_vins_list.go | 88 ++ .../vins/data_source_vins_list_deleted.go | 88 ++ .../vins/data_source_vins_nat_rule_list.go | 89 ++ .../vins/data_source_vins_static_route.go | 88 ++ .../data_source_vins_static_route_list.go | 88 ++ .../vins/flattens/flatten_data_source_vins.go | 619 +++++++++ .../flatten_data_source_vins_audits.go | 55 + .../flatten_data_source_vins_ext_net_list.go | 57 + .../flatten_data_source_vins_ip_list.go | 58 + .../flattens/flatten_data_source_vins_list.go | 75 ++ .../flatten_data_source_vins_list_deleted.go | 73 ++ .../flatten_data_source_vins_nat_rule_list.go | 59 + .../flatten_data_source_vins_static_route.go | 51 + ...tten_data_source_vins_static_route_list.go | 63 + .../vins/flattens/flatten_resource_vins.go | 162 +++ .../flatten_resource_vins_static_route.go | 68 + .../service/cloudapi/vins/input_checks.go | 95 ++ .../vins/models/model_data_source_vins.go | 462 +++++++ .../models/model_data_source_vins_audits.go | 24 + .../model_data_source_vins_ext_net_list.go | 26 + .../models/model_data_source_vins_ip_list.go | 27 + .../models/model_data_source_vins_list.go | 46 + .../model_data_source_vins_list_deleted.go | 44 + .../model_data_source_vins_nat_rule_list.go | 28 + .../model_data_source_vins_static_route.go | 21 + 
...odel_data_source_vins_static_route_list.go | 26 + .../vins/models/model_resource_vins.go | 127 ++ .../model_resource_vins_static_route.go | 24 + .../service/cloudapi/vins/resource_vins.go | 393 ++++++ .../vins/resource_vins_static_route.go | 312 +++++ .../vins/schemas/schema_data_source_vins.go | 720 ++++++++++ .../schemas/schema_data_source_vins_audits.go | 42 + .../schema_data_source_vins_ext_net_list.go | 48 + .../schema_data_source_vins_ip_list.go | 51 + .../schemas/schema_data_source_vins_list.go | 116 ++ .../schema_data_source_vins_list_deleted.go | 110 ++ .../schema_data_source_vins_nat_rule_list.go | 54 + .../schema_data_source_vins_static_route.go | 41 + ...hema_data_source_vins_static_route_list.go | 50 + .../vins/schemas/schema_resource_vins.go | 869 +++++++++++++ .../schema_resource_vins_static_route.go | 52 + .../utilities/utility_data_source_vins.go | 27 + .../utility_data_source_vins_audits.go | 27 + .../utility_data_source_vins_ext_net_list.go | 27 + .../utility_data_source_vins_ip_list.go | 27 + .../utility_data_source_vins_list.go | 56 + .../utility_data_source_vins_list_deleted.go | 56 + .../utility_data_source_vins_nat_rule_list.go | 25 + .../utility_data_source_vins_static_route.go | 39 + ...lity_data_source_vins_static_route_list.go | 25 + .../vins/utilities/utility_resource_vins.go | 748 +++++++++++ .../utility_resource_vins_static_route.go | 195 +++ .../account/data_source_cb_account.go | 91 ++ .../account/data_source_cb_account_list.go | 88 ++ .../data_source_cb_account_vins_list.go | 91 ++ .../flatten_data_source_cb_account.go | 106 ++ .../flatten_data_source_cb_account_list.go | 129 ++ ...latten_data_source_cb_account_vins_list.go | 76 ++ .../models/model_data_source_cb_account.go | 58 + .../model_data_source_cb_account_list.go | 70 + .../model_data_source_cb_account_vins_list.go | 45 + .../schemas/schema_data_source_cb_account.go | 139 ++ .../schema_data_source_cb_account_list.go | 177 +++ ...schema_data_source_cb_account_vins_list.go 
| 102 ++ .../utility_data_source_cb_account.go | 24 + .../utility_data_source_cb_account_list.go | 47 + ...tility_data_source_cb_account_vins_list.go | 52 + .../cloudbroker/stack/data_sourse_stack.go | 91 ++ .../stack/data_sourse_stack_list.go | 89 ++ .../flattens/flatten_data_sourse_stack.go | 103 ++ .../flatten_data_sourse_stack_list.go | 120 ++ .../cloudbroker/stack/flattens/flatten_eco.go | 18 + .../stack/models/model_data_sourse_stack.go | 81 ++ .../models/model_data_sourse_stack_list.go | 93 ++ .../stack/schemas/schema_data_sourse_stack.go | 163 +++ .../schemas/schema_data_sourse_stack_list.go | 200 +++ .../utilities/utility_data_sourse_stack.go | 24 + .../utility_data_sourse_stack_list.go | 47 + internal/status/status.go | 169 +++ internal/techstatus/techstatus.go | 100 ++ internal/validate/divisibleBy.go | 47 + samples/cloudapi/account/data_account/main.tf | 38 + .../account/data_account_audits/main.tf | 38 + .../data_account_computes_list/main.tf | 96 ++ .../data_account_consumed_units/main.tf | 38 + .../main.tf | 49 + .../account/data_account_deleted_list/main.tf | 67 + .../account/data_account_disks_list/main.tf | 76 ++ .../data_account_flipgroups_list/main.tf | 78 ++ .../main.tf | 38 + .../account/data_account_list/main.tf | 71 + .../data_account_reserved_units/main.tf | 35 + .../main.tf | 35 + .../account/data_account_rg_list/main.tf | 81 ++ .../data_account_templates_list/main.tf | 76 ++ .../account/data_account_vins_list/main.tf | 76 ++ .../cloudapi/account/resource_account/main.tf | 154 +++ .../cloudapi/bservice/data_bservice/main.tf | 38 + .../data_bservice_deleted_list/main.tf | 64 + .../bservice/data_bservice_group/main.tf | 44 + .../bservice/data_bservice_list/main.tf | 93 ++ .../data_bservice_snapshot_list/main.tf | 38 + .../bservice/resource_bservice/main.tf | 106 ++ .../bservice/resource_bservice_group/main.tf | 168 +++ samples/cloudapi/disk/data_disk/main.tf | 38 + samples/cloudapi/disk/data_disk_list/main.tf | 100 ++ 
.../disk/data_disk_list_deleted/main.tf | 85 ++ .../disk/data_disk_list_types/main.tf | 51 + .../data_disk_list_types_detailed/main.tf | 51 + .../disk/data_disk_list_unattached/main.tf | 92 ++ .../disk/data_disk_replication/main.tf | 43 + .../cloudapi/disk/data_disk_snapshot/main.tf | 43 + .../disk/data_disk_snapshot_list/main.tf | 38 + samples/cloudapi/disk/resource_disk/main.tf | 117 ++ .../disk/resource_disk_replication/main.tf | 84 ++ .../disk/resource_disk_snapshot/main.tf | 55 + samples/cloudapi/extnet/data_extnet/main.tf | 38 + .../extnet/data_extnet_computes_list/main.tf | 64 + .../extnet/data_extnet_default/main.tf | 35 + .../cloudapi/extnet/data_extnet_list/main.tf | 84 ++ .../cloudapi/flipgroup/data_flipgroup/main.tf | 38 + .../flipgroup/data_flipgroup_list/maint.tf | 107 ++ .../flipgroup/resource_flipgroup/main.tf | 78 ++ samples/cloudapi/image/data_image/main.tf | 44 + .../cloudapi/image/data_image_list/main.tf | 112 ++ samples/cloudapi/image/resource_image/main.tf | 110 ++ .../image/resource_image_virtual/main.tf | 55 + samples/cloudapi/k8s/data_k8ci_list/main.tf | 85 ++ samples/cloudapi/k8s/data_k8s/main.tf | 39 + .../cloudapi/k8s/data_k8s_computes/main.tf | 38 + samples/cloudapi/k8s/data_k8s_list/main.tf | 98 ++ .../k8s/data_k8s_list_deleted/main.tf | 88 ++ samples/cloudapi/k8s/data_k8s_wg/main.tf | 44 + .../k8s/data_k8s_wg_cloud_init/main.tf | 47 + samples/cloudapi/k8s/data_k8s_wg_list/main.tf | 39 + .../k8s/resource_k8s_cp/initconfig.tftpl | 9 + samples/cloudapi/k8s/resource_k8s_cp/main.tf | 176 +++ .../k8s/resource_k8s_wg/initconfig.tftpl | 9 + samples/cloudapi/k8s/resource_k8s_wg/main.tf | 76 ++ samples/cloudapi/kvmvm/data_kvmvm/main.tf | 39 + .../cloudapi/kvmvm/data_kvmvm_audits/main.tf | 38 + .../kvmvm/data_kvmvm_get_audits/main.tf | 38 + .../kvmvm/data_kvmvm_get_console_url/main.tf | 38 + .../cloudapi/kvmvm/data_kvmvm_get_log/main.tf | 43 + .../cloudapi/kvmvm/data_kvmvm_list/main.tf | 108 ++ .../kvmvm/data_kvmvm_list_deleted/main.tf | 96 ++ 
.../kvmvm/data_kvmvm_pci_device_list/main.tf | 76 ++ .../kvmvm/data_kvmvm_pfw_list/main.tf | 38 + .../kvmvm/data_kvmvm_snapshot_usage/main.tf | 43 + .../kvmvm/data_kvmvm_user_list/main.tf | 38 + .../kvmvm/data_kvmvm_vgpu_list/main.tf | 76 ++ .../kvmvm/resource_kvmvm/initconfig.tftpl | 14 + samples/cloudapi/kvmvm/resource_kvmvm/main.tf | 396 ++++++ samples/cloudapi/lb/data_lb/main.tf | 38 + samples/cloudapi/lb/data_lb_list/main.tf | 99 ++ .../cloudapi/lb/data_lb_list_deleted/main.tf | 86 ++ samples/cloudapi/lb/resource_lb/main.tf | 121 ++ .../cloudapi/lb/resource_lb_backend/main.tf | 109 ++ .../lb/resource_lb_backend_server/main.tf | 120 ++ .../cloudapi/lb/resource_lb_frontend/main.tf | 52 + .../lb/resource_lb_frontend_bind/main.tf | 63 + samples/cloudapi/rg/data_rg/main.tf | 43 + .../data_rg_affinity_group_computes/main.tf | 43 + .../rg/data_rg_affinity_groups_get/main.tf | 43 + .../rg/data_rg_affinity_groups_list/main.tf | 50 + samples/cloudapi/rg/data_rg_audits/main.tf | 38 + samples/cloudapi/rg/data_rg_list/main.tf | 94 ++ .../cloudapi/rg/data_rg_list_computes/main.tf | 96 ++ .../cloudapi/rg/data_rg_list_deleted/main.tf | 84 ++ samples/cloudapi/rg/data_rg_list_lb/main.tf | 86 ++ samples/cloudapi/rg/data_rg_list_pfw/main.tf | 38 + samples/cloudapi/rg/data_rg_list_vins/main.tf | 76 ++ .../data_rg_resource_consumption_get/main.tf | 38 + .../data_rg_resource_consumption_list/main.tf | 35 + samples/cloudapi/rg/data_rg_usage/main.tf | 43 + samples/cloudapi/rg/resource_rg/main.tf | 189 +++ samples/cloudapi/stack/data_stack/main.tf | 38 + .../cloudapi/stack/data_stack_list/main.tf | 71 + samples/cloudapi/vfpool/data_vfpool/main.tf | 29 + .../cloudapi/vfpool/data_vfpool_list/main.tf | 77 ++ samples/cloudapi/vins/data_vins/main.tf | 38 + .../cloudapi/vins/data_vins_audits/main.tf | 38 + .../vins/data_vins_ext_net_list/main.tf | 38 + .../cloudapi/vins/data_vins_ip_list/main.tf | 38 + samples/cloudapi/vins/data_vins_list/main.tf | 82 ++ 
.../vins/data_vins_list_deleted/main.tf | 79 ++ .../vins/data_vins_nat_rule_list/main.tf | 38 + .../vins/data_vins_static_route/main.tf | 43 + .../vins/data_vins_static_route_list/main.tf | 37 + samples/cloudapi/vins/resource_vins/main.tf | 179 +++ .../vins/resource_vins_static_route/main.tf | 63 + scripts/install.bat | 74 ++ scripts/install.sh | 71 + wiki/1.0.0/01.-Введение.md | 7 + wiki/1.0.0/02.-Пример-работы.md | 92 ++ ...лачной-платформы-DYNAMIX.md | 32 + ...erraform-провайдером-DYNAMIX.md | 6 + ...-Terraform-провайдера-DYNAMIX.md | 147 +++ ...-Terraform-провайдера-DYNAMIX.md | 64 + ...ние-между-группами-API.md | 38 + ...4-Получение-gid-или-grid_id.md | 17 + ...form-провайдера-в-образ.md | 43 + wiki/1.0.0/05.-Работа-с-terraform.md | 4 + ...ортирование-ресурсов.md | 75 ++ ...02-Работа-с-таймаутами.md | 100 ++ ...становление-ресурсов.md | 29 + ...урсов.-Мета-аргументы.md | 452 +++++++ ...05.05-Удаление-ресурсов.md | 262 ++++ .../05.06-Установка-Terraform.md | 46 + ...струкция-по-миграции.md | 702 ++++++++++ ...-Terraform-провайдера-DYNAMIX.md | 1 + ...овательская-группа-API.md | 1 + wiki/1.0.0/06.01.01-Data_dynamix_kvmvm.md | 269 ++++ wiki/1.0.0/06.01.02-Data_dynamix_resgroup.md | 121 ++ wiki/1.0.0/06.01.03-Data_dynamix_disk.md | 110 ++ wiki/1.0.0/06.01.04-Data_dynamix_vins.md | 279 ++++ wiki/1.0.0/06.01.05-Data_dynamix_account.md | 113 ++ .../1.0.0/06.01.06-Data_dynamix_image_list.md | 158 +++ wiki/1.0.0/06.01.07-Data_dynamix_image.md | 82 ++ .../06.01.08-Data_dynamix_locations_list.md | 76 ++ .../06.01.09-Data_dynamix_location_url.md | 23 + .../06.01.11-Data_dynamix_snapshot_list.md | 34 + wiki/1.0.0/06.01.12-Data_dynamix_rg_list.md | 122 ++ wiki/1.0.0/06.01.13-Data_dynamix_disk_list.md | 186 +++ wiki/1.0.0/06.01.14-Data_dynamix_vins_list.md | 102 ++ .../06.01.15-Data_dynamix_extnet_list.md | 88 ++ .../06.01.16-Data_dynamix_extnet_default.md | 24 + ...01.17-Data_dynamix_extnet_computes_list.md | 76 ++ wiki/1.0.0/06.01.18-Data_dynamix_extnet.md | 53 + 
....01.19-Data_dynamix_account_audits_list.md | 36 + ...1.20-Data_dynamix_account_computes_list.md | 118 ++ ....21-Data_dynamix_account_consumed_units.md | 38 + ..._dynamix_account_consumed_units_by_type.md | 44 + ...6.01.23-Data_dynamix_account_disks_list.md | 79 ++ ...24-Data_dynamix_account_flipgroups_list.md | 97 ++ ....25-Data_dynamix_account_reserved_units.md | 38 + .../06.01.26-Data_dynamix_account_rg_list.md | 93 ++ ....27-Data_dynamix_account_templates_list.md | 84 ++ ...06.01.28-Data_dynamix_account_vins_list.md | 94 ++ .../06.01.29-Data_dynamix_account_list.md | 76 ++ ...01.30-Data_dynamix_account_deleted_list.md | 70 + .../06.01.31-Data_dynamix_bservice_list.md | 116 ++ ...1.32-Data_dynamix_bservice_deleted_list.md | 81 ++ ....33-Data_dynamix_bservice_snapshot_list.md | 33 + wiki/1.0.0/06.01.34-Data_dynamix_bservice.md | 92 ++ .../06.01.35-Data_dynamix_bservice_group.md | 68 + wiki/1.0.0/06.01.36-Data_dynamix_lb.md | 109 ++ wiki/1.0.0/06.01.37-Data_dynamix_lb_list.md | 181 +++ .../06.01.38-Data_dynamix_lb_list_deleted.md | 167 +++ .../06.01.39-Data_dynamix_disk_list_types.md | 45 + ...0-Data_dynamix_disk_list_types_detailed.md | 52 + ...06.01.41-Data_dynamix_disk_list_deleted.md | 129 ++ ...01.42-Data_dynamix_disk_list_unattached.md | 160 +++ ...6.01.43-Data_dynamix_disk_snapshot_list.md | 32 + .../06.01.44-Data_dynamix_disk_snapshot.md | 38 + wiki/1.0.0/06.01.45-Data_dynamix_k8s.md | 121 ++ wiki/1.0.0/06.01.46-Data_dynamix_k8s_list.md | 171 +++ .../06.01.47-Data_dynamix_k8s_list_deleted.md | 156 +++ wiki/1.0.0/06.01.48-Data_dynamix_k8s_wg.md | 66 + .../06.01.49-Data_dynamix_k8s_wg_list.md | 61 + .../06.01.50-Data_dynamix_vins_audits.md | 37 + ...06.01.51-Data_dynamix_vins_ext_net_list.md | 37 + .../06.01.52-Data_dynamix_vins_ip_list.md | 38 + ...06.01.53-Data_dynamix_vins_list_deleted.md | 96 ++ ...6.01.54-Data_dynamix_vins_nat_rule_list.md | 38 + .../06.01.55-Data_dynamix_kvmvm_audits.md | 33 + .../06.01.56-Data_dynamix_kvmvm_get_audits.md | 30 + 
...1.57-Data_dynamix_kvmvm_get_console_url.md | 28 + .../06.01.58-Data_dynamix_kvmvm_get_log.md | 34 + .../1.0.0/06.01.59-Data_dynamix_kvmvm_list.md | 242 ++++ .../06.01.60-Data_dynamix_kvmvm_pfw_list.md | 35 + .../06.01.61-Data_dynamix_kvmvm_user_list.md | 41 + wiki/1.0.0/06.01.62-Data_dynamix_rg_list.md | 155 +++ ...Data_dynamix_rg_affinity_group_computes.md | 50 + ...64-Data_dynamix_rg_affinity_groups_list.md | 54 + ....65-Data_dynamix_rg_affinity_groups_get.md | 44 + wiki/1.0.0/06.01.66-Data_dynamix_rg_audits.md | 42 + .../06.01.67-Data_dynamix_rg_list_computes.md | 139 ++ .../06.01.68-Data_dynamix_rg_list_deleted.md | 143 ++ .../1.0.0/06.01.69-Data_dynamix_rg_list_lb.md | 194 +++ .../06.01.70-Data_dynamix_rg_list_pfw.md | 45 + .../06.01.71-Data_dynamix_rg_list_vins.md | 100 ++ wiki/1.0.0/06.01.72-Data_dynamix_rg_usage.md | 57 + ...01.73-Data_dynamix_kvmvm_snapshot_usage.md | 45 + .../06.01.74-Data _dynamix_k8s_computes.md | 47 + wiki/1.0.0/06.01.75-Data_dynamix_flipgroup.md | 49 + .../06.01.75-Data_dynamix_flipgroup_list.md | 125 ++ wiki/1.0.0/06.01.76-Data_dynamix_k8ci_list.md | 88 ++ ...06.01.77-Data_dynamix_vins_static_route.md | 39 + ....78-Data_dynamix_vins_static_route_list.md | 34 + ...ynamix_account_resource_consumption_get.md | 68 + ...namix_account_resource_consumption_list.md | 49 + ...06.01.81-Data_dynamix_k8s_wg_cloud_init.md | 39 + ...6.01.82-Data_dynamix_kvmvm_list_deleted.md | 228 ++++ ...1.83-Data_dynamix_kvmvm_pci_device_list.md | 84 ++ .../06.01.84-Data_dynamix_kvmvm_vgpu_list.md | 92 ++ ...ata_dynamix_rg_resource_consumption_get.md | 68 + ...ta_dynamix_rg_resource_consumption_list.md | 61 + .../06.01.87-Data_dynamix_disk_replication.md | 118 ++ wiki/1.0.0/06.01.87-Data_dynamix_vfpool.md | 63 + .../06.01.88-Data_dynamix_vfpool_list.md | 122 ++ wiki/1.0.0/06.01.89-Data_dynamix_stack.md | 42 + .../1.0.0/06.01.90-Data_dynamix_stack_list.md | 76 ++ ...-Terraform-провайдера-dynamix.md | 2 + ...овательская-группа-API.md | 1 + 
wiki/1.0.0/07.01.01-Resource_dynamix_kvmvm.md | 860 ++++++++++++ .../07.01.02-Resource_dynamix_resgroup.md | 277 ++++ wiki/1.0.0/07.01.03-Resource_dynamix_disk.md | 173 +++ wiki/1.0.0/07.01.04-Resource_dynamix_vins.md | 455 +++++++ .../07.01.05-Resource_dynamix_snapshot.md | 48 + wiki/1.0.0/07.01.06-Resource_dynamix_k8s.md | 571 ++++++++ .../1.0.0/07.01.07-Resource_dynamix_k8s_wg.md | 169 +++ ...07.01.08-Resource_dynamix_image_virtual.md | 87 ++ wiki/1.0.0/07.01.09-Resource_dynamix_image.md | 165 +++ wiki/1.0.0/07.01.10-Resource_dynamix_pfw.md | 59 + .../07.01.11-Resource_dynamix_account.md | 227 ++++ .../07.01.12-Resource_dynamix_bservice.md | 154 +++ ...7.01.13-Resource_dynamix_bservice_group.md | 252 ++++ wiki/1.0.0/07.01.14-Resource_dynamix_lb.md | 203 +++ .../07.01.15-Resource_dynamix_lb_frontend.md | 73 ++ ...01.16-Resource_dynamix_lb_frontend_bind.md | 71 + .../07.01.17-Resource_dynamix_lb_backend.md | 151 +++ ...1.18-Resource_dynamix_lb_backend_server.md | 154 +++ ...07.01.19-Resource_dynamix_disk_snapshot.md | 51 + .../07.01.20-Resource_dynamix_flipgroup.md | 96 ++ .../1.0.0/07.01.21-Resource_dynamix_k8s_cp.md | 357 +++++ ...1.22-Resource_dynamix_vins_static_route.md | 60 + ...source_dynamix_image_from_blank_compute.md | 145 +++ ...source_dynamix_image_from_platform_disk.md | 168 +++ ...01.25-Resource_dynamix_disk_replication.md | 167 +++ .../08.-Полезные-советы.md | 42 + wiki/1.0.0/Home.md | 294 +++++ wiki/Home.md | 3 + 946 files changed, 98335 insertions(+) create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 cmd/dynamix/main.go create mode 100644 docs/data-sources/account.md create mode 100644 docs/data-sources/account_audits_list.md create mode 100644 docs/data-sources/account_computes_list.md create mode 100644 docs/data-sources/account_consumed_units.md create mode 100644 docs/data-sources/account_consumed_units_by_type.md create mode 
100644 docs/data-sources/account_deleted_list.md create mode 100644 docs/data-sources/account_disks_list.md create mode 100644 docs/data-sources/account_flipgroups_list.md create mode 100644 docs/data-sources/account_list.md create mode 100644 docs/data-sources/account_reserved_units.md create mode 100644 docs/data-sources/account_resource_consumption_get.md create mode 100644 docs/data-sources/account_resource_consumption_list.md create mode 100644 docs/data-sources/account_rg_list.md create mode 100644 docs/data-sources/account_templates_list.md create mode 100644 docs/data-sources/account_vins_list.md create mode 100644 docs/data-sources/bservice.md create mode 100644 docs/data-sources/bservice_deleted_list.md create mode 100644 docs/data-sources/bservice_group.md create mode 100644 docs/data-sources/bservice_list.md create mode 100644 docs/data-sources/bservice_snapshot_list.md create mode 100644 docs/data-sources/disk.md create mode 100644 docs/data-sources/disk_list.md create mode 100644 docs/data-sources/disk_list_deleted.md create mode 100644 docs/data-sources/disk_list_types.md create mode 100644 docs/data-sources/disk_list_types_detailed.md create mode 100644 docs/data-sources/disk_list_unattached.md create mode 100644 docs/data-sources/disk_replication.md create mode 100644 docs/data-sources/disk_snapshot.md create mode 100644 docs/data-sources/disk_snapshot_list.md create mode 100644 docs/data-sources/extnet.md create mode 100644 docs/data-sources/extnet_computes_list.md create mode 100644 docs/data-sources/extnet_default.md create mode 100644 docs/data-sources/extnet_list.md create mode 100644 docs/data-sources/flipgroup.md create mode 100644 docs/data-sources/flipgroup_list.md create mode 100644 docs/data-sources/image.md create mode 100644 docs/data-sources/image_list.md create mode 100644 docs/data-sources/k8ci_list.md create mode 100644 docs/data-sources/k8s.md create mode 100644 docs/data-sources/k8s_computes.md create mode 100644 
docs/data-sources/k8s_list.md create mode 100644 docs/data-sources/k8s_list_deleted.md create mode 100644 docs/data-sources/k8s_wg.md create mode 100644 docs/data-sources/k8s_wg_cloud_init.md create mode 100644 docs/data-sources/k8s_wg_list.md create mode 100644 docs/data-sources/kvmvm.md create mode 100644 docs/data-sources/kvmvm_audits.md create mode 100644 docs/data-sources/kvmvm_get_audits.md create mode 100644 docs/data-sources/kvmvm_get_console_url.md create mode 100644 docs/data-sources/kvmvm_get_log.md create mode 100644 docs/data-sources/kvmvm_list.md create mode 100644 docs/data-sources/kvmvm_list_deleted.md create mode 100644 docs/data-sources/kvmvm_pci_device_list.md create mode 100644 docs/data-sources/kvmvm_pfw_list.md create mode 100644 docs/data-sources/kvmvm_snapshot_usage.md create mode 100644 docs/data-sources/kvmvm_user_list.md create mode 100644 docs/data-sources/kvmvm_vgpu_list.md create mode 100644 docs/data-sources/lb.md create mode 100644 docs/data-sources/lb_list.md create mode 100644 docs/data-sources/lb_list_deleted.md create mode 100644 docs/data-sources/resgroup.md create mode 100644 docs/data-sources/rg_affinity_group_computes.md create mode 100644 docs/data-sources/rg_affinity_groups_get.md create mode 100644 docs/data-sources/rg_affinity_groups_list.md create mode 100644 docs/data-sources/rg_audits.md create mode 100644 docs/data-sources/rg_list.md create mode 100644 docs/data-sources/rg_list_computes.md create mode 100644 docs/data-sources/rg_list_deleted.md create mode 100644 docs/data-sources/rg_list_lb.md create mode 100644 docs/data-sources/rg_list_pfw.md create mode 100644 docs/data-sources/rg_list_vins.md create mode 100644 docs/data-sources/rg_resource_consumption_get.md create mode 100644 docs/data-sources/rg_resource_consumption_list.md create mode 100644 docs/data-sources/rg_usage.md create mode 100644 docs/data-sources/stack.md create mode 100644 docs/data-sources/stack_list.md create mode 100644 
docs/data-sources/vfpool.md create mode 100644 docs/data-sources/vfpool_list.md create mode 100644 docs/data-sources/vins.md create mode 100644 docs/data-sources/vins_audits.md create mode 100644 docs/data-sources/vins_ext_net_list.md create mode 100644 docs/data-sources/vins_ip_list.md create mode 100644 docs/data-sources/vins_list.md create mode 100644 docs/data-sources/vins_list_deleted.md create mode 100644 docs/data-sources/vins_nat_rule_list.md create mode 100644 docs/data-sources/vins_static_route.md create mode 100644 docs/data-sources/vins_static_route_list.md create mode 100644 docs/index.md create mode 100644 docs/resources/account.md create mode 100644 docs/resources/bservice.md create mode 100644 docs/resources/bservice_group.md create mode 100644 docs/resources/disk.md create mode 100644 docs/resources/disk_replication.md create mode 100644 docs/resources/disk_snapshot.md create mode 100644 docs/resources/flipgroup.md create mode 100644 docs/resources/image.md create mode 100644 docs/resources/image_virtual.md create mode 100644 docs/resources/k8s_cp.md create mode 100644 docs/resources/k8s_wg.md create mode 100644 docs/resources/kvmvm.md create mode 100644 docs/resources/lb.md create mode 100644 docs/resources/lb_backend.md create mode 100644 docs/resources/lb_backend_server.md create mode 100644 docs/resources/lb_frontend.md create mode 100644 docs/resources/lb_frontend_bind.md create mode 100644 docs/resources/resgroup.md create mode 100644 docs/resources/vins.md create mode 100644 docs/resources/vins_static_route.md create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/client/client.go create mode 100644 internal/constants/constants.go create mode 100644 internal/constants/timeouts.go create mode 100644 internal/flattens/flatten_simple_type.go create mode 100644 internal/flattens/meta.go create mode 100644 internal/provider/data_sources_map.go create mode 100644 internal/provider/provider.go create mode 100644 
internal/provider/provider_validation.go create mode 100644 internal/provider/resources_map.go create mode 100644 internal/service/cloudapi/account/data_source_account.go create mode 100644 internal/service/cloudapi/account/data_source_account_audits_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_computes_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_consumed_unit_by_type.go create mode 100644 internal/service/cloudapi/account/data_source_account_consumed_units.go create mode 100644 internal/service/cloudapi/account/data_source_account_disks_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_flipgroups_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_get_resource_consumption.go create mode 100644 internal/service/cloudapi/account/data_source_account_get_resource_consumption_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_list_deleted.go create mode 100644 internal/service/cloudapi/account/data_source_account_reserved_units.go create mode 100644 internal/service/cloudapi/account/data_source_account_rg_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_templates_list.go create mode 100644 internal/service/cloudapi/account/data_source_account_vins_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_account_list_deleted.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_audits_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_computes_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units.go create mode 100644 
internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units_by_type.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_disks_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_reserved_units.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_rg_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_templates_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_data_source_account_vins_list.go create mode 100644 internal/service/cloudapi/account/flattens/flatten_resource_account.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_audits_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_computes_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_consumed_units.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_consumed_units_by_type.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_disks_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption.go create mode 
100644 internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_list_deleted.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_reserved_units.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_rg_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_templates_list.go create mode 100644 internal/service/cloudapi/account/models/model_data_source_account_vins_list.go create mode 100644 internal/service/cloudapi/account/models/model_resource_account.go create mode 100644 internal/service/cloudapi/account/resource_account.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_audits.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_computes_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units_by_type.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_disks_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_list.go create mode 100644 
internal/service/cloudapi/account/schemas/schema_data_source_account_list_deleted.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_reserved_units.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_rg_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_templates_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_data_source_account_vins_list.go create mode 100644 internal/service/cloudapi/account/schemas/schema_resource_account.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_audits_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_computes_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units_by_type.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_disks_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_list_deleted.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_reserved_units.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_rg_list.go 
create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_templates_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_data_source_account_vins_list.go create mode 100644 internal/service/cloudapi/account/utilities/utility_resource_account.go create mode 100644 internal/service/cloudapi/bservice/data_source_bservice.go create mode 100644 internal/service/cloudapi/bservice/data_source_bservice__deleted_list.go create mode 100644 internal/service/cloudapi/bservice/data_source_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/data_source_bservice_list.go create mode 100644 internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_deleted_list.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_list.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_snapshot_list.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_resource_bservice.go create mode 100644 internal/service/cloudapi/bservice/flattens/flatten_resource_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/models/model_data_source_bservice.go create mode 100644 internal/service/cloudapi/bservice/models/model_data_source_bservice_deleted_list.go create mode 100644 internal/service/cloudapi/bservice/models/model_data_source_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/models/model_data_source_bservice_list.go create mode 100644 internal/service/cloudapi/bservice/models/model_data_source_bservice_snapshot_list.go create mode 100644 
internal/service/cloudapi/bservice/models/model_resource_bservice.go create mode 100644 internal/service/cloudapi/bservice/models/model_resource_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/resource_bservice.go create mode 100644 internal/service/cloudapi/bservice/resource_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_data_source_bservice.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_deleted_list.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_list.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_snapshot_list.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_resource_bservice.go create mode 100644 internal/service/cloudapi/bservice/schemas/schema_resource_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_data_source_bservice.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_deleted_list.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_group.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_list.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_snapshot_list.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_resource_bservice.go create mode 100644 internal/service/cloudapi/bservice/utilities/utility_resource_bservice_group.go create mode 100644 internal/service/cloudapi/disks/data_source_disk.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_list.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_list_deleted.go create mode 100644 
internal/service/cloudapi/disks/data_source_disk_list_types.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_list_unattached.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_replication.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/data_source_disk_snapshot_list.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_deleted.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types_detailed.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_unattached.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_replication.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot_list.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_resource_disk.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go create mode 100644 internal/service/cloudapi/disks/flattens/flatten_resource_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/input_checks.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_list.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_list_deleted.go 
create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_list_types.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_list_types_detailed.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_list_unattached.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_replication.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/models/model_data_source_disk_snapshot_list.go create mode 100644 internal/service/cloudapi/disks/models/model_resource_disk.go create mode 100644 internal/service/cloudapi/disks/models/model_resource_disk_replication.go create mode 100644 internal/service/cloudapi/disks/models/model_resource_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/resource_disk.go create mode 100644 internal/service/cloudapi/disks/resource_disk_replication.go create mode 100644 internal/service/cloudapi/disks/resource_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_list.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_deleted.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types_detailed.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_unattached.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_replication.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_data_source_disk_snapshot_list.go create mode 100644 
internal/service/cloudapi/disks/schemas/schema_resource_disk.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go create mode 100644 internal/service/cloudapi/disks/schemas/schema_resource_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_list.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_list_deleted.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_list_types.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_list_types_detailed.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_list_unattached.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_replication.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_snapshot.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_data_source_disk_snapshot_list.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_resource_disk.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_resource_disk_replication.go create mode 100644 internal/service/cloudapi/disks/utilities/utility_resource_disk_snapshot.go create mode 100644 internal/service/cloudapi/extnet/data_source_extnet.go create mode 100644 internal/service/cloudapi/extnet/data_source_extnet_computes_list.go create mode 100644 internal/service/cloudapi/extnet/data_source_extnet_default.go create mode 100644 internal/service/cloudapi/extnet/data_source_extnet_list.go create mode 100644 internal/service/cloudapi/extnet/flattens/flatten_data_source_extnet.go create mode 100644 internal/service/cloudapi/extnet/flattens/flatten_data_source_extnet_computes_list.go create mode 100644 
internal/service/cloudapi/extnet/flattens/flatten_data_source_extnet_default.go create mode 100644 internal/service/cloudapi/extnet/flattens/flatten_data_source_extnet_list.go create mode 100644 internal/service/cloudapi/extnet/models/model_data_source_extnet.go create mode 100644 internal/service/cloudapi/extnet/models/model_data_source_extnet_computes_list.go create mode 100644 internal/service/cloudapi/extnet/models/model_data_source_extnet_default.go create mode 100644 internal/service/cloudapi/extnet/models/model_data_source_extnet_list.go create mode 100644 internal/service/cloudapi/extnet/schemas/schema_data_source_extnet.go create mode 100644 internal/service/cloudapi/extnet/schemas/schema_data_source_extnet_computes_list.go create mode 100644 internal/service/cloudapi/extnet/schemas/schema_data_source_extnet_default.go create mode 100644 internal/service/cloudapi/extnet/schemas/schema_data_source_extnet_list.go create mode 100644 internal/service/cloudapi/extnet/utilities/utility_data_source_extnet.go create mode 100644 internal/service/cloudapi/extnet/utilities/utility_data_source_extnet_computes_list.go create mode 100644 internal/service/cloudapi/extnet/utilities/utility_data_source_extnet_default.go create mode 100644 internal/service/cloudapi/extnet/utilities/utility_data_source_extnet_list.go create mode 100644 internal/service/cloudapi/flipgroup/data_source_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/data_source_flipgroup_list.go create mode 100644 internal/service/cloudapi/flipgroup/flattens/flatten_data_source_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/flattens/flatten_data_source_flipgroup_list.go create mode 100644 internal/service/cloudapi/flipgroup/flattens/flatten_resource.go create mode 100644 internal/service/cloudapi/flipgroup/input_check.go create mode 100644 internal/service/cloudapi/flipgroup/models/model_data_source_flipgroup.go create mode 100644 
internal/service/cloudapi/flipgroup/models/model_data_source_flipgroup_list.go create mode 100644 internal/service/cloudapi/flipgroup/models/model_resource_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/resource_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/schemas/schema_data_source_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/schemas/schema_data_source_flipgroup_list.go create mode 100644 internal/service/cloudapi/flipgroup/schemas/schema_resource_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/utilities/utility_data_source_flipgroup.go create mode 100644 internal/service/cloudapi/flipgroup/utilities/utility_data_source_flipgroup_list.go create mode 100644 internal/service/cloudapi/flipgroup/utilities/utility_resource_flipgroup.go create mode 100644 internal/service/cloudapi/ic/input_checks.go create mode 100644 internal/service/cloudapi/image/data_source_image.go create mode 100644 internal/service/cloudapi/image/data_source_image_list.go create mode 100644 internal/service/cloudapi/image/flattens/flatten_data_source_image.go create mode 100644 internal/service/cloudapi/image/flattens/flatten_data_source_image_list.go create mode 100644 internal/service/cloudapi/image/flattens/flatten_resource_image.go create mode 100644 internal/service/cloudapi/image/flattens/flatten_resource_image_virtual.go create mode 100644 internal/service/cloudapi/image/models/model_data_source_image.go create mode 100644 internal/service/cloudapi/image/models/model_data_source_image_list.go create mode 100644 internal/service/cloudapi/image/models/model_resource_image.go create mode 100644 internal/service/cloudapi/image/models/model_resource_image_virtual.go create mode 100644 internal/service/cloudapi/image/resource_image.go create mode 100644 internal/service/cloudapi/image/resource_image_virtual.go create mode 100644 internal/service/cloudapi/image/schemas/schema_data_source_image.go create mode 
100644 internal/service/cloudapi/image/schemas/schema_data_source_image_list.go create mode 100644 internal/service/cloudapi/image/schemas/schema_resource_image.go create mode 100644 internal/service/cloudapi/image/schemas/schema_resource_image_virtual.go create mode 100644 internal/service/cloudapi/image/utilities/resource_check_input_vales.go create mode 100644 internal/service/cloudapi/image/utilities/utility_data_source_image.go create mode 100644 internal/service/cloudapi/image/utilities/utility_data_source_image_list.go create mode 100644 internal/service/cloudapi/image/utilities/utility_resource_image.go create mode 100644 internal/service/cloudapi/image/utilities/utility_resource_image_virtual.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8ci_list.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s_computes.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s_list.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s_list_deleted.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s_wg_cloud_init.go create mode 100644 internal/service/cloudapi/k8s/data_source_k8s_wg_list.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8ci_list.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s_computes.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s_list.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s_list_deleted.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s_wg_cloud_init.go create mode 100644 
internal/service/cloudapi/k8s/flattens/flatten_data_source_k8s_wg_list.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_cp.go create mode 100644 internal/service/cloudapi/k8s/flattens/flatten_resource_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8ci_list.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s_computes.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s_list.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s_list_deleted.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s_wg_cloud_init.go create mode 100644 internal/service/cloudapi/k8s/models/model_data_source_k8s_wg_list.go create mode 100644 internal/service/cloudapi/k8s/models/model_resource_k8s_cp.go create mode 100644 internal/service/cloudapi/k8s/models/model_resource_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/resource_k8s_cp.go create mode 100644 internal/service/cloudapi/k8s/resource_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8ci_list.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s_computes.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s_list.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s_list_deleted.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s_wg_cloud_init.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_data_source_k8s_wg_list.go create mode 100644 
internal/service/cloudapi/k8s/schemas/schema_resource_k8s_cp.go create mode 100644 internal/service/cloudapi/k8s/schemas/schema_resource_k8s_wg.go create mode 100644 internal/service/cloudapi/k8s/utilities/resource_check_input_vales.go create mode 100644 internal/service/cloudapi/k8s/utilities/utility_data_source_k8ci_list.go create mode 100644 internal/service/cloudapi/k8s/utilities/utility_data_source_k8s_list.go create mode 100644 internal/service/cloudapi/k8s/utilities/utility_data_source_k8s_list_deleted.go create mode 100644 internal/service/cloudapi/k8s/utilities/utility_resource_k8s_cp.go create mode 100644 internal/service/cloudapi/k8s/utilities/utility_resource_k8s_wg.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_get_audits.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_list_deleted.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_pci_device_list.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_pfw_list.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_snapshot_usage.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/data_source_compute_vgpu_list.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_get_audits.go create 
mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_list_deleted.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_pci_device_list.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_pfw_list.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_snapshot_usage.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_data_source_compute_vgpu_list.go create mode 100644 internal/service/cloudapi/kvmvm/flattens/flatten_resource_compute.go create mode 100644 internal/service/cloudapi/kvmvm/input_check.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_get_audits.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_list_deleted.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_pci_device_list.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_pfw_list.go create mode 100644 
internal/service/cloudapi/kvmvm/models/model_data_source_compute_snapshot_usage.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_data_source_compute_vgpu_list.go create mode 100644 internal/service/cloudapi/kvmvm/models/model_resource_compute.go create mode 100644 internal/service/cloudapi/kvmvm/resource_compute.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_get_audits.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_list_deleted.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_pci_device_list.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_pfw_list.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_snapshot_usage.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_data_source_compute_vgpu_list.go create mode 100644 internal/service/cloudapi/kvmvm/schemas/schema_resource_compute.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_audits.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_get_audits.go 
create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_get_console_url.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_get_log.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_list.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_list_deleted.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_pci_device_list.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_pci_pfw_list.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_snapshot_usage.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_user_list.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_data_source_compute_vgpu_list.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_create.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_difference.go create mode 100644 internal/service/cloudapi/kvmvm/utilities/utility_resource_compute_update.go create mode 100644 internal/service/cloudapi/lb/data_source_lb.go create mode 100644 internal/service/cloudapi/lb/data_source_lb_list.go create mode 100644 internal/service/cloudapi/lb/data_source_lb_list_deleted.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_data_source_lb.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_data_source_lb_list.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_resource_lb.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_resource_lb_backend.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_resource_lb_backend_server.go create mode 100644 
internal/service/cloudapi/lb/flattens/flatten_resoursce_lb_frontend.go create mode 100644 internal/service/cloudapi/lb/flattens/flatten_resoursce_lb_frontend_bind.go create mode 100644 internal/service/cloudapi/lb/input_check.go create mode 100644 internal/service/cloudapi/lb/models/model_data_source_lb.go create mode 100644 internal/service/cloudapi/lb/models/model_data_source_lb_list.go create mode 100644 internal/service/cloudapi/lb/models/model_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudapi/lb/models/model_resource_lb.go create mode 100644 internal/service/cloudapi/lb/models/model_resource_lb_backend.go create mode 100644 internal/service/cloudapi/lb/models/model_resource_lb_backend_server.go create mode 100644 internal/service/cloudapi/lb/models/model_resource_lb_frontend.go create mode 100644 internal/service/cloudapi/lb/models/model_resource_lb_frontend_bind.go create mode 100644 internal/service/cloudapi/lb/resource_lb.go create mode 100644 internal/service/cloudapi/lb/resource_lb_backend.go create mode 100644 internal/service/cloudapi/lb/resource_lb_backend_server.go create mode 100644 internal/service/cloudapi/lb/resource_lb_frontend.go create mode 100644 internal/service/cloudapi/lb/resource_lb_frontend_bind.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_data_source_lb.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_data_soursce_lb_list.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_data_soursce_lb_list_deleted.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_resource_lb.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_resource_lb_backend.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_resource_lb_backend_server.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_resource_lb_frontend.go create mode 100644 internal/service/cloudapi/lb/schemas/schema_resource_lb_frontend_bind.go create mode 100644 
internal/service/cloudapi/lb/utilities/utility_data_source_lb.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_data_source_lb_list.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_data_source_lb_list_deleted.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_resource_lb.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_resource_lb_backend.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_resource_lb_backend_server.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_resource_lb_frontend.go create mode 100644 internal/service/cloudapi/lb/utilities/utility_resource_lb_frontend_bind.go create mode 100644 internal/service/cloudapi/rg/data_source_rg.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_audits.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_pfw.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_list_vins.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudapi/rg/data_source_rg_usage.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg.go create mode 100644 
internal/service/cloudapi/rg/flattens/flatten_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_audits.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_list.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_list_vins.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_data_source_rg_usage.go create mode 100644 internal/service/cloudapi/rg/flattens/flatten_resource_rg.go create mode 100644 internal/service/cloudapi/rg/input_checks.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_audits.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_get_resource_limits.go create mode 100644 
internal/service/cloudapi/rg/models/model_data_source_rg_list.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_list_vins.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_resource_limits_list.go create mode 100644 internal/service/cloudapi/rg/models/model_data_source_rg_usage.go create mode 100644 internal/service/cloudapi/rg/models/model_resource_rg.go create mode 100644 internal/service/cloudapi/rg/resource_rg.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_audits.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_list.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_list_vins.go create mode 100644 
internal/service/cloudapi/rg/schemas/schema_data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_data_source_rg_usage.go create mode 100644 internal/service/cloudapi/rg/schemas/schema_resource_rg.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_affinity_group_computes.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_affinity_groups_get.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_affinity_groups_list.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_audits.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_get_resource_consumption.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_list.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_list_computes.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_list_deleted.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_list_lb.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_list_pfw.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_list_vins.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_resource_consumption_list.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_data_source_rg_usage.go create mode 100644 internal/service/cloudapi/rg/utilities/utility_resource_rg.go create mode 100644 internal/service/cloudapi/stack/data_source_stack.go create mode 100644 internal/service/cloudapi/stack/data_source_stack_list.go create mode 100644 internal/service/cloudapi/stack/flattens/flatten_schema_data_source_stack.go create mode 100644 internal/service/cloudapi/stack/flattens/flatten_utility_stack_list.go create mode 100644 
internal/service/cloudapi/stack/models/model_data_source_stack.go create mode 100644 internal/service/cloudapi/stack/models/model_data_source_stack_list.go create mode 100644 internal/service/cloudapi/stack/schemas/schema_data_source_stack.go create mode 100644 internal/service/cloudapi/stack/schemas/schema_data_source_stack_list.go create mode 100644 internal/service/cloudapi/stack/utilities/utility_data_source_stack.go create mode 100644 internal/service/cloudapi/stack/utilities/utility_utility_stack_list.go create mode 100644 internal/service/cloudapi/vfpool/data_source_vfpool.go create mode 100644 internal/service/cloudapi/vfpool/data_source_vfpool_list.go create mode 100644 internal/service/cloudapi/vfpool/flattens/flatten_data_source_vfpool.go create mode 100644 internal/service/cloudapi/vfpool/flattens/flatten_data_source_vfpool_list.go create mode 100644 internal/service/cloudapi/vfpool/models/model_data_source_vfpool.go create mode 100644 internal/service/cloudapi/vfpool/models/model_data_source_vfpool_list.go create mode 100644 internal/service/cloudapi/vfpool/schemas/schema_data_source_vfpool.go create mode 100644 internal/service/cloudapi/vfpool/schemas/schema_data_source_vfpool_list.go create mode 100644 internal/service/cloudapi/vfpool/utilities/utility_data_source_vfpool.go create mode 100644 internal/service/cloudapi/vfpool/utilities/utility_data_source_vfpool_list.go create mode 100644 internal/service/cloudapi/vins/data_source_vins.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_audits.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_ip_list.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_list.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_list_deleted.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_nat_rule_list.go create mode 100644 
internal/service/cloudapi/vins/data_source_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/data_source_vins_static_route_list.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_audits.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_ip_list.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_list.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_resource_vins.go create mode 100644 internal/service/cloudapi/vins/flattens/flatten_resource_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/input_checks.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_audits.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_ip_list.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_list.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_nat_rule_list.go create mode 100644 
internal/service/cloudapi/vins/models/model_data_source_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/models/model_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudapi/vins/models/model_resource_vins.go create mode 100644 internal/service/cloudapi/vins/models/model_resource_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/resource_vins.go create mode 100644 internal/service/cloudapi/vins/resource_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_audits.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_ip_list.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_list.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_resource_vins.go create mode 100644 internal/service/cloudapi/vins/schemas/schema_resource_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_audits.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_ext_net_list.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_ip_list.go create mode 100644 
internal/service/cloudapi/vins/utilities/utility_data_source_vins_list.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_list_deleted.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_nat_rule_list.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_static_route.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_data_source_vins_static_route_list.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_resource_vins.go create mode 100644 internal/service/cloudapi/vins/utilities/utility_resource_vins_static_route.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_list.go create mode 100644 internal/service/cloudbroker/account/data_source_cb_account_vins_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_list.go create mode 100644 internal/service/cloudbroker/account/flattens/flatten_data_source_cb_account_vins_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_list.go create mode 100644 internal/service/cloudbroker/account/models/model_data_source_cb_account_vins_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_list.go create mode 100644 internal/service/cloudbroker/account/schemas/schema_data_source_cb_account_vins_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account.go create mode 100644 
internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_list.go create mode 100644 internal/service/cloudbroker/account/utilities/utility_data_source_cb_account_vins_list.go create mode 100644 internal/service/cloudbroker/stack/data_sourse_stack.go create mode 100644 internal/service/cloudbroker/stack/data_sourse_stack_list.go create mode 100644 internal/service/cloudbroker/stack/flattens/flatten_data_sourse_stack.go create mode 100644 internal/service/cloudbroker/stack/flattens/flatten_data_sourse_stack_list.go create mode 100644 internal/service/cloudbroker/stack/flattens/flatten_eco.go create mode 100644 internal/service/cloudbroker/stack/models/model_data_sourse_stack.go create mode 100644 internal/service/cloudbroker/stack/models/model_data_sourse_stack_list.go create mode 100644 internal/service/cloudbroker/stack/schemas/schema_data_sourse_stack.go create mode 100644 internal/service/cloudbroker/stack/schemas/schema_data_sourse_stack_list.go create mode 100644 internal/service/cloudbroker/stack/utilities/utility_data_sourse_stack.go create mode 100644 internal/service/cloudbroker/stack/utilities/utility_data_sourse_stack_list.go create mode 100644 internal/status/status.go create mode 100755 internal/techstatus/techstatus.go create mode 100644 internal/validate/divisibleBy.go create mode 100644 samples/cloudapi/account/data_account/main.tf create mode 100644 samples/cloudapi/account/data_account_audits/main.tf create mode 100644 samples/cloudapi/account/data_account_computes_list/main.tf create mode 100644 samples/cloudapi/account/data_account_consumed_units/main.tf create mode 100644 samples/cloudapi/account/data_account_consumed_units_by_type/main.tf create mode 100644 samples/cloudapi/account/data_account_deleted_list/main.tf create mode 100644 samples/cloudapi/account/data_account_disks_list/main.tf create mode 100644 samples/cloudapi/account/data_account_flipgroups_list/main.tf create mode 100644 
samples/cloudapi/account/data_account_get_resource_consumption/main.tf create mode 100644 samples/cloudapi/account/data_account_list/main.tf create mode 100644 samples/cloudapi/account/data_account_reserved_units/main.tf create mode 100644 samples/cloudapi/account/data_account_resource_consumption_list/main.tf create mode 100644 samples/cloudapi/account/data_account_rg_list/main.tf create mode 100644 samples/cloudapi/account/data_account_templates_list/main.tf create mode 100644 samples/cloudapi/account/data_account_vins_list/main.tf create mode 100644 samples/cloudapi/account/resource_account/main.tf create mode 100644 samples/cloudapi/bservice/data_bservice/main.tf create mode 100644 samples/cloudapi/bservice/data_bservice_deleted_list/main.tf create mode 100644 samples/cloudapi/bservice/data_bservice_group/main.tf create mode 100644 samples/cloudapi/bservice/data_bservice_list/main.tf create mode 100644 samples/cloudapi/bservice/data_bservice_snapshot_list/main.tf create mode 100644 samples/cloudapi/bservice/resource_bservice/main.tf create mode 100644 samples/cloudapi/bservice/resource_bservice_group/main.tf create mode 100644 samples/cloudapi/disk/data_disk/main.tf create mode 100644 samples/cloudapi/disk/data_disk_list/main.tf create mode 100644 samples/cloudapi/disk/data_disk_list_deleted/main.tf create mode 100644 samples/cloudapi/disk/data_disk_list_types/main.tf create mode 100644 samples/cloudapi/disk/data_disk_list_types_detailed/main.tf create mode 100644 samples/cloudapi/disk/data_disk_list_unattached/main.tf create mode 100644 samples/cloudapi/disk/data_disk_replication/main.tf create mode 100644 samples/cloudapi/disk/data_disk_snapshot/main.tf create mode 100644 samples/cloudapi/disk/data_disk_snapshot_list/main.tf create mode 100644 samples/cloudapi/disk/resource_disk/main.tf create mode 100644 samples/cloudapi/disk/resource_disk_replication/main.tf create mode 100644 samples/cloudapi/disk/resource_disk_snapshot/main.tf create mode 100644 
samples/cloudapi/extnet/data_extnet/main.tf create mode 100644 samples/cloudapi/extnet/data_extnet_computes_list/main.tf create mode 100644 samples/cloudapi/extnet/data_extnet_default/main.tf create mode 100644 samples/cloudapi/extnet/data_extnet_list/main.tf create mode 100644 samples/cloudapi/flipgroup/data_flipgroup/main.tf create mode 100644 samples/cloudapi/flipgroup/data_flipgroup_list/maint.tf create mode 100644 samples/cloudapi/flipgroup/resource_flipgroup/main.tf create mode 100644 samples/cloudapi/image/data_image/main.tf create mode 100644 samples/cloudapi/image/data_image_list/main.tf create mode 100644 samples/cloudapi/image/resource_image/main.tf create mode 100644 samples/cloudapi/image/resource_image_virtual/main.tf create mode 100644 samples/cloudapi/k8s/data_k8ci_list/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s_computes/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s_list/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s_list_deleted/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s_wg/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s_wg_cloud_init/main.tf create mode 100644 samples/cloudapi/k8s/data_k8s_wg_list/main.tf create mode 100644 samples/cloudapi/k8s/resource_k8s_cp/initconfig.tftpl create mode 100644 samples/cloudapi/k8s/resource_k8s_cp/main.tf create mode 100644 samples/cloudapi/k8s/resource_k8s_wg/initconfig.tftpl create mode 100644 samples/cloudapi/k8s/resource_k8s_wg/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_audits/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_get_audits/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_get_console_url/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_get_log/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_list/main.tf create mode 100644 
samples/cloudapi/kvmvm/data_kvmvm_list_deleted/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_pci_device_list/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_pfw_list/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_snapshot_usage/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_user_list/main.tf create mode 100644 samples/cloudapi/kvmvm/data_kvmvm_vgpu_list/main.tf create mode 100644 samples/cloudapi/kvmvm/resource_kvmvm/initconfig.tftpl create mode 100644 samples/cloudapi/kvmvm/resource_kvmvm/main.tf create mode 100644 samples/cloudapi/lb/data_lb/main.tf create mode 100644 samples/cloudapi/lb/data_lb_list/main.tf create mode 100644 samples/cloudapi/lb/data_lb_list_deleted/main.tf create mode 100644 samples/cloudapi/lb/resource_lb/main.tf create mode 100644 samples/cloudapi/lb/resource_lb_backend/main.tf create mode 100644 samples/cloudapi/lb/resource_lb_backend_server/main.tf create mode 100644 samples/cloudapi/lb/resource_lb_frontend/main.tf create mode 100644 samples/cloudapi/lb/resource_lb_frontend_bind/main.tf create mode 100644 samples/cloudapi/rg/data_rg/main.tf create mode 100644 samples/cloudapi/rg/data_rg_affinity_group_computes/main.tf create mode 100644 samples/cloudapi/rg/data_rg_affinity_groups_get/main.tf create mode 100644 samples/cloudapi/rg/data_rg_affinity_groups_list/main.tf create mode 100644 samples/cloudapi/rg/data_rg_audits/main.tf create mode 100644 samples/cloudapi/rg/data_rg_list/main.tf create mode 100644 samples/cloudapi/rg/data_rg_list_computes/main.tf create mode 100644 samples/cloudapi/rg/data_rg_list_deleted/main.tf create mode 100644 samples/cloudapi/rg/data_rg_list_lb/main.tf create mode 100644 samples/cloudapi/rg/data_rg_list_pfw/main.tf create mode 100644 samples/cloudapi/rg/data_rg_list_vins/main.tf create mode 100644 samples/cloudapi/rg/data_rg_resource_consumption_get/main.tf create mode 100644 samples/cloudapi/rg/data_rg_resource_consumption_list/main.tf create mode 100644 
samples/cloudapi/rg/data_rg_usage/main.tf create mode 100644 samples/cloudapi/rg/resource_rg/main.tf create mode 100644 samples/cloudapi/stack/data_stack/main.tf create mode 100644 samples/cloudapi/stack/data_stack_list/main.tf create mode 100644 samples/cloudapi/vfpool/data_vfpool/main.tf create mode 100644 samples/cloudapi/vfpool/data_vfpool_list/main.tf create mode 100644 samples/cloudapi/vins/data_vins/main.tf create mode 100644 samples/cloudapi/vins/data_vins_audits/main.tf create mode 100644 samples/cloudapi/vins/data_vins_ext_net_list/main.tf create mode 100644 samples/cloudapi/vins/data_vins_ip_list/main.tf create mode 100644 samples/cloudapi/vins/data_vins_list/main.tf create mode 100644 samples/cloudapi/vins/data_vins_list_deleted/main.tf create mode 100644 samples/cloudapi/vins/data_vins_nat_rule_list/main.tf create mode 100644 samples/cloudapi/vins/data_vins_static_route/main.tf create mode 100644 samples/cloudapi/vins/data_vins_static_route_list/main.tf create mode 100644 samples/cloudapi/vins/resource_vins/main.tf create mode 100644 samples/cloudapi/vins/resource_vins_static_route/main.tf create mode 100644 scripts/install.bat create mode 100755 scripts/install.sh create mode 100644 wiki/1.0.0/01.-Введение.md create mode 100644 wiki/1.0.0/02.-Пример-работы.md create mode 100644 wiki/1.0.0/03.-Обзор-облачной-платформы-DYNAMIX.md create mode 100644 wiki/1.0.0/04.-Начало-работы-с-terraform-провайдером-DYNAMIX.md create mode 100644 wiki/1.0.0/04.01-Установка-Terraform-провайдера-DYNAMIX.md create mode 100644 wiki/1.0.0/04.02-Инициализация-Terraform-провайдера-DYNAMIX.md create mode 100644 wiki/1.0.0/04.03-Переключение-между-группами-API.md create mode 100644 wiki/1.0.0/04.04-Получение-gid-или-grid_id.md create mode 100644 wiki/1.0.0/04.05-Сборка-terraform-провайдера-в-образ.md create mode 100644 wiki/1.0.0/05.-Работа-с-terraform.md create mode 100644 wiki/1.0.0/05.01-Импортирование-ресурсов.md create mode 100644 wiki/1.0.0/05.02-Работа-с-таймаутами.md 
create mode 100644 wiki/1.0.0/05.03-Восстановление-ресурсов.md create mode 100644 wiki/1.0.0/05.04-Массовое-создание-ресурсов.-Мета-аргументы.md create mode 100644 wiki/1.0.0/05.05-Удаление-ресурсов.md create mode 100644 wiki/1.0.0/05.06-Установка-Terraform.md create mode 100644 wiki/1.0.0/05.07-Инструкция-по-миграции.md create mode 100644 wiki/1.0.0/06.-Data-source-функции-Terraform-провайдера-DYNAMIX.md create mode 100644 wiki/1.0.0/06.01-Пользовательская-группа-API.md create mode 100644 wiki/1.0.0/06.01.01-Data_dynamix_kvmvm.md create mode 100644 wiki/1.0.0/06.01.02-Data_dynamix_resgroup.md create mode 100644 wiki/1.0.0/06.01.03-Data_dynamix_disk.md create mode 100644 wiki/1.0.0/06.01.04-Data_dynamix_vins.md create mode 100644 wiki/1.0.0/06.01.05-Data_dynamix_account.md create mode 100644 wiki/1.0.0/06.01.06-Data_dynamix_image_list.md create mode 100644 wiki/1.0.0/06.01.07-Data_dynamix_image.md create mode 100644 wiki/1.0.0/06.01.08-Data_dynamix_locations_list.md create mode 100644 wiki/1.0.0/06.01.09-Data_dynamix_location_url.md create mode 100644 wiki/1.0.0/06.01.11-Data_dynamix_snapshot_list.md create mode 100644 wiki/1.0.0/06.01.12-Data_dynamix_rg_list.md create mode 100644 wiki/1.0.0/06.01.13-Data_dynamix_disk_list.md create mode 100644 wiki/1.0.0/06.01.14-Data_dynamix_vins_list.md create mode 100644 wiki/1.0.0/06.01.15-Data_dynamix_extnet_list.md create mode 100644 wiki/1.0.0/06.01.16-Data_dynamix_extnet_default.md create mode 100644 wiki/1.0.0/06.01.17-Data_dynamix_extnet_computes_list.md create mode 100644 wiki/1.0.0/06.01.18-Data_dynamix_extnet.md create mode 100644 wiki/1.0.0/06.01.19-Data_dynamix_account_audits_list.md create mode 100644 wiki/1.0.0/06.01.20-Data_dynamix_account_computes_list.md create mode 100644 wiki/1.0.0/06.01.21-Data_dynamix_account_consumed_units.md create mode 100644 wiki/1.0.0/06.01.22-Data_dynamix_account_consumed_units_by_type.md create mode 100644 wiki/1.0.0/06.01.23-Data_dynamix_account_disks_list.md create mode 100644 
wiki/1.0.0/06.01.24-Data_dynamix_account_flipgroups_list.md create mode 100644 wiki/1.0.0/06.01.25-Data_dynamix_account_reserved_units.md create mode 100644 wiki/1.0.0/06.01.26-Data_dynamix_account_rg_list.md create mode 100644 wiki/1.0.0/06.01.27-Data_dynamix_account_templates_list.md create mode 100644 wiki/1.0.0/06.01.28-Data_dynamix_account_vins_list.md create mode 100644 wiki/1.0.0/06.01.29-Data_dynamix_account_list.md create mode 100644 wiki/1.0.0/06.01.30-Data_dynamix_account_deleted_list.md create mode 100644 wiki/1.0.0/06.01.31-Data_dynamix_bservice_list.md create mode 100644 wiki/1.0.0/06.01.32-Data_dynamix_bservice_deleted_list.md create mode 100644 wiki/1.0.0/06.01.33-Data_dynamix_bservice_snapshot_list.md create mode 100644 wiki/1.0.0/06.01.34-Data_dynamix_bservice.md create mode 100644 wiki/1.0.0/06.01.35-Data_dynamix_bservice_group.md create mode 100644 wiki/1.0.0/06.01.36-Data_dynamix_lb.md create mode 100644 wiki/1.0.0/06.01.37-Data_dynamix_lb_list.md create mode 100644 wiki/1.0.0/06.01.38-Data_dynamix_lb_list_deleted.md create mode 100644 wiki/1.0.0/06.01.39-Data_dynamix_disk_list_types.md create mode 100644 wiki/1.0.0/06.01.40-Data_dynamix_disk_list_types_detailed.md create mode 100644 wiki/1.0.0/06.01.41-Data_dynamix_disk_list_deleted.md create mode 100644 wiki/1.0.0/06.01.42-Data_dynamix_disk_list_unattached.md create mode 100644 wiki/1.0.0/06.01.43-Data_dynamix_disk_snapshot_list.md create mode 100644 wiki/1.0.0/06.01.44-Data_dynamix_disk_snapshot.md create mode 100644 wiki/1.0.0/06.01.45-Data_dynamix_k8s.md create mode 100644 wiki/1.0.0/06.01.46-Data_dynamix_k8s_list.md create mode 100644 wiki/1.0.0/06.01.47-Data_dynamix_k8s_list_deleted.md create mode 100644 wiki/1.0.0/06.01.48-Data_dynamix_k8s_wg.md create mode 100644 wiki/1.0.0/06.01.49-Data_dynamix_k8s_wg_list.md create mode 100644 wiki/1.0.0/06.01.50-Data_dynamix_vins_audits.md create mode 100644 wiki/1.0.0/06.01.51-Data_dynamix_vins_ext_net_list.md create mode 100644 
wiki/1.0.0/06.01.52-Data_dynamix_vins_ip_list.md create mode 100644 wiki/1.0.0/06.01.53-Data_dynamix_vins_list_deleted.md create mode 100644 wiki/1.0.0/06.01.54-Data_dynamix_vins_nat_rule_list.md create mode 100644 wiki/1.0.0/06.01.55-Data_dynamix_kvmvm_audits.md create mode 100644 wiki/1.0.0/06.01.56-Data_dynamix_kvmvm_get_audits.md create mode 100644 wiki/1.0.0/06.01.57-Data_dynamix_kvmvm_get_console_url.md create mode 100644 wiki/1.0.0/06.01.58-Data_dynamix_kvmvm_get_log.md create mode 100644 wiki/1.0.0/06.01.59-Data_dynamix_kvmvm_list.md create mode 100644 wiki/1.0.0/06.01.60-Data_dynamix_kvmvm_pfw_list.md create mode 100644 wiki/1.0.0/06.01.61-Data_dynamix_kvmvm_user_list.md create mode 100644 wiki/1.0.0/06.01.62-Data_dynamix_rg_list.md create mode 100644 wiki/1.0.0/06.01.63-Data_dynamix_rg_affinity_group_computes.md create mode 100644 wiki/1.0.0/06.01.64-Data_dynamix_rg_affinity_groups_list.md create mode 100644 wiki/1.0.0/06.01.65-Data_dynamix_rg_affinity_groups_get.md create mode 100644 wiki/1.0.0/06.01.66-Data_dynamix_rg_audits.md create mode 100644 wiki/1.0.0/06.01.67-Data_dynamix_rg_list_computes.md create mode 100644 wiki/1.0.0/06.01.68-Data_dynamix_rg_list_deleted.md create mode 100644 wiki/1.0.0/06.01.69-Data_dynamix_rg_list_lb.md create mode 100644 wiki/1.0.0/06.01.70-Data_dynamix_rg_list_pfw.md create mode 100644 wiki/1.0.0/06.01.71-Data_dynamix_rg_list_vins.md create mode 100644 wiki/1.0.0/06.01.72-Data_dynamix_rg_usage.md create mode 100644 wiki/1.0.0/06.01.73-Data_dynamix_kvmvm_snapshot_usage.md create mode 100644 wiki/1.0.0/06.01.74-Data _dynamix_k8s_computes.md create mode 100644 wiki/1.0.0/06.01.75-Data_dynamix_flipgroup.md create mode 100644 wiki/1.0.0/06.01.75-Data_dynamix_flipgroup_list.md create mode 100644 wiki/1.0.0/06.01.76-Data_dynamix_k8ci_list.md create mode 100644 wiki/1.0.0/06.01.77-Data_dynamix_vins_static_route.md create mode 100644 wiki/1.0.0/06.01.78-Data_dynamix_vins_static_route_list.md create mode 100644 
wiki/1.0.0/06.01.79-Data_dynamix_account_resource_consumption_get.md create mode 100644 wiki/1.0.0/06.01.80-Data_dynamix_account_resource_consumption_list.md create mode 100644 wiki/1.0.0/06.01.81-Data_dynamix_k8s_wg_cloud_init.md create mode 100644 wiki/1.0.0/06.01.82-Data_dynamix_kvmvm_list_deleted.md create mode 100644 wiki/1.0.0/06.01.83-Data_dynamix_kvmvm_pci_device_list.md create mode 100644 wiki/1.0.0/06.01.84-Data_dynamix_kvmvm_vgpu_list.md create mode 100644 wiki/1.0.0/06.01.85-Data_dynamix_rg_resource_consumption_get.md create mode 100644 wiki/1.0.0/06.01.86-Data_dynamix_rg_resource_consumption_list.md create mode 100644 wiki/1.0.0/06.01.87-Data_dynamix_disk_replication.md create mode 100644 wiki/1.0.0/06.01.87-Data_dynamix_vfpool.md create mode 100644 wiki/1.0.0/06.01.88-Data_dynamix_vfpool_list.md create mode 100644 wiki/1.0.0/06.01.89-Data_dynamix_stack.md create mode 100644 wiki/1.0.0/06.01.90-Data_dynamix_stack_list.md create mode 100644 wiki/1.0.0/07.-Resource-функции-Terraform-провайдера-dynamix.md create mode 100644 wiki/1.0.0/07.01-Пользовательская-группа-API.md create mode 100644 wiki/1.0.0/07.01.01-Resource_dynamix_kvmvm.md create mode 100644 wiki/1.0.0/07.01.02-Resource_dynamix_resgroup.md create mode 100644 wiki/1.0.0/07.01.03-Resource_dynamix_disk.md create mode 100644 wiki/1.0.0/07.01.04-Resource_dynamix_vins.md create mode 100644 wiki/1.0.0/07.01.05-Resource_dynamix_snapshot.md create mode 100644 wiki/1.0.0/07.01.06-Resource_dynamix_k8s.md create mode 100644 wiki/1.0.0/07.01.07-Resource_dynamix_k8s_wg.md create mode 100644 wiki/1.0.0/07.01.08-Resource_dynamix_image_virtual.md create mode 100644 wiki/1.0.0/07.01.09-Resource_dynamix_image.md create mode 100644 wiki/1.0.0/07.01.10-Resource_dynamix_pfw.md create mode 100644 wiki/1.0.0/07.01.11-Resource_dynamix_account.md create mode 100644 wiki/1.0.0/07.01.12-Resource_dynamix_bservice.md create mode 100644 wiki/1.0.0/07.01.13-Resource_dynamix_bservice_group.md create mode 100644 
wiki/1.0.0/07.01.14-Resource_dynamix_lb.md create mode 100644 wiki/1.0.0/07.01.15-Resource_dynamix_lb_frontend.md create mode 100644 wiki/1.0.0/07.01.16-Resource_dynamix_lb_frontend_bind.md create mode 100644 wiki/1.0.0/07.01.17-Resource_dynamix_lb_backend.md create mode 100644 wiki/1.0.0/07.01.18-Resource_dynamix_lb_backend_server.md create mode 100644 wiki/1.0.0/07.01.19-Resource_dynamix_disk_snapshot.md create mode 100644 wiki/1.0.0/07.01.20-Resource_dynamix_flipgroup.md create mode 100644 wiki/1.0.0/07.01.21-Resource_dynamix_k8s_cp.md create mode 100644 wiki/1.0.0/07.01.22-Resource_dynamix_vins_static_route.md create mode 100644 wiki/1.0.0/07.01.23-Resource_dynamix_image_from_blank_compute.md create mode 100644 wiki/1.0.0/07.01.24-Resource_dynamix_image_from_platform_disk.md create mode 100644 wiki/1.0.0/07.01.25-Resource_dynamix_disk_replication.md create mode 100644 wiki/1.0.0/08.-Полезные-советы.md create mode 100644 wiki/1.0.0/Home.md create mode 100644 wiki/Home.md diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..28544a8 --- /dev/null +++ b/.gitignore @@ -0,0 +1,12 @@ +.idea/ +.vscode/ +.fleet/ +.DS_Store +vendor/ +examples/ +terraform-provider-decort* +terraform-provider-dynamix* +bin/ +zip/ +vendor/ +.terraform/ \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..cea04b9 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,136 @@ +## Version 1.0.0 + +### Feature + +#### account: +- Add data source dynamix_account_audits_list in cloudapi/account +- Add data source dynamix_account_computes_list in cloudapi/account +- Add data source dynamix_account_consumed_units_by_type in cloudapi/account +- Add data source dynamix_account_consumed_units in cloudapi/account +- Add data source dynamix_account_disks_list in cloudapi/account +- Add data source dynamix_account_flipgroups_list in cloudapi/account +- Add data source dynamix_account_get_resource_consumption_list in cloudapi/account +- Add data 
source dynamix_account_get_resource_consumption in cloudapi/account +- Add data source dynamix_account_deleted_list in cloudapi/account +- Add data source dynamix_account_list in cloudapi/account +- Add data source dynamix_account_reserved_units in cloudapi/account +- Add data source dynamix_account_rg_list in cloudapi/account +- Add data source dynamix_account_templates_list in cloudapi/account +- Add data source dynamix_account_vins_list in cloudapi/account +- Add data source dynamix_account in cloudapi/account +- Add resource dynamix_account in cloudapi/account + +#### bservice: +- Add data source dynamix_bservice_deleted_list in cloudapi/bservice +- Add data source dynamix_bservice_group in cloudapi/bservice +- Add data source dynamix_bservice_list in cloudapi/bservice +- Add data source dynamix_bservice_snapshot_list in cloudapi/bservice +- Add data source dynamix_bservice in cloudapi/bservice +- Add resource dynamix_bservice_group in cloudapi/bservice +- Add resource dynamix_bservice in cloudapi/bservice + +#### disks: +- Add data source dynamix_disk in cloudapi/disks +- Add data source dynamix_disk_list in cloudapi/disks +- Add data source dynamix_disk_list_deleted in cloudapi/disks +- Add data source dynamix_disk_list_types in cloudapi/disks +- Add data source dynamix_disk_list_types_detailed in cloudapi/disks +- Add data source dynamix_disk_list_unattached in cloudapi/disks +- Add data source dynamix_disk_replication in cloudapi/disks +- Add data source dynamix_disk_snapshot in cloudapi/disks +- Add data source dynamix_disk_snapshot_list in cloudapi/disks +- Add resource dynamix_disk in cloudapi/disks +- Add resource dynamix_disk_replication in cloudapi/disks +- Add resource dynamix_disk_snapshot in cloudapi/disks + +#### extnet: +- Add data source dynamix_extnet in cloudapi/extnet +- Add data source dynamix_extnet_computes_list in cloudapi/extnet +- Add data source dynamix_extnet_default in cloudapi/extnet +- Add data source dynamix_extnet_list in 
cloudapi/extnet + +#### flipgroup: +- Add data source dynamix_flipgroup in cloudapi/flipgroup +- Add data source dynamix_flipgroup_list in cloudapi/flipgroup +- Add resource dynamix_flipgroup in cloudapi/flipgroup + +#### image: +- Add data source dynamix_image in cloudapi/image +- Add data source dynamix_image_list in cloudapi/image +- Add resource dynamix_image in cloudapi/image +- Add resource dynamix_image_virtual in cloudapi/image + +#### k8s: +- Add data source dynamix_k8ci_list in cloudapi/k8s +- Add data source dynamix_k8s in cloudapi/k8s +- Add data source dynamix_k8s_computes in cloudapi/k8s +- Add data source dynamix_k8s_list in cloudapi/k8s +- Add data source dynamix_k8s_list_deleted in cloudapi/k8s +- Add data source dynamix_k8s_wg in cloudapi/k8s +- Add data source dynamix_k8s_wg_cloud_init in cloudapi/k8s +- Add data source dynamix_k8s_wg_list in cloudapi/k8s +- Add resource dynamix_k8s_cp in cloudapi/k8s +- Add resource dynamix_k8s_wg in cloudapi/k8s + +#### kvmvm: +- Add data source dynamix_kvmvm in cloudapi/kvmvm +- Add data source dynamix_kvmvm_audits in cloudapi/kvmvm +- Add data source dynamix_kvmvm_get_audits in cloudapi/kvmvm +- Add data source dynamix_kvmvm_get_console_url in cloudapi/kvmvm +- Add data source dynamix_kvmvm_get_log in cloudapi/kvmvm +- Add data source dynamix_kvmvm_list in cloudapi/kvmvm +- Add data source dynamix_kvmvm_list_deleted in cloudapi/kvmvm +- Add data source dynamix_kvmvm_pci_device_list in cloudapi/kvmvm +- Add data source dynamix_kvmvm_pfw_list in cloudapi/kvmvm +- Add data source dynamix_kvmvm_snapshot_usage in cloudapi/kvmvm +- Add data source dynamix_kvmvm_user_list in cloudapi/kvmvm +- Add data source dynamix_kvmvm_vgpu_list in cloudapi/kvmvm +- Add resource dynamix_kvmvm in cloudapi/kvmvm + +#### lb: +- Add data source dynamix_lb_list in cloudapi/lb +- Add data source dynamix_lb_list_deleted in cloudapi/lb +- Add data source dynamix_lb in cloudapi/lb +- Add resource 
dynamix_lb_backend_server in cloudapi/lb +- Add resource dynamix_lb_backend in cloudapi/lb +- Add resource dynamix_lb_frontend_bind in cloudapi/lb +- Add resource dynamix_lb_frontend in cloudapi/lb +- Add resource dynamix_lb in cloudapi/lb + +#### rg: +- Add data source dynamix_rg in cloudapi/rg +- Add data source dynamix_rg_affinity_group_computes in cloudapi/rg +- Add data source dynamix_rg_affinity_groups_get in cloudapi/rg +- Add data source dynamix_rg_affinity_groups_list in cloudapi/rg +- Add data source dynamix_rg_audits in cloudapi/rg +- Add data source dynamix_rg_resource_consumption_get in cloudapi/rg +- Add data source dynamix_rg_list in cloudapi/rg +- Add data source dynamix_rg_list_computes in cloudapi/rg +- Add data source dynamix_rg_list_deleted in cloudapi/rg +- Add data source dynamix_rg_list_lb in cloudapi/rg +- Add data source dynamix_rg_list_pfw in cloudapi/rg +- Add data source dynamix_rg_list_vins in cloudapi/rg +- Add data source dynamix_rg_resource_consumption_list in cloudapi/rg +- Add data source dynamix_rg_usage in cloudapi/rg +- Add resource dynamix_rg in cloudapi/rg + +#### stack: +- Add data source dynamix_stack in cloudapi/stack +- Add data source dynamix_stack_list in cloudapi/stack + +#### vfpool: +- Add data source dynamix_vfpool in cloudapi/vfpool +- Add data source dynamix_vfpool_list in cloudapi/vfpool + +#### vins: +- Add data source dynamix_vins in cloudapi/vins +- Add data source dynamix_vins_audits in cloudapi/vins +- Add data source dynamix_vins_ext_net_list in cloudapi/vins +- Add data source dynamix_vins_ip_list in cloudapi/vins +- Add data source dynamix_vins_list in cloudapi/vins +- Add data source dynamix_vins_list_deleted in cloudapi/vins +- Add data source dynamix_vins_nat_rule_list in cloudapi/vins +- Add data source dynamix_vins_static_route in cloudapi/vins +- Add data source dynamix_vins_static_route_list in cloudapi/vins +- Add resource dynamix_vins in cloudapi/vins +- Add resource dynamix_vins_static_route in 
cloudapi/vins diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..b090019 --- /dev/null +++ b/Makefile @@ -0,0 +1,86 @@ +TEST?=$$(go list ./... 
| grep -v 'vendor') +HOSTNAME=basistech.ru +NAMESPACE=tf +SECONDNAMESPACE=dynamix +NAME=terraform-provider-dynamix +BINDIR = ./bin +ZIPDIR = ./zip +BINARY=${NAME} +WORKPATH= ./examples/terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${SECONDNAMESPACE}/${VERSION}/${OS_ARCH} +MAINPATH = ./cmd/dynamix/ +VERSION=1.0.0 +OS_ARCH=$(shell go env GOHOSTOS)_$(shell go env GOHOSTARCH) + +FILES = ${BINARY}_${VERSION}_darwin_amd64\ + ${BINARY}_${VERSION}_darwin_arm64\ + ${BINARY}_${VERSION}_freebsd_386\ + ${BINARY}_${VERSION}_freebsd_amd64\ + ${BINARY}_${VERSION}_freebsd_arm\ + ${BINARY}_${VERSION}_linux_386\ + ${BINARY}_${VERSION}_linux_amd64\ + ${BINARY}_${VERSION}_linux_arm\ + ${BINARY}_${VERSION}_linux_arm64\ + ${BINARY}_${VERSION}_openbsd_386\ + ${BINARY}_${VERSION}_openbsd_amd64\ + ${BINARY}_${VERSION}_solaris_amd64\ + ${BINARY}_${VERSION}_windows_386.exe\ + ${BINARY}_${VERSION}_windows_amd64.exe\ + +BINS = $(addprefix bin/, $(FILES)) + +default: install + +image: + GOOS=linux GOARCH=amd64 go build -o terraform-provider-dynamix ./cmd/basis/ + docker build . 
-t rudecs/tf:3.2.2 + rm terraform-provider-dynamix + +lint: + golangci-lint run --timeout 600s + +st: + go build -o ${BINARY} ${MAINPATH} + cp ${BINARY} ${WORKPATH} + rm ${BINARY} + +build: + go build -o ${BINARY} ${MAINPATH} + +release: $(FILES) + +$(FILES) : $(BINDIR) $(ZIPDIR) $(BINS) + zip -r $(ZIPDIR)/$@.zip $(BINDIR)/$@ + zip -rj $(ZIPDIR)/$@.zip scripts/install.bat scripts/install.sh + +$(BINDIR): + mkdir $@ + +$(ZIPDIR): + mkdir $@ + +$(BINS): + GOOS=darwin GOARCH=amd64 go build -o ./bin/${BINARY}_${VERSION}_darwin_amd64 $(MAINPATH) + GOOS=darwin GOARCH=arm64 go build -o ./bin/${BINARY}_${VERSION}_darwin_arm64 $(MAINPATH) + GOOS=freebsd GOARCH=386 go build -o ./bin/${BINARY}_${VERSION}_freebsd_386 $(MAINPATH) + GOOS=freebsd GOARCH=amd64 go build -o ./bin/${BINARY}_${VERSION}_freebsd_amd64 $(MAINPATH) + GOOS=freebsd GOARCH=arm go build -o ./bin/${BINARY}_${VERSION}_freebsd_arm $(MAINPATH) + GOOS=linux GOARCH=386 go build -o ./bin/${BINARY}_${VERSION}_linux_386 $(MAINPATH) + GOOS=linux GOARCH=amd64 go build -o ./bin/${BINARY}_${VERSION}_linux_amd64 $(MAINPATH) + GOOS=linux GOARCH=arm go build -o ./bin/${BINARY}_${VERSION}_linux_arm $(MAINPATH) + GOOS=linux GOARCH=arm64 go build -o ./bin/${BINARY}_${VERSION}_linux_arm64 ${MAINPATH} + GOOS=openbsd GOARCH=386 go build -o ./bin/${BINARY}_${VERSION}_openbsd_386 $(MAINPATH) + GOOS=openbsd GOARCH=amd64 go build -o ./bin/${BINARY}_${VERSION}_openbsd_amd64 $(MAINPATH) + GOOS=solaris GOARCH=amd64 go build -o ./bin/${BINARY}_${VERSION}_solaris_amd64 $(MAINPATH) + GOOS=windows GOARCH=386 go build -o ./bin/${BINARY}_${VERSION}_windows_386.exe $(MAINPATH) + GOOS=windows GOARCH=amd64 go build -o ./bin/${BINARY}_${VERSION}_windows_amd64.exe $(MAINPATH) + +install: build + mkdir -p ~/.terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${SECONDNAMESPACE}/${VERSION}/${OS_ARCH} + mv ${BINARY} ~/.terraform.d/plugins/${HOSTNAME}/${NAMESPACE}/${SECONDNAMESPACE}/${VERSION}/${OS_ARCH} + +test: + go test -i $(TEST) || exit 1 + echo $(TEST) 
| xargs -t -n4 go test $(TESTARGS) -timeout=30s -parallel=4 + +testacc: + TF_ACC=1 go test $(TEST) -v $(TESTARGS) -timeout 120m diff --git a/README.md b/README.md new file mode 100644 index 0000000..56ac2c0 --- /dev/null +++ b/README.md @@ -0,0 +1,143 @@ +# terraform-provider-dynamix + +## Особенности и изменения в terraform-provider-dynamix по сравнению с terraform-provider-decort + +### Пользовательская группа API + +Общее: значения незаданных пользователем блоков и атрибутов в terraform.tfstate сохраняются как null. +Параметры permanently, restore в terraform-provider-decort имели дефолтное значение false, а в terraform-provider-dynamix true. + +#### Аккаунты accounts + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса | Поля схемы | +|--------------------------|-------------------| +| data source dynamix_account_resource_consumption_list | consumed, reserved | +| data source dynamix_account_resource_consumption_get | consumed, reserved, resource_limits | +| data source dynamix_account_rg_list | computes, reserved, resource_limits, limits, reserved | +| resource dynamix_account | resource_limits, computes, machines | +| data source dynamix_disk_list_deleted | iotune | +| data source dynamix_disk_list_unattached | iotune | +| data source dynamix_disk_list | iotune | +| data source dynamix_disk_replication | iotune, replication | +| data source dynamix_disk | iotune | +| resource dynamix_disk_replication | iotune, replication | +| resource dynamix_disk | iotune | + +#### Диски disks + +| Название ресурса | Поля схемы | Изменение по сравнению с terraform-provider-decort | Комментарий | 
+|------------------------------------------|-------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| resource dynamix_disk | iotune, shareable | Если при создании ресурса диска операция настроки лимитов (поле iotune) и/или операция поделиться диском (поле shareable) выполняются некорректно, теперь ресурс создается с предупреждениями (Warnings). Ранее ресурс создавался с ошибками (Errors). | Данное изменение касается только создания ресурса. Обновление ресурса проходит также: если операция изменения iotune и/или shareable выполняется некорректно, возвращаются ошибки (Errors). | +| resource dynamix_disk | - | Операция автоматического восстановления диска (для диска, находящегося в корзине) теперь происходит при чтении ресурса. Ранее она происходила при обновлении ресурса. | | +| data source dynamix_disk_list_unattached | ckey, meta | Изменилось названия поля с "_ckey" на "ckey" и с "_meta" на "meta". 
| | + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса | Поля схемы | +|------------------------------------------|------------| +| data source dynamix_disk_list_unattached | iotune | +| data source dynamix_disk | iotune | +| data source dynamix_disk_list | iotune | +| data source dynamix_disk_list_deleted | iotune | +| resource dynamix_disk | iotune | + +#### Ресурсная группа rg + +| Название ресурса | Поля схемы | Изменение по сравнению с terraform-provider-decort | Комментарий | +|--------------------------------|------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| resource dynamix_resgroup | def_net_type | Поле def_net_type теперь только Optional (не Computed). Если не задан блок def_net, оно отображает текущий статус def_net_type. В случае если задан блок def_net, то текущий тип сети по умолчанию находится в поле def_net.net_type. | При другой реализации возникают ошибки, т.к. к вычисляемому полю def_net_type обращаются сразу две разные структуры, и фреймворк выдает ошибку при несовпадении плана и платформы, что неизбежно, когда к полю обращаются две разные структуры. | +| resource dynamix_resgroup | def_net, access, quota | Блоки def_net, access, quota стали атрибутами. При конфигурации ресурса задаются как атрибуты (через знак равно).
Стало: def_net = {}.
Было: def_net {}. | | +| resource dynamix_resgroup | force, permanently | Новые дефолтные значения: true.
Старые дефолтные значения: false. | | +| resource dynamix_resgroup | restore | Новое дефолтное значение: true. | | +| data source dynamix_rg_usage | seps | Изменилась внутренняя структура списка seps
Было: sep_id (string), map (map[string]string).
Стало: sep_id (string), data_name (string), disk_size (float64), disk_size_max (float64) | Теперь структура seps аналогична таковой для data sources dynamix_rg_get_resource_consumption, dynamix_rg_resource_consumption_list. | +| data source dynamix_rg_list_lb | acl | Удалено вычисляемое поле acl. | Причина: несоответствие типов acl в terraform-provider-decort и ACL в sdk. | + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса                                 | Поля схемы                                                             | +|--------------------------------------------------|------------------------------------------------------------------------| +| data source dynamix_rg_resource_consumption_list | consumed, reserved, resource_limits                                    | +| data source dynamix_rg                           | resource_limits                                                        | +| data source dynamix_rg_get_resource_consumption  | consumed, reserved, resource_limits                                    | +| data source dynamix_rg_list                      | resource_limits                                                        | +| data source dynamix_rg_list_deleted              | resource_limits                                                        | +| data source dynamix_rg_list_lb                   | server_default_settings, server_settings, primary_node, secondary_node | +| resource dynamix_resgroup                        | quota, def_net                                                         | + +#### Внешние сети extnet + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса           | Поля схемы        | +|----------------------------|-------------------| +| data source dynamix_extnet | default_qos, vnfs | + +#### Кластеры k8s + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса                     | Поля схемы      | +|--------------------------------------|-----------------| +| data source dynamix_k8s              | acl, masters    | +| data source dynamix_k8s_list         | service_account | +| data source dynamix_k8s_list_deleted | service_account | +| resource dynamix_k8s_cp              | acl             | + +#### Образы image + +Параметр permanently в 
terraform-provider-decort имел дефолтное значение false, а в terraform-provider-dynamix true. + +#### Внутренние сети vins + +| Название ресурса | Поля схемы | Изменение по сравнению с terraform-provider-decort | Комментарий | +|--------------------------|-------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------| +| data source dynamix_vins | ckey | Название вычисляемых полей изменено с "_ckey" на "ckey". | Переименование связано с ограничениями terraform framework. | +| resource dynamix_vins | ckey | Название вычисляемых полей изменено с "_ckey" на "ckey". | Переименование связано с ограничениями terraform framework. | +| resource dynamix_vins | ext_net_id, ext_ip_addr | Удалены вычисляемые поля ext_net_id и ext_ip_addr. | При создании и изменении внешних сетей используется блок ext_net {ext_net_id int; ext_net_ip string}. | +| resource dynamix_vins | force, permanently | Новые дефолтные значения: true.
Старые дефолтные значения: false. | | +| resource dynamix_vins | restore | Новое дефолтное значение: true. | | +| resource dynamix_vins | - | Если при обновлении ресурса vins какая-либо операция (изменение полей/блоков enable, ext_net, ip, nat_rule, vnfdev_restart, vnfdev_redeploy) не может быть выполнена, возвращаются ошибки (Errors). | Ранее ресурс изменялся с предупреждениями (Warnings). | +| resource dynamix_vins | nat_rule | В блоке nat_rule поля int_ip и ext_port_start стали обязательными. Ранее были опциональными. | | + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса | Поля схемы | +|--------------------------|----------------------------------------------------------------------------------------------------| +| data source dynamix_vins | vnf_dev, config, mgmt, resources, qos, default_qos, vnfs, dhcp, devices, primary, gw, nat | +| resource dynamix_vins | ext_net, vnf_dev, config, mgmt, resources, qos, default_qos, vnfs, dhcp, devices, primary, gw, nat | + +#### Виртуальные машины KVM KVMVM + +| Название ресурса | Поля схемы | Изменение по сравнению с terraform-provider-decort | Комментарий | +|------------------------|-------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------| +| resource dynamix_kvmvm | disks | Поле удалено | Исключено дублирование работы с дисками, которое можно производить при помощи ресурса dynamix_disk | +| resource dynamix_kvmvm | affinity_rules, anti_affinity_rules | Изменен тип с List на Set | | +| resource dynamix_kvmvm | force, permanently | Новые дефолтные значения: true.
Старые дефолтные значения: false. | | +| resource dynamix_kvmvm | restore | Новое дефолтное значение: true. | | +| resource dynamix_kvmvm | - | Операция автоматического восстановления диска (для диска, находящегося в корзине) теперь происходит при чтении ресурса. Ранее она происходила при обновлении ресурса. | | + +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса                       | Поля схемы                                             | +|----------------------------------------|--------------------------------------------------------| +| data source dynamix_kvmvm              | acl, iotune, replication, qos                          | +| data source dynamix_kvmvm_list         | qos                                                    | +| data source dynamix_kvmvm_list_deleted | qos                                                    | +| data source dynamix_kvmvm_user_list    | items                                                  | +| resource dynamix_kvmvm                 | rollback, cd, boot_disk, acl, qos, iotune, replication | + +#### Балансировщики нагрузок lb + +Параметры permanently и restore в terraform-provider-decort имели дефолтное значение false, а в terraform-provider-dynamix true. 
+ +Следующие поля в terraform-provider-decort имели тип списка (List), а в terraform-provider-dynamix имеют тип Single (единичная структура): + +| Название ресурса | Поля схемы | +|-------------------------------------|------------------------------------------------------------------------| +| data source dynamix_lb | server_default_settings, server_settings, primary_node, secondary_node | +| data source dynamix_lb_list | server_default_settings, server_settings, primary_node, secondary_node | +| data source dynamix_lb_list_deleted | server_default_settings, server_settings, primary_node, secondary_node | +| resource dynamix_lb | server_default_settings, server_settings, primary_node, secondary_node | +| resource dynamix_lb_backend | server_settings | \ No newline at end of file diff --git a/cmd/dynamix/main.go b/cmd/dynamix/main.go new file mode 100644 index 0000000..a5af33b --- /dev/null +++ b/cmd/dynamix/main.go @@ -0,0 +1,24 @@ +package main + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-log/tflog" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/provider" +) + +func main() { + opts := providerserver.ServeOpts{ + Address: "basistech.ru/tf/dynamix", + //Debug: true, + } + + // TODO version from env + ctx := context.Background() + err := providerserver.Serve(ctx, provider.New("dev"), opts) + if err != nil { + tflog.Error(ctx, "Error start provider server", map[string]any{"error": err.Error()}) + } + +} diff --git a/docs/data-sources/account.md b/docs/data-sources/account.md new file mode 100644 index 0000000..25ddbc7 --- /dev/null +++ b/docs/data-sources/account.md @@ -0,0 +1,106 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` 
(Number) account id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--acl)) +- `ckey` (String) +- `company` (String) +- `companyurl` (String) +- `compute_features` (List of String) +- `computes` (Attributes) (see [below for nested schema](#nestedatt--computes)) +- `cpu_allocation_parameter` (String) +- `cpu_allocation_ratio` (Number) +- `created_by` (String) +- `created_time` (Number) +- `dc_location` (String) +- `deactivation_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `displayname` (String) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `machines` (Attributes) (see [below for nested schema](#nestedatt--machines)) +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--resource_limits)) +- `send_access_emails` (Boolean) +- `status` (String) +- `updated_time` (Number) +- `version` (Number) +- `vins` (List of Number) +- `vinses` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `acl` + +Read-Only: + +- `can_be_deleted` (Boolean) +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `computes` + +Read-Only: + +- `started` (Number) +- `stopped` (Number) + + + +### Nested Schema for `machines` + +Read-Only: + +- `halted` (Number) +- `running` (Number) + + + +### Nested Schema for `resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) diff --git a/docs/data-sources/account_audits_list.md b/docs/data-sources/account_audits_list.md new file mode 100644 index 0000000..7461984 --- /dev/null +++ b/docs/data-sources/account_audits_list.md @@ -0,0 +1,48 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_audits_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_audits_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) account id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `call` (String) +- `responsetime` (Number) +- `statuscode` (Number) +- `timestamp` (Number) +- `user` (String) diff --git a/docs/data-sources/account_computes_list.md b/docs/data-sources/account_computes_list.md new file mode 100644 index 0000000..4b51092 --- /dev/null +++ b/docs/data-sources/account_computes_list.md @@ -0,0 +1,75 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_computes_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_computes_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) account id + +### Optional + +- `compute_id` (Number) find by compute id +- `extnet_id` (Number) find by external network id +- `extnet_name` (String) find by external network name +- `ip_address` (String) find by ip address +- `name` (String) find by name +- `page` (Number) page number +- `rg_id` (Number) find by resource group id +- `rg_name` (String) find by resource group name +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `tech_status` (String) find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `compute_id` (Number) +- `compute_name` (String) +- `cpus` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `ram` (Number) +- `registered` (Boolean) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `tech_status` (String) +- `total_disks_size` (Number) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vins_connected` (Number) diff --git a/docs/data-sources/account_consumed_units.md b/docs/data-sources/account_consumed_units.md new file mode 100644 index 0000000..d8d26f6 --- /dev/null +++ b/docs/data-sources/account_consumed_units.md @@ -0,0 +1,42 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_consumed_units Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_consumed_units (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) account id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/account_consumed_units_by_type.md b/docs/data-sources/account_consumed_units_by_type.md new file mode 100644 index 0000000..6b68668 --- /dev/null +++ b/docs/data-sources/account_consumed_units_by_type.md @@ -0,0 +1,37 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_consumed_units_by_type Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_consumed_units_by_type (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) account id +- `cu_type` (String) cloud unit resource type + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `cu_result` (Number) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/account_deleted_list.md b/docs/data-sources/account_deleted_list.md new file mode 100644 index 0000000..1eb0783 --- /dev/null +++ b/docs/data-sources/account_deleted_list.md @@ -0,0 +1,66 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_deleted_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_deleted_list (Data Source) + + + + + + +## Schema + +### Optional + +- `acl` (String) filter by acl +- `by_id` (Number) filter by id +- `name` (String) filter by name +- `page` (Number) page number +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--items--acl)) +- `compute_features` (List of String) +- `created_time` (Number) +- `deleted_time` (Number) +- `status` (String) +- `updated_time` (Number) + + +### Nested Schema for `items.acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) diff --git a/docs/data-sources/account_disks_list.md b/docs/data-sources/account_disks_list.md new file mode 100644 index 0000000..39d3942 --- /dev/null +++ b/docs/data-sources/account_disks_list.md @@ -0,0 +1,58 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_disks_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_disks_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) account id + +### Optional + +- `disk_id` (Number) find by disk id +- `disk_max_size` (Number) find by max size disk +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) find by type of the disks + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `disk_id` (Number) +- `disk_name` (String) +- `pool` (String) +- `sep_id` (Number) +- `shareable` (Boolean) +- `size_max` (Number) +- `type` (String) diff --git a/docs/data-sources/account_flipgroups_list.md b/docs/data-sources/account_flipgroups_list.md new file mode 100644 index 0000000..17543d9 --- /dev/null +++ b/docs/data-sources/account_flipgroups_list.md @@ -0,0 +1,73 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_flipgroups_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_flipgroups_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) ID of the account + +### Optional + +- `by_ip` (String) find by ip address +- `extnet_id` (Number) find by extnet ID +- `flipgroup_id` (Number) find by flipgroup id +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) page size +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vins_id` (Number) find by vins ID +- `vins_name` (String) find by vins name + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `client_type` (String) +- `conn_type` (String) +- `created_by` (String) +- `created_time` (Number) +- `default_gw` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `fg_id` (Number) +- `fg_name` (String) +- `gid` (Number) +- `guid` (Number) +- `ip` (String) +- `milestones` (Number) +- `net_id` (Number) +- `net_type` (String) +- `netmask` (Number) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) diff --git a/docs/data-sources/account_list.md b/docs/data-sources/account_list.md new file mode 100644 index 0000000..eb39166 --- /dev/null +++ b/docs/data-sources/account_list.md @@ -0,0 +1,67 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_list (Data Source) + + + + + + +## Schema + +### Optional + +- `acl` (String) filter by acl +- `by_id` (Number) filter by id +- `name` (String) filter by name +- `page` (Number) page number +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) filter by status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--items--acl)) +- `compute_features` (List of String) +- `created_time` (Number) +- `deleted_time` (Number) +- `status` (String) +- `updated_time` (Number) + + +### Nested Schema for `items.acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) diff --git a/docs/data-sources/account_reserved_units.md b/docs/data-sources/account_reserved_units.md new file mode 100644 index 0000000..9559e75 --- /dev/null +++ b/docs/data-sources/account_reserved_units.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_reserved_units Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_reserved_units (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/account_resource_consumption_get.md b/docs/data-sources/account_resource_consumption_get.md new file mode 100644 index 0000000..5705bf9 --- /dev/null +++ b/docs/data-sources/account_resource_consumption_get.md @@ -0,0 +1,102 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_resource_consumption_get Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_resource_consumption_get (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) + +### Optional + +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--resource_limits)) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `consumed` (Attributes) (see [below for nested schema](#nestedatt--consumed)) +- `reserved` (Attributes) (see [below for nested schema](#nestedatt--reserved)) + + +### Nested Schema for `resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) + + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `consumed` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `ext_ips` (Number) +- `ext_traffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--consumed--seps)) + + +### Nested Schema for `consumed.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) + + + + +### Nested Schema for `reserved` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `ext_ips` (Number) +- `ext_traffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--reserved--seps)) + + +### Nested Schema for `reserved.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) diff --git a/docs/data-sources/account_resource_consumption_list.md b/docs/data-sources/account_resource_consumption_list.md new file mode 100644 index 0000000..6a15a97 --- /dev/null +++ b/docs/data-sources/account_resource_consumption_list.md @@ -0,0 +1,95 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_resource_consumption_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_resource_consumption_list (Data Source) + + + + + + +## Schema + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Required: + +- `account_id` (Number) + +Read-Only: + +- `consumed` (Attributes) (see [below for nested schema](#nestedatt--items--consumed)) +- `reserved` (Attributes) (see [below for nested schema](#nestedatt--items--reserved)) + + +### Nested Schema for `items.consumed` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `ext_ips` (Number) +- `ext_traffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--items--consumed--seps)) + + +### Nested Schema for `items.consumed.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) + + + + +### Nested Schema for `items.reserved` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `ext_ips` (Number) +- `ext_traffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--items--reserved--seps)) + + +### Nested Schema for `items.reserved.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) diff --git a/docs/data-sources/account_rg_list.md b/docs/data-sources/account_rg_list.md new file mode 100644 index 0000000..b87caba --- /dev/null +++ b/docs/data-sources/account_rg_list.md @@ -0,0 +1,148 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_rg_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_rg_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) ID of the account + +### Optional + +- `name` (String) find by name +- `page` (Number) page number +- `rg_id` (Number) find by rg id +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) find by status +- 
`timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vins_id` (Number) find by vins id +- `vm_id` (Number) find by vm id + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `computes` (Attributes) (see [below for nested schema](#nestedatt--items--computes)) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `milestones` (Number) +- `resources` (Attributes) (see [below for nested schema](#nestedatt--items--resources)) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vinses` (Number) + + +### Nested Schema for `items.computes` + +Read-Only: + +- `started` (Number) +- `stopped` (Number) + + + +### Nested Schema for `items.resources` + +Read-Only: + +- `consumed` (Attributes) (see [below for nested schema](#nestedatt--items--resources--consumed)) +- `limits` (Attributes) (see [below for nested schema](#nestedatt--items--resources--limits)) +- `reserved` (Attributes) (see [below for nested schema](#nestedatt--items--resources--reserved)) + + +### Nested Schema for `items.resources.consumed` + +Read-Only: + +- `cpu` (Number) +- `disksize` (Number) +- `disksizemax` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--items--resources--consumed--seps)) + + +### Nested Schema for `items.resources.consumed.seps` + +Read-Only: + +- `disksize` (Number) +- 
`disksizemax` (Number) +- `pool_name` (String) +- `sep_id` (String) + + + + +### Nested Schema for `items.resources.limits` + +Read-Only: + +- `cpu` (Number) +- `disksize` (Number) +- `disksizemax` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Number) + + + +### Nested Schema for `items.resources.reserved` + +Read-Only: + +- `cpu` (Number) +- `disksize` (Number) +- `disksizemax` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--items--resources--reserved--seps)) + + +### Nested Schema for `items.resources.reserved.seps` + +Read-Only: + +- `disksize` (Number) +- `disksizemax` (Number) +- `pool_name` (String) +- `sep_id` (String) diff --git a/docs/data-sources/account_templates_list.md b/docs/data-sources/account_templates_list.md new file mode 100644 index 0000000..4ae0156 --- /dev/null +++ b/docs/data-sources/account_templates_list.md @@ -0,0 +1,61 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_templates_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_templates_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) ID of the account + +### Optional + +- `image_id` (Number) find by image id +- `include_deleted` (Boolean) include deleted images +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) find by type + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `desc` (String) +- `public` (Boolean) +- `size` (Number) +- `status` (String) +- `template_id` (Number) +- `template_name` (String) +- `type` (String) +- `unc_path` (String) +- `username` (String) diff --git a/docs/data-sources/account_vins_list.md b/docs/data-sources/account_vins_list.md new file mode 100644 index 0000000..5f15f44 --- /dev/null +++ b/docs/data-sources/account_vins_list.md @@ -0,0 +1,69 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account_vins_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account_vins_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) + +### Optional + +- `ext_ip` (String) +- `name` (String) +- `page` (Number) +- `rg_id` (Number) +- `size` (Number) +- `sort_by` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vins_id` (Number) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `computes` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `external_ip` (String) +- `extnet_id` (Number) +- `free_ips` (Number) +- `network` (String) +- `pri_vnf_dev_id` (Number) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vin_id` (Number) +- `vin_name` (String) diff --git a/docs/data-sources/bservice.md b/docs/data-sources/bservice.md new file mode 100644 index 0000000..e16b9ef --- /dev/null +++ b/docs/data-sources/bservice.md @@ -0,0 +1,104 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice (Data Source) + + + + + + +## Schema + +### Required + +- `service_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `base_domain` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `cpu_total` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `disk_total` (Number) +- `gid` (Number) +- `groups` (Attributes List) (see [below for nested schema](#nestedatt--groups)) +- `guid` (Number) +- `milestones` (Number) +- `parent_srv_id` (Number) +- `parent_srv_type` (String) +- `ram_total` (Number) +- `rg_id` (Number) +- `rg_name` (String) +- `service_name` (String) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--snapshots)) +- `ssh_key` (String) +- `ssh_user` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) + + +### 
Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `account_id` (Number) +- `architecture` (String) +- `compgroup_id` (Number) +- `compgroup_name` (String) +- `compgroup_role` (String) +- `id` (Number) +- `name` (String) +- `rg_id` (Number) +- `stack_id` (Number) +- `status` (String) +- `tech_status` (String) + + + +### Nested Schema for `groups` + +Read-Only: + +- `computes` (Number) +- `consistency` (Boolean) +- `id` (Number) +- `name` (String) +- `status` (String) +- `tech_status` (String) + + + +### Nested Schema for `snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `timestamp` (Number) +- `valid` (Boolean) diff --git a/docs/data-sources/bservice_deleted_list.md b/docs/data-sources/bservice_deleted_list.md new file mode 100644 index 0000000..c0eabcc --- /dev/null +++ b/docs/data-sources/bservice_deleted_list.md @@ -0,0 +1,66 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice_deleted_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice_deleted_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) +- `page` (Number) +- `rg_id` (Number) +- `size` (Number) +- `sort_by` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `base_domain` (String) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `gid` (Number) +- `groups` (List of Number) +- `guid` (Number) +- `parent_srv_id` (Number) +- `parent_srv_type` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `service_id` (Number) +- `service_name` (String) +- `ssh_user` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) diff --git a/docs/data-sources/bservice_group.md b/docs/data-sources/bservice_group.md new file mode 100644 index 0000000..cbc2611 --- /dev/null +++ b/docs/data-sources/bservice_group.md @@ -0,0 +1,84 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice_group Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice_group (Data Source) + + + + + + +## Schema + +### Required + +- `compgroup_id` (Number) +- `service_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `compgroup_name` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `consistency` (Boolean) +- `cpu` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `disk` (Number) +- `driver` (String) +- `extnets` (List of Number) +- `gid` (Number) +- `guid` (Number) +- `image_id` (Number) +- `milestones` (Number) +- `parents` (List of Number) +- `ram` (Number) +- `rg_id` (Number) +- `rg_name` (String) +- `role` (String) +- `sep_id` (Number) +- `seq_no` (Number) +- `status` (String) +- `tech_status` (String) +- 
`timeout_start` (Number) +- `updated_by` (String) +- `updated_time` (Number) +- `vinses` (List of Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `id` (Number) +- `ip_addresses` (List of String) +- `name` (String) +- `os_users` (Attributes List) (see [below for nested schema](#nestedatt--computes--os_users)) + + +### Nested Schema for `computes.os_users` + +Read-Only: + +- `login` (String) +- `password` (String) diff --git a/docs/data-sources/bservice_list.md b/docs/data-sources/bservice_list.md new file mode 100644 index 0000000..bb015a3 --- /dev/null +++ b/docs/data-sources/bservice_list.md @@ -0,0 +1,72 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) +- `account_name` (String) +- `by_id` (Number) +- `name` (String) +- `page` (Number) +- `rg_id` (Number) +- `rg_name` (String) +- `size` (Number) +- `sort_by` (String) +- `status` (String) +- `tech_status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `base_domain` (String) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `gid` (Number) +- `groups` (List of Number) +- `guid` (Number) +- `parent_srv_id` (Number) +- `parent_srv_type` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `service_id` (Number) +- `service_name` (String) +- `ssh_user` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) diff --git a/docs/data-sources/bservice_snapshot_list.md b/docs/data-sources/bservice_snapshot_list.md new file mode 100644 index 0000000..90591d5 --- /dev/null +++ b/docs/data-sources/bservice_snapshot_list.md @@ -0,0 +1,47 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice_snapshot_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice_snapshot_list (Data Source) + + + + + + +## Schema + +### Required + +- `service_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `timestamp` (Number) +- `valid` (Boolean) diff --git a/docs/data-sources/disk.md b/docs/data-sources/disk.md new file mode 100644 index 0000000..a3d574b --- /dev/null +++ b/docs/data-sources/disk.md @@ -0,0 +1,111 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk (Data Source) + + + + + + +## Schema + +### Required + +- `disk_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `devicename` (String) +- `disk_name` (String) +- `gid` (Number) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--iotune)) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_id` (Number) +- `sep_type` (String) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `compute_id` (String) +- `compute_name` (String) + + + +### Nested Schema for `iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/disk_list.md b/docs/data-sources/disk_list.md new file mode 100644 index 0000000..c06faa4 --- /dev/null +++ b/docs/data-sources/disk_list.md @@ -0,0 +1,131 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) ID of the account the disks belong to +- `account_name` (String) Find by account name +- `by_id` (Number) Find by ID +- `disk_max_size` (Number) Find by max disk size +- `name` (String) Find by name +- `page` (Number) Page number +- `pool_name` (String) find by pool name +- `sep_id` (Number) find by sep ID +- `shared` (Boolean) Find by shared field +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) Find by status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) type of the disks + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this 
resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--items--computes)) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `devicename` (String) +- `disk_id` (Number) +- `disk_name` (String) +- `gid` (Number) +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--items--iotune)) +- `machine_id` (Number) +- `machine_name` (String) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_id` (Number) +- `sep_type` (String) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--items--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `items.computes` + +Read-Only: + +- `compute_id` (String) +- `compute_name` (String) + + + +### Nested Schema for `items.iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` 
(Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `items.snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/disk_list_deleted.md b/docs/data-sources/disk_list_deleted.md new file mode 100644 index 0000000..986ac06 --- /dev/null +++ b/docs/data-sources/disk_list_deleted.md @@ -0,0 +1,128 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_list_deleted Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_list_deleted (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) ID of the account the disks belong to +- `account_name` (String) Find by account name +- `by_id` (Number) Find by ID +- `disk_max_size` (Number) Find by max disk size +- `name` (String) Find by name +- `page` (Number) Page number +- `shared` (Boolean) Find by shared field +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) type of the disks + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--items--computes)) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `devicename` (String) +- `disk_id` (Number) +- `disk_name` (String) +- `gid` (Number) +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--items--iotune)) +- `machine_id` (Number) +- `machine_name` (String) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_id` (Number) +- `sep_type` (String) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--items--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `items.computes` + +Read-Only: + +- `compute_id` (String) +- `compute_name` (String) + + + +### Nested Schema for `items.iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `items.snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/disk_list_types.md b/docs/data-sources/disk_list_types.md new file 
mode 100644 index 0000000..f1ed067 --- /dev/null +++ b/docs/data-sources/disk_list_types.md @@ -0,0 +1,36 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_list_types Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_list_types (Data Source) + + + + + + +## Schema + +### Optional + +- `page` (Number) Page number +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `types` (List of String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/data-sources/disk_list_types_detailed.md b/docs/data-sources/disk_list_types_detailed.md new file mode 100644 index 0000000..7284baa --- /dev/null +++ b/docs/data-sources/disk_list_types_detailed.md @@ -0,0 +1,55 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_list_types_detailed Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_list_types_detailed (Data Source) + + + + + + +## Schema + +### Optional + +- `page` (Number) Page number +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `pools` (Attributes List) (see [below for nested schema](#nestedatt--items--pools)) +- `sep_id` (Number) +- `sep_name` (String) + + +### Nested Schema for `items.pools` + +Read-Only: + +- `name` (String) +- `system` (String) +- `types` (List of String) diff --git a/docs/data-sources/disk_list_unattached.md b/docs/data-sources/disk_list_unattached.md new file mode 100644 index 0000000..90a0eaf --- /dev/null +++ b/docs/data-sources/disk_list_unattached.md @@ -0,0 +1,126 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_list_unattached Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_list_unattached (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) ID of the account the disks belong to +- `account_name` (String) Find by account name +- `by_id` (Number) Find by ID +- `disk_max_size` (Number) Find by max disk size +- `page` (Number) Page number +- `pool_name` (String) find by pool name +- `sep_id` (Number) find by sep ID +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) Find by status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) type of the disks + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (String) +- `boot_partition` (Number) +- `ckey` (String) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `disk_id` (Number) +- `disk_name` (String) +- `disk_path` (String) +- `gid` (Number) +- `guid` (Number) +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--items--iotune)) +- `iqn` (String) +- `login` (String) +- `meta` (List of String) +- `milestones` (Number) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `passwd` (String) +- `pci_slot` (Number) +- `pool` (String) +- `purge_attempts` (Number) +- `purge_time` (Number) +- `reality_device_number` (Number) +- `reference_id` (String) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_id` (Number) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--items--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `items.iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- 
`write_iops_sec_max` (Number) + + + +### Nested Schema for `items.snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/disk_replication.md b/docs/data-sources/disk_replication.md new file mode 100644 index 0000000..e2c7abb --- /dev/null +++ b/docs/data-sources/disk_replication.md @@ -0,0 +1,126 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_replication Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_replication (Data Source) + + + + + + +## Schema + +### Required + +- `disk_id` (Number) Id of primary disk +- `replica_disk_id` (Number) Id of secondary disk + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) The unique ID of the subscriber-owner of the disk +- `account_name` (String) The name of the subscriber '(account') to whom this disk belongs +- `acl` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `devicename` (String) +- `disk_name` (String) +- `gid` (Number) +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--iotune)) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `replication` (Attributes) (see [below for nested schema](#nestedatt--replication)) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_id` (Number) +- `sep_type` (String) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested 
schema](#nestedatt--snapshots)) +- `status` (String) +- `status_replication` (String) Status of replication +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `compute_id` (String) +- `compute_name` (String) + + + +### Nested Schema for `iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `replication` + +Read-Only: + +- `disk_id` (Number) +- `pool_id` (String) +- `role` (String) +- `self_volume_id` (String) +- `storage_id` (String) +- `volume_id` (String) + + + +### Nested Schema for `snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/disk_snapshot.md b/docs/data-sources/disk_snapshot.md new file mode 100644 index 0000000..25cf7ef --- /dev/null +++ b/docs/data-sources/disk_snapshot.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_snapshot Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_snapshot (Data Source) + + + + + + +## Schema + +### Required + +- `disk_id` (Number) The unique ID of the subscriber-owner of the disk +- `label` (String) Name of the 
snapshot + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `guid` (String) +- `id` (String) The ID of this resource. +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/data-sources/disk_snapshot_list.md b/docs/data-sources/disk_snapshot_list.md new file mode 100644 index 0000000..77ffb79 --- /dev/null +++ b/docs/data-sources/disk_snapshot_list.md @@ -0,0 +1,49 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_snapshot_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_snapshot_list (Data Source) + + + + + + +## Schema + +### Required + +- `disk_id` (Number) The unique ID of the subscriber-owner of the disk + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/extnet.md b/docs/data-sources/extnet.md new file mode 100644 index 0000000..0447def --- /dev/null +++ b/docs/data-sources/extnet.md @@ -0,0 +1,107 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_extnet Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_extnet (Data Source) + + + + + + +## Schema + +### Required + +- `net_id` (Number) ext net id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `check_ips` (List of String) +- `ckey` (String) +- `default` (Boolean) +- `default_qos` (Attributes) (see [below for nested schema](#nestedatt--default_qos)) +- `desc` (String) +- `dns` (List of String) +- `excluded` (Attributes List) (see [below for nested schema](#nestedatt--excluded)) +- `free_ips` (Number) +- `gateway` (String) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `ipcidr` (String) +- `meta` (List of String) +- `milestones` (Number) +- `net_name` (String) +- `network` (String) +- `network_id` (Number) +- `pre_reservations_num` (Number) +- `prefix` (Number) +- `pri_vnf_dev_id` (Number) +- `reservations` (Attributes List) (see [below for nested schema](#nestedatt--reservations)) +- `shared_with` (List of Number) +- `status` (String) +- `vlan_id` (Number) +- `vnfs` (Attributes) (see [below for nested schema](#nestedatt--vnfs)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `default_qos` + +Read-Only: + +- `e_burst` (Number) +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + +### Nested Schema for `excluded` + +Read-Only: + +- `client_type` (String) +- `ip` (String) +- `mac` (String) +- `type` (String) +- `vm_id` (Number) + + + +### Nested Schema for `reservations` + +Read-Only: + +- `client_type` (String) +- `desc` (String) +- `domainname` (String) +- `hostname` (String) +- `ip` (String) +- `mac` (String) +- `type` (String) +- `vm_id` (Number) + + + +### Nested Schema for `vnfs` + +Read-Only: + +- `dhcp` (Number) diff --git a/docs/data-sources/extnet_computes_list.md b/docs/data-sources/extnet_computes_list.md new file mode 100644 index 0000000..af5d11c --- /dev/null +++ b/docs/data-sources/extnet_computes_list.md @@ -0,0 +1,66 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_extnet_computes_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_extnet_computes_list (Data Source) + + + + + + +## Schema + +### Required + +- `account_id` (Number) find by account id + +### Optional + +- `compute_id` (Number) find by compute id +- `page` (Number) page number +- `rg_id` (Number) find by rg id +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `extnets` (Attributes List) (see [below for nested schema](#nestedatt--items--extnets)) +- `id` (Number) +- `name` (String) +- `rg_id` (Number) +- `rg_name` (String) + + +### Nested Schema for `items.extnets` + +Read-Only: + +- `ipaddr` (String) +- `ipcidr` (String) +- `name` (String) +- `net_id` (Number) diff --git a/docs/data-sources/extnet_default.md b/docs/data-sources/extnet_default.md new file mode 100644 index 0000000..58231db --- /dev/null +++ b/docs/data-sources/extnet_default.md @@ -0,0 +1,32 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_extnet_default Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_extnet_default (Data Source) + + + + + + +## Schema + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `net_id` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/extnet_list.md b/docs/data-sources/extnet_list.md new file mode 100644 index 0000000..85fcbea --- /dev/null +++ b/docs/data-sources/extnet_list.md @@ -0,0 +1,54 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_extnet_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_extnet_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) find by account id +- `by_id` (Number) find by id +- `name` (String) find by name +- `network` (String) find by network ip address +- `page` (Number) page number +- `size` (Number) page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) find by status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vlan_id` (Number) find by vlan id +- `vnfdev_id` (Number) find by vnfdevices id + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `ipcidr` (String) +- `name` (String) +- `net_id` (Number) +- `status` (String) diff --git a/docs/data-sources/flipgroup.md b/docs/data-sources/flipgroup.md new file mode 100644 index 0000000..3dc777c --- /dev/null +++ b/docs/data-sources/flipgroup.md @@ -0,0 +1,60 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_flipgroup Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_flipgroup (Data Source) + + + + + + +## Schema + +### Required + +- `flipgroup_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `client_ids` (List of Number) +- `client_type` (String) +- `conn_id` (Number) +- `conn_type` (String) +- `created_by` (String) +- `created_time` (Number) +- `default_gw` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `gid` (Number) +- `guid` (Number) +- `id` (Number) The ID of this resource. +- `ip` (String) +- `milestones` (Number) +- `name` (String) +- `net_id` (Number) +- `net_type` (String) +- `network` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/flipgroup_list.md b/docs/data-sources/flipgroup_list.md new file mode 100644 index 0000000..1903e47 --- /dev/null +++ b/docs/data-sources/flipgroup_list.md @@ -0,0 +1,73 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_flipgroup_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_flipgroup_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) Account id +- `by_id` (Number) Filter by ID +- `by_ip` (String) Filter by IP-address +- `client_ids` (List of Number) client_ids +- `conn_id` (Number) Conn id +- `extnet_id` (Number) Filter by ExtNetID +- `name` (String) Filter by Name +- `page` (Number) Page number +- `rg_id` (Number) Filter by RG ID +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) Status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vins_id` (Number) Filter by ViNS ID +- `vins_name` (String) Filter by ViNS name + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `client_ids` (List of Number) +- `client_type` (String) +- `conn_id` (Number) +- `conn_type` (String) +- `default_gw` (String) +- `desc` (String) +- `flipgroup_id` (Number) +- `gid` (Number) +- `guid` (Number) +- `ip` (String) +- `meta` (List of String) +- `milestones` (Number) +- `name` (String) +- `net_id` (Number) +- `net_mask` (Number) +- `net_type` (String) +- `status` (String) diff --git a/docs/data-sources/image.md b/docs/data-sources/image.md new file mode 100644 index 0000000..7d1822d --- /dev/null +++ b/docs/data-sources/image.md @@ -0,0 +1,83 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_image Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_image (Data Source) + + + + + + +## Schema + +### Required + +- `image_id` (Number) + +### Optional + +- `show_all` (Boolean) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `acl` (List of String) +- `architecture` (String) +- `boot_type` (String) +- `bootable` (Boolean) +- `cd_presented_to` (String) +- `ckey` (String) +- `compute_ci_id` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `drivers` (List of String) +- `enabled` (Boolean) +- `gid` (Number) +- `guid` (Number) +- `history` (Attributes List) (see [below for nested schema](#nestedatt--history)) +- `hot_resize` (Boolean) +- `id` (String) The ID of this resource. 
+- `image_name` (String) +- `image_type` (String) +- `last_modified` (Number) +- `link_to` (Number) +- `milestones` (Number) +- `network_interface_naming` (String) +- `password` (String) +- `pool_name` (String) +- `present_to` (List of Number) +- `provider_name` (String) +- `purge_attempts` (Number) +- `res_id` (String) +- `rescuecd` (Boolean) +- `sep_id` (Number) +- `shared_with` (List of Number) +- `size` (Number) +- `status` (String) +- `tech_status` (String) +- `unc_path` (String) +- `username` (String) +- `version` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `history` + +Read-Only: + +- `guid` (String) +- `id` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/image_list.md b/docs/data-sources/image_list.md new file mode 100644 index 0000000..3fd387f --- /dev/null +++ b/docs/data-sources/image_list.md @@ -0,0 +1,74 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_image_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_image_list (Data Source) + + + + + + +## Schema + +### Optional + +- `architecture` (String) +- `bootable` (Boolean) +- `by_id` (Number) +- `hot_resize` (Boolean) +- `image_size` (Number) +- `name` (String) +- `page` (Number) +- `pool_name` (String) +- `public` (Boolean) +- `sep_id` (Number) +- `sep_name` (String) +- `size` (Number) +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type_image` (String) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `architecture` (String) +- `boot_type` (String) +- `bootable` (Boolean) +- `cdrom` (Boolean) +- `desc` (String) +- `drivers` (List of String) +- `hot_resize` (Boolean) +- `image_id` (Number) +- `image_name` (String) +- `image_type` (String) +- `link_to` (Number) +- `network_interface_naming` (String) +- `pool_name` (String) +- `sep_id` (Number) +- `size` (Number) +- `status` (String) +- `username` (String) +- `virtual` (Boolean) diff --git a/docs/data-sources/k8ci_list.md b/docs/data-sources/k8ci_list.md new file mode 100644 index 0000000..35c8bfc --- /dev/null +++ b/docs/data-sources/k8ci_list.md @@ -0,0 +1,58 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8ci_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8ci_list (Data Source) + + + + + + +## Schema + +### Optional + +- `by_id` (Number) +- `include_disabled` (Boolean) +- `master_driver` (String) +- `name` (String) +- `network_plugins` (String) +- `page` (Number) +- `size` (Number) +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `worker_driver` (String) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `created_time` (Number) +- `desc` (String) +- `k8ci_id` (Number) +- `k8ci_name` (String) +- `lb_image_id` (Number) +- `network_plugins` (List of String) +- `status` (String) +- `version` (String) diff --git a/docs/data-sources/k8s.md b/docs/data-sources/k8s.md new file mode 100644 index 0000000..c8b6b52 --- /dev/null +++ b/docs/data-sources/k8s.md @@ -0,0 +1,185 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s (Data Source) + + + + + + +## Schema + +### Required + +- `k8s_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes) (see [below for nested schema](#nestedatt--acl)) +- `bservice_id` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `extnet_id` (Number) +- `id` (String) The ID of this resource. 
+- `k8sci_id` (Number) +- `k8sci_name` (String) +- `kubeconfig` (String) +- `lb_id` (Number) +- `lb_ip` (String) +- `masters` (Attributes) (see [below for nested schema](#nestedatt--masters)) +- `name` (String) +- `network_plugin` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins_id` (Number) +- `workers` (Attributes List) (see [below for nested schema](#nestedatt--workers)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `acl` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--account_acl)) +- `k8s_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--k8s_acl)) +- `rg_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--rg_acl)) + + +### Nested Schema for `acl.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.k8s_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.rg_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + + +### Nested Schema for `masters` + +Read-Only: + +- `cpu` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--masters--detailed_info)) +- `disk` (Number) +- `master_group_id` (Number) +- `master_group_name` (String) +- `num` (Number) +- `ram` (Number) + + +### Nested Schema for 
`masters.detailed_info` + +Read-Only: + +- `compute_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--masters--detailed_info--interfaces)) +- `name` (String) +- `natable_vins_ip` (String) +- `natable_vins_network` (String) +- `status` (String) +- `tech_status` (String) + + +### Nested Schema for `masters.detailed_info.interfaces` + +Read-Only: + +- `def_gw` (String) +- `ip_address` (String) + + + + + +### Nested Schema for `workers` + +Read-Only: + +- `annotations` (List of String) +- `cpu` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--workers--detailed_info)) +- `disk` (Number) +- `guid` (String) +- `labels` (List of String) +- `num` (Number) +- `ram` (Number) +- `taints` (List of String) +- `worker_group_id` (Number) +- `worker_group_name` (String) + + +### Nested Schema for `workers.detailed_info` + +Read-Only: + +- `compute_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--workers--detailed_info--interfaces)) +- `name` (String) +- `natable_vins_ip` (String) +- `natable_vins_network` (String) +- `status` (String) +- `tech_status` (String) + + +### Nested Schema for `workers.detailed_info.interfaces` + +Read-Only: + +- `def_gw` (String) +- `ip_address` (String) diff --git a/docs/data-sources/k8s_computes.md b/docs/data-sources/k8s_computes.md new file mode 100644 index 0000000..04d4cd8 --- /dev/null +++ b/docs/data-sources/k8s_computes.md @@ -0,0 +1,61 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_computes Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_computes (Data Source) + + + + + + +## Schema + +### Required + +- `k8s_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `masters` (Attributes List) (see [below for nested schema](#nestedatt--masters)) +- `workers` (Attributes List) (see [below for nested schema](#nestedatt--workers)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `masters` + +Read-Only: + +- `compute_id` (Number) +- `group_name` (String) +- `name` (String) +- `status` (String) +- `tech_status` (String) + + + +### Nested Schema for `workers` + +Read-Only: + +- `compute_id` (Number) +- `group_name` (String) +- `name` (String) +- `status` (String) +- `tech_status` (String) diff --git a/docs/data-sources/k8s_list.md b/docs/data-sources/k8s_list.md new file mode 100644 index 0000000..5e986f0 --- /dev/null +++ b/docs/data-sources/k8s_list.md @@ -0,0 +1,117 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_list (Data Source) + + + + + + +## Schema + +### Optional + +- `bservice_id` (Number) +- `by_id` (Number) +- `includedeleted` (Boolean) +- `ip_address` (String) +- `lb_id` (Number) +- `name` (String) +- `page` (Number) +- `rg_id` (Number) +- `size` (Number) +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) +- `tech_status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (List of String) +- `bservice_id` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `extnet_id` (Number) +- `gid` (Number) +- `guid` (Number) +- `k8s_id` (Number) +- `k8s_name` (String) +- `k8sci_id` (Number) +- `lb_id` (Number) +- `milestones` (Number) +- `network_plugin` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `service_account` (Attributes) (see [below for nested schema](#nestedatt--items--service_account)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins_id` (Number) +- `workers_groups` (Attributes List) (see [below for nested schema](#nestedatt--items--workers_groups)) + + +### Nested Schema for `items.service_account` + +Read-Only: + +- `guid` (String) +- `password` (String) +- `username` (String) + + + +### Nested Schema for `items.workers_groups` + +Read-Only: + +- `annotations` (List of String) +- `cpu` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--items--workers_groups--detailed_info)) +- `disk` (Number) +- `guid` (String) +- `labels` (List of String) +- `num` (Number) +- `ram` (Number) +- `taints` (List of String) +- `worker_group_id` (Number) +- `worker_group_name` (String) + + +### Nested Schema for `items.workers_groups.detailed_info` + +Read-Only: + +- `externalip` (String) +- `id` (Number) +- `name` (String) +- `status` (String) +- `tech_status` (String) diff --git 
a/docs/data-sources/k8s_list_deleted.md b/docs/data-sources/k8s_list_deleted.md new file mode 100644 index 0000000..f256018 --- /dev/null +++ b/docs/data-sources/k8s_list_deleted.md @@ -0,0 +1,115 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_list_deleted Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_list_deleted (Data Source) + + + + + + +## Schema + +### Optional + +- `bservice_id` (Number) +- `by_id` (Number) +- `ip_address` (String) +- `lb_id` (Number) +- `name` (String) +- `page` (Number) +- `rg_id` (Number) +- `size` (Number) +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `tech_status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (List of String) +- `bservice_id` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `extnet_id` (Number) +- `gid` (Number) +- `guid` (Number) +- `k8s_id` (Number) +- `k8s_name` (String) +- `k8sci_id` (Number) +- `lb_id` (Number) +- `milestones` (Number) +- `network_plugin` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `service_account` (Attributes) (see [below for nested schema](#nestedatt--items--service_account)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins_id` (Number) +- `workers_groups` (Attributes List) (see [below for nested schema](#nestedatt--items--workers_groups)) + + +### Nested Schema for `items.service_account` + +Read-Only: + +- `guid` (String) +- `password` (String) +- `username` (String) + + + +### Nested Schema for `items.workers_groups` + +Read-Only: + +- `annotations` (List of String) +- `cpu` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--items--workers_groups--detailed_info)) +- `disk` (Number) +- `guid` (String) +- `labels` (List of String) +- `num` (Number) +- `ram` (Number) +- `taints` (List of String) +- `worker_group_id` (Number) +- `worker_group_name` (String) + + +### Nested Schema for `items.workers_groups.detailed_info` + +Read-Only: + +- `externalip` (String) +- `id` (Number) +- `name` (String) +- `status` (String) +- `tech_status` (String) diff --git a/docs/data-sources/k8s_wg.md b/docs/data-sources/k8s_wg.md new file mode 100644 index 0000000..e6878d5 --- /dev/null +++ b/docs/data-sources/k8s_wg.md @@ -0,0 +1,68 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_wg Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_wg (Data 
Source) + + + + + + +## Schema + +### Required + +- `k8s_id` (Number) +- `wg_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `annotations` (List of String) +- `cpu` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--detailed_info)) +- `disk` (Number) +- `guid` (String) +- `id` (String) The ID of this resource. +- `labels` (List of String) +- `num` (Number) +- `ram` (Number) +- `taints` (List of String) +- `worker_group_name` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `detailed_info` + +Read-Only: + +- `compute_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--detailed_info--interfaces)) +- `name` (String) +- `natable_vins_ip` (String) +- `natable_vins_network` (String) +- `status` (String) +- `tech_status` (String) + + +### Nested Schema for `detailed_info.interfaces` + +Read-Only: + +- `def_gw` (String) +- `ip_address` (String) diff --git a/docs/data-sources/k8s_wg_cloud_init.md b/docs/data-sources/k8s_wg_cloud_init.md new file mode 100644 index 0000000..6745830 --- /dev/null +++ b/docs/data-sources/k8s_wg_cloud_init.md @@ -0,0 +1,37 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_wg_cloud_init Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_wg_cloud_init (Data Source) + + + + + + +## Schema + +### Required + +- `k8s_id` (Number) +- `wg_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `cloud_init` (String) +- `id` (String) The 
ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/data-sources/k8s_wg_list.md b/docs/data-sources/k8s_wg_list.md new file mode 100644 index 0000000..6e2d157 --- /dev/null +++ b/docs/data-sources/k8s_wg_list.md @@ -0,0 +1,75 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_wg_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_wg_list (Data Source) + + + + + + +## Schema + +### Required + +- `k8s_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `annotations` (List of String) +- `cpu` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--items--detailed_info)) +- `disk` (Number) +- `guid` (String) +- `labels` (List of String) +- `num` (Number) +- `ram` (Number) +- `taints` (List of String) +- `worker_group_id` (Number) +- `worker_group_name` (String) + + +### Nested Schema for `items.detailed_info` + +Read-Only: + +- `compute_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--items--detailed_info--interfaces)) +- `name` (String) +- `natable_vins_ip` (String) +- `natable_vins_network` (String) +- `status` (String) +- `tech_status` (String) + + +### Nested Schema for `items.detailed_info.interfaces` + +Read-Only: + +- `def_gw` (String) +- `ip_address` (String) diff --git a/docs/data-sources/kvmvm.md b/docs/data-sources/kvmvm.md new file mode 100644 index 0000000..d58d4f6 --- /dev/null +++ b/docs/data-sources/kvmvm.md @@ -0,0 +1,331 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes) (see [below for nested schema](#nestedatt--acl)) +- `affinity_label` (String) +- `affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--affinity_rules)) +- `affinity_weight` (Number) +- `anti_affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--anti_affinity_rules)) +- `arch` (String) +- `boot_order` (List of String) +- `bootdisk_size` (Number) +- `cd_image_id` (Number) +- `clone_reference` (Number) +- `clones` (List of Number) +- 
`computeci_id` (Number) +- `cpu_pin` (Boolean) +- `cpus` (Number) +- `created_by` (String) +- `created_time` (Number) +- `custom_fields` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `devices` (String) +- `disks` (Attributes List) (see [below for nested schema](#nestedatt--disks)) +- `driver` (String) +- `gid` (Number) +- `guid` (Number) +- `hp_backed` (Boolean) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `image_name` (String) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--interfaces)) +- `lock_status` (String) +- `manager_id` (Number) +- `manager_type` (String) +- `migrationjob` (Number) +- `milestones` (Number) +- `name` (String) +- `natable_vins_id` (Number) +- `natable_vins_ip` (String) +- `natable_vins_name` (String) +- `natable_vins_network` (String) +- `natable_vins_network_name` (String) +- `need_reboot` (Boolean) +- `numa_affinity` (String) +- `numa_node_id` (Number) +- `os_users` (Attributes List) (see [below for nested schema](#nestedatt--os_users)) +- `pinned` (Boolean) +- `ram` (Number) +- `reference_id` (String) +- `registered` (Boolean) +- `res_name` (String) +- `reserved_node_cpus` (List of Number) +- `rg_id` (Number) +- `rg_name` (String) +- `snap_sets` (Attributes List) (see [below for nested schema](#nestedatt--snap_sets)) +- `stateless_sep_id` (Number) +- `stateless_sep_type` (String) +- `status` (String) +- `tags` (Map of String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_data` (String) +- `user_managed` (Boolean) +- `vgpus` (List of Number) +- `virtual_image_id` (Number) +- `virtual_image_name` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `acl` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--account_acl)) +- `compute_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--compute_acl)) +- `rg_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--rg_acl)) + + +### Nested Schema for `acl.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.compute_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.rg_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + + +### Nested Schema for `affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `anti_affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `disks` + +Read-Only: + +- `account_id` (Number) +- `acl` (String) +- `boot_partition` (Number) +- `ckey` (String) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `disk_id` (Number) +- `disk_path` (String) +- `gid` (Number) +- `guid` (Number) +- `image_id` (Number) +- `images` (List of Number) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--disks--iotune)) +- `iqn` (String) +- `login` (String) +- `milestones` (Number) +- `name` (String) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `passwd` (String) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- 
`purge_time` (Number) +- `reality_device_number` (Number) +- `reference_id` (String) +- `replication` (Attributes) (see [below for nested schema](#nestedatt--disks--replication)) +- `res_id` (String) +- `role` (String) +- `sep_id` (Number) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--disks--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `disks.iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `disks.replication` + +Read-Only: + +- `disk_id` (Number) +- `pool_id` (String) +- `role` (String) +- `self_volume_id` (String) +- `storage_id` (String) +- `volume_id` (String) + + + +### Nested Schema for `disks.snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `reference_id` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) + + + + +### Nested Schema for `interfaces` + +Read-Only: + +- `conn_id` (Number) +- `conn_type` (String) +- `def_gw` (String) +- `enabled` (Boolean) +- `flip_group_id` (Number) +- `guid` (String) +- `ip_address` (String) +- `listen_ssh` (Boolean) +- `mac` (String) +- `name` (String) +- `net_id` (Number) +- `net_type` (String) +- `netmask` (Number) +- `node_id` (Number) +- `pci_slot` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--interfaces--qos)) +- `target` (String) +- `type` (String) +- `vnfs` (List of Number) + + +### Nested Schema for `interfaces.qos` + +Read-Only: + +- 
`e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + +### Nested Schema for `os_users` + +Read-Only: + +- `guid` (String) +- `login` (String) +- `password` (String) +- `public_key` (String) + + + +### Nested Schema for `snap_sets` + +Read-Only: + +- `disks` (List of Number) +- `guid` (String) +- `label` (String) +- `timestamp` (Number) diff --git a/docs/data-sources/kvmvm_audits.md b/docs/data-sources/kvmvm_audits.md new file mode 100644 index 0000000..81204d9 --- /dev/null +++ b/docs/data-sources/kvmvm_audits.md @@ -0,0 +1,48 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_audits Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_audits (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `call` (String) +- `responsetime` (Number) +- `statuscode` (Number) +- `timestamp` (Number) +- `user` (String) diff --git a/docs/data-sources/kvmvm_get_audits.md b/docs/data-sources/kvmvm_get_audits.md new file mode 100644 index 0000000..6516ae9 --- /dev/null +++ b/docs/data-sources/kvmvm_get_audits.md @@ -0,0 +1,45 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_get_audits Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_get_audits (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `epoch` (Number) +- `message` (String) diff --git a/docs/data-sources/kvmvm_get_console_url.md b/docs/data-sources/kvmvm_get_console_url.md new file mode 100644 index 0000000..a6945e6 --- /dev/null +++ b/docs/data-sources/kvmvm_get_console_url.md @@ -0,0 +1,36 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_get_console_url Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_get_console_url (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `console_url` (String) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/data-sources/kvmvm_get_log.md b/docs/data-sources/kvmvm_get_log.md new file mode 100644 index 0000000..ec07c1f --- /dev/null +++ b/docs/data-sources/kvmvm_get_log.md @@ -0,0 +1,37 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_get_log Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_get_log (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) +- `path` (String) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. 
+- `log` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/data-sources/kvmvm_list.md b/docs/data-sources/kvmvm_list.md new file mode 100644 index 0000000..324672a --- /dev/null +++ b/docs/data-sources/kvmvm_list.md @@ -0,0 +1,211 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) Find by AccountID +- `by_id` (Number) Find by ID +- `extnet_id` (Number) Find by Extnet ID +- `extnet_name` (String) Find by Extnet name +- `ignore_k8s` (Boolean) If set to true, ignores any VMs associated with any k8s cluster +- `includedeleted` (Boolean) Include deleted computes. If using field 'status', then includedeleted will be ignored +- `ip_address` (String) Find by IP address +- `name` (String) Find by name +- `page` (Number) Page number +- `rg_id` (Number) Find by RGID +- `rg_name` (String) Find by resgroup name +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) Find by status +- `tech_status` (String) Find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--items--acl)) +- `affinity_label` (String) +- `affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--items--affinity_rules)) +- `affinity_weight` (Number) +- `anti_affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--items--anti_affinity_rules)) +- `arch` (String) +- `boot_order` (List of String) +- `bootdisk_size` (Number) +- `cd_image_id` (Number) +- `clone_reference` (Number) +- `clones` (List of Number) +- `compute_id` (Number) +- `computeci_id` (Number) +- `cpu_pin` (Boolean) +- `cpus` (Number) +- `created_by` (String) +- `created_time` (Number) +- `custom_fields` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `devices` (String) +- `disks` (Attributes List) (see [below for nested schema](#nestedatt--items--disks)) +- `driver` (String) +- `gid` (Number) +- `guid` (Number) +- `hp_backed` (Boolean) +- `image_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--items--interfaces)) +- `lock_status` (String) +- `manager_id` (Number) +- `manager_type` (String) +- `migrationjob` (Number) +- `milestones` (Number) +- `name` (String) +- `need_reboot` (Boolean) +- `numa_affinity` (String) +- `numa_node_id` (Number) +- `pinned` (Boolean) +- `ram` (Number) +- `reference_id` (String) +- `registered` (Boolean) +- `res_name` (String) +- `reserved_node_cpus` (List of Number) +- `rg_id` (Number) +- `rg_name` (String) +- `snap_sets` (Attributes List) (see [below for nested schema](#nestedatt--items--snap_sets)) +- `stateless_sep_id` (Number) +- `stateless_sep_type` (String) +- `status` (String) +- `tags` (Map of String) +- `tech_status` (String) +- `total_disks_size` (Number) +- `updated_by` (String) +- `updated_time` (Number) 
+- `user_managed` (Boolean) +- `vgpus` (List of Number) +- `vins_connected` (Number) +- `virtual_image_id` (Number) + + +### Nested Schema for `items.acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `items.affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `items.anti_affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `items.disks` + +Read-Only: + +- `disk_id` (Number) +- `pci_slot` (Number) + + + +### Nested Schema for `items.interfaces` + +Read-Only: + +- `conn_id` (Number) +- `conn_type` (String) +- `def_gw` (String) +- `enabled` (Boolean) +- `flip_group_id` (Number) +- `guid` (String) +- `ip_address` (String) +- `listen_ssh` (Boolean) +- `mac` (String) +- `name` (String) +- `net_id` (Number) +- `net_type` (String) +- `netmask` (Number) +- `node_id` (Number) +- `pci_slot` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--items--interfaces--qos)) +- `target` (String) +- `type` (String) +- `vnfs` (List of Number) + + +### Nested Schema for `items.interfaces.qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + +### Nested Schema for `items.snap_sets` + +Read-Only: + +- `disks` (List of Number) +- `guid` (String) +- `label` (String) +- `timestamp` (Number) diff --git a/docs/data-sources/kvmvm_list_deleted.md b/docs/data-sources/kvmvm_list_deleted.md new file mode 100644 index 0000000..cc09fbf --- /dev/null +++ b/docs/data-sources/kvmvm_list_deleted.md @@ -0,0 +1,209 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_list_deleted Data Source - 
terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_list_deleted (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) Find by AccountID +- `by_id` (Number) Find by ID +- `extnet_id` (Number) Find by Extnet ID +- `extnet_name` (String) Find by Extnet name +- `ignore_k8s` (Boolean) If set to true, ignores any VMs associated with any k8s cluster +- `ip_address` (String) Find by IP address +- `name` (String) Find by name +- `page` (Number) Page number +- `rg_id` (Number) Find by RGID +- `rg_name` (String) Find by resgroup name +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `tech_status` (String) Find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--items--acl)) +- `affinity_label` (String) +- `affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--items--affinity_rules)) +- `affinity_weight` (Number) +- `anti_affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--items--anti_affinity_rules)) +- `arch` (String) +- `boot_order` (List of String) +- `bootdisk_size` (Number) +- `cd_image_id` (Number) +- `clone_reference` (Number) +- `clones` (List of Number) +- `compute_id` (Number) +- `computeci_id` (Number) +- `cpu_pin` (Boolean) +- `cpus` (Number) +- `created_by` (String) +- `created_time` (Number) +- `custom_fields` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `devices` (String) +- `disks` (Attributes List) (see [below for nested schema](#nestedatt--items--disks)) +- `driver` (String) +- `gid` (Number) +- `guid` (Number) +- `hp_backed` (Boolean) +- `image_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--items--interfaces)) +- `lock_status` (String) +- `manager_id` (Number) +- `manager_type` (String) +- `migrationjob` (Number) +- `milestones` (Number) +- `name` (String) +- `need_reboot` (Boolean) +- `numa_affinity` (String) +- `numa_node_id` (Number) +- `pinned` (Boolean) +- `ram` (Number) +- `reference_id` (String) +- `registered` (Boolean) +- `res_name` (String) +- `reserved_node_cpus` (List of Number) +- `rg_id` (Number) +- `rg_name` (String) +- `snap_sets` (Attributes List) (see [below for nested schema](#nestedatt--items--snap_sets)) +- `stateless_sep_id` (Number) +- `stateless_sep_type` (String) +- `status` (String) +- `tags` (Map of String) +- `tech_status` (String) +- `total_disks_size` (Number) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vgpus` (List of Number) +- 
`vins_connected` (Number) +- `virtual_image_id` (Number) + + +### Nested Schema for `items.acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `items.affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `items.anti_affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `items.disks` + +Read-Only: + +- `disk_id` (Number) +- `pci_slot` (Number) + + + +### Nested Schema for `items.interfaces` + +Read-Only: + +- `conn_id` (Number) +- `conn_type` (String) +- `def_gw` (String) +- `enabled` (Boolean) +- `flip_group_id` (Number) +- `guid` (String) +- `ip_address` (String) +- `listen_ssh` (Boolean) +- `mac` (String) +- `name` (String) +- `net_id` (Number) +- `net_type` (String) +- `netmask` (Number) +- `node_id` (Number) +- `pci_slot` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--items--interfaces--qos)) +- `target` (String) +- `type` (String) +- `vnfs` (List of Number) + + +### Nested Schema for `items.interfaces.qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + +### Nested Schema for `items.snap_sets` + +Read-Only: + +- `disks` (List of Number) +- `guid` (String) +- `label` (String) +- `timestamp` (Number) diff --git a/docs/data-sources/kvmvm_pci_device_list.md b/docs/data-sources/kvmvm_pci_device_list.md new file mode 100644 index 0000000..fca5858 --- /dev/null +++ b/docs/data-sources/kvmvm_pci_device_list.md @@ -0,0 +1,61 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_pci_device_list Data Source - terraform-provider-dynamix" +subcategory: "" 
+description: |- + +--- + +# dynamix_kvmvm_pci_device_list (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `device_id` (Number) +- `name` (String) +- `page` (Number) +- `rg_id` (Number) +- `size` (Number) +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `compute_id` (Number) +- `description` (String) +- `device_id` (Number) +- `guid` (Number) +- `hwpath` (String) +- `name` (String) +- `rg_id` (Number) +- `stack_id` (Number) +- `status` (String) +- `system_name` (String) diff --git a/docs/data-sources/kvmvm_pfw_list.md b/docs/data-sources/kvmvm_pfw_list.md new file mode 100644 index 0000000..253ffe1 --- /dev/null +++ b/docs/data-sources/kvmvm_pfw_list.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_pfw_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_pfw_list (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `local_ip` (String) +- `local_port` (Number) +- `pfw_id` (Number) +- `protocol` (String) +- `public_port_end` (Number) +- `public_port_start` (Number) +- `vm_id` (Number) diff --git a/docs/data-sources/kvmvm_snapshot_usage.md b/docs/data-sources/kvmvm_snapshot_usage.md new file mode 100644 index 0000000..2c0aa30 --- /dev/null +++ b/docs/data-sources/kvmvm_snapshot_usage.md @@ -0,0 +1,48 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_snapshot_usage Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_snapshot_usage (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `label` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `count` (Number) +- `label` (String) +- `stored` (Number) +- `timestamp` (Number) diff --git a/docs/data-sources/kvmvm_user_list.md b/docs/data-sources/kvmvm_user_list.md new file mode 100644 index 0000000..b9321ce --- /dev/null +++ b/docs/data-sources/kvmvm_user_list.md @@ -0,0 +1,85 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_user_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_user_list (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--items--account_acl)) +- `compute_acl` (Attributes List) (see [below for nested schema](#nestedatt--items--compute_acl)) +- `rg_acl` (Attributes List) (see [below for nested schema](#nestedatt--items--rg_acl)) + + +### Nested Schema for `items.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `items.compute_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `items.rg_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) diff --git a/docs/data-sources/kvmvm_vgpu_list.md b/docs/data-sources/kvmvm_vgpu_list.md new file mode 100644 index 0000000..0d65a49 --- /dev/null +++ b/docs/data-sources/kvmvm_vgpu_list.md @@ -0,0 +1,69 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm_vgpu_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm_vgpu_list (Data Source) + + + + + + +## Schema + +### Required + +- `compute_id` (Number) + +### Optional + +- `gpu_id` (Number) +- `includedeleted` (Boolean) +- `page` (Number) +- `size` (Number) +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `created_time` (Number) +- `deleted_time` (Number) +- `gid` (Number) +- `guid` (Number) +- `last_claimed_by` (Number) +- `last_update_time` (Number) +- `mode` (String) +- `pci_slot` (Number) +- `pgpuid` (Number) +- `profile_id` (Number) +- `ram` (Number) +- `reference_id` (String) +- `rg_id` (Number) +- `status` (String) +- `type` (String) +- `vgpu_id` (Number) +- `vm_id` (Number) diff --git a/docs/data-sources/lb.md b/docs/data-sources/lb.md new file mode 100644 index 0000000..232bb57 --- /dev/null +++ b/docs/data-sources/lb.md @@ -0,0 +1,171 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb (Data Source) + + + + + + +## Schema + +### Required + +- `lb_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `acl` (String) +- `backend_haip` (String) +- `backends` (Attributes List) (see [below for nested schema](#nestedatt--backends)) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dp_api_user` (String) +- `extnet_id` (Number) +- `frontend_haip` (String) +- `frontends` (Attributes List) (see [below for nested schema](#nestedatt--frontends)) +- `gid` (Number) +- `guid` (Number) +- `ha_mode` (Boolean) +- `id` (Number) The ID of this resource. 
+- `image_id` (Number) +- `manager_id` (Number) +- `manager_type` (String) +- `milestones` (Number) +- `name` (String) +- `part_k8s` (Boolean) +- `primary_node` (Attributes) (see [below for nested schema](#nestedatt--primary_node)) +- `rg_id` (Number) +- `rg_name` (String) +- `secondary_node` (Attributes) (see [below for nested schema](#nestedatt--secondary_node)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vins_id` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `backends` + +Read-Only: + +- `algorithm` (String) +- `guid` (String) +- `name` (String) +- `server_default_settings` (Attributes) (see [below for nested schema](#nestedatt--backends--server_default_settings)) +- `servers` (Attributes List) (see [below for nested schema](#nestedatt--backends--servers)) + + +### Nested Schema for `backends.server_default_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + +### Nested Schema for `backends.servers` + +Read-Only: + +- `address` (String) +- `check` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) +- `server_settings` (Attributes) (see [below for nested schema](#nestedatt--backends--servers--server_settings)) + + +### Nested Schema for `backends.servers.server_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + + + +### Nested Schema for `frontends` + 
+Read-Only: + +- `backend` (String) +- `bindings` (Attributes List) (see [below for nested schema](#nestedatt--frontends--bindings)) +- `guid` (String) +- `name` (String) + + +### Nested Schema for `frontends.bindings` + +Read-Only: + +- `address` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) + + + + +### Nested Schema for `primary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) + + + +### Nested Schema for `secondary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) diff --git a/docs/data-sources/lb_list.md b/docs/data-sources/lb_list.md new file mode 100644 index 0000000..c355eaa --- /dev/null +++ b/docs/data-sources/lb_list.md @@ -0,0 +1,188 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) find by account ID +- `back_ip` (String) find by backend Ip +- `by_id` (Number) find by id +- `front_ip` (String) find by frontend Ip +- `include_deleted` (Boolean) included deleted LBs. If using field 'status', then include_deleted will be ignored +- `name` (String) find by name +- `page` (Number) page number +- `rg_id` (Number) find by RG ID +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) find by status +- `tech_status` (String) find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. 
+- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `acl` (String) +- `backend_haip` (String) +- `backends` (Attributes List) (see [below for nested schema](#nestedatt--items--backends)) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dp_api_user` (String) +- `extnet_id` (Number) +- `frontend_haip` (String) +- `frontends` (Attributes List) (see [below for nested schema](#nestedatt--items--frontends)) +- `gid` (Number) +- `guid` (Number) +- `ha_mode` (Boolean) +- `image_id` (Number) +- `lb_id` (Number) +- `manager_id` (Number) +- `manager_type` (String) +- `milestones` (Number) +- `name` (String) +- `part_k8s` (Boolean) +- `primary_node` (Attributes) (see [below for nested schema](#nestedatt--items--primary_node)) +- `rg_id` (Number) +- `rg_name` (String) +- `secondary_node` (Attributes) (see [below for nested schema](#nestedatt--items--secondary_node)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vins_id` (Number) + + +### Nested Schema for `items.backends` + +Read-Only: + +- `algorithm` (String) +- `guid` (String) +- `name` (String) +- `server_default_settings` (Attributes) (see [below for nested schema](#nestedatt--items--backends--server_default_settings)) +- `servers` (Attributes List) (see [below for nested schema](#nestedatt--items--backends--servers)) + + +### Nested Schema for `items.backends.server_default_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` 
(Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + +### Nested Schema for `items.backends.servers` + +Read-Only: + +- `address` (String) +- `check` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) +- `server_settings` (Attributes) (see [below for nested schema](#nestedatt--items--backends--servers--server_settings)) + + +### Nested Schema for `items.backends.servers.server_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + + + +### Nested Schema for `items.frontends` + +Read-Only: + +- `backend` (String) +- `bindings` (Attributes List) (see [below for nested schema](#nestedatt--items--frontends--bindings)) +- `guid` (String) +- `name` (String) + + +### Nested Schema for `items.frontends.bindings` + +Read-Only: + +- `address` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) + + + + +### Nested Schema for `items.primary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) + + + +### Nested Schema for `items.secondary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) diff --git a/docs/data-sources/lb_list_deleted.md b/docs/data-sources/lb_list_deleted.md new file mode 100644 index 0000000..0beaa26 --- /dev/null +++ b/docs/data-sources/lb_list_deleted.md @@ -0,0 +1,186 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb_list_deleted Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb_list_deleted (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) find by account ID +- 
`back_ip` (String) find by backend Ip +- `by_id` (Number) find by id +- `front_ip` (String) find by frontend Ip +- `name` (String) find by name +- `page` (Number) page number +- `rg_id` (Number) find by RG ID +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `tech_status` (String) find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `acl` (String) +- `backend_haip` (String) +- `backends` (Attributes List) (see [below for nested schema](#nestedatt--items--backends)) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dp_api_user` (String) +- `extnet_id` (Number) +- `frontend_haip` (String) +- `frontends` (Attributes List) (see [below for nested schema](#nestedatt--items--frontends)) +- `gid` (Number) +- `guid` (Number) +- `ha_mode` (Boolean) +- `image_id` (Number) +- `lb_id` (Number) +- `manager_id` (Number) +- `manager_type` (String) +- `milestones` (Number) +- `name` (String) +- `part_k8s` (Boolean) +- `primary_node` (Attributes) (see [below for nested schema](#nestedatt--items--primary_node)) +- `rg_id` (Number) +- `rg_name` (String) +- `secondary_node` (Attributes) (see [below for nested schema](#nestedatt--items--secondary_node)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vins_id` (Number) + 
+ +### Nested Schema for `items.backends` + +Read-Only: + +- `algorithm` (String) +- `guid` (String) +- `name` (String) +- `server_default_settings` (Attributes) (see [below for nested schema](#nestedatt--items--backends--server_default_settings)) +- `servers` (Attributes List) (see [below for nested schema](#nestedatt--items--backends--servers)) + + +### Nested Schema for `items.backends.server_default_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + +### Nested Schema for `items.backends.servers` + +Read-Only: + +- `address` (String) +- `check` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) +- `server_settings` (Attributes) (see [below for nested schema](#nestedatt--items--backends--servers--server_settings)) + + +### Nested Schema for `items.backends.servers.server_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + + + +### Nested Schema for `items.frontends` + +Read-Only: + +- `backend` (String) +- `bindings` (Attributes List) (see [below for nested schema](#nestedatt--items--frontends--bindings)) +- `guid` (String) +- `name` (String) + + +### Nested Schema for `items.frontends.bindings` + +Read-Only: + +- `address` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) + + + + +### Nested Schema for `items.primary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) + + + +### Nested Schema for `items.secondary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) diff --git 
a/docs/data-sources/resgroup.md b/docs/data-sources/resgroup.md new file mode 100644 index 0000000..c67a7ab --- /dev/null +++ b/docs/data-sources/resgroup.md @@ -0,0 +1,92 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_resgroup Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_resgroup (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) resource group id + +### Optional + +- `reason` (String) reason for request +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--acl)) +- `compute_features` (List of String) +- `computes` (List of Number) +- `cpu_allocation_parameter` (String) +- `cpu_allocation_ratio` (Number) +- `created_by` (String) +- `created_time` (Number) +- `def_net_id` (Number) +- `def_net_type` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dirty` (Boolean) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `lock_status` (String) +- `milestones` (Number) +- `name` (String) +- `register_computes` (Boolean) +- `res_types` (List of String) +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--resource_limits)) +- `secret` (String) +- `status` (String) +- `uniq_pools` (List of String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins` (List of Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) diff --git a/docs/data-sources/rg_affinity_group_computes.md b/docs/data-sources/rg_affinity_group_computes.md new file mode 100644 index 0000000..53eb883 --- /dev/null +++ b/docs/data-sources/rg_affinity_group_computes.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_affinity_group_computes Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_affinity_group_computes (Data Source) + + + + + + +## Schema + +### Required + +- `affinity_group` (String) Affinity group label +- `rg_id` (Number) find by rg id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `compute_id` (Number) +- `other_node` (List of Number) +- `other_node_indirect` (List of Number) +- `other_node_indirect_soft` (List of Number) +- `other_node_soft` (List of Number) +- `same_node` (List of Number) +- `same_node_soft` (List of Number) diff --git a/docs/data-sources/rg_affinity_groups_get.md b/docs/data-sources/rg_affinity_groups_get.md new file mode 100644 index 0000000..91adf2a --- /dev/null +++ b/docs/data-sources/rg_affinity_groups_get.md @@ -0,0 +1,37 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_affinity_groups_get Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_affinity_groups_get (Data Source) + + + + + + +## Schema + +### Required + +- `affinity_group` (String) Affinity group label +- `rg_id` (Number) find by rg id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `ids` (List of Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/rg_affinity_groups_list.md b/docs/data-sources/rg_affinity_groups_list.md new file mode 100644 index 0000000..a5c9589 --- /dev/null +++ b/docs/data-sources/rg_affinity_groups_list.md @@ -0,0 +1,56 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_affinity_groups_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_affinity_groups_list (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) find by rg id + +### Optional + +- `page` (Number) page number +- `size` (Number) size number +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `affinity_groups` (Attributes List) (see [below for nested schema](#nestedatt--affinity_groups)) +- `entry_count` (Number) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `affinity_groups` + +Read-Only: + +- `ids` (Attributes List) (see [below for nested schema](#nestedatt--affinity_groups--ids)) +- `label` (String) + + +### Nested Schema for `affinity_groups.ids` + +Read-Only: + +- `id` (Number) +- `node_id` (Number) diff --git a/docs/data-sources/rg_audits.md b/docs/data-sources/rg_audits.md new file mode 100644 index 0000000..bbb1b0d --- /dev/null +++ b/docs/data-sources/rg_audits.md @@ -0,0 +1,48 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_audits Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_audits (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `call` (String) +- `responsetime` (Number) +- `statuscode` (Number) +- `timestamp` (Number) +- `user` (String) diff --git a/docs/data-sources/rg_list.md b/docs/data-sources/rg_list.md new file mode 100644 index 0000000..de42012 --- /dev/null +++ b/docs/data-sources/rg_list.md @@ -0,0 +1,108 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) find by account id +- `account_name` (String) find by account name +- `by_id` (Number) find by id +- `created_after` (Number) find by created after time (unix timestamp) +- `created_before` (Number) find by created before time (unix timestamp) +- `includedeleted` (Boolean) included deleted resource groups. If using field 'status', then includedeleted will be ignored +- `lock_status` (String) find by lock status +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) find by status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--items--account_acl)) +- `account_id` (Number) +- `account_name` (String) +- `compute_features` (List of String) +- `cpu_allocation_parameter` (String) +- `cpu_allocation_ratio` (Number) +- `created_by` (String) +- `created_time` (Number) +- `def_net_id` (Number) +- `def_net_type` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dirty` (Boolean) +- `gid` (Number) +- `guid` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `name` (String) +- `register_computes` (Boolean) +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--items--resource_limits)) +- `resource_types` (List of String) +- `rg_id` (Number) +- `secret` (String) +- `status` (String) +- `uniq_pools` (List of String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins` (List of Number) +- `vms` (List of Number) + + +### Nested Schema for `items.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `items.resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) diff --git a/docs/data-sources/rg_list_computes.md b/docs/data-sources/rg_list_computes.md new file mode 100644 index 0000000..4f30ddc --- /dev/null +++ b/docs/data-sources/rg_list_computes.md @@ -0,0 +1,103 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_list_computes Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_list_computes (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) ID of the RG + +### Optional + +- `account_id` (Number) find by account id +- `compute_id` 
(Number) find by id +- `extnet_id` (Number) find by external network id +- `extnet_name` (String) find by external network name +- `ip_address` (String) find by ip address +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) find by status +- `tech_status` (String) find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `affinity_label` (String) +- `affinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--items--affinity_rules)) +- `affinity_weight` (Number) +- `antiaffinity_rules` (Attributes List) (see [below for nested schema](#nestedatt--items--antiaffinity_rules)) +- `cpus` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `id` (Number) +- `name` (String) +- `ram` (Number) +- `registered` (Boolean) +- `rg_name` (String) +- `status` (String) +- `tech_status` (String) +- `total_disks_size` (Number) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vins_connected` (Number) + + +### Nested Schema for `items.affinity_rules` + +Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) + + + +### Nested Schema for `items.antiaffinity_rules` + 
+Read-Only: + +- `guid` (String) +- `key` (String) +- `mode` (String) +- `policy` (String) +- `topology` (String) +- `value` (String) diff --git a/docs/data-sources/rg_list_deleted.md b/docs/data-sources/rg_list_deleted.md new file mode 100644 index 0000000..875868c --- /dev/null +++ b/docs/data-sources/rg_list_deleted.md @@ -0,0 +1,106 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_list_deleted Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_list_deleted (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) find by account id +- `account_name` (String) find by account name +- `by_id` (Number) find by id +- `created_after` (Number) find by created after time (unix timestamp) +- `created_before` (Number) find by created before time (unix timestamp) +- `lock_status` (String) find by lock status +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--items--account_acl)) +- `account_id` (Number) +- `account_name` (String) +- `compute_features` (List of String) +- `cpu_allocation_parameter` (String) +- `cpu_allocation_ratio` (Number) +- `created_by` (String) +- `created_time` (Number) +- `def_net_id` (Number) +- `def_net_type` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dirty` (Boolean) +- `gid` (Number) +- `guid` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `name` (String) +- `register_computes` (Boolean) +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--items--resource_limits)) +- `resource_types` (List of String) +- `rg_id` (Number) +- `secret` (String) +- `status` (String) +- `uniq_pools` (List of String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins` (List of Number) +- `vms` (List of Number) + + +### Nested Schema for `items.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `items.resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) diff --git a/docs/data-sources/rg_list_lb.md b/docs/data-sources/rg_list_lb.md new file mode 100644 index 0000000..1d370a0 --- /dev/null +++ b/docs/data-sources/rg_list_lb.md @@ -0,0 +1,183 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_list_lb Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_list_lb (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) ID of the RG + +### Optional + +- `back_ip` (String) find by backend Ip +- `by_id` (Number) find by id +- `front_ip` 
(String) find by frontend Ip +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `status` (String) find by status +- `tech_status` (String) find by tech status +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `backend_haip` (String) +- `backends` (Attributes List) (see [below for nested schema](#nestedatt--items--backends)) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `dp_api_user` (String) +- `extnet_id` (Number) +- `frontend_haip` (String) +- `frontends` (Attributes List) (see [below for nested schema](#nestedatt--items--frontends)) +- `gid` (Number) +- `guid` (Number) +- `ha_mode` (Boolean) +- `id` (Number) +- `image_id` (Number) +- `milestones` (Number) +- `name` (String) +- `primary_node` (Attributes) (see [below for nested schema](#nestedatt--items--primary_node)) +- `rg_name` (String) +- `secondary_node` (Attributes) (see [below for nested schema](#nestedatt--items--secondary_node)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins_id` (Number) + + +### Nested Schema for `items.backends` + +Read-Only: + +- `algorithm` (String) +- `guid` (String) +- `name` (String) +- `server_default_settings` (Attributes) (see [below for nested 
schema](#nestedatt--items--backends--server_default_settings)) +- `servers` (Attributes List) (see [below for nested schema](#nestedatt--items--backends--servers)) + + +### Nested Schema for `items.backends.server_default_settings` + +Read-Only: + +- `down_inter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `max_conn` (Number) +- `max_queue` (Number) +- `rise` (Number) +- `slow_start` (Number) +- `weight` (Number) + + + +### Nested Schema for `items.backends.servers` + +Read-Only: + +- `address` (String) +- `check` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) +- `server_settings` (Attributes) (see [below for nested schema](#nestedatt--items--backends--servers--server_settings)) + + +### Nested Schema for `items.backends.servers.server_settings` + +Read-Only: + +- `down_inter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `max_conn` (Number) +- `max_queue` (Number) +- `rise` (Number) +- `slow_start` (Number) +- `weight` (Number) + + + + + +### Nested Schema for `items.frontends` + +Read-Only: + +- `backend` (String) +- `bindings` (Attributes List) (see [below for nested schema](#nestedatt--items--frontends--bindings)) +- `guid` (String) +- `name` (String) + + +### Nested Schema for `items.frontends.bindings` + +Read-Only: + +- `address` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) + + + + +### Nested Schema for `items.primary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) + + + +### Nested Schema for `items.secondary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) diff --git a/docs/data-sources/rg_list_pfw.md b/docs/data-sources/rg_list_pfw.md new file mode 100644 index 0000000..4d909f6 --- /dev/null +++ b/docs/data-sources/rg_list_pfw.md @@ -0,0 
+1,52 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_list_pfw Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_list_pfw (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) ID of the RG + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `public_port_end` (Number) +- `public_port_start` (Number) +- `vins_id` (Number) +- `vins_name` (String) +- `vm_id` (Number) +- `vm_ip` (String) +- `vm_name` (String) +- `vm_port` (Number) diff --git a/docs/data-sources/rg_list_vins.md b/docs/data-sources/rg_list_vins.md new file mode 100644 index 0000000..b2c4a0c --- /dev/null +++ b/docs/data-sources/rg_list_vins.md @@ -0,0 +1,69 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_list_vins Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_list_vins (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) ID of the RG + +### Optional + +- `account_id` (Number) find by account id +- `ext_ip` (String) find by external ip address +- `name` (String) find by name +- `page` (Number) page number +- `size` (Number) size number +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vins_id` 
(Number) find by vins id + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `computes` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `external_ip` (String) +- `extnet_id` (Number) +- `free_ips` (Number) +- `id` (Number) +- `name` (String) +- `network` (String) +- `pri_vnf_dev_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) diff --git a/docs/data-sources/rg_resource_consumption_get.md b/docs/data-sources/rg_resource_consumption_get.md new file mode 100644 index 0000000..b03210c --- /dev/null +++ b/docs/data-sources/rg_resource_consumption_get.md @@ -0,0 +1,104 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_resource_consumption_get Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_resource_consumption_get (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) find by rg id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `consumed` (Attributes) (see [below for nested schema](#nestedatt--consumed)) +- `id` (String) The ID of this resource. 
+- `reserved` (Attributes) (see [below for nested schema](#nestedatt--reserved)) +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--resource_limits)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `consumed` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--consumed--seps)) + + +### Nested Schema for `consumed.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) + + + + +### Nested Schema for `reserved` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--reserved--seps)) + + +### Nested Schema for `reserved.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) + + + + +### Nested Schema for `resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) diff --git a/docs/data-sources/rg_resource_consumption_list.md b/docs/data-sources/rg_resource_consumption_list.md new file mode 100644 index 0000000..61a7841 --- /dev/null +++ b/docs/data-sources/rg_resource_consumption_list.md @@ -0,0 +1,109 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_resource_consumption_list Data Source - terraform-provider-dynamix" +subcategory: 
"" +description: |- + +--- + +# dynamix_rg_resource_consumption_list (Data Source) + + + + + + +## Schema + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `consumed` (Attributes) (see [below for nested schema](#nestedatt--items--consumed)) +- `reserved` (Attributes) (see [below for nested schema](#nestedatt--items--reserved)) +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--items--resource_limits)) +- `rg_id` (Number) + + +### Nested Schema for `items.consumed` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--items--consumed--seps)) + + +### Nested Schema for `items.consumed.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) + + + + +### Nested Schema for `items.reserved` + +Read-Only: + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--items--reserved--seps)) + + +### Nested Schema for `items.reserved.seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) + + + + +### Nested Schema for 
`items.resource_limits` + +Read-Only: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_dm` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) diff --git a/docs/data-sources/rg_usage.md b/docs/data-sources/rg_usage.md new file mode 100644 index 0000000..0270fd7 --- /dev/null +++ b/docs/data-sources/rg_usage.md @@ -0,0 +1,55 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_rg_usage Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_rg_usage (Data Source) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) find by rg id + +### Optional + +- `reason` (String) reason for action +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `cpu` (Number) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `extips` (Number) +- `exttraffic` (Number) +- `gpu` (Number) +- `id` (String) The ID of this resource. +- `ram` (Number) +- `seps` (Attributes List) (see [below for nested schema](#nestedatt--seps)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `seps` + +Read-Only: + +- `data_name` (String) +- `disk_size` (Number) +- `disk_size_max` (Number) +- `sep_id` (String) diff --git a/docs/data-sources/stack.md b/docs/data-sources/stack.md new file mode 100644 index 0000000..a64d77d --- /dev/null +++ b/docs/data-sources/stack.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_stack Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_stack (Data Source) + + + + + + +## Schema + +### Required + +- `stack_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `cpu_allocation_ratio` (Number) +- `descr` (String) +- `drivers` (List of String) +- `mem_allocation_ratio` (Number) +- `name` (String) +- `status` (String) +- `type` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/stack_list.md b/docs/data-sources/stack_list.md new file mode 100644 index 0000000..4f55bb7 --- /dev/null +++ b/docs/data-sources/stack_list.md @@ -0,0 +1,50 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_stack_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_stack_list (Data Source) + + + + + + +## Schema + +### Optional + +- `by_id` (Number) +- `name` (String) +- `page` (Number) +- `size` (Number) +- `sort_by` (String) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `name` (String) +- `stack_id` (Number) +- `status` (String) +- `type` (String) diff --git a/docs/data-sources/vfpool.md b/docs/data-sources/vfpool.md new file mode 100644 index 0000000..dcae158 --- /dev/null +++ b/docs/data-sources/vfpool.md @@ -0,0 +1,70 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vfpool Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vfpool (Data Source) + + + + + + +## Schema + +### Required + +- `vfpool_id` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_access` (List of Number) +- `created_time` (Number) +- `description` (String) +- `gid` (Number) +- `guid` (Number) +- `name` (String) +- `rg_access` (List of Number) +- `status` (String) +- `updated_time` (Number) +- `vfs` (Attributes List) (see [below for nested schema](#nestedatt--vfs)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `vfs` + +Read-Only: + +- `node_id` (Number) +- `vf_list` (Attributes List) (see [below for nested schema](#nestedatt--vfs--vf_list)) + + +### Nested Schema for `vfs.vf_list` + +Read-Only: + +- `nic_name` (String) +- `vfs_info` (Attributes List) (see [below for nested schema](#nestedatt--vfs--vf_list--vfs_info)) + + +### Nested Schema for `vfs.vf_list.vfs_info` + +Read-Only: + +- `claimed` (Boolean) +- `id` (Number) +- `vm_id` (Number) diff --git a/docs/data-sources/vfpool_list.md b/docs/data-sources/vfpool_list.md new file mode 100644 index 0000000..9a03f6b --- /dev/null +++ b/docs/data-sources/vfpool_list.md @@ -0,0 +1,85 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vfpool_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vfpool_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_access` (Number) +- `by_id` (Number) +- `description` (String) +- `gid` (Number) +- `name` (String) +- `page` (Number) +- `rg_access` (Number) +- `size` (Number) +- `sort_by` (String) +- `status` (String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_access` (List of Number) +- `created_time` (Number) +- `description` (String) +- `gid` (Number) +- `guid` (Number) +- `name` (String) +- `rg_access` (List of Number) +- `status` (String) +- `updated_time` (Number) +- `vfpool_id` (Number) +- `vfs` (Attributes List) (see [below for nested schema](#nestedatt--items--vfs)) + + +### Nested Schema for `items.vfs` + +Read-Only: + +- `node_id` (Number) +- `vf_list` (Attributes List) (see [below for nested schema](#nestedatt--items--vfs--vf_list)) + + +### Nested Schema for `items.vfs.vf_list` + +Read-Only: + +- `nic_name` (String) +- `vfs_info` (Attributes List) (see [below for nested schema](#nestedatt--items--vfs--vf_list--vfs_info)) + + +### Nested Schema for `items.vfs.vf_list.vfs_info` + +Read-Only: + +- `claimed` (Boolean) +- `id` (Number) +- `vm_id` (Number) diff --git a/docs/data-sources/vins.md b/docs/data-sources/vins.md new file mode 100644 index 0000000..314fc53 --- /dev/null +++ b/docs/data-sources/vins.md @@ -0,0 +1,430 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins (Data Source) + + + + + + +## Schema + +### Required + +- `vins_id` (Number) Unique ID of the ViNS. + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `created_by` (String) +- `created_time` (Number) +- `default_gw` (String) +- `default_qos` (Attributes) (see [below for nested schema](#nestedatt--default_qos)) +- `deleted_by` (String) +- `deleted_time` (Number) +- `desc` (String) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. 
+- `lock_status` (String) +- `manager_id` (Number) +- `manager_type` (String) +- `milestones` (Number) +- `name` (String) +- `net_mask` (Number) +- `network` (String) +- `pre_reservations_num` (Number) +- `redundant` (Boolean) +- `rg_id` (Number) +- `rg_name` (String) +- `sec_vnf_dev_id` (Number) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vnf_dev` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev)) +- `vnfs` (Attributes) (see [below for nested schema](#nestedatt--vnfs)) +- `vxlan_id` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `compute_id` (Number) +- `compute_name` (String) + + + +### Nested Schema for `default_qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + +### Nested Schema for `vnf_dev` + +Read-Only: + +- `account_id` (Number) +- `capabilities` (List of String) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--config)) +- `config_saved` (Boolean) +- `custom_pre_cfg` (Boolean) +- `desc` (String) +- `gid` (Number) +- `guid` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--vnf_dev--interfaces)) +- `lock_status` (String) +- `milestones` (Number) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vins` (List of Number) +- `vnf_id` (Number) +- `vnf_name` (String) + + +### Nested Schema for `vnf_dev.config` + +Read-Only: + +- `mgmt` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--config--mgmt)) +- `resources` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--config--resources)) + + +### Nested 
Schema for `vnf_dev.config.mgmt` + +Read-Only: + +- `ip_addr` (String) +- `password` (String) +- `ssh_key` (String) +- `user` (String) + + + +### Nested Schema for `vnf_dev.config.resources` + +Read-Only: + +- `cpu` (Number) +- `ram` (Number) +- `stack_id` (Number) +- `uuid` (String) + + + + +### Nested Schema for `vnf_dev.interfaces` + +Read-Only: + +- `conn_id` (Number) +- `conn_type` (String) +- `def_gw` (String) +- `enabled` (Boolean) +- `flipgroup_id` (Number) +- `guid` (String) +- `ip_address` (String) +- `listen_ssh` (Boolean) +- `mac` (String) +- `name` (String) +- `net_id` (Number) +- `net_mask` (Number) +- `net_type` (String) +- `node_id` (Number) +- `pci_slot` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--interfaces--qos)) +- `target` (String) +- `type` (String) +- `vnfs` (List of Number) + + +### Nested Schema for `vnf_dev.interfaces.qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + + +### Nested Schema for `vnfs` + +Read-Only: + +- `dhcp` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp)) +- `gw` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw)) +- `nat` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat)) + + +### Nested Schema for `vnfs.dhcp` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp--config)) +- `created_time` (Number) +- `devices` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp--devices)) +- `dhcp_id` (Number) +- `gid` (Number) +- `guid` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `owner_id` (Number) +- `owner_type` (String) +- `pure_virtual` (Boolean) +- `routes` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--dhcp--routes)) +- `status` (String) +- `tech_status` (String) +- `type` (String) + + +### Nested Schema for `vnfs.dhcp.config` + +Read-Only: 
+ +- `default_gw` (String) +- `dns` (List of String) +- `ip_end` (String) +- `ip_start` (String) +- `lease` (Number) +- `netmask` (Number) +- `network` (String) +- `reservations` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--dhcp--config--reservations)) + + +### Nested Schema for `vnfs.dhcp.config.reservations` + +Read-Only: + +- `client_type` (String) +- `desc` (String) +- `domainname` (String) +- `hostname` (String) +- `ip` (String) +- `mac` (String) +- `type` (String) +- `vm_id` (Number) + + + + +### Nested Schema for `vnfs.dhcp.devices` + +Read-Only: + +- `primary` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp--devices--primary)) + + +### Nested Schema for `vnfs.dhcp.devices.primary` + +Read-Only: + +- `dev_id` (Number) +- `iface01` (String) +- `iface02` (String) + + + + +### Nested Schema for `vnfs.dhcp.routes` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) + + + + +### Nested Schema for `vnfs.gw` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--config)) +- `created_time` (Number) +- `devices` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--devices)) +- `gid` (Number) +- `guid` (Number) +- `gw_id` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `owner_id` (Number) +- `owner_type` (String) +- `pure_virtual` (Boolean) +- `routes` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--gw--routes)) +- `status` (String) +- `tech_status` (String) +- `type` (String) + + +### Nested Schema for `vnfs.gw.config` + +Read-Only: + +- `default_gw` (String) +- `ext_net_id` (Number) +- `ext_net_ip` (String) +- `ext_netmask` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--config--qos)) + + +### Nested Schema for `vnfs.gw.config.qos` + +Read-Only: + +- 
`e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + +### Nested Schema for `vnfs.gw.devices` + +Read-Only: + +- `primary` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--devices--primary)) + + +### Nested Schema for `vnfs.gw.devices.primary` + +Read-Only: + +- `dev_id` (Number) +- `iface01` (String) +- `iface02` (String) + + + + +### Nested Schema for `vnfs.gw.routes` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) + + + + +### Nested Schema for `vnfs.nat` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat--config)) +- `created_time` (Number) +- `devices` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat--devices)) +- `gid` (Number) +- `guid` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `nat_id` (Number) +- `owner_id` (Number) +- `owner_type` (String) +- `pure_virtual` (Boolean) +- `routes` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--nat--routes)) +- `status` (String) +- `tech_status` (String) +- `type` (String) + + +### Nested Schema for `vnfs.nat.config` + +Read-Only: + +- `net_mask` (Number) +- `network` (String) +- `rules` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--nat--config--rules)) + + +### Nested Schema for `vnfs.nat.config.rules` + +Read-Only: + +- `local_ip` (String) +- `local_port` (Number) +- `protocol` (String) +- `public_port_end` (Number) +- `public_port_start` (Number) +- `rule_id` (Number) +- `vm_id` (Number) +- `vm_name` (String) + + + + +### Nested Schema for `vnfs.nat.devices` + +Read-Only: + +- `primary` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat--devices--primary)) + + +### Nested Schema for `vnfs.nat.devices.primary` + +Read-Only: + +- `dev_id` (Number) +- `iface01` (String) +- `iface02` 
(String) + + + + +### Nested Schema for `vnfs.nat.routes` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) diff --git a/docs/data-sources/vins_audits.md b/docs/data-sources/vins_audits.md new file mode 100644 index 0000000..87c0db3 --- /dev/null +++ b/docs/data-sources/vins_audits.md @@ -0,0 +1,48 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_audits Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_audits (Data Source) + + + + + + +## Schema + +### Required + +- `vins_id` (Number) Unique ID of the ViNS. + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `call` (String) +- `response_time` (Number) +- `statuscode` (Number) +- `timestamp` (Number) +- `user` (String) diff --git a/docs/data-sources/vins_ext_net_list.md b/docs/data-sources/vins_ext_net_list.md new file mode 100644 index 0000000..b12d0cc --- /dev/null +++ b/docs/data-sources/vins_ext_net_list.md @@ -0,0 +1,50 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_ext_net_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_ext_net_list (Data Source) + + + + + + +## Schema + +### Required + +- `vins_id` (Number) Unique ID of the ViNS. 
+ +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `default_gw` (String) +- `ext_net_id` (Number) +- `ip` (String) +- `prefix_len` (Number) +- `status` (String) +- `tech_status` (String) diff --git a/docs/data-sources/vins_ip_list.md b/docs/data-sources/vins_ip_list.md new file mode 100644 index 0000000..7a1790a --- /dev/null +++ b/docs/data-sources/vins_ip_list.md @@ -0,0 +1,51 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_ip_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_ip_list (Data Source) + + + + + + +## Schema + +### Required + +- `vins_id` (Number) Unique ID of the ViNS + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `client_type` (String) +- `domainname` (String) +- `hostname` (String) +- `ip` (String) +- `mac` (String) +- `type` (String) +- `vm_id` (Number) diff --git a/docs/data-sources/vins_list.md b/docs/data-sources/vins_list.md new file mode 100644 index 0000000..41c9dbf --- /dev/null +++ b/docs/data-sources/vins_list.md @@ -0,0 +1,67 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_list (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) Filter by Account ID +- `by_id` (Number) Filter by ID +- `ext_ip` (String) Filter by external IP address +- `include_deleted` (Boolean) Include deleted ViNSes +- `name` (String) Filter by Name +- `page` (Number) Page number +- `rg_id` (Number) Filter by RG ID +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `external_ip` (String) +- `extnet_id` (Number) +- `free_ips` (Number) +- `network` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins_id` (Number) +- `vins_name` (String) +- `vxlan_id` (Number) diff --git a/docs/data-sources/vins_list_deleted.md b/docs/data-sources/vins_list_deleted.md new file mode 100644 index 0000000..84716b1 --- /dev/null +++ b/docs/data-sources/vins_list_deleted.md @@ -0,0 +1,65 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_list_deleted Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_list_deleted (Data Source) + + + + + + +## Schema + +### Optional + +- `account_id` (Number) Filter by Account ID +- `by_id` (Number) Filter by ID +- `ext_ip` (String) Filter by external IP address +- `name` (String) Filter by Name +- `page` (Number) Page number +- `rg_id` (Number) Filter by RG ID +- `size` (Number) Page size +- `sort_by` (String) sort by one of supported fields, format +|-(field) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vnfdev_id` (Number) Find by VNF Device id + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `account_id` (Number) +- `account_name` (String) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `external_ip` (String) +- `network` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins_id` (Number) +- `vins_name` (String) +- `vxlan_id` (Number) diff --git a/docs/data-sources/vins_nat_rule_list.md b/docs/data-sources/vins_nat_rule_list.md new file mode 100644 index 0000000..2c57ced --- /dev/null +++ b/docs/data-sources/vins_nat_rule_list.md @@ -0,0 +1,52 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_nat_rule_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_nat_rule_list (Data Source) + + + + + + +## Schema + +### Required + +- `vins_id` (Number) VINS id + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `items` + +Read-Only: + +- `id` (Number) +- `local_ip` (String) +- `local_port` (Number) +- `protocol` (String) +- `public_port_end` (Number) +- `public_port_start` (Number) +- `vm_id` (Number) +- `vm_name` (String) diff --git a/docs/data-sources/vins_static_route.md b/docs/data-sources/vins_static_route.md new file mode 100644 index 0000000..d444fe6 --- /dev/null +++ b/docs/data-sources/vins_static_route.md @@ -0,0 +1,41 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_static_route Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_static_route (Data Source) + + + + + + +## Schema + +### Required + +- `route_id` (Number) static route ID +- `vins_id` (Number) vins ID + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `id` (String) The ID of this resource. +- `netmask` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/data-sources/vins_static_route_list.md b/docs/data-sources/vins_static_route_list.md new file mode 100644 index 0000000..44ce6ee --- /dev/null +++ b/docs/data-sources/vins_static_route_list.md @@ -0,0 +1,50 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_static_route_list Data Source - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_static_route_list (Data Source) + + + + + + +## Schema + +### Required + +- `vins_id` (Number) vins ID + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `entry_count` (Number) +- `id` (String) The ID of this resource. +- `items` (Attributes List) (see [below for nested schema](#nestedatt--items)) + + +### Nested Schema for `timeouts` + +Optional: + +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `items` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..3fe3326 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,37 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix Provider" +subcategory: "" +description: |- + +--- + +# dynamix Provider + + + + + + +## Schema + +### Required + +- `authenticator` (String) Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'. +- `controller_url` (String) URL of DECORT Cloud controller to use. API calls will be directed to this URL. 
+ +### Optional + +- `allow_unverified_ssl` (Boolean) If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false. +- `app_id` (String) Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode. +- `app_secret` (String) Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode. +- `bvs_password` (String) User password for DECORT cloud API operations in 'bvs' authentication mode. +- `bvs_user` (String) User name for DECORT cloud API operations in 'bvs' authentication mode. +- `domain` (String) Domain name for DECORT cloud API operations in 'bvs' authentication mode. +- `jwt` (String) JWT to access DECORT cloud API in 'jwt' authentication mode. +- `oauth2_url` (String) OAuth2 application URL in 'decs3o' and 'bvs' authentication mode. +- `password` (String) User password for DECORT cloud API operations in 'legacy' authentication mode. +- `path_config` (String) The path of the configuration file entry. +- `path_token` (String) The path of the token file entry. +- `time_to_refresh` (Number) The number of minutes before the expiration of the token, a refresh will be made. +- `user` (String) User name for DECORT cloud API operations in 'legacy' authentication mode. 
diff --git a/docs/resources/account.md b/docs/resources/account.md new file mode 100644 index 0000000..0f19829 --- /dev/null +++ b/docs/resources/account.md @@ -0,0 +1,128 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_account Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_account (Resource) + + + + + + +## Schema + +### Required + +- `account_name` (String) name of the account +- `username` (String) username of the owner of the account + +### Optional + +- `emailaddress` (String) email +- `enable` (Boolean) enable/disable account +- `permanently` (Boolean) whether to completely delete the account +- `resource_limits` (Attributes) (see [below for nested schema](#nestedatt--resource_limits)) +- `restore` (Boolean) restore a deleted account +- `send_access_emails` (Boolean) if true send emails when a user is granted access to resources +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `users` (Attributes List) (see [below for nested schema](#nestedatt--users)) +
+### Read-Only + +- `account_id` (Number) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--acl)) +- `ckey` (String) +- `company` (String) +- `companyurl` (String) +- `compute_features` (List of String) +- `computes` (Attributes) (see [below for nested schema](#nestedatt--computes)) +- `cpu_allocation_parameter` (String) +- `cpu_allocation_ratio` (Number) +- `created_by` (String) +- `created_time` (Number) +- `dc_location` (String) +- `deactivation_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `displayname` (String) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `last_updated` (String) Timestamp of the last Terraform update of the account resource. 
+- `machines` (Attributes) (see [below for nested schema](#nestedatt--machines)) +- `status` (String) +- `updated_time` (Number) +- `version` (Number) +- `vins` (List of Number) +- `vinses` (Number) + + +### Nested Schema for `resource_limits` + +Optional: + +- `cu_c` (Number) +- `cu_d` (Number) +- `cu_i` (Number) +- `cu_m` (Number) +- `cu_np` (Number) +- `gpu_units` (Number) + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `users` + +Required: + +- `access_type` (String) +- `user_id` (String) + +Optional: + +- `recursive_delete` (Boolean) + + + +### Nested Schema for `acl` + +Read-Only: + +- `can_be_deleted` (Boolean) +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `computes` + +Read-Only: + +- `started` (Number) +- `stopped` (Number) + + + +### Nested Schema for `machines` + +Read-Only: + +- `halted` (Number) +- `running` (Number) diff --git a/docs/resources/bservice.md b/docs/resources/bservice.md new file mode 100644 index 0000000..148d9aa --- /dev/null +++ b/docs/resources/bservice.md @@ -0,0 +1,116 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice (Resource) + + + + + + +## Schema + +### Required + +- `rg_id` (Number) +- `service_name` (String) + +### Optional + +- `enable` (Boolean) +- `permanently` (Boolean) +- `restore` (Boolean) +- `service_id` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--snapshots)) +- `ssh_key` (String) +- `ssh_user` (String) +- `start` (Boolean) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `base_domain` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `cpu_total` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `disk_total` (Number) +- `gid` (Number) +- `groups` (Attributes List) (see [below for nested schema](#nestedatt--groups)) +- `guid` (Number) +- `id` (String) The ID of this resource. 
+- `milestones` (Number) +- `parent_srv_id` (Number) +- `parent_srv_type` (String) +- `ram_total` (Number) +- `rg_name` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) + + +### Nested Schema for `snapshots` + +Optional: + +- `rollback` (Boolean) + +Read-Only: + +- `guid` (String) +- `label` (String) +- `timestamp` (Number) +- `valid` (Boolean) + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `computes` + +Read-Only: + +- `account_id` (Number) +- `architecture` (String) +- `compgroup_id` (Number) +- `compgroup_name` (String) +- `compgroup_role` (String) +- `id` (Number) +- `name` (String) +- `rg_id` (Number) +- `stack_id` (Number) +- `status` (String) +- `tech_status` (String) + + + +### Nested Schema for `groups` + +Read-Only: + +- `computes` (Number) +- `consistency` (Boolean) +- `id` (Number) +- `name` (String) +- `status` (String) +- `tech_status` (String) diff --git a/docs/resources/bservice_group.md b/docs/resources/bservice_group.md new file mode 100644 index 0000000..a762636 --- /dev/null +++ b/docs/resources/bservice_group.md @@ -0,0 +1,97 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_bservice_group Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_bservice_group (Resource) + + + + + + +## Schema + +### Required + +- `comp_count` (Number) +- `compgroup_name` (String) +- `cpu` (Number) +- `disk` (Number) +- `driver` (String) +- `image_id` (Number) +- `ram` (Number) +- `service_id` (Number) + +### Optional + +- `cloud_init` (String) +- `compgroup_id` (Number) +- `extnets` (List of Number) +- `force_stop` (Boolean) +- `force_update` (Boolean) +- `mode` (String) +- `parents` (List of Number) +- `remove_computes` (List of Number) +- `role` (String) +- `sep_id` (Number) +- `sep_pool` (String) +- `start` (Boolean) +- `timeout_start` (Number) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vinses` (List of Number) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `consistency` (Boolean) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. 
+- `milestones` (Number) +- `rg_id` (Number) +- `rg_name` (String) +- `seq_no` (Number) +- `sid` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `computes` + +Read-Only: + +- `id` (Number) +- `ip_addresses` (List of String) +- `name` (String) +- `os_users` (Attributes List) (see [below for nested schema](#nestedatt--computes--os_users)) + + +### Nested Schema for `computes.os_users` + +Read-Only: + +- `login` (String) +- `password` (String) diff --git a/docs/resources/disk.md b/docs/resources/disk.md new file mode 100644 index 0000000..bbb1e44 --- /dev/null +++ b/docs/resources/disk.md @@ -0,0 +1,118 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk (Resource) + + + + + + +## Schema + +### Required + +- `account_id` (Number) ID of the account +- `disk_name` (String) Name of disk +- `gid` (Number) ID of the grid (platform) +- `size_max` (Number) size in GB, default is 10 + +### Optional + +- `desc` (String) description of disk +- `detach` (Boolean) Detaching the disk from compute +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--iotune)) +- `permanently` (Boolean) Whether to completely delete the disk, works only with non attached disks +- `pool` (String) Pool for disk location +- `reason` (String) Reason for deletion +- `sep_id` (Number) Storage endpoint provider ID to create disk +- `shareable` (Boolean) share disk +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `type` (String) (B;D;T) B=Boot;D=Data;T=Temp + +### Read-Only + +- `account_name` (String) +- `acl` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `created_time` (Number) +- `deleted_time` (Number) +- `destruction_time` (Number) +- `devicename` (String) +- `disk_id` (Number) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `images` (List of String) +- `last_updated` (String) Timestamp of the last Terraform update of the disk resource. 
+- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `pci_slot` (Number) +- `present_to` (List of Number) +- `purge_time` (Number) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_type` (String) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--snapshots)) +- `status` (String) +- `tech_status` (String) +- `vmid` (Number) + + +### Nested Schema for `iotune` + +Optional: + +- `read_bytes_sec` (Number) Number of bytes to read per second +- `read_bytes_sec_max` (Number) Maximum number of bytes to read +- `read_iops_sec` (Number) Number of io read operations per second +- `read_iops_sec_max` (Number) Maximum number of io read operations +- `size_iops_sec` (Number) Size of io operations +- `total_bytes_sec` (Number) Total size bytes per second +- `total_bytes_sec_max` (Number) Maximum total size of bytes per second +- `total_iops_sec` (Number) Total number of io operations per second +- `total_iops_sec_max` (Number) Maximum total number of io operations per second +- `write_bytes_sec` (Number) Number of bytes to write per second +- `write_bytes_sec_max` (Number) Maximum number of bytes to write per second +- `write_iops_sec` (Number) Number of write operations per second +- `write_iops_sec_max` (Number) Maximum number of write operations per second + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `compute_id` (String) +- `compute_name` (String) + + + +### Nested Schema for `snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/resources/disk_replication.md b/docs/resources/disk_replication.md new file mode 100644 index 0000000..7e49831 --- /dev/null +++ b/docs/resources/disk_replication.md @@ -0,0 +1,137 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_replication Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_replication (Resource) + + + + + + +## Schema + +### Required + +- `disk_id` (Number) +- `disk_name` (String) +- `pool_name` (String) +- `sep_id` (Number) + +### Optional + +- `detach` (Boolean) +- `pause` (Boolean) +- `permanently` (Boolean) +- `reason` (String) +- `reverse` (Boolean) +- `start` (Boolean) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `created_time` (Number) 
+- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `devicename` (String) +- `gid` (Number) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--iotune)) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `replica_disk_id` (Number) +- `replication` (Attributes) (see [below for nested schema](#nestedatt--replication)) +- `res_id` (String) +- `res_name` (String) +- `role` (String) +- `sep_type` (String) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--snapshots)) +- `status` (String) +- `status_replication` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. 
+- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `computes` + +Read-Only: + +- `compute_id` (String) +- `compute_name` (String) + + + +### Nested Schema for `iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `replication` + +Read-Only: + +- `disk_id` (Number) +- `pool_id` (String) +- `role` (String) +- `self_volume_id` (String) +- `storage_id` (String) +- `volume_id` (String) + + + +### Nested Schema for `snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) diff --git a/docs/resources/disk_snapshot.md b/docs/resources/disk_snapshot.md new file mode 100644 index 0000000..84a83d0 --- /dev/null +++ b/docs/resources/disk_snapshot.md @@ -0,0 +1,46 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_disk_snapshot Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_disk_snapshot (Resource) + + + + + + +## Schema + +### Required + +- `disk_id` (Number) The unique ID of the subscriber-owner of the disk +- `label` (String) Name of the snapshot + +### Optional + +- `rollback` (Boolean) Needed in order to make a snapshot rollback +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `timestamp` (Number) Snapshot time + +### Read-Only + 
+- `guid` (String) +- `id` (String) The ID of this resource. +- `last_updated` (String) Timestamp of the last Terraform update of the disk resource. +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/resources/flipgroup.md b/docs/resources/flipgroup.md new file mode 100644 index 0000000..88e9612 --- /dev/null +++ b/docs/resources/flipgroup.md @@ -0,0 +1,63 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_flipgroup Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_flipgroup (Resource) + + + + + + +## Schema + +### Required + +- `account_id` (Number) Account ID +- `client_type` (String) Type of client, 'compute' ('vins' will be later) +- `name` (String) Flipgroup name +- `net_id` (Number) EXTNET or ViNS ID +- `net_type` (String) Network type, EXTNET or VINS + +### Optional + +- `client_ids` (List of Number) List of clients attached to this Flipgroup instance +- `desc` (String) Text description of this Flipgroup instance +- `ip` (String) IP address to associate with this group. If empty, the platform will autoselect IP address +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_name` (String) +- `conn_id` (Number) +- `conn_type` (String) +- `created_by` (String) +- `created_time` (Number) +- `default_gw` (String) +- `deleted_by` (String) +- `deleted_time` (Number) +- `flipgroup_id` (Number) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `milestones` (Number) +- `network` (String) +- `rg_id` (Number) +- `rg_name` (String) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/resources/image.md b/docs/resources/image.md new file mode 100644 index 0000000..3a2ff61 --- /dev/null +++ b/docs/resources/image.md @@ -0,0 +1,90 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_image Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_image (Resource) + + + + + + +## Schema + +### Required + +- `account_id` (Number) AccountId to make the image exclusive +- `boot_type` (String) Boot type of image bios or uefi +- `drivers` (List of String) List of types of compute suitable for image. 
Example: [ "KVM_X86" ] +- `image_name` (String) Name of the rescue disk +- `image_type` (String) Image type linux, windows or other +- `url` (String) URL where to download media from + +### Optional + +- `architecture` (String) Binary architecture of this image, one of X86_64 of PPC64_LE +- `hot_resize` (Boolean) Does this machine supports hot resize +- `password` (String) Optional password for the image +- `password_dl` (String) Password for upload binary media +- `permanently` (Boolean) +- `pool_name` (String) Pool for image create +- `sep_id` (Number) Storage endpoint provider ID +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `username` (String) Optional username for the image +- `username_dl` (String) Username for upload binary media + +### Read-Only + +- `acl` (List of String) +- `bootable` (Boolean) +- `cd_presented_to` (String) +- `ckey` (String) +- `compute_ci_id` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `enabled` (Boolean) +- `gid` (Number) +- `guid` (Number) +- `history` (Attributes List) (see [below for nested schema](#nestedatt--history)) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `last_modified` (Number) +- `last_updated` (String) Timestamp of the last Terraform update of the order. +- `link_to` (Number) +- `milestones` (Number) +- `network_interface_naming` (String) +- `present_to` (List of Number) +- `provider_name` (String) +- `purge_attempts` (Number) +- `res_id` (String) +- `rescuecd` (Boolean) +- `shared_with` (List of Number) +- `size` (Number) +- `status` (String) +- `tech_status` (String) +- `unc_path` (String) +- `version` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `history` + +Read-Only: + +- `guid` (String) +- `id` (Number) +- `timestamp` (Number) diff --git a/docs/resources/image_virtual.md b/docs/resources/image_virtual.md new file mode 100644 index 0000000..d4e4007 --- /dev/null +++ b/docs/resources/image_virtual.md @@ -0,0 +1,85 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_image_virtual Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_image_virtual (Resource) + + + + + + +## Schema + +### Required + +- `image_name` (String) Name of the rescue disk +- `link_to` (Number) ID of real image to link this virtual image to upon creation + +### Optional + +- `permanently` (Boolean) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_id` (Number) +- `acl` (List of String) +- `architecture` (String) +- `boot_type` (String) +- `bootable` (Boolean) +- `ckey` (String) +- `compute_ci_id` (Number) +- `deleted_time` (Number) +- `desc` (String) +- 
`drivers` (List of String) +- `enabled` (Boolean) +- `gid` (Number) +- `guid` (Number) +- `history` (Attributes List) (see [below for nested schema](#nestedatt--history)) +- `hot_resize` (Boolean) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `image_type` (String) +- `last_modified` (Number) +- `last_updated` (String) Timestamp of the last Terraform update of the order. +- `milestones` (Number) +- `password` (String) +- `pool_name` (String) +- `present_to` (List of Number) +- `provider_name` (String) +- `purge_attempts` (Number) +- `res_id` (String) +- `rescuecd` (Boolean) +- `sep_id` (Number) +- `shared_with` (List of Number) +- `size` (Number) +- `status` (String) +- `tech_status` (String) +- `unc_path` (String) +- `username` (String) +- `version` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `history` + +Read-Only: + +- `guid` (String) +- `id` (Number) +- `timestamp` (Number) diff --git a/docs/resources/k8s_cp.md b/docs/resources/k8s_cp.md new file mode 100644 index 0000000..5548e57 --- /dev/null +++ b/docs/resources/k8s_cp.md @@ -0,0 +1,167 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_cp Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_cp (Resource) + + + + + + +## Schema + +### Required + +- `k8sci_id` (Number) ID of the k8s catalog item to base this instance on. +- `name` (String) Name of the cluster. +- `network_plugin` (String) Network plugin to be used +- `rg_id` (Number) Resource group ID that this instance belongs to. + +### Optional + +- `additional_sans` (List of String) is used to define settings and actions that should be performed before any other component in the cluster starts. It allows you to configure things like node registration, network setup, and other initialization tasks. insert a valid JSON string with all levels of nesting. +- `cluster_config` (String) is used to configure the behavior and settings of the Kube-proxy, which is responsible for network proxying and load balancing within the cluster. It includes parameters such as proxy mode, cluster IP ranges, and other Kube-proxy specific configurations. insert a valid JSON string with all levels of nesting. +- `cpu` (Number) Node CPU count. +- `desc` (String) Text description of this instance. +- `disk` (Number) Node boot disk size in GB. +- `enabled` (Boolean) Enable k8s cluster +- `extnet_id` (Number) ID of the external network to connect workers to. If omitted network will be chosen by the platform. 
+- `extnet_only` (Boolean) Use only selected ExtNet for infrastructure connections +- `ha_mode` (Boolean) Use Highly Available schema for LB deploy +- `init_config` (String) is used to define settings and actions that should be performed before any other component in the cluster starts. It allows you to configure things like node registration, network setup, and other initialization tasks. insert a valid JSON string with all levels of nesting. +- `join_config` (String) is used to configure the behavior and settings for joining a node to a cluster. It includes parameters such as the cluster's control plane endpoint, token, and certificate key. insert a valid JSON string with all levels of nesting. +- `kube_proxy_config` (String) is used to configure the behavior and settings of the Kube-proxy, which is responsible for network proxying and load balancing within the cluster. It includes parameters such as proxy mode, cluster IP ranges, and other Kube-proxy specific configurations. insert a valid JSON string with all levels of nesting. +- `kubelet_config` (String) is used to configure the behavior and settings of the Kube-proxy, which is responsible for network proxying and load balancing within the cluster. It includes parameters such as proxy mode, cluster IP ranges, and other Kube-proxy specific configurations. insert a valid JSON string with all levels of nesting. +- `lb_sysctl_params` (Attributes List) Custom sysctl values for Load Balancer instance. Applied on boot. (see [below for nested schema](#nestedatt--lb_sysctl_params)) +- `num` (Number) Number of VMs to create. Can be either 1,3 or 5 +- `oidc_cert` (String) insert ssl certificate in x509 pem format +- `permanently` (Boolean) whether to completely delete the k8s cluster +- `ram` (Number) Node RAM in MB. +- `restore` (Boolean) if true, restore the k8s cluster from Recycle Bin +- `sep_id` (Number) Storage Endpoint ID +- `sep_pool` (String) Storage Endpoint Pool +- `start` (Boolean) Start k8s cluster. 
+- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vins_id` (Number) ID of default vins for this instance. +- `with_lb` (Boolean) Create k8s with load balancer if true. + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes) (see [below for nested schema](#nestedatt--acl)) +- `bservice_id` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--detailed_info)) +- `id` (String) The ID of this resource. +- `k8s_ci_name` (String) +- `k8s_id` (Number) +- `kubeconfig` (String) +- `last_updated` (String) Timestamp of the last Terraform update of the order. +- `lb_id` (Number) +- `lb_ip` (String) +- `master_group_id` (Number) Master group ID. +- `master_group_name` (String) Master group name. +- `rg_name` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) + + +### Nested Schema for `lb_sysctl_params` + +Required: + +- `key` (String) +- `value` (String) + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `acl` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--account_acl)) +- `k8s_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--k8s_acl)) +- `rg_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--rg_acl)) + + +### Nested Schema for `acl.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.k8s_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.rg_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + + +### Nested Schema for `detailed_info` + +Read-Only: + +- `compute_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--detailed_info--interfaces)) +- `name` (String) +- `natable_vins_ip` (String) +- `natable_vins_network` (String) +- `status` (String) +- `tech_status` (String) + + +### Nested Schema for `detailed_info.interfaces` + +Read-Only: + +- `def_gw` (String) +- `ip_address` (String) diff --git a/docs/resources/k8s_wg.md b/docs/resources/k8s_wg.md new file mode 100644 index 0000000..6bcb3f2 --- /dev/null +++ b/docs/resources/k8s_wg.md @@ -0,0 +1,75 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_k8s_wg 
Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_k8s_wg (Resource) + + + + + + +## Schema + +### Required + +- `k8s_id` (Number) ID of k8s instance. +- `name` (String) Name of the worker group. + +### Optional + +- `annotations` (List of String) +- `cloud_init` (String) +- `cpu` (Number) Worker node CPU count. +- `disk` (Number) Worker node boot disk size. If unspecified or 0, size is defined by OS image size. +- `labels` (List of String) +- `num` (Number) Number of worker nodes to create. +- `ram` (Number) Worker node RAM in MB. +- `taints` (List of String) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `worker_sep_id` (Number) +- `worker_sep_pool` (String) + +### Read-Only + +- `detailed_info` (Attributes List) (see [below for nested schema](#nestedatt--detailed_info)) +- `guid` (String) +- `id` (String) The ID of this resource. +- `last_updated` (String) Timestamp of the last Terraform update of the order. +- `wg_id` (Number) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `detailed_info` + +Read-Only: + +- `compute_id` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--detailed_info--interfaces)) +- `name` (String) +- `natable_vins_ip` (String) +- `natable_vins_network` (String) +- `status` (String) +- `tech_status` (String) + + +### Nested Schema for `detailed_info.interfaces` + +Read-Only: + +- `def_gw` (String) +- `ip_address` (String) diff --git a/docs/resources/kvmvm.md b/docs/resources/kvmvm.md new file mode 100644 index 0000000..3a3e084 --- /dev/null +++ b/docs/resources/kvmvm.md @@ -0,0 +1,534 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_kvmvm Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_kvmvm (Resource) + + + + + + +## Schema + +### Required + +- `cpu` (Number) Number of CPUs to allocate to this compute instance. +- `driver` (String) Hardware architecture of this compute instance. +- `name` (String) Name of this compute. Compute names are case sensitive and must be unique in the resource group. +- `ram` (Number) Amount of RAM in MB to allocate to this compute instance. +- `rg_id` (Number) ID of the resource group where this compute should be deployed. 
+ +### Optional + +- `affinity_label` (String) Set affinity label for compute +- `affinity_rules` (Attributes Set) (see [below for nested schema](#nestedatt--affinity_rules)) +- `anti_affinity_rules` (Attributes Set) (see [below for nested schema](#nestedatt--anti_affinity_rules)) +- `auto_start` (Boolean) Flag for redeploy compute +- `boot_disk_size` (Number) This compute instance boot disk size in GB. Make sure it is large enough to accommodate selected OS image. +- `cd` (Attributes) (see [below for nested schema](#nestedatt--cd)) +- `cloud_init` (String) Optional cloud_init parameters. Applied when creating new compute instance only, ignored in all other cases. +- `cpu_pin` (Boolean) Run VM on dedicated CPUs. To use this feature, the system must be pre-configured by allocating CPUs on the physical node. +- `custom_fields` (String) custom fields for Compute. Must be dict +- `data_disks` (String) Flag for redeploy compute +- `description` (String) Optional text description of this compute instance. +- `detach_disks` (Boolean) +- `enabled` (Boolean) If true - enable compute, else - disable +- `extra_disks` (Set of Number) Optional list of IDs of extra disks to attach to this compute. You may specify several extra disks. +- `force_resize` (Boolean) Flag for resize compute +- `force_stop` (Boolean) Flag for redeploy compute +- `hp_backed` (Boolean) Use Huge Pages to allocate RAM of the virtual machine. The system must be pre-configured by allocating Huge Pages on the physical node. +- `image_id` (Number) ID of the OS image to base this compute instance on. +- `ipa_type` (String) compute purpose +- `is` (String) system name +- `network` (Attributes Set) Optional network connection(s) for this compute. You may specify several network blocks, one for each connection. (see [below for nested schema](#nestedatt--network)) +- `numa_affinity` (String) Rule for VM placement with NUMA affinity. 
+- `pause` (Boolean) +- `permanently` (Boolean) +- `pin_to_stack` (Boolean) +- `pool` (String) Pool to use if sepId is set, can be also empty if needed to be chosen by system. +- `port_forwarding` (Attributes Set) (see [below for nested schema](#nestedatt--port_forwarding)) +- `reset` (Boolean) +- `restore` (Boolean) +- `rollback` (Attributes) (see [below for nested schema](#nestedatt--rollback)) +- `sep_id` (Number) ID of SEP to create bootDisk on. Uses image's sepId if not set. +- `snapshot` (Attributes Set) (see [below for nested schema](#nestedatt--snapshot)) +- `started` (Boolean) Is compute started. +- `stateless` (Boolean) Compute will be stateless (SVA_KVM_X86) if set to True +- `tags` (Attributes Set) (see [below for nested schema](#nestedatt--tags)) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `user_access` (Attributes Set) (see [below for nested schema](#nestedatt--user_access)) +- `without_boot_disk` (Boolean) If True, the imageId, bootDisk, sepId, pool parameters are ignored and the compute is created without a boot disk in the stopped state. + +### Read-Only + +- `account_id` (Number) +- `account_name` (String) +- `acl` (Attributes) (see [below for nested schema](#nestedatt--acl)) +- `affinity_weight` (Number) +- `arch` (String) +- `boot_disk` (Attributes) (see [below for nested schema](#nestedatt--boot_disk)) +- `boot_disk_id` (Number) +- `boot_order` (List of String) +- `cd_image_id` (Number) +- `clone_reference` (Number) +- `clones` (List of Number) +- `compute_id` (Number) +- `computeci_id` (Number) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `devices` (String) +- `disks` (Attributes List) (see [below for nested schema](#nestedatt--disks)) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. 
+- `image_name` (String) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--interfaces)) +- `lock_status` (String) +- `manager_id` (Number) +- `manager_type` (String) +- `migrationjob` (Number) +- `milestones` (Number) +- `natable_vins_id` (Number) +- `natable_vins_ip` (String) +- `natable_vins_name` (String) +- `natable_vins_network` (String) +- `natable_vins_network_name` (String) +- `need_reboot` (Boolean) +- `numa_node_id` (Number) +- `os_users` (Attributes List) (see [below for nested schema](#nestedatt--os_users)) +- `pinned` (Boolean) +- `reference_id` (String) +- `registered` (Boolean) +- `res_name` (String) +- `reserved_node_cpus` (List of Number) +- `rg_name` (String) +- `snap_sets` (Attributes List) (see [below for nested schema](#nestedatt--snap_sets)) +- `stateless_sep_id` (Number) +- `stateless_sep_type` (String) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_data` (String) +- `user_managed` (Boolean) +- `vgpus` (List of Number) +- `virtual_image_id` (Number) +- `virtual_image_name` (String) + + +### Nested Schema for `affinity_rules` + +Required: + +- `key` (String) key that are taken into account when analyzing this rule will be identified +- `mode` (String) EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key' +- `policy` (String) RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule +- `topology` (String) compute or node, for whom rule applies + +Optional: + +- `value` (String) value that must match the key to be taken into account when analyzing this rule + + + +### Nested Schema for `anti_affinity_rules` + +Required: + +- `key` (String) key that are taken into account when analyzing this rule will be identified +- `mode` (String) EQ or NE or ANY - the comparison mode is 'value', recorded by the specified 'key' +- `policy` (String) RECOMMENDED or REQUIRED, the degree of 'strictness' of this rule +- `topology` (String) 
compute or node, for whom rule applies + +Optional: + +- `value` (String) value that must match the key to be taken into account when analyzing this rule + + + +### Nested Schema for `cd` + +Required: + +- `cdrom_id` (Number) + + + +### Nested Schema for `network` + +Required: + +- `net_id` (Number) ID of the network for this connection. +- `net_type` (String) Type of the network for this connection, either EXTNET or VINS. + +Optional: + +- `ip_address` (String) Optional IP address to assign to this connection. This IP should belong to the selected network and free for use. + +Read-Only: + +- `mac` (String) MAC address associated with this connection. MAC address is assigned automatically. + + + +### Nested Schema for `port_forwarding` + +Required: + +- `proto` (String) +- `public_port_start` (Number) + +Optional: + +- `local_port` (Number) +- `public_port_end` (Number) + + + +### Nested Schema for `rollback` + +Required: + +- `label` (String) + + + +### Nested Schema for `snapshot` + +Required: + +- `label` (String) + + + +### Nested Schema for `tags` + +Required: + +- `key` (String) +- `value` (String) + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". 
Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `user_access` + +Required: + +- `access_type` (String) +- `username` (String) + + + +### Nested Schema for `acl` + +Read-Only: + +- `account_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--account_acl)) +- `compute_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--compute_acl)) +- `rg_acl` (Attributes List) (see [below for nested schema](#nestedatt--acl--rg_acl)) + + +### Nested Schema for `acl.account_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.compute_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + +### Nested Schema for `acl.rg_acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) + + + + +### Nested Schema for `boot_disk` + +Read-Only: + +- `account_id` (Number) +- `acl` (String) +- `boot_partition` (Number) +- `ckey` (String) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `disk_id` (Number) +- `disk_path` (String) +- `gid` (Number) +- `guid` (Number) +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested schema](#nestedatt--boot_disk--iotune)) +- `iqn` (String) +- `login` (String) +- `milestones` (Number) +- `name` (String) +- `order` (Number) +- `params` 
(String) +- `parent_id` (Number) +- `passwd` (String) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `reality_device_number` (Number) +- `reference_id` (String) +- `replication` (Attributes) (see [below for nested schema](#nestedatt--boot_disk--replication)) +- `res_id` (String) +- `role` (String) +- `sep_id` (Number) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--boot_disk--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `boot_disk.iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `boot_disk.replication` + +Read-Only: + +- `disk_id` (Number) +- `pool_id` (String) +- `role` (String) +- `self_volume_id` (String) +- `storage_id` (String) +- `volume_id` (String) + + + +### Nested Schema for `boot_disk.snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `reference_id` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) + + + + +### Nested Schema for `disks` + +Read-Only: + +- `account_id` (Number) +- `acl` (String) +- `boot_partition` (Number) +- `ckey` (String) +- `created_time` (Number) +- `deleted_time` (Number) +- `desc` (String) +- `destruction_time` (Number) +- `disk_id` (Number) +- `disk_path` (String) +- `gid` (Number) +- `guid` (Number) +- `image_id` (Number) +- `images` (List of String) +- `iotune` (Attributes) (see [below for nested 
schema](#nestedatt--disks--iotune)) +- `iqn` (String) +- `login` (String) +- `milestones` (Number) +- `name` (String) +- `order` (Number) +- `params` (String) +- `parent_id` (Number) +- `passwd` (String) +- `pci_slot` (Number) +- `pool` (String) +- `present_to` (List of Number) +- `purge_time` (Number) +- `reality_device_number` (Number) +- `reference_id` (String) +- `replication` (Attributes) (see [below for nested schema](#nestedatt--disks--replication)) +- `res_id` (String) +- `role` (String) +- `sep_id` (Number) +- `shareable` (Boolean) +- `size_max` (Number) +- `size_used` (Number) +- `snapshots` (Attributes List) (see [below for nested schema](#nestedatt--disks--snapshots)) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vmid` (Number) + + +### Nested Schema for `disks.iotune` + +Read-Only: + +- `read_bytes_sec` (Number) +- `read_bytes_sec_max` (Number) +- `read_iops_sec` (Number) +- `read_iops_sec_max` (Number) +- `size_iops_sec` (Number) +- `total_bytes_sec` (Number) +- `total_bytes_sec_max` (Number) +- `total_iops_sec` (Number) +- `total_iops_sec_max` (Number) +- `write_bytes_sec` (Number) +- `write_bytes_sec_max` (Number) +- `write_iops_sec` (Number) +- `write_iops_sec_max` (Number) + + + +### Nested Schema for `disks.replication` + +Read-Only: + +- `disk_id` (Number) +- `pool_id` (String) +- `role` (String) +- `self_volume_id` (String) +- `storage_id` (String) +- `volume_id` (String) + + + +### Nested Schema for `disks.snapshots` + +Read-Only: + +- `guid` (String) +- `label` (String) +- `reference_id` (String) +- `res_id` (String) +- `snap_set_guid` (String) +- `snap_set_time` (Number) +- `timestamp` (Number) + + + + +### Nested Schema for `interfaces` + +Read-Only: + +- `conn_id` (Number) +- `conn_type` (String) +- `enabled` (Boolean) +- `flip_group_id` (Number) +- `get_gw` (String) +- `guid` (String) +- `ip_address` (String) +- `listen_ssh` (Boolean) +- `mac` (String) +- `name` (String) +- `net_id` (Number) +- `net_type` (String) 
+- `netmask` (Number) +- `node_id` (Number) +- `pci_slot` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--interfaces--qos)) +- `target` (String) +- `type` (String) +- `vnfs` (List of Number) + + +### Nested Schema for `interfaces.qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + +### Nested Schema for `os_users` + +Read-Only: + +- `guid` (String) +- `login` (String) +- `password` (String) +- `public_key` (String) + + + +### Nested Schema for `snap_sets` + +Read-Only: + +- `disks` (List of Number) +- `guid` (String) +- `label` (String) +- `timestamp` (Number) diff --git a/docs/resources/lb.md b/docs/resources/lb.md new file mode 100644 index 0000000..cb357c7 --- /dev/null +++ b/docs/resources/lb.md @@ -0,0 +1,191 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb (Resource) + + + + + + +## Schema + +### Required + +- `extnet_id` (Number) +- `name` (String) +- `rg_id` (Number) +- `start` (Boolean) +- `vins_id` (Number) + +### Optional + +- `config_reset` (Boolean) +- `desc` (String) +- `enable` (Boolean) +- `ha_mode` (Boolean) +- `permanently` (Boolean) +- `restart` (Boolean) +- `restore` (Boolean) +- `safe` (Boolean) +- `sysctl_params` (Attributes List) Custom sysctl values for Load Balancer instance. Applied on boot. 
(see [below for nested schema](#nestedatt--sysctl_params)) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `acl` (String) +- `backend_haip` (String) +- `backends` (Attributes List) (see [below for nested schema](#nestedatt--backends)) +- `created_by` (String) +- `created_time` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `dp_api_user` (String) +- `frontend_haip` (String) +- `frontends` (Attributes List) (see [below for nested schema](#nestedatt--frontends)) +- `gid` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `image_id` (Number) +- `lb_id` (Number) +- `manager_id` (Number) +- `manager_type` (String) +- `milestones` (Number) +- `part_k8s` (Boolean) +- `primary_node` (Attributes) (see [below for nested schema](#nestedatt--primary_node)) +- `rg_name` (String) +- `secondary_node` (Attributes) (see [below for nested schema](#nestedatt--secondary_node)) +- `status` (String) +- `tech_status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) + + +### Nested Schema for `sysctl_params` + +Required: + +- `key` (String) +- `value` (String) + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. 
+- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `backends` + +Read-Only: + +- `algorithm` (String) +- `guid` (String) +- `name` (String) +- `server_default_settings` (Attributes) (see [below for nested schema](#nestedatt--backends--server_default_settings)) +- `servers` (Attributes List) (see [below for nested schema](#nestedatt--backends--servers)) + + +### Nested Schema for `backends.server_default_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + +### Nested Schema for `backends.servers` + +Read-Only: + +- `address` (String) +- `check` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) +- `server_settings` (Attributes) (see [below for nested schema](#nestedatt--backends--servers--server_settings)) + + +### Nested Schema for `backends.servers.server_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) + + + + + +### Nested Schema for `frontends` + +Read-Only: + +- `backend` (String) +- `bindings` (Attributes List) (see [below for nested schema](#nestedatt--frontends--bindings)) +- `guid` (String) +- `name` (String) + + +### Nested Schema for `frontends.bindings` + +Read-Only: + +- `address` 
(String) +- `guid` (String) +- `name` (String) +- `port` (Number) + + + + +### Nested Schema for `primary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) + + + +### Nested Schema for `secondary_node` + +Read-Only: + +- `backend_ip` (String) +- `compute_id` (Number) +- `frontend_ip` (String) +- `guid` (String) +- `mgmt_ip` (String) +- `network_id` (Number) diff --git a/docs/resources/lb_backend.md b/docs/resources/lb_backend.md new file mode 100644 index 0000000..83e290c --- /dev/null +++ b/docs/resources/lb_backend.md @@ -0,0 +1,78 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb_backend Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb_backend (Resource) + + + + + + +## Schema + +### Required + +- `lb_id` (Number) +- `name` (String) + +### Optional + +- `algorithm` (String) +- `downinter` (Number) +- `fall` (Number) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `weight` (Number) + +### Read-Only + +- `guid` (String) +- `id` (String) The ID of this resource. +- `servers` (Attributes List) (see [below for nested schema](#nestedatt--servers)) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). + + + +### Nested Schema for `servers` + +Read-Only: + +- `address` (String) +- `check` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) +- `server_settings` (Attributes) (see [below for nested schema](#nestedatt--servers--server_settings)) + + +### Nested Schema for `servers.server_settings` + +Read-Only: + +- `downinter` (Number) +- `fall` (Number) +- `guid` (String) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `weight` (Number) diff --git a/docs/resources/lb_backend_server.md b/docs/resources/lb_backend_server.md new file mode 100644 index 0000000..39352cc --- /dev/null +++ b/docs/resources/lb_backend_server.md @@ -0,0 +1,52 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb_backend_server Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb_backend_server (Resource) + + + + + + +## Schema + +### Required + +- `address` (String) IP address of the server. 
+- `backend_name` (String) Name of the existing backend on this LB to add the server to +- `lb_id` (Number) ID of the LB instance this backend belongs to +- `name` (String) Must be unique among all servers defined for this backend - name of the server definition to add. +- `port` (Number) Port number on the server + +### Optional + +- `check` (String) Set to disabled if this server should be used regardless of its state. +- `downinter` (Number) +- `fall` (Number) +- `inter` (Number) +- `maxconn` (Number) +- `maxqueue` (Number) +- `rise` (Number) +- `slowstart` (Number) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `weight` (Number) + +### Read-Only + +- `guid` (String) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/docs/resources/lb_frontend.md b/docs/resources/lb_frontend.md new file mode 100644 index 0000000..a034006 --- /dev/null +++ b/docs/resources/lb_frontend.md @@ -0,0 +1,53 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb_frontend Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb_frontend (Resource) + + + + + + +## Schema + +### Required + +- `backend_name` (String) +- `lb_id` (Number) +- `name` (String) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `bindings` (Attributes List) (see [below for nested schema](#nestedatt--bindings)) +- `guid` (String) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `bindings` + +Read-Only: + +- `address` (String) +- `guid` (String) +- `name` (String) +- `port` (Number) diff --git a/docs/resources/lb_frontend_bind.md b/docs/resources/lb_frontend_bind.md new file mode 100644 index 0000000..4fb7295 --- /dev/null +++ b/docs/resources/lb_frontend_bind.md @@ -0,0 +1,43 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_lb_frontend_bind Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_lb_frontend_bind (Resource) + + + + + + +## Schema + +### Required + +- `address` (String) +- `frontend_name` (String) +- `lb_id` (Number) +- `name` (String) +- `port` (Number) + +### Optional + +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `guid` (String) +- `id` (String) The ID of this resource. + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. 
+- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). diff --git a/docs/resources/resgroup.md b/docs/resources/resgroup.md new file mode 100644 index 0000000..f1f0259 --- /dev/null +++ b/docs/resources/resgroup.md @@ -0,0 +1,133 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_resgroup Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_resgroup (Resource) + + + + + + +## Schema + +### Required + +- `account_id` (Number) account, which will own this resource group +- `gid` (Number) grid id +- `name` (String) name of this RG. Must be unique within the account. + +### Optional + +- `access` (Attributes List) Grant/revoke user or group access to the Resource group as specified (see [below for nested schema](#nestedatt--access)) +- `def_net` (Attributes) Set default network for attach associated VMs (see [below for nested schema](#nestedatt--def_net)) +- `def_net_type` (String) type of the default network for this RG. VMs created in this RG will be by default connected to this network. Allowed values are PRIVATE, PUBLIC, NONE. +- `description` (String) User-defined text description of this resource group. +- `enable` (Boolean) flag for enable/disable RG +- `ext_ip` (String) IP address on the external network to request when def_net_type=PRIVATE and ext_net_id is not 0. +- `ext_net_id` (Number) external network id +- `force` (Boolean) Set to True if you want force delete non-empty RG +- `ipcidr` (String) private network IP CIDR if default network PRIVATE +- `owner` (String) username - owner of this RG. Leave blank to set current user as owner +- `permanently` (Boolean) Set to True if you want force delete non-empty RG +- `quota` (Attributes) Quota settings for this resource group. 
(see [below for nested schema](#nestedatt--quota)) +- `reason` (String) Reason for action +- `register_computes` (Boolean) Register computes in registration system +- `restore` (Boolean) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `account_name` (String) +- `acl` (Attributes List) (see [below for nested schema](#nestedatt--acl)) +- `compute_features` (List of String) +- `cpu_allocation_parameter` (String) +- `cpu_allocation_ratio` (Number) +- `def_net_id` (Number) +- `deleted_by` (String) +- `deleted_time` (Number) +- `dirty` (Boolean) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `last_updated` (String) +- `lock_status` (String) +- `milestones` (Number) +- `res_types` (List of String) +- `rg_id` (Number) +- `secret` (String) +- `status` (String) +- `uniq_pools` (List of String) +- `updated_by` (String) +- `updated_time` (Number) +- `vins` (List of Number) +- `vms` (List of Number) + + +### Nested Schema for `access` + +Required: + +- `right` (String) Access rights to set, one of 'R', 'RCX' or 'ARCXDU' +- `user` (String) User or group name to grant access + +Optional: + +- `reason` (String) Reason for action + + + +### Nested Schema for `def_net` + +Required: + +- `net_type` (String) Network type to set. Must be one of 'PRIVATE' or 'PUBLIC'. + +Optional: + +- `net_id` (Number) Network segment ID. If netType is PUBLIC and netId is 0 then default external network segment will be selected. If netType is PRIVATE and netId=0, the first ViNS defined for this RG will be selected. Otherwise, netId identifies either existing external network segment or ViNS. +- `reason` (String) Reason for action + + + +### Nested Schema for `quota` + +Optional: + +- `cpu` (Number) Limit on the total number of CPUs in this resource group. +- `disk` (Number) Limit on the total volume of storage resources in this resource group, specified in GB. 
+- `ext_ips` (Number) Limit on the total number of external IP addresses this resource group can use. +- `ext_traffic` (Number) Limit on the total ingress network traffic for this resource group, specified in GB. +- `ram` (Number) Limit on the total amount of RAM in this resource group, specified in MB. + +Read-Only: + +- `cu_d` (Number) Limit on the total volume of storage resources in this resource group, specified in GB. +- `gpu_units` (Number) Limit on the total number of virtual GPUs this resource group can use. + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `acl` + +Read-Only: + +- `explicit` (Boolean) +- `guid` (String) +- `right` (String) +- `status` (String) +- `type` (String) +- `user_group_id` (String) diff --git a/docs/resources/vins.md b/docs/resources/vins.md new file mode 100644 index 0000000..24aa34c --- /dev/null +++ b/docs/resources/vins.md @@ -0,0 +1,487 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins (Resource) + + + + + + +## Schema + +### Required + +- `name` (String) vins name + +### Optional + +- `account_id` (Number) account id, used for creating vins in account +- `desc` (String) description, used for creating vins either in resource group or in account +- `dns` (Set of String) list of DNS ip address +- `enable` (Boolean) flag to enable/disable vins +- `ext_net` (Attributes) connect/disconnect vins to/from external network (see [below for nested schema](#nestedatt--ext_net)) +- `force` (Boolean) flag to force delete of non-empty vins +- `gid` (Number) grid (platform) ID, used for creating vins in account +- `ip` (Attributes List) ip reserve/release (see [below for nested schema](#nestedatt--ip)) +- `ipcidr` (String) private network IP CIDR, used for creating vins either in resource group or in account +- `nat_rule` (Attributes List) create/delete NAT (port forwarding rule) on vins (see [below for nested schema](#nestedatt--nat_rule)) +- `permanently` (Boolean) flag to delete vins permanently +- `pre_reservations_num` (Number) number of pre created reservations, used for creating vins either in resource group or in account +- `restore` (Boolean) flag to restore vins +- `rg_id` (Number) resource group id, used for creating vins in resource group +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) +- `vnfdev_redeploy` (Boolean) flag to redeploy vnfdev +- `vnfdev_restart` (Boolean) flag to 
restart vnfdev + +### Read-Only + +- `account_name` (String) +- `computes` (Attributes List) (see [below for nested schema](#nestedatt--computes)) +- `created_by` (String) +- `created_time` (Number) +- `default_gw` (String) +- `default_qos` (Attributes) (see [below for nested schema](#nestedatt--default_qos)) +- `deleted_by` (String) +- `deleted_time` (Number) +- `guid` (Number) +- `id` (String) The ID of this resource. +- `last_updated` (String) +- `lock_status` (String) +- `manager_id` (Number) +- `manager_type` (String) +- `milestones` (Number) +- `net_mask` (Number) +- `network` (String) +- `redundant` (Boolean) +- `rg_name` (String) +- `sec_vnf_dev_id` (Number) +- `status` (String) +- `updated_by` (String) +- `updated_time` (Number) +- `user_managed` (Boolean) +- `vins_id` (Number) Unique ID of the ViNS. If ViNS ID is specified, then ViNS name, rg_id and account_id are ignored. +- `vnf_dev` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev)) +- `vnfs` (Attributes) (see [below for nested schema](#nestedatt--vnfs)) +- `vxlan_id` (Number) + + +### Nested Schema for `ext_net` + +Optional: + +- `ext_net_id` (Number) external network ID +- `ext_net_ip` (String) directly set IP address + + + +### Nested Schema for `ip` + +Required: + +- `type` (String) type of the reservation + +Optional: + +- `compute_id` (Number) ID of the compute, associated with this reservation of type DHCP. Ignored for other types. +- `ip_addr` (String) IP address to use. Non-empty string is required for type EXCLUDE. Ignored for types DHCP and VIP. +- `mac_addr` (String) MAC address to associate with IP reservation. Ignored for type EXCLUDE, non-empty string is required for DHCP and VIP. 
+ + + +### Nested Schema for `nat_rule` + +Required: + +- `ext_port_start` (Number) external IP start port to use for this rule +- `int_ip` (String) internal IP address to apply this rule to + +Optional: + +- `ext_port_end` (Number) external IP end port to use for this rule +- `int_port` (Number) internal IP port number to use for this rule +- `proto` (String) IP protocol type (tcp or udp) + +Read-Only: + +- `rule_id` (Number) rule id + + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
+ + + +### Nested Schema for `computes` + +Read-Only: + +- `compute_id` (Number) +- `compute_name` (String) + + + +### Nested Schema for `default_qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + +### Nested Schema for `vnf_dev` + +Read-Only: + +- `account_id` (Number) +- `capabilities` (List of String) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--config)) +- `config_saved` (Boolean) +- `custom_pre_cfg` (Boolean) +- `desc` (String) +- `gid` (Number) +- `guid` (Number) +- `interfaces` (Attributes List) (see [below for nested schema](#nestedatt--vnf_dev--interfaces)) +- `lock_status` (String) +- `milestones` (Number) +- `status` (String) +- `tech_status` (String) +- `type` (String) +- `vins` (List of Number) +- `vnf_id` (Number) +- `vnf_name` (String) + + +### Nested Schema for `vnf_dev.config` + +Read-Only: + +- `mgmt` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--config--mgmt)) +- `resources` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--config--resources)) + + +### Nested Schema for `vnf_dev.config.mgmt` + +Read-Only: + +- `ip_addr` (String) +- `password` (String) +- `ssh_key` (String) +- `user` (String) + + + +### Nested Schema for `vnf_dev.config.resources` + +Read-Only: + +- `cpu` (Number) +- `ram` (Number) +- `stack_id` (Number) +- `uuid` (String) + + + + +### Nested Schema for `vnf_dev.interfaces` + +Read-Only: + +- `conn_id` (Number) +- `conn_type` (String) +- `def_gw` (String) +- `enabled` (Boolean) +- `flipgroup_id` (Number) +- `guid` (String) +- `ip_address` (String) +- `listen_ssh` (Boolean) +- `mac` (String) +- `name` (String) +- `net_id` (Number) +- `net_mask` (Number) +- `net_type` (String) +- `node_id` (Number) +- `pci_slot` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--vnf_dev--interfaces--qos)) +- `target` (String) +- `type` (String) +- `vnfs` (List of Number) + + +### 
Nested Schema for `vnf_dev.interfaces.qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + + +### Nested Schema for `vnfs` + +Read-Only: + +- `dhcp` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp)) +- `gw` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw)) +- `nat` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat)) + + +### Nested Schema for `vnfs.dhcp` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp--config)) +- `created_time` (Number) +- `devices` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp--devices)) +- `dhcp_id` (Number) +- `gid` (Number) +- `guid` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `owner_id` (Number) +- `owner_type` (String) +- `pure_virtual` (Boolean) +- `routes` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--dhcp--routes)) +- `status` (String) +- `tech_status` (String) +- `type` (String) + + +### Nested Schema for `vnfs.dhcp.config` + +Read-Only: + +- `default_gw` (String) +- `dns` (List of String) +- `ip_end` (String) +- `ip_start` (String) +- `lease` (Number) +- `netmask` (Number) +- `network` (String) +- `reservations` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--dhcp--config--reservations)) + + +### Nested Schema for `vnfs.dhcp.config.reservations` + +Read-Only: + +- `client_type` (String) +- `desc` (String) +- `domainname` (String) +- `hostname` (String) +- `ip` (String) +- `mac` (String) +- `type` (String) +- `vm_id` (Number) + + + + +### Nested Schema for `vnfs.dhcp.devices` + +Read-Only: + +- `primary` (Attributes) (see [below for nested schema](#nestedatt--vnfs--dhcp--devices--primary)) + + +### Nested Schema for `vnfs.dhcp.devices.primary` + +Read-Only: + +- `dev_id` (Number) +- `iface01` (String) +- `iface02` (String) + + + + +### Nested Schema for 
`vnfs.dhcp.routes` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) + + + + +### Nested Schema for `vnfs.gw` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--config)) +- `created_time` (Number) +- `devices` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--devices)) +- `gid` (Number) +- `guid` (Number) +- `gw_id` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `owner_id` (Number) +- `owner_type` (String) +- `pure_virtual` (Boolean) +- `routes` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--gw--routes)) +- `status` (String) +- `tech_status` (String) +- `type` (String) + + +### Nested Schema for `vnfs.gw.config` + +Read-Only: + +- `default_gw` (String) +- `ext_net_id` (Number) +- `ext_net_ip` (String) +- `ext_netmask` (Number) +- `qos` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--config--qos)) + + +### Nested Schema for `vnfs.gw.config.qos` + +Read-Only: + +- `e_rate` (Number) +- `guid` (String) +- `in_burst` (Number) +- `in_rate` (Number) + + + + +### Nested Schema for `vnfs.gw.devices` + +Read-Only: + +- `primary` (Attributes) (see [below for nested schema](#nestedatt--vnfs--gw--devices--primary)) + + +### Nested Schema for `vnfs.gw.devices.primary` + +Read-Only: + +- `dev_id` (Number) +- `iface01` (String) +- `iface02` (String) + + + + +### Nested Schema for `vnfs.gw.routes` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) + + + + +### Nested Schema for `vnfs.nat` + +Read-Only: + +- `account_id` (Number) +- `ckey` (String) +- `config` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat--config)) +- `created_time` (Number) +- `devices` (Attributes) (see [below for nested 
schema](#nestedatt--vnfs--nat--devices)) +- `gid` (Number) +- `guid` (Number) +- `lock_status` (String) +- `milestones` (Number) +- `nat_id` (Number) +- `owner_id` (Number) +- `owner_type` (String) +- `pure_virtual` (Boolean) +- `routes` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--nat--routes)) +- `status` (String) +- `tech_status` (String) +- `type` (String) + + +### Nested Schema for `vnfs.nat.config` + +Read-Only: + +- `net_mask` (Number) +- `network` (String) +- `rules` (Attributes List) (see [below for nested schema](#nestedatt--vnfs--nat--config--rules)) + + +### Nested Schema for `vnfs.nat.config.rules` + +Read-Only: + +- `local_ip` (String) +- `local_port` (Number) +- `protocol` (String) +- `public_port_end` (Number) +- `public_port_start` (Number) +- `rule_id` (Number) +- `vm_id` (Number) +- `vm_name` (String) + + + + +### Nested Schema for `vnfs.nat.devices` + +Read-Only: + +- `primary` (Attributes) (see [below for nested schema](#nestedatt--vnfs--nat--devices--primary)) + + +### Nested Schema for `vnfs.nat.devices.primary` + +Read-Only: + +- `dev_id` (Number) +- `iface01` (String) +- `iface02` (String) + + + + +### Nested Schema for `vnfs.nat.routes` + +Read-Only: + +- `compute_ids` (List of Number) +- `destination` (String) +- `gateway` (String) +- `guid` (String) +- `netmask` (String) +- `route_id` (Number) diff --git a/docs/resources/vins_static_route.md b/docs/resources/vins_static_route.md new file mode 100644 index 0000000..5d01c5b --- /dev/null +++ b/docs/resources/vins_static_route.md @@ -0,0 +1,45 @@ +--- +# generated by https://github.com/hashicorp/terraform-plugin-docs +page_title: "dynamix_vins_static_route Resource - terraform-provider-dynamix" +subcategory: "" +description: |- + +--- + +# dynamix_vins_static_route (Resource) + + + + + + +## Schema + +### Required + +- `destination` (String) +- `gateway` (String) +- `netmask` (String) +- `vins_id` (Number) Unique ID of the ViNS. 
+ +### Optional + +- `compute_ids` (List of Number) +- `route_id` (Number) +- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts)) + +### Read-Only + +- `guid` (String) +- `id` (String) The ID of this resource. +- `last_updated` (String) + + +### Nested Schema for `timeouts` + +Optional: + +- `create` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). +- `delete` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Setting a timeout for a Delete operation is only applicable if changes are saved into state before the destroy operation occurs. +- `read` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). Read operations occur during any refresh or planning operation when refresh is enabled. +- `update` (String) A string that can be [parsed as a duration](https://pkg.go.dev/time#ParseDuration) consisting of numbers and unit suffixes, such as "30s" or "2h45m". Valid time units are "s" (seconds), "m" (minutes), "h" (hours). 
diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..6555683 --- /dev/null +++ b/go.mod @@ -0,0 +1,81 @@ +module repository.basistech.ru/BASIS/terraform-provider-dynamix + +go 1.20 + +require ( + github.com/google/uuid v1.6.0 + github.com/hashicorp/terraform-plugin-framework v1.9.0 + github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 + github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 + github.com/hashicorp/terraform-plugin-log v0.9.0 + github.com/sirupsen/logrus v1.9.3 + repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1 +) + +require ( + github.com/BurntSushi/toml v1.2.1 // indirect + github.com/Kunde21/markdownfmt/v3 v3.1.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver/v3 v3.2.0 // indirect + github.com/Masterminds/sprig/v3 v3.2.3 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/armon/go-radix v1.0.0 // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/bmatcuk/doublestar/v4 v4.6.1 // indirect + github.com/cloudflare/circl v1.3.7 // indirect + github.com/fatih/color v1.16.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.4 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.21.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-querystring v1.1.0 // indirect + github.com/hashicorp/cli v1.1.6 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-checkpoint v0.5.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-hclog v1.5.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + 
github.com/hashicorp/go-version v1.7.0 // indirect + github.com/hashicorp/hc-install v0.7.0 // indirect + github.com/hashicorp/terraform-exec v0.21.0 // indirect + github.com/hashicorp/terraform-json v0.22.1 // indirect + github.com/hashicorp/terraform-plugin-docs v0.19.4 // indirect + github.com/hashicorp/terraform-plugin-go v0.23.0 // indirect + github.com/hashicorp/terraform-registry-address v0.2.3 // indirect + github.com/hashicorp/terraform-svchost v0.1.1 // indirect + github.com/hashicorp/yamux v0.1.1 // indirect + github.com/huandu/xstrings v1.3.3 // indirect + github.com/imdario/mergo v0.3.15 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/oklog/run v1.0.0 // indirect + github.com/posener/complete v1.2.3 // indirect + github.com/shopspring/decimal v1.3.1 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect + github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect + github.com/yuin/goldmark v1.7.1 // indirect + github.com/yuin/goldmark-meta v1.1.0 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + go.abhg.dev/goldmark/frontmatter v0.2.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df // indirect + golang.org/x/mod v0.17.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect + google.golang.org/grpc v1.63.2 // indirect + google.golang.org/protobuf v1.34.0 // indirect + gopkg.in/yaml.v2 v2.3.0 // indirect + gopkg.in/yaml.v3 
v3.0.1 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..abc5dfb --- /dev/null +++ b/go.sum @@ -0,0 +1,221 @@ +github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= +github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/Kunde21/markdownfmt/v3 v3.1.0 h1:KiZu9LKs+wFFBQKhrZJrFZwtLnCCWJahL+S+E/3VnM0= +github.com/Kunde21/markdownfmt/v3 v3.1.0/go.mod h1:tPXN1RTyOzJwhfHoon9wUr4HGYmWgVxSQN6VBJDkrVc= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= +github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= +github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= +github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bmatcuk/doublestar/v4 v4.6.1 h1:FH9SifrbvJhnlQpztAx++wlkk70QBf0iBWDwNy7PA4I= +github.com/bmatcuk/doublestar/v4 v4.6.1/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= 
+github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/gabriel-vasile/mimetype v1.4.4 h1:QjV6pZ7/XZ7ryI2KuyeEDE8wnh7fHP9YnQy+R0LnH8I= +github.com/gabriel-vasile/mimetype v1.4.4/go.mod h1:JwLei5XPtWdGiMFB5Pjle1oEeoSeEuJfJE+TtfvdB/s= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.21.0 h1:4fZA11ovvtkdgaeev9RGWPgc1uj3H8W+rNYyH/ySBb0= +github.com/go-playground/validator/v10 v10.21.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.5.2/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/cli v1.1.6 h1:CMOV+/LJfL1tXCOKrgAX0uRKnzjj/mpmqNXloRSy2K8= +github.com/hashicorp/cli v1.1.6/go.mod h1:MPon5QYlgjjo0BSoAiN0ESeT5fRzDjVRp+uioJ0piz4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-checkpoint v0.5.0 h1:MFYpPZCnQqQTE18jFwSII6eUQrD/oxMFp3mlgcqk5mU= +github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= +github.com/hashicorp/go-plugin v1.6.0 
h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= +github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= +github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= +github.com/hashicorp/terraform-plugin-docs v0.19.4 h1:G3Bgo7J22OMtegIgn8Cd/CaSeyEljqjH3G39w28JK4c= +github.com/hashicorp/terraform-plugin-docs v0.19.4/go.mod h1:4pLASsatTmRynVzsjEhbXZ6s7xBlUw/2Kt0zfrq8HxA= +github.com/hashicorp/terraform-plugin-framework v1.9.0 h1:caLcDoxiRucNi2hk8+j3kJwkKfvHznubyFsJMWfZqKU= +github.com/hashicorp/terraform-plugin-framework v1.9.0/go.mod h1:qBXLDn69kM97NNVi/MQ9qgd1uWWsVftGSnygYG1tImM= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= +github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= +github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= +github.com/hashicorp/terraform-plugin-framework-validators 
v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= +github.com/hashicorp/terraform-plugin-go v0.23.0 h1:AALVuU1gD1kPb48aPQUjug9Ir/125t+AAurhqphJ2Co= +github.com/hashicorp/terraform-plugin-go v0.23.0/go.mod h1:1E3Cr9h2vMlahWMbsSEcNrOCxovCZhOOIXjFHbjc/lQ= +github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= +github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= +github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= +github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= +github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= +github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= +github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= +github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= +github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= +github.com/mattn/go-colorable v0.1.12/go.mod 
h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib 
v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= +github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= +github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= +github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.7.1 h1:3bajkSilaCbjdKVsKdZjZCLBNPL9pYzrCakKaf4U49U= +github.com/yuin/goldmark v1.7.1/go.mod h1:uzxRWxtg69N339t3louHJ7+O03ezfj6PlliRlaOzY1E= +github.com/yuin/goldmark-meta v1.1.0 h1:pWw+JLHGZe8Rk0EGsMVssiNb/AaPMHfSRszZeUeiOUc= +github.com/yuin/goldmark-meta v1.1.0/go.mod h1:U4spWENafuA7Zyg+Lj5RqK/MF+ovMYtBvXi1lBb2VP0= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +go.abhg.dev/goldmark/frontmatter v0.2.0 h1:P8kPG0YkL12+aYk2yU3xHv4tcXzeVnN+gU0tJ5JnxRw= +go.abhg.dev/goldmark/frontmatter v0.2.0/go.mod h1:XqrEkZuM57djk7zrlRUB02x8I5J0px76YjkOzhB4YlU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df h1:UA2aFVmmsIlefxMk29Dp2juaUSth8Pyn3Tq5Y5mJGME= +golang.org/x/exp v0.0.0-20230626212559-97b1e661b5df/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0 
h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/protobuf v1.34.0 h1:Qo/qEd2RZPCf2nKuorzksSknv0d3ERwp1vFG38gSmH4= +google.golang.org/protobuf v1.34.0/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1 h1:wpxjnVO7hGaQVWbtFoYVbNoeFxk0QUnEfDsPuWjiNk0= +repository.basistech.ru/BASIS/decort-golang-sdk v1.8.1/go.mod h1:OaUynHHuSjWMzpfyoL4au6oLcUogqUkPPBKA15pbHWo= diff --git a/internal/client/client.go b/internal/client/client.go new file mode 100644 index 0000000..c85f61c --- /dev/null +++ b/internal/client/client.go @@ -0,0 +1,43 @@ +package client + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" +) + +func DataSource(ctx context.Context, req *datasource.ConfigureRequest, resp *datasource.ConfigureResponse) *decort.DecortClient { + if req.ProviderData == nil { + tflog.Error(ctx, "Provider Configure is nill") + return nil + } + client, ok := req.ProviderData.(*decort.DecortClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *decort.DecortClient, got: %T. 
Please report this issue to the provider developers.", req.ProviderData), + ) + return nil + } + return client +} + +func Resource(ctx context.Context, req *resource.ConfigureRequest, resp *resource.ConfigureResponse) *decort.DecortClient { + if req.ProviderData == nil { + tflog.Error(ctx, "Provider Configure is nill") + return nil + } + client, ok := req.ProviderData.(*decort.DecortClient) + if !ok { + resp.Diagnostics.AddError( + "Unexpected Data Source Configure Type", + fmt.Sprintf("Expected *decort.DecortClient, got: %T. Please report this issue to the provider developers.", req.ProviderData), + ) + return nil + } + return client +} diff --git a/internal/constants/constants.go b/internal/constants/constants.go new file mode 100644 index 0000000..93f5767 --- /dev/null +++ b/internal/constants/constants.go @@ -0,0 +1,22 @@ +package constants + +// LimitMaxVinsPerResgroup set maximum number of VINs instances per Resource Group +const LimitMaxVinsPerResgroup = 4 + +// MaxSshKeysPerCompute sets maximum number of user:ssh_key pairs to authorize when creating new compute +const MaxSshKeysPerCompute = 12 + +// MaxExtraDisksPerCompute sets maximum number of extra disks that can be added when creating new compute +const MaxExtraDisksPerCompute = 12 + +// MaxNetworksPerCompute sets maximum number of vNICs per compute +const MaxNetworksPerCompute = 8 + +// MaxCpusPerCompute sets maximum number of vCPUs per compute +const MaxCpusPerCompute = 128 + +// MinRamPerCompute sets minimum amount of RAM per compute in MB +const MinRamPerCompute = 128 + +// DivisibleByRAM sets multiplicity of RAM per compute +const DivisibleByRAM = 128 diff --git a/internal/constants/timeouts.go b/internal/constants/timeouts.go new file mode 100644 index 0000000..7024630 --- /dev/null +++ b/internal/constants/timeouts.go @@ -0,0 +1,12 @@ +package constants + +import "time" + +// timeouts for API calls from CRUD functions of Terraform plugin +var Timeout30s = time.Second * 30 +var Timeout60s = 
time.Second * 60 +var Timeout180s = time.Second * 180 +var Timeout300s = time.Second * 300 +var Timeout600s = time.Second * 600 +var Timeout20m = time.Minute * 20 +var Timeout30m = time.Minute * 30 diff --git a/internal/flattens/flatten_simple_type.go b/internal/flattens/flatten_simple_type.go new file mode 100644 index 0000000..267248c --- /dev/null +++ b/internal/flattens/flatten_simple_type.go @@ -0,0 +1,19 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// FlattenSimpleTypeToList convert a slice of simple type to a types.List +func FlattenSimpleTypeToList(ctx context.Context, elementType attr.Type, elements any) types.List { + res, diags := types.ListValueFrom(ctx, elementType, elements) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error FlattenSimpleTypeToList", diags)) + } + return res +} diff --git a/internal/flattens/meta.go b/internal/flattens/meta.go new file mode 100644 index 0000000..d9072af --- /dev/null +++ b/internal/flattens/meta.go @@ -0,0 +1,38 @@ +package flattens + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" +) + +// Meta flattens []interface{} golang struct to a list of terraform framework types.StringType +func Meta(ctx context.Context, m []interface{}) types.List { + tflog.Info(ctx, "Start flattenMeta") + tempSlice := make([]string, 0, len(m)) + for _, item := range m { + switch d := item.(type) { + case string: + tempSlice = append(tempSlice, d) + case int: + tempSlice = append(tempSlice, strconv.Itoa(d)) + case int64: + tempSlice = append(tempSlice, strconv.FormatInt(d, 10)) + case float64: + tempSlice = append(tempSlice, strconv.FormatInt(int64(d), 10)) + default: + tempSlice = append(tempSlice, "") + } + } + + res, diags := types.ListValueFrom(ctx, 
types.StringType, tempSlice) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("Error flattenMeta:", diags)) + } + + tflog.Info(ctx, "End flattenMeta") + return res +} diff --git a/internal/provider/data_sources_map.go b/internal/provider/data_sources_map.go new file mode 100644 index 0000000..db23b2e --- /dev/null +++ b/internal/provider/data_sources_map.go @@ -0,0 +1,131 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/extnet" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/image" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/stack" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vfpool" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins" + cbaccount "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/account" + cbStack "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudbroker/stack" +) + +func newDataSourcesMap() []func() datasource.DataSource { + return []func() 
datasource.DataSource{ + account.NewDataSourceAccount, + account.NewDataSourceAccountAuditsList, + account.NewDataSourceAccountComputesList, + account.NewDataSourceAccountConsumedUnits, + account.NewDataSourceAccountConsumedUnitsByType, + account.NewDataSourceAccountDisksList, + account.NewDataSourceAccountFlipgroupsList, + account.NewDataSourceAccountList, + account.NewDataSourceAccountListDeleted, + account.NewDataSourceAccountRGList, + account.NewDataSourceAccountTemplatesList, + account.NewDataSourceAccountVinsList, + account.NewDataSourceAccountGetResourceConsumption, + account.NewDataSourceAccountReservedUnits, + account.NewDataSourceAccountGetResourceConsumptionList, + + bservice.NewDataSourceBService, + bservice.NewDataSourceBServiceList, + bservice.NewDataSourceBServiceGroup, + bservice.NewDataSourceBServiceDeletedList, + bservice.NewDataSourceBServiceSnapshotList, + + disks.NewDataSourceDisk, + disks.NewDataSourceDiskList, + disks.NewDataSourceDiskListDeleted, + disks.NewDataSourceDiskListTypes, + disks.NewDataSourceDiskListTypesDetailed, + disks.NewDataSourceDiskListUnattached, + disks.NewDataSourceDiskSnapshot, + disks.NewDataSourceDiskSnapshotList, + disks.NewDataSourceDiskReplication, + + extnet.NewDataSourceExtNet, + extnet.NewDataSourceExtNetComputesList, + extnet.NewDataSourceExtNetDefault, + extnet.NewDataSourceExtNetList, + + flipgroup.NewDataSourceFlipgroup, + flipgroup.NewDataSourceFlipgroupList, + + image.NewDataSourceImage, + image.NewDataSourceImageList, + + k8s.NewDataSourceK8S, + k8s.NewDataSourceK8SWgCloudInit, + k8s.NewDataSourceK8SList, + k8s.NewDataSourceK8SListDeleted, + k8s.NewDataSourceK8SWg, + k8s.NewDataSourceK8SComputes, + k8s.NewDataSourceK8ciList, + k8s.NewDataSourceK8SWgList, + + kvmvm.NewDataSourceComputeAudits, + kvmvm.NewDataSourceComputeGetAudits, + kvmvm.NewDataSourceComputeGetConsoleUrl, + kvmvm.NewDataSourceComputeGetLog, + kvmvm.NewDataSourceComputePciDeviceList, + kvmvm.NewDataSourceComputePFWList, + 
kvmvm.NewDataSourceComputeSnapshotUsage, + kvmvm.NewDataSourceComputeUserList, + kvmvm.NewDataSourceComputeVGPUList, + kvmvm.NewDataSourceComputeList, + kvmvm.NewDataSourceComputeListDeleted, + kvmvm.NewDataSourceCompute, + + lb.NewDataSourceLB, + lb.NewDataSourceLBList, + lb.NewDataSourceLBListDeleted, + + rg.NewDataSourceRG, + rg.NewDataSourceRGAffinityGroupComputes, + rg.NewDataSourceRGAffinityGroupsGet, + rg.NewDataSourceRGAffinityGroupsList, + rg.NewDataSourceRGAudits, + rg.NewDataSourceRGGetResourceConsumption, + rg.NewDataSourceRGList, + rg.NewDataSourceRGListComputes, + rg.NewDataSourceRGListDeleted, + rg.NewDataSourceRGListLB, + rg.NewDataSourceRGListPFW, + rg.NewDataSourceRGListVins, + rg.NewDataSourceRGResourceConsumptionList, + rg.NewDataSourceRGUsage, + + stack.NewDataSourceStack, + stack.NewDataSourceStackList, + + vfpool.NewDataSourceVFPool, + vfpool.NewDataSourceVFPoolList, + + vins.NewDataSourceVINS, + vins.NewDataSourceVINSAudits, + vins.NewDataSourceVINSExtNetList, + vins.NewDataSourceVINSIPList, + vins.NewDataSourceVINSList, + vins.NewDataSourceVINSListDeleted, + vins.NewDataSourceVINSNATRuleList, + vins.NewDataSourceVINSStaticRoute, + vins.NewDataSourceVINSStaticRouteList, + + cbaccount.NewDataSourceAccount, + cbaccount.NewDataSourceAccountList, + cbaccount.NewDataSourceAccountVinsList, + + cbStack.NewDataSourceStack, + cbStack.NewDataSourceStackList, + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go new file mode 100644 index 0000000..b7d6da4 --- /dev/null +++ b/internal/provider/provider.go @@ -0,0 +1,228 @@ +package provider + +import ( + "context" + "crypto/tls" + "net/http" + + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/provider/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + 
"github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + log "github.com/sirupsen/logrus" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + sdk_config "repository.basistech.ru/BASIS/decort-golang-sdk/config" +) + +// enumerated constants that define authentication modes for Configure +const ( + MODE_UNDEF = iota // this is the invalid mode - it should never be seen + MODE_LEGACY + MODE_DECS3O + MODE_JWT + MODE_BVS +) + +// Ensure DynamixProvider satisfies various provider interfaces. +var _ provider.Provider = &DynamixProvider{} + +// DynamixProvider defines the provider implementation. +type DynamixProvider struct { + // version is set to the provider version on release, "dev" when the + // provider is built and ran locally, and "test" when running acceptance + // testing. + version string +} + +// dynamixProviderModel describes the provider data model. +type dynamixProviderModel struct { + Authenticator types.String `tfsdk:"authenticator"` + Oauth2Url types.String `tfsdk:"oauth2_url"` + ControllerUrl types.String `tfsdk:"controller_url"` + User types.String `tfsdk:"user"` + Password types.String `tfsdk:"password"` + BvsUser types.String `tfsdk:"bvs_user"` + BvsPassword types.String `tfsdk:"bvs_password"` + Domain types.String `tfsdk:"domain"` + AppId types.String `tfsdk:"app_id"` + AppSecret types.String `tfsdk:"app_secret"` + Jwt types.String `tfsdk:"jwt"` + AllowUnverifiedSsl types.Bool `tfsdk:"allow_unverified_ssl"` + PathConfig types.String `tfsdk:"path_config"` + PathToken types.String `tfsdk:"path_token"` + TimeToRefresh types.Int64 `tfsdk:"time_to_refresh"` +} + +func (p *DynamixProvider) Metadata(_ context.Context, _ provider.MetadataRequest, resp *provider.MetadataResponse) { + resp.TypeName = "dynamix" + resp.Version = p.version +} + +func (p *DynamixProvider) Schema(_ context.Context, _ provider.SchemaRequest, resp *provider.SchemaResponse) { + resp.Schema = schema.Schema{ 
+ Attributes: map[string]schema.Attribute{ + "authenticator": schema.StringAttribute{ + MarkdownDescription: "Authentication mode to use when connecting to DECORT cloud API. Should be one of 'decs3o', 'legacy', 'jwt' or 'bvs'.", + Required: true, + Validators: []validator.String{ + stringvalidator.OneOfCaseInsensitive("decs3o", "legacy", "jwt", "bvs"), // ignore case while validating + }, + }, + "oauth2_url": schema.StringAttribute{ + MarkdownDescription: "OAuth2 application URL in 'decs3o' and 'bvs' authentication mode.", + Optional: true, + }, + "controller_url": schema.StringAttribute{ + MarkdownDescription: "URL of DECORT Cloud controller to use. API calls will be directed to this URL.", + Required: true, + }, + "user": schema.StringAttribute{ + MarkdownDescription: "User name for DECORT cloud API operations in 'legacy' authentication mode.", + Optional: true, + }, + "password": schema.StringAttribute{ + MarkdownDescription: "User password for DECORT cloud API operations in 'legacy' authentication mode.", + Optional: true, + }, + "bvs_user": schema.StringAttribute{ + MarkdownDescription: "User name for DECORT cloud API operations in 'bvs' authentication mode.", + Optional: true, + }, + "bvs_password": schema.StringAttribute{ + MarkdownDescription: "User password for DECORT cloud API operations in 'bvs' authentication mode.", + Optional: true, + }, + "domain": schema.StringAttribute{ + MarkdownDescription: "User password for DECORT cloud API operations in 'bvs' authentication mode.", + Optional: true, + }, + "app_id": schema.StringAttribute{ + MarkdownDescription: "Application ID to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.", + Optional: true, + }, + "app_secret": schema.StringAttribute{ + MarkdownDescription: "Application secret to access DECORT cloud API in 'decs3o' and 'bvs' authentication mode.", + Optional: true, + }, + "jwt": schema.StringAttribute{ + MarkdownDescription: "JWT to access DECORT cloud API in 'jwt' authentication 
mode.", + Optional: true, + }, + "allow_unverified_ssl": schema.BoolAttribute{ + MarkdownDescription: "If true, DECORT API will not verify SSL certificates. Use this with caution and in trusted environments only! Default is false.", + Optional: true, + }, + "path_config": schema.StringAttribute{ + MarkdownDescription: "The path of the configuration file entry.", + Optional: true, + }, + "path_token": schema.StringAttribute{ + MarkdownDescription: "The path of the token file entry.", + Optional: true, + }, + "time_to_refresh": schema.Int64Attribute{ + MarkdownDescription: "The number of minutes before the expiration of the token, a refresh will be made.", + Optional: true, + }, + }, + } +} + +func (p *DynamixProvider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + // Retrieve provider data from configuration + var config dynamixProviderModel + diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + log.Debugf("Provider Configure error after req.Config.Get") + return + } + + // Set up default values, values from env and save user provided values into decortConfig + dynamixConfig := dynamixProviderConfig{} + dynamixConfig.new(config) + + // Validate and set up authentication mode + mode, err := dynamixConfig.validateAuthenticator() + if err != nil { + log.Debug(err) + return + } + + // Set up client transport + if dynamixConfig.allowUnverifiedSsl { + log.Warn("Provider Configure: allow_unverified_ssl is set - will not check certificates!") + transCfg := &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}} //nolint:gosec + dynamixConfig.cc_client = &http.Client{ + Transport: transCfg, + } + } else { + dynamixConfig.cc_client = &http.Client{} + } + + // Set up clients for data sources and resources depending on authentication mode + switch mode { + case MODE_LEGACY: + legacyConf := sdk_config.LegacyConfig{ + Username: dynamixConfig.user, + 
Password: dynamixConfig.password, + DecortURL: dynamixConfig.controllerUrl, + SSLSkipVerify: dynamixConfig.allowUnverifiedSsl, + } + legacyClient := decort.NewLegacy(legacyConf) + resp.DataSourceData = legacyClient + resp.ResourceData = legacyClient + case MODE_JWT: + case MODE_DECS3O: + sdkConf := sdk_config.Config{ + AppID: dynamixConfig.appId, + AppSecret: dynamixConfig.appSecret, + SSOURL: dynamixConfig.oauth2Url, + DecortURL: dynamixConfig.controllerUrl, + SSLSkipVerify: dynamixConfig.allowUnverifiedSsl, + } + decortClient := decort.New(sdkConf) + resp.DataSourceData = decortClient + resp.ResourceData = decortClient + case MODE_BVS: + bvsConf := sdk_config.BVSConfig{ + AppID: dynamixConfig.appId, + AppSecret: dynamixConfig.appSecret, + SSOURL: dynamixConfig.oauth2Url, + DecortURL: dynamixConfig.controllerUrl, + SSLSkipVerify: dynamixConfig.allowUnverifiedSsl, + Username: dynamixConfig.bvsUser, + Password: dynamixConfig.bvsPassword, + Domain: dynamixConfig.domain, + Token: dynamixConfig.token, + PathCfg: dynamixConfig.pathConfig, + PathToken: dynamixConfig.pathToken, + TimeToRefresh: dynamixConfig.timeToRefresh, + } + bvsClient := decort.NewBVS(bvsConf) + resp.DataSourceData = bvsClient + resp.ResourceData = bvsClient + default: + log.Debugf("unknown authenticator mode code %d provided", mode) + return + } +} + +func (p *DynamixProvider) Resources(_ context.Context) []func() resource.Resource { + return newResourcesMap() +} + +func (p *DynamixProvider) DataSources(_ context.Context) []func() datasource.DataSource { + return newDataSourcesMap() +} + +func New(version string) func() provider.Provider { + return func() provider.Provider { + return &DynamixProvider{ + version: version, + } + } +} diff --git a/internal/provider/provider_validation.go b/internal/provider/provider_validation.go new file mode 100644 index 0000000..f567be4 --- /dev/null +++ b/internal/provider/provider_validation.go @@ -0,0 +1,178 @@ +package provider + +import ( + "fmt" + "net/http" + 
"os" + "strings" + + log "github.com/sirupsen/logrus" + sdk_config "repository.basistech.ru/BASIS/decort-golang-sdk/config" +) + +// dynamixProviderConfig helps organize provider validation +type dynamixProviderConfig struct { + authenticator string + oauth2Url string + controllerUrl string + user string + password string + bvsUser string + bvsPassword string + domain string + appId string + appSecret string + jwt string + allowUnverifiedSsl bool + pathConfig string + pathToken string + timeToRefresh int64 + token sdk_config.Token + cc_client *http.Client +} + +// new sets up default values, values from env and save user provided values for decort provider into decortConfig: +// authenticator, oauth2Url, controllerUrl transformed to lowercase; +// oauth2Url, user, password, bvsUser, bvsPassword, domain, appId, appSecret, jwt uploaded from env if not provided; +// allowUnverifiedSsl default value set as false. +func (d *dynamixProviderConfig) new(config dynamixProviderModel) { + d.authenticator = strings.ToLower(config.Authenticator.ValueString()) + + if config.Oauth2Url.IsUnknown() { + d.oauth2Url = os.Getenv("DECORT_OAUTH2_URL") + } else { + d.oauth2Url = config.Oauth2Url.ValueString() + } + d.oauth2Url = strings.ToLower(d.oauth2Url) + + d.controllerUrl = strings.ToLower(config.ControllerUrl.ValueString()) + if d.controllerUrl == "" { + log.Debugf("empty DECORT cloud controller URL provided") + return + } + + if config.User.IsUnknown() { + d.user = os.Getenv("DECORT_USER") + } else { + d.user = config.User.ValueString() + } + + if config.Password.IsUnknown() { + d.password = os.Getenv("DECORT_PASSWORD") + } else { + d.password = config.Password.ValueString() + } + + if config.BvsUser.IsUnknown() { + d.bvsUser = os.Getenv("DECORT_BVS_USER") + } else { + d.bvsUser = config.BvsUser.ValueString() + } + + if config.BvsPassword.IsUnknown() { + d.bvsPassword = os.Getenv("DECORT_BVS_PASSWORD") + } else { + d.bvsPassword = config.BvsPassword.ValueString() + } + + if 
config.Domain.IsUnknown() { + d.domain = os.Getenv("DECORT_DOMAIN") + } else { + d.domain = config.Domain.ValueString() + } + + if config.AppId.IsUnknown() { + d.appId = os.Getenv("DECORT_APP_ID") + } else { + d.appId = config.AppId.ValueString() + } + + if config.AppSecret.IsUnknown() { + d.appSecret = os.Getenv("DECORT_APP_SECRET") + } else { + d.appSecret = config.AppSecret.ValueString() + } + + if config.Jwt.IsUnknown() { + d.jwt = os.Getenv("DECORT_JWT") + } else { + d.jwt = config.Jwt.ValueString() + } + + if config.AllowUnverifiedSsl.IsUnknown() { + d.allowUnverifiedSsl = false // default false + } else { + d.allowUnverifiedSsl = config.AllowUnverifiedSsl.ValueBool() + } + + if !config.PathConfig.IsUnknown() { + d.pathConfig = config.PathConfig.ValueString() + } + + if !config.PathToken.IsUnknown() { + d.pathToken = config.PathToken.ValueString() + } + + if !config.TimeToRefresh.IsUnknown() { + d.timeToRefresh = config.TimeToRefresh.ValueInt64() + } + + d.token = sdk_config.Token{} +} + +// validateAuthenticator validates authenticator and other parameters from provider configuration provided by user. +// If successful, the mode and nil is returned. If unsuccessful for any +// reason, the method will return mode = MODE_UNDEF and error. 
+func (d *dynamixProviderConfig) validateAuthenticator() (int, error) { + var mode = MODE_UNDEF + switch d.authenticator { + case "jwt": + if d.jwt == "" { + return mode, fmt.Errorf("authenticator mode 'jwt' specified but no JWT provided") + } + mode = MODE_JWT + case "decs3o": + if d.oauth2Url == "" { + return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no OAuth2 URL provided") + } + if d.appId == "" { + return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no Application ID provided") + } + if d.appSecret == "" { + return mode, fmt.Errorf("authenticator mode 'decs3o' specified but no Secret ID provided") + } + mode = MODE_DECS3O + case "legacy": + if d.user == "" { + return mode, fmt.Errorf("authenticator mode 'legacy' specified but no user provided") + } + if d.password == "" { + return mode, fmt.Errorf("authenticator mode 'legacy' specified but no password provided") + } + mode = MODE_LEGACY + case "bvs": + if d.bvsUser == "" { + return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs user provided") + } + if d.bvsPassword == "" { + return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs password provided") + } + if d.oauth2Url == "" { + return mode, fmt.Errorf("authenticator mode 'bvs' specified but no bvs URL provided") + } + if d.appId == "" { + return mode, fmt.Errorf("authenticator mode 'bvs' specified but no Application ID provided") + } + if d.appSecret == "" { + return mode, fmt.Errorf("authenticator mode 'bvs' specified but no Secret ID provided") + } + if d.domain == "" { + return mode, fmt.Errorf("authenticator mode 'bvs' specified but no Domain provided") + } + mode = MODE_BVS + default: + return mode, fmt.Errorf("unknown authenticator mode %q provided", d.authenticator) + } + + return mode, nil +} diff --git a/internal/provider/resources_map.go b/internal/provider/resources_map.go new file mode 100644 index 0000000..53d3e3c --- /dev/null +++ b/internal/provider/resources_map.go @@ -0,0 
+1,40 @@ +package provider + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/flipgroup" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/image" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/k8s" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/kvmvm" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/lb" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/rg" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/vins" +) + +func newResourcesMap() []func() resource.Resource { + return []func() resource.Resource{ + account.NewResourceAccount, + image.NewResourceImage, + image.NewResourceImageVirtual, + disks.NewResourceDisk, + disks.NewResourceDiskSnapshot, + disks.NewResourceDiskReplications, + flipgroup.NewResourceFlipgroup, + k8s.NewResourceK8SCP, + k8s.NewResourceK8SWG, + kvmvm.NewResourceCompute, + lb.NewResourceLB, + lb.NewResourceLBBackend, + lb.NewResourceLBBackendServer, + lb.NewResourceLBFrontend, + lb.NewResourceLBFrontendBind, + rg.NewResourceRG, + vins.NewResourceVINS, + vins.NewResourceVINSStaticRoute, + bservice.NewResourceBService, + bservice.NewResourceBServiceGroup, + } +} diff --git a/internal/service/cloudapi/account/data_source_account.go b/internal/service/cloudapi/account/data_source_account.go new file mode 100644 index 0000000..5478999 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account.go @@ -0,0 +1,91 @@ +package account + 
+import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccount{} +) + +func NewDataSourceAccount() datasource.DataSource { + return &dataSourceAccount{} +} + +// dataSourceAccount is the data source implementation. +type dataSourceAccount struct { + client *decort.DecortClient +} + +func (d *dataSourceAccount) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccount: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccount: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccount: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccount: Error flatten data source account") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccount: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccount", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccount) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccount(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccount) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccount) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccount") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccount successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_audits_list.go b/internal/service/cloudapi/account/data_source_account_audits_list.go new file mode 100644 index 0000000..ca72610 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_audits_list.go @@ -0,0 +1,91 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountAuditsList{} +) + +func NewDataSourceAccountAuditsList() datasource.DataSource { + return &dataSourceAccountAuditsList{} +} + +// dataSourceAccountAuditsList is the data source implementation. 
+type dataSourceAccountAuditsList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountAuditsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountAuditsListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountAuditsList: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountAuditsList: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountAuditsDataSourceList(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountAuditsList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountAuditsList", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountAuditsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountAuditsList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountAuditsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_audits_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountAuditsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountAuditsList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountAuditsList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_computes_list.go b/internal/service/cloudapi/account/data_source_account_computes_list.go new file mode 100644 index 0000000..4c576e0 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_computes_list.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountComputesList{} +) + +func NewDataSourceAccountComputesList() datasource.DataSource { + return &dataSourceAccountComputesList{} +} + +// dataSourceAccountComputesList is the data source implementation. +type dataSourceAccountComputesList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountComputesList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountComputesListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountComputesList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountComputesList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountComputesListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error flatten data source account computes list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountComputesList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountComputesList") +} + +func (d *dataSourceAccountComputesList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountComputesList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountComputesList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_computes_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountComputesList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountComputesList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountComputesList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_consumed_unit_by_type.go b/internal/service/cloudapi/account/data_source_account_consumed_unit_by_type.go new file mode 100644 index 0000000..3ca9ac5 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_consumed_unit_by_type.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountConsumedUnitsByType{} +) + +func NewDataSourceAccountConsumedUnitsByType() datasource.DataSource { + return &dataSourceAccountConsumedUnitsByType{} +} + +// dataSourceAccountConsumedUnitsByType is the data source implementation. 
+type dataSourceAccountConsumedUnitsByType struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountConsumedUnitsByType) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountConsumedUnitsByTypeModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUnitsByType: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountConsumedUnitsByType: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUnitsByType: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountConsumedUnitsByType: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountConsumedUnitsByTypeDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUnitsByType: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUnitsByType: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountConsumedUnitsByType") +} + +func (d *dataSourceAccountConsumedUnitsByType) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountConsumedUnitsByType(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountConsumedUnitsByType) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_consumed_units_by_type" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountConsumedUnitsByType) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountConsumedUnitsByType") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountConsumedUnitsByType successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_consumed_units.go b/internal/service/cloudapi/account/data_source_account_consumed_units.go new file mode 100644 index 0000000..97450b0 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_consumed_units.go @@ -0,0 +1,91 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountConsumedUints{} +) + +func NewDataSourceAccountConsumedUnits() datasource.DataSource { + return &dataSourceAccountConsumedUints{} +} + +// dataSourceAccountConsumedUints is the data source implementation. +type dataSourceAccountConsumedUints struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountConsumedUints) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountConsumedUnitsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUints: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountConsumedUints: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUints: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountConsumedUints: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountConsumedUnitsDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUints: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountConsumedUints: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountConsumedUints", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountConsumedUints) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountConsumedUnits(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountConsumedUints) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_consumed_units" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountConsumedUints) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountConsumedUints") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountConsumedUints successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_disks_list.go b/internal/service/cloudapi/account/data_source_account_disks_list.go new file mode 100644 index 0000000..f4a888a --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_disks_list.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountDisksList{} +) + +func NewDataSourceAccountDisksList() datasource.DataSource { + return &dataSourceAccountDisksList{} +} + +// dataSourceAccountDisksList is the data source implementation. 
+type dataSourceAccountDisksList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountDisksList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountDisksListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountDisksList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountDisksList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountDisksListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error flatten data source account disks list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountDisksList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountDisksList") +} + +func (d *dataSourceAccountDisksList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountDisksList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountDisksList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_disks_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountDisksList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountDisksList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountDisksList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/data_source_account_flipgroups_list.go new file mode 100644 index 0000000..c121b24 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_flipgroups_list.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountFlipgroupsList{} +) + +func NewDataSourceAccountFlipgroupsList() datasource.DataSource { + return &dataSourceAccountFlipgroupsList{} +} + +// dataSourceAccountFlipgroupsList is the data source implementation. +type dataSourceAccountFlipgroupsList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountFlipgroupsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountFlipgroupsListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountFlipgroupsList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountFlipgroupsList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountFlipgroupsListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountFlipgroupsList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountFlipgroupsList") +} + +func (d *dataSourceAccountFlipgroupsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountFlipgroupsList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountFlipgroupsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_flipgroups_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountFlipgroupsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountFlipgroupsList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountFlipgroupsList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_get_resource_consumption.go b/internal/service/cloudapi/account/data_source_account_get_resource_consumption.go new file mode 100644 index 0000000..07f5758 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_get_resource_consumption.go @@ -0,0 +1,91 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountGetResourceConsumption{} +) + +func NewDataSourceAccountGetResourceConsumption() datasource.DataSource { + return &dataSourceAccountGetResourceConsumption{} +} + +// dataSourceAccountGetResourceConsumption is the data source implementation. 
+type dataSourceAccountGetResourceConsumption struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountGetResourceConsumption) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.AccountGetResourceConsumptionModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumption: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumption: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountGetResourceConsumptionDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error flatten data source account") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumption: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountGetResourceConsumption", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountGetResourceConsumption) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountGetResourceConsumption(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountGetResourceConsumption) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_resource_consumption_get" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountGetResourceConsumption) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountGetResourceConsumption") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountGetResourceConsumption successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_get_resource_consumption_list.go b/internal/service/cloudapi/account/data_source_account_get_resource_consumption_list.go new file mode 100644 index 0000000..a992425 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_get_resource_consumption_list.go @@ -0,0 +1,89 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountGetResourceConsumptionList{} +) + +func NewDataSourceAccountGetResourceConsumptionList() datasource.DataSource { + return &dataSourceAccountGetResourceConsumptionList{} +} + +// dataSourceAccountGetResourceConsumptionList is the data source implementation. +type dataSourceAccountGetResourceConsumptionList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountGetResourceConsumptionList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.AccountGetResourceConsumptionListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumptionList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error set timeout")
		return
	}
	tflog.Info(ctx, "Read dataSourceAccountGetResourceConsumptionList: set timeouts successfully", map[string]any{
		"readTimeout": readTimeout})

	ctx, cancel := context.WithTimeout(ctx, readTimeout)
	defer cancel()

	// Map response body to schema
	resp.Diagnostics.Append(flattens.AccountGetResourceConsumptionList(ctx, &state, d.client)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error flatten data source account")
		return
	}

	// Set refreshed state
	resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
	if resp.Diagnostics.HasError() {
		tflog.Error(ctx, "Read dataSourceAccountGetResourceConsumptionList: Error set state")
		return
	}
	tflog.Info(ctx, "End read dataSourceAccountGetResourceConsumptionList")
}

// Schema defines the data source schema, including the standard timeouts block.
func (d *dataSourceAccountGetResourceConsumptionList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) {
	resp.Schema = schema.Schema{
		// NOTE(review): this helper is named MakeSchemaDataSourceAccountGetResourceListConsumption
		// ("...ListConsumption") while sibling data sources use the "...ConsumptionList" word order —
		// confirm against the schemas package and consider renaming there for consistency.
		Attributes: schemas.MakeSchemaDataSourceAccountGetResourceListConsumption(),
		Blocks: map[string]schema.Block{
			"timeouts": timeouts.Block(ctx),
		},
	}
}

// Metadata sets the registered data source type name.
func (d *dataSourceAccountGetResourceConsumptionList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) {
	resp.TypeName = req.ProviderTypeName + "_account_resource_consumption_list"
}

// Configure adds the provider configured client to the data source.
+func (d *dataSourceAccountGetResourceConsumptionList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountGetResourceConsumptionList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountGetResourceConsumptionList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_list.go b/internal/service/cloudapi/account/data_source_account_list.go new file mode 100644 index 0000000..dafc792 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_list.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountList{} +) + +func NewDataSourceAccountList() datasource.DataSource { + return &dataSourceAccountList{} +} + +// dataSourceAccountList is the data source implementation. 
+type dataSourceAccountList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountList") +} + +func (d *dataSourceAccountList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_list_deleted.go b/internal/service/cloudapi/account/data_source_account_list_deleted.go new file mode 100644 index 0000000..955447b --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_list_deleted.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountListDeleted{} +) + +func NewDataSourceAccountListDeleted() datasource.DataSource { + return &dataSourceAccountListDeleted{} +} + +// dataSourceAccountListDeleted is the data source implementation. +type dataSourceAccountListDeleted struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountListDeletedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountListDeleted: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountListDeletedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountListDeleted") +} + +func (d *dataSourceAccountListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_deleted_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountListDeleted successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_reserved_units.go b/internal/service/cloudapi/account/data_source_account_reserved_units.go new file mode 100644 index 0000000..3d91bc2 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_reserved_units.go @@ -0,0 +1,91 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountReservedUnits{} +) + +func NewDataSourceAccountReservedUnits() datasource.DataSource { + return &dataSourceAccountReservedUnits{} +} + +// dataSourceAccountReservedUnits is the data source implementation. +type dataSourceAccountReservedUnits struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountReservedUnits) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountReservedUnitsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountReservedUnits: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountReservedUnits: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountReservedUnits: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountReservedUnits: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountReservedUnitsDataSource(ctx, &state, d.client)...)
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountReservedUnits: Error flatten data source account") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountReservedUnits: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountReservedUnits", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountReservedUnits) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountReservedUnits(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountReservedUnits) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_reserved_units" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountReservedUnits) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountReservedUnits") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountReservedUnits successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_rg_list.go b/internal/service/cloudapi/account/data_source_account_rg_list.go new file mode 100644 index 0000000..b58ce3f --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_rg_list.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountRGList{} +) + +func NewDataSourceAccountRGList() datasource.DataSource { + return &dataSourceAccountRGList{} +} + +// dataSourceAccountRGList is the data source implementation.
+type dataSourceAccountRGList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountRGList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountRGListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountRGList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountRGList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountRGListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountRGList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountRGList") +} + +func (d *dataSourceAccountRGList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountRGList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountRGList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_rg_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountRGList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountRGList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountRGList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_templates_list.go b/internal/service/cloudapi/account/data_source_account_templates_list.go new file mode 100644 index 0000000..c5b48b0 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_templates_list.go @@ -0,0 +1,88 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountTemplatesList{} +) + +func NewDataSourceAccountTemplatesList() datasource.DataSource { + return &dataSourceAccountTemplatesList{} +} + +// dataSourceAccountTemplatesList is the data source implementation. +type dataSourceAccountTemplatesList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountTemplatesList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountTemplatesListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountTemplatesList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceAccountTemplatesList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountTemplatesList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountTemplatesList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountTemplatesListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountTemplatesList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountTemplatesList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountTemplatesList") +} + +func (d *dataSourceAccountTemplatesList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountTemplatesList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountTemplatesList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_templates_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceAccountTemplatesList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountTemplatesList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountTemplatesList successfully") +} diff --git a/internal/service/cloudapi/account/data_source_account_vins_list.go b/internal/service/cloudapi/account/data_source_account_vins_list.go new file mode 100644 index 0000000..403df34 --- /dev/null +++ b/internal/service/cloudapi/account/data_source_account_vins_list.go @@ -0,0 +1,91 @@ +package account + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceAccountVinsList{} +) + +func NewDataSourceAccountVinsList() datasource.DataSource { + return &dataSourceAccountVinsList{} +} + +// dataSourceAccountVinsList is the data source implementation. +type dataSourceAccountVinsList struct { + client *decort.DecortClient +} + +func (d *dataSourceAccountVinsList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceAccountVinsListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountVinsList: Error get state") + return + } + accountId := uint64(state.AccountID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceAccountVinsList: got state successfully", map[string]any{"account_id": accountId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountVinsList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceAccountVinsList: set timeouts successfully", map[string]any{ + "account_id": accountId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.AccountVinsListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountVinsList: Error flatten data source account") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceAccountVinsList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceAccountVinsList", map[string]any{"account_id": accountId}) +} + +func (d *dataSourceAccountVinsList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceAccountVinsList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceAccountVinsList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account_vins_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceAccountVinsList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceAccountVinsList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceAccountVinsList successfully") +} diff --git a/internal/service/cloudapi/account/flattens/flatten_account_list_deleted.go b/internal/service/cloudapi/account/flattens/flatten_account_list_deleted.go new file mode 100644 index 0000000..2b96a19 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_account_list_deleted.go @@ -0,0 +1,84 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountListDeletedDataSource flattens data source for account list deleted. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountListDeletedDataSource(ctx context.Context, state *models.DataSourceAccountListDeletedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountListDeletedDataSource") + + diags := diag.Diagnostics{} + + accListDel, err := utilities.AccountListDeletedCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account list deleted", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountListDeletedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountListDeletedModel{ + ByID: state.ByID, + Name: state.Name, + ACL: state.ACL, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(accListDel.EntryCount)), + } + + items := make([]models.ItemAccountListDeletedModel, 0, len(accListDel.Data)) + for _, item := range accListDel.Data { + i := models.ItemAccountListDeletedModel{ + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + AccountID: types.Int64Value(int64(item.ID)), + AccountName: types.StringValue(item.Name), + Status: types.StringValue(item.Status), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + } + + i.ComputeFeatures, diags = types.ListValueFrom(ctx, types.StringType, item.ComputeFeatures) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountListDeletedDataSource: cannot flatten item.ComputeFeatures to i.ComputeFeatures", diags)) + } + + aclList := make([]models.RecordACLModel, 0, len(item.ACL)) + for _, acl := range item.ACL { + a := models.RecordACLModel{ + Explicit: types.BoolValue(acl.IsExplicit), + GUID: types.StringValue(acl.GUID), + Right: types.StringValue(acl.Rights), + Status: types.StringValue(acl.Status), + Type: types.StringValue(acl.Type), + UserGroupID: types.StringValue(acl.UgroupID), + } + aclList = append(aclList, a) + } + + 
i.ACL = aclList + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountListDeletedDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountListDeletedDataSource") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account.go new file mode 100644 index 0000000..7a01097 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account.go @@ -0,0 +1,162 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountDataSource flattens data source for account. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountDataSource(ctx context.Context, state *models.DataSourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountDataSource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + recordAccount, err := utilities.AccountDataSourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountDataSource: before flatten", map[string]any{"account_id": accountId, "recordAccount": recordAccount}) + + id := uuid.New() + *state = models.DataSourceAccountModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + DCLocation: types.StringValue(recordAccount.DCLocation), + CKey: types.StringValue(recordAccount.CKey), + ACL: flattenACLInAccount(ctx, recordAccount.ACL), + Company: types.StringValue(recordAccount.Company), + CompanyURL: types.StringValue(recordAccount.CompanyURL), + Computes: flattenComputes(ctx, recordAccount.Computes), + CPUAllocationParameter: types.StringValue(recordAccount.CPUAllocationParameter), + CPUAllocationRatio: types.Float64Value(recordAccount.CPUAllocationRatio), + CreatedBy: types.StringValue(recordAccount.CreatedBy), + CreatedTime: types.Int64Value(int64(recordAccount.CreatedTime)), + DeactivationTime: types.Float64Value(recordAccount.DeactivationTime), + DeletedBy: types.StringValue(recordAccount.DeletedBy), + DeletedTime: types.Int64Value(int64(recordAccount.DeletedTime)), + DisplayName: types.StringValue(recordAccount.DisplayName), + GUID: types.Int64Value(int64(recordAccount.GUID)), + Machines: flattenMachines(ctx, recordAccount.Machines), + AccountName: types.StringValue(recordAccount.Name), + ResourceLimits: flattenResourceLimits(ctx, recordAccount.ResourceLimits), + SendAccessEmails: types.BoolValue(recordAccount.SendAccessEmails), + Status: 
types.StringValue(recordAccount.Status), + UpdatedTime: types.Int64Value(int64(recordAccount.UpdatedTime)), + Version: types.Int64Value(int64(recordAccount.Version)), + VINSes: types.Int64Value(int64(recordAccount.VINSes)), + } + + state.VINS, diags = types.ListValueFrom(ctx, types.Int64Type, recordAccount.VINS) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountDataSource: cannot flatten recordAccount.VINS to state.VINS", diags)) + } + + state.ComputeFeatures, diags = types.ListValueFrom(ctx, types.StringType, recordAccount.ComputeFeatures) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountDataSource: cannot flatten recordAccount.ComputeFeatures to state.ComputeFeatures", diags)) + } + + tflog.Info(ctx, "flattens.AccountDataSource: after flatten", map[string]any{"account_id": state.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.AccountDataSource", map[string]any{"account_id": state.Id.ValueString()}) + return nil +} + +func flattenComputes(ctx context.Context, computes account.Computes) types.Object { + tflog.Info(ctx, "Start flattenComputes") + temp := models.ComputesInAccountModel{ + Started: types.Int64Value(int64(computes.Started)), + Stopped: types.Int64Value(int64(computes.Stopped)), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemComputesInAccount, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenComputes struct to obj", err)) + } + + tflog.Info(ctx, "End flattenComputes") + return res +} + +func flattenMachines(ctx context.Context, machines account.Machines) types.Object { + tflog.Info(ctx, "Start flattenMachines") + temp := models.MachinesInAccountModel{ + Running: types.Int64Value(int64(machines.Running)), + Halted: types.Int64Value(int64(machines.Halted)), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemMachinesInAccount, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenMachines struct to obj", err)) + } + + tflog.Info(ctx, "End flattenMachines") + 
return res +} + +func flattenResourceLimits(ctx context.Context, limits account.ResourceLimits) types.Object { + tflog.Info(ctx, "Start flattenResourceLimits") + temp := models.ResourceLimitsInAccountModel{ + CUC: types.Float64Value(limits.CUC), + CUD: types.Float64Value(limits.CUD), + CUI: types.Float64Value(limits.CUI), + CUM: types.Float64Value(limits.CUM), + CUDM: types.Float64Value(limits.CUDM), + CUNP: types.Float64Value(limits.CUNP), + GPUUnits: types.Float64Value(limits.GPUUnits), + } + + res, err := types.ObjectValueFrom(ctx, models.ItemResourceLimitsInAccount, temp) + if err != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenResourceLimits struct to obj", err)) + } + + tflog.Info(ctx, "End flattenResourceLimits") + return res +} + +func flattenACLInAccount(ctx context.Context, aclList []account.RecordACL) types.List { + tflog.Info(ctx, "Start flattenACLInAccount") + tempSlice := make([]types.Object, 0, len(aclList)) + for _, item := range aclList { + temp := models.ACLInAccountModel{ + Explicit: types.BoolValue(item.IsExplicit), + GUID: types.StringValue(item.GUID), + Right: types.StringValue(item.Rights), + Status: types.StringValue(item.Status), + Type: types.StringValue(item.Type), + UserGroupID: types.StringValue(item.UgroupID), + CanBeDeleted: types.BoolValue(item.CanBeDeleted), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemACLInAccount, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenACLInAccount struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemACLInAccount}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenACLInAccount", diags)) + } + + tflog.Info(ctx, "End flattenACLInAccount") + return res +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_audits_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_audits_list.go new file 
mode 100644 index 0000000..8ef0265 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_audits_list.go @@ -0,0 +1,59 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountAuditsDataSourceList flattens data source for account audits. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountAuditsDataSourceList(ctx context.Context, state *models.DataSourceAccountAuditsListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountAuditsDataSourceList") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + auditsList, err := utilities.AccountAuditsListDataSourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account audits with account ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountAuditsDataSourceList: before flatten", map[string]any{"account_id": accountId}) + + id := uuid.New() + *state = models.DataSourceAccountAuditsListModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + } + + items := make([]models.ItemAuditModel, 0, len(*auditsList)) + for _, item := range *auditsList { + i := models.ItemAuditModel{ + Call: types.StringValue(item.Call), + ResponseTime: types.Float64Value(item.ResponseTime), + StatusCode: types.Int64Value(int64(item.StatusCode)), + Timestamp: 
types.Float64Value(item.Timestamp), + User: types.StringValue(item.User), + } + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountAuditsDataSourceList: after flatten", map[string]any{"account_id": state.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.AccountAuditsDataSourceList", map[string]any{"account_id": state.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_computes_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_computes_list.go new file mode 100644 index 0000000..1042694 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_computes_list.go @@ -0,0 +1,83 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountComputesListDataSource flattens data source for account computes list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountComputesListDataSource(ctx context.Context, state *models.DataSourceAccountComputesListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountComputesListDataSource") + + diags := diag.Diagnostics{} + + computesList, err := utilities.AccountComputesListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account computes list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountComputesListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountComputesListModel{ + AccountID: state.AccountID, + ComputeID: state.ComputeID, + Name: state.Name, + RGName: state.RGName, + RGID: state.RGID, + TechStatus: state.TechStatus, + IPAddress: state.IPAddress, + ExtNetName: state.ExtNetName, + ExtNetID: state.ExtNetID, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(computesList.EntryCount)), + } + + items := make([]models.ItemComputeModel, 0, len(computesList.Data)) + for _, item := range computesList.Data { + i := models.ItemComputeModel{ + AccountID: types.Int64Value(int64(item.AccountID)), + AccountName: types.StringValue(item.AccountName), + CPUs: types.Int64Value(int64(item.CPUs)), + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + ComputeID: types.Int64Value(int64(item.ComputeID)), + ComputeName: types.StringValue(item.ComputeName), + RAM: types.Int64Value(int64(item.RAM)), + Registered: types.BoolValue(item.Registered), + RGID: types.Int64Value(int64(item.RGID)), + RGName: types.StringValue(item.RGName), + Status: types.StringValue(item.Status), + TechStatus: types.StringValue(item.TechStatus), + TotalDisksSize: types.Int64Value(int64(item.TotalDisksSize)), + 
UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + UserManaged: types.BoolValue(item.UserManaged), + VINSConnected: types.Int64Value(int64(item.VINSConnected)), + } + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountComputesListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountComputesListDataSource") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units.go new file mode 100644 index 0000000..92d599a --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units.go @@ -0,0 +1,49 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountConsumedUnitsDataSource flattens data source for account consumed units. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountConsumedUnitsDataSource(ctx context.Context, state *models.DataSourceAccountConsumedUnitsModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountConsumedUnitsDataSource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + limits, err := utilities.AccountConsumedUnitsDataSourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account consumed units with account ID %v", accountId), err.Error()) + return diags + } + + id := uuid.New() + *state = models.DataSourceAccountConsumedUnitsModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + CUC: types.Float64Value(limits.CUC), + CUD: types.Float64Value(limits.CUD), + CUDM: types.Float64Value(limits.CUDM), + CUI: types.Float64Value(limits.CUI), + CUM: types.Float64Value(limits.CUM), + CUNP: types.Float64Value(limits.CUNP), + GPUUnits: types.Float64Value(limits.GPUUnits), + } + + tflog.Info(ctx, "End flattens.AccountConsumedUnitsDataSource", map[string]any{"account_id": state.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units_by_type.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units_by_type.go new file mode 100644 index 0000000..b3cbf79 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_consumed_units_by_type.go @@ -0,0 +1,47 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountConsumedUnitsByTypeDataSource flattens data source for account consumed units by type. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountConsumedUnitsByTypeDataSource(ctx context.Context, state *models.DataSourceAccountConsumedUnitsByTypeModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountConsumedUnitsByTypeDataSource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + cuType := state.CUType.ValueString() + + res, err := utilities.AccountConsumedUnitsByTypeDataSourceCheckPresence(ctx, accountId, cuType, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account consumed units by type with account ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountConsumedUnitsByTypeDataSource: before flatten", map[string]any{"account_id": accountId, "res": res}) + + id := uuid.New() + *state = models.DataSourceAccountConsumedUnitsByTypeModel{ + AccountID: state.AccountID, + CUType: state.CUType, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + CUResult: types.Float64Value(res), + } + + tflog.Info(ctx, "End flattens.AccountConsumedUnitsByTypeDataSource", map[string]any{"account_id": state.Id.ValueString()}) + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_disks_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_disks_list.go new file mode 100644 index 0000000..142f1d7 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_disks_list.go @@ -0,0 +1,66 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountDisksListDataSource flattens data source for account disks list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountDisksListDataSource(ctx context.Context, state *models.DataSourceAccountDisksListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountDisksListDataSource") + + diags := diag.Diagnostics{} + + disksList, err := utilities.AccountDisksListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("flattens.AccountDisksListDataSource: Cannot get info", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountDisksListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountDisksListModel{ + AccountID: state.AccountID, + DiskID: state.DiskID, + Name: state.Name, + DiskMaxSize: state.DiskMaxSize, + Type: state.Type, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(disksList.EntryCount)), + } + + items := make([]models.ItemDiskModel, 0, len(disksList.Data)) + for _, item := range disksList.Data { + i := models.ItemDiskModel{ + DiskID: types.Int64Value(int64(item.ID)), + DiskName: types.StringValue(item.Name), + Pool: types.StringValue(item.Pool), + SEPID: types.Int64Value(int64(item.SEPID)), + Shareable: types.BoolValue(item.Shareable), + SizeMax: types.Int64Value(int64(item.SizeMax)), + Type: types.StringValue(item.Type), + } + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, 
"flattens.AccountDisksListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountDisksListDataSource") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go new file mode 100644 index 0000000..2c912c0 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_flipgroups_list.go @@ -0,0 +1,80 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountFlipgroupsListDataSource flattens data source for account flipgroups list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountFlipgroupsListDataSource(ctx context.Context, state *models.DataSourceAccountFlipgroupsListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountFlipgroupsListDataSource") + + diags := diag.Diagnostics{} + + flipgroups, err := utilities.AccountFlipgroupsListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account flipgroups list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountFlipgroupsListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountFlipgroupsListModel{ + AccountID: state.AccountID, + Name: state.Name, + VINSID: state.VINSID, + VINSName: state.VINSName, + ExtNetID: state.ExtNetID, + ByIP: state.ByIP, + FLIPGroupID: state.FLIPGroupID, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(flipgroups.EntryCount)), + } + + items := make([]models.ItemAccountFlipgroupModel, 0, len(flipgroups.Data)) + for _, item := range flipgroups.Data { + i := models.ItemAccountFlipgroupModel{ + AccountID: types.Int64Value(int64(item.AccountID)), + ClientType: types.StringValue(item.ClientType), + ConnType: types.StringValue(item.ConnType), + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DefaultGW: types.StringValue(item.DefaultGW), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + Description: types.StringValue(item.Description), + GID: types.Int64Value(int64(item.GID)), + GUID: types.Int64Value(int64(item.GUID)), + ID: types.Int64Value(int64(item.ID)), + IP: types.StringValue(item.IP), + Milestones: types.Int64Value(int64(item.Milestones)), + Name: types.StringValue(item.Name), + NetID: types.Int64Value(int64(item.NetID)), + NetType: types.StringValue(item.NetType), + NetMask: types.Int64Value(int64(item.NetMask)), + Status: 
types.StringValue(item.Status), + UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + } + + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "End flattens.AccountFlipgroupsListDataSource") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption.go new file mode 100644 index 0000000..bbd3fde --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption.go @@ -0,0 +1,91 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountGetResourceConsumptionDataSource flattens data source for account. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountGetResourceConsumptionDataSource(ctx context.Context, state *models.AccountGetResourceConsumptionModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountGetResourceConsumptionDataSource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + record, err := utilities.AccountGetResourceConsumptionDataSourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionDataSource: before flatten", map[string]any{"account_id": accountId, "record": record}) + + *state = models.AccountGetResourceConsumptionModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + Consumed: &models.ResourceConsumptionModel{ + CPU: types.Int64Value(record.Consumed.CPU), + DiskSize: types.Float64Value(record.Consumed.DiskSize), + DiskSizeMax: types.Float64Value(record.Consumed.DiskSizeMax), + ExtIPs: types.Int64Value(record.Consumed.ExtIPs), + ExtTraffic: types.Int64Value(record.Consumed.ExtTraffic), + GPU: types.Int64Value(record.Consumed.GPU), + RAM: types.Int64Value(record.Consumed.RAM), + SEPs: flattenResourceConsumptionSep(ctx, record.Consumed.SEPs), + }, + Limits: &models.ResourceConsumptionLimitsModel{ + CUC: types.Float64Value(record.ResourceLimits.CUC), + CUD: types.Float64Value(record.ResourceLimits.CUD), + CUI: types.Float64Value(record.ResourceLimits.CUI), + CUM: types.Float64Value(record.ResourceLimits.CUM), + CUDM: types.Float64Value(record.ResourceLimits.CUDM), + CUNP: types.Float64Value(record.ResourceLimits.CUNP), + GPUUnits: types.Float64Value(record.ResourceLimits.GPUUnits), + }, + Reserved: &models.ResourceConsumptionModel{ + CPU: types.Int64Value(record.Reserved.CPU), + DiskSize: types.Float64Value(record.Reserved.DiskSize), + DiskSizeMax: types.Float64Value(record.Reserved.DiskSizeMax), + ExtIPs: 
types.Int64Value(record.Reserved.ExtIPs), + ExtTraffic: types.Int64Value(record.Reserved.ExtTraffic), + GPU: types.Int64Value(record.Reserved.GPU), + RAM: types.Int64Value(record.Reserved.RAM), + SEPs: flattenResourceConsumptionSep(ctx, record.Reserved.SEPs), + }, + } + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionDataSource: after flatten", map[string]any{"account_id": state.AccountID.ValueInt64()}) + + tflog.Info(ctx, "End flattens.AccountGetResourceConsumptionDataSource", map[string]any{"account_id": state.AccountID.ValueInt64()}) + return nil +} + +func flattenResourceConsumptionSep(ctx context.Context, seps map[string]map[string]account.DiskUsage) []models.ResourceConsumptionSepModel { + tflog.Info(ctx, "Start flattenResourceConsumption") + res := make([]models.ResourceConsumptionSepModel, 0, len(seps)) + for sepId := range seps { + for poolName, diskData := range seps[sepId] { + s := models.ResourceConsumptionSepModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(poolName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + res = append(res, s) + } + } + tflog.Info(ctx, "End flattenResourceConsumption") + return res +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption_list.go new file mode 100644 index 0000000..646786f --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_get_resource_consumption_list.go @@ -0,0 +1,97 @@ +package flattens + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountGetResourceConsumptionList flattens data source for rg get resource consumption. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountGetResourceConsumptionList(ctx context.Context, state *models.AccountGetResourceConsumptionListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountGetResourceConsumptionList") + + diags := diag.Diagnostics{} + + resConsList, err := utilities.AccountGetResourceConsumptionListDataSourceCheckPresence(ctx, c) + if err != nil { + diags.AddError("Cannot get info about resource consumptions", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionList: before flatten") + + *state = models.AccountGetResourceConsumptionListModel{ + EntryCount: state.EntryCount, + Timeouts: state.Timeouts, + } + + items := make([]models.AccountGetResourceConsumptionListItemModel, 0, len(resConsList.Data)) + for _, resConsItem := range resConsList.Data { + item := models.AccountGetResourceConsumptionListItemModel{ + AccountId: types.Int64Value(int64(resConsItem.AccountID)), + Consumed: &models.ResourceConsumptionListModel{ + CPU: types.Int64Value(resConsItem.Consumed.CPU), + DiskSize: types.Float64Value(resConsItem.Consumed.DiskSize), + DiskSizeMax: types.Float64Value(resConsItem.Consumed.DiskSizeMax), + ExtIPs: types.Int64Value(resConsItem.Consumed.ExtIPs), + ExtTraffic: types.Int64Value(resConsItem.Consumed.ExtTraffic), + GPU: types.Int64Value(resConsItem.Consumed.GPU), + RAM: types.Int64Value(resConsItem.Consumed.RAM), + }, + Reserved: &models.ResourceConsumptionListModel{ + CPU: types.Int64Value(resConsItem.Reserved.CPU), + DiskSize: types.Float64Value(resConsItem.Reserved.DiskSize), + DiskSizeMax: 
types.Float64Value(resConsItem.Reserved.DiskSizeMax), + ExtIPs: types.Int64Value(resConsItem.Reserved.ExtIPs), + ExtTraffic: types.Int64Value(resConsItem.Reserved.ExtTraffic), + GPU: types.Int64Value(resConsItem.Reserved.GPU), + RAM: types.Int64Value(resConsItem.Reserved.RAM), + }, + } + + sepsConsumed := make([]models.ResourceConsumptionSepListModel, 0, len(resConsItem.Consumed.SEPs)) + for sepId, data := range resConsItem.Consumed.SEPs { + for dataName, diskData := range data { + sepItem := models.ResourceConsumptionSepListModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(dataName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + sepsConsumed = append(sepsConsumed, sepItem) + } + } + item.Consumed.SEPs = sepsConsumed + + sepsReserved := make([]models.ResourceConsumptionSepListModel, 0, len(resConsItem.Reserved.SEPs)) + for sepId, data := range resConsItem.Reserved.SEPs { + for dataName, diskData := range data { + sepItem := models.ResourceConsumptionSepListModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(dataName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + sepsReserved = append(sepsReserved, sepItem) + } + } + item.Reserved.SEPs = sepsReserved + + items = append(items, item) + } + + state.Items = items + state.EntryCount = types.Int64Value(int64(resConsList.EntryCount)) + + tflog.Info(ctx, "flattens.AccountGetResourceConsumptionList: after flatten") + + tflog.Info(ctx, "End flattens.AccountGetResourceConsumptionList") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_list.go new file mode 100644 index 0000000..b3e8a85 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_list.go @@ -0,0 +1,83 @@ +package flattens + 
+import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountListDataSource flattens data source for account list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountListDataSource(ctx context.Context, state *models.DataSourceAccountListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountListDataSource") + + diags := diag.Diagnostics{} + + accountList, err := utilities.AccountListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountListModel{ + ByID: state.ByID, + Name: state.Name, + ACL: state.ACL, + Status: state.Status, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(accountList.EntryCount)), + } + items := make([]models.ItemAccountListModel, 0, len(accountList.Data)) + for _, item := range accountList.Data { + i := models.ItemAccountListModel{ + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + AccountID: types.Int64Value(int64(item.ID)), + AccountName: types.StringValue(item.Name), + Status: types.StringValue(item.Status), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + } + i.ComputeFeatures, diags = 
types.ListValueFrom(ctx, types.StringType, item.ComputeFeatures) + if diags.HasError() { + tflog.Error(ctx, fmt.Sprint("flattens.AccountListDataSource: cannot flatten item.ComputeFeatures to i.ComputeFeatures", diags)) + } + + aclList := make([]models.RecordACLModel, 0, len(item.ACL)) + for _, acl := range item.ACL { + a := models.RecordACLModel{ + Explicit: types.BoolValue(acl.IsExplicit), + GUID: types.StringValue(acl.GUID), + Right: types.StringValue(acl.Rights), + Status: types.StringValue(acl.Status), + Type: types.StringValue(acl.Type), + UserGroupID: types.StringValue(acl.UgroupID), + } + aclList = append(aclList, a) + } + + i.ACL = aclList + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "flattens.AccountListDataSource: after flatten") + + tflog.Info(ctx, "End flattens.AccountListDataSource") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_reserved_units.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_reserved_units.go new file mode 100644 index 0000000..b7a7a7b --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_reserved_units.go @@ -0,0 +1,50 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountReservedUnitsDataSource flattens data source for account. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountReservedUnitsDataSource(ctx context.Context, state *models.DataSourceAccountReservedUnitsModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountReservedUnitsDataSource") + + diags := diag.Diagnostics{} + + accountId := uint64(state.AccountID.ValueInt64()) + + recordAccount, err := utilities.AccountReservedUnitsCheck(ctx, state, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountReservedUnitsDataSource: before flatten", map[string]any{"account_id": accountId, "recordAccount": recordAccount}) + + *state = models.DataSourceAccountReservedUnitsModel{ + AccountID: state.AccountID, + Timeouts: state.Timeouts, + + CUC: types.Float64Value(recordAccount.CUC), + CUD: types.Float64Value(recordAccount.CUD), + CUI: types.Float64Value(recordAccount.CUI), + CUM: types.Float64Value(recordAccount.CUM), + CUDM: types.Float64Value(recordAccount.CUDM), + CUNP: types.Float64Value(recordAccount.CUNP), + GPUUnits: types.Float64Value(recordAccount.GPUUnits), + } + + tflog.Info(ctx, "flattens.AccountReservedUnitsDataSource: after flatten", map[string]any{"account_id": state.AccountID.ValueInt64()}) + + tflog.Info(ctx, "End flattens.AccountReservedUnitsDataSource", map[string]any{"account_id": state.AccountID.ValueInt64()}) + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_rg_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_rg_list.go new file mode 100644 index 0000000..520e6c5 --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_rg_list.go @@ -0,0 +1,124 @@ +package flattens + +import ( + "context" + + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountRGListDataSource flattens data source for account rg list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func AccountRGListDataSource(ctx context.Context, state *models.DataSourceAccountRGListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountRGListDataSource") + + diags := diag.Diagnostics{} + + rgList, err := utilities.AccountRGListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account rg list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountRGListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountRGListModel{ + AccountID: state.AccountID, + RGID: state.RGID, + VinsID: state.VinsID, + VMID: state.VMID, + Name: state.Name, + Status: state.Status, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + SortBy: state.SortBy, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(rgList.EntryCount)), + } + + items := make([]models.ItemAccountRGModel, 0, len(rgList.Data)) + for _, item := range rgList.Data { + i := models.ItemAccountRGModel{ + Computes: &models.RGComputesModel{ + Started: types.Int64Value(int64(item.Computes.Started)), + Stopped: types.Int64Value(int64(item.Computes.Stopped)), + }, + Resources: &models.RGResourcesModel{ + Consumed: &models.ResourceModel{ + CPU: types.Int64Value(item.Resources.Consumed.CPU), + DiskSize: types.Float64Value(item.Resources.Consumed.DiskSize), + DiskSizeMax: 
types.Float64Value(item.Resources.Consumed.DiskSizeMax), + ExtIPs: types.Int64Value(item.Resources.Consumed.ExtIPs), + ExtTraffic: types.Int64Value(item.Resources.Consumed.ExtTraffic), + GPU: types.Int64Value(item.Resources.Consumed.GPU), + RAM: types.Int64Value(item.Resources.Consumed.RAM), + SEPs: flattenSep(item.Resources.Consumed.SEPs), + }, + Limits: &models.LimitsRGModel{ + CPU: types.Int64Value(item.Resources.Limits.CPU), + DiskSize: types.Int64Value(item.Resources.Limits.DiskSize), + DiskSizeMax: types.Int64Value(item.Resources.Limits.DiskSizeMax), + ExtIPs: types.Int64Value(item.Resources.Limits.ExtIPs), + ExtTraffic: types.Int64Value(item.Resources.Limits.ExtTraffic), + GPU: types.Int64Value(item.Resources.Limits.GPU), + RAM: types.Int64Value(item.Resources.Limits.RAM), + SEPs: types.Int64Value(int64(item.Resources.Limits.SEPs)), + }, + Reserved: &models.ResourceModel{ + CPU: types.Int64Value(item.Resources.Reserved.CPU), + DiskSize: types.Float64Value(item.Resources.Reserved.DiskSize), + DiskSizeMax: types.Float64Value(item.Resources.Reserved.DiskSizeMax), + ExtIPs: types.Int64Value(item.Resources.Reserved.ExtIPs), + ExtTraffic: types.Int64Value(item.Resources.Reserved.ExtTraffic), + GPU: types.Int64Value(item.Resources.Reserved.GPU), + RAM: types.Int64Value(item.Resources.Reserved.RAM), + SEPs: flattenSep(item.Resources.Reserved.SEPs), + }, + }, + CreatedBy: types.StringValue(item.CreatedBy), + CreatedTime: types.Int64Value(int64(item.CreatedTime)), + DeletedBy: types.StringValue(item.DeletedBy), + DeletedTime: types.Int64Value(int64(item.DeletedTime)), + RGID: types.Int64Value(int64(item.RGID)), + Milestones: types.Int64Value(int64(item.Milestones)), + RGName: types.StringValue(item.RGName), + Status: types.StringValue(item.Status), + UpdatedBy: types.StringValue(item.UpdatedBy), + UpdatedTime: types.Int64Value(int64(item.UpdatedTime)), + VINSes: types.Int64Value(int64(item.VINSes)), + } + + items = append(items, i) + } + state.Items = items + + 
tflog.Info(ctx, "End flattens.AccountRGListDataSource") + return nil +} + +func flattenSep(seps map[string]map[string]account.DiskUsage) []models.SepModel { + res := make([]models.SepModel, 0, len(seps)) + for sepId := range seps { + for poolName, diskData := range seps[sepId] { + s := models.SepModel{ + SepID: types.StringValue(sepId), + PoolName: types.StringValue(poolName), + DiskSize: types.Float64Value(diskData.DiskSize), + DiskSizeMax: types.Float64Value(diskData.DiskSizeMax), + } + res = append(res, s) + } + } + return res +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_templates_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_templates_list.go new file mode 100644 index 0000000..d0a303f --- /dev/null +++ b/internal/service/cloudapi/account/flattens/flatten_data_source_account_templates_list.go @@ -0,0 +1,68 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities" +) + +// AccountTemplatesListDataSource flattens data source for account templates list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func AccountTemplatesListDataSource(ctx context.Context, state *models.DataSourceAccountTemplatesListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.AccountTemplatesListDataSource") + + diags := diag.Diagnostics{} + + templatesList, err := utilities.AccountTemplatesListCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about account templates list", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.AccountTemplatesListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceAccountTemplatesListModel{ + AccountID: state.AccountID, + IncludeDeleted: state.IncludeDeleted, + ImageID: state.ImageID, + Name: state.Name, + Type: state.Type, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(templatesList.EntryCount)), + } + + items := make([]models.ItemTemplateModel, 0, len(templatesList.Data)) + for _, item := range templatesList.Data { + i := models.ItemTemplateModel{ + UNCPath: types.StringValue(item.UNCPath), + AccountID: types.Int64Value(int64(item.AccountID)), + Description: types.StringValue(item.Description), + ID: types.Int64Value(int64(item.ID)), + Name: types.StringValue(item.Name), + Public: types.BoolValue(item.Public), + Size: types.Int64Value(int64(item.Size)), + Status: types.StringValue(item.Status), + Type: types.StringValue(item.Type), + Username: types.StringValue(item.Username), + } + + items = append(items, i) + } + state.Items = items + + tflog.Info(ctx, "End flattens.AccountTemplatesListDataSource") + return nil +} diff --git a/internal/service/cloudapi/account/flattens/flatten_data_source_account_vins_list.go b/internal/service/cloudapi/account/flattens/flatten_data_source_account_vins_list.go new file mode 100644 index 0000000..24e6f82 --- /dev/null +++ 
b/internal/service/cloudapi/account/flattens/flatten_data_source_account_vins_list.go
@@ -0,0 +1,76 @@
+package flattens
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities"
+)
+
+// AccountVinsListDataSource flattens data source for account vins list.
+// Return error in case data source is not found on the platform.
+// Flatten errors are added to tflog.
+func AccountVinsListDataSource(ctx context.Context, state *models.DataSourceAccountVinsListModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.AccountVinsListDataSource")
+
+	diags := diag.Diagnostics{}
+
+	accountVinsList, err := utilities.AccountVinsListCheck(ctx, state, c)
+	if err != nil {
+		diags.AddError("Cannot get info about account vins list", err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.AccountVinsListDataSource: before flatten")
+
+	// Request fields are carried over; only EntryCount comes from the reply.
+	*state = models.DataSourceAccountVinsListModel{
+		AccountID: state.AccountID,
+		VinsID:    state.VinsID,
+		Name:      state.Name,
+		RGID:      state.RGID,
+		ExtIp:     state.ExtIp,
+		SortBy:    state.SortBy,
+		Page:      state.Page,
+		Size:      state.Size,
+		Timeouts:  state.Timeouts,
+
+		EntryCount: types.Int64Value(int64(accountVinsList.EntryCount)),
+	}
+	items := make([]models.ItemVINSModel, 0, len(accountVinsList.Data))
+	for _, item := range accountVinsList.Data {
+		i := models.ItemVINSModel{
+			// Fixed: account fields must come from item.AccountID/item.AccountName,
+			// not from the VINS's own ID/Name (those are flattened below).
+			AccountID:   types.Int64Value(int64(item.AccountID)),
+			AccountName: types.StringValue(item.AccountName),
+			Computes:    types.Int64Value(int64(item.Computes)),
+			CreatedBy:   types.StringValue(item.CreatedBy),
+			CreatedTime: types.Int64Value(int64(item.CreatedTime)),
+			DeletedBy:   types.StringValue(item.DeletedBy),
+			DeletedTime: types.Int64Value(int64(item.DeletedTime)),
+			ExternalIP:  types.StringValue(item.ExternalIP),
+			ExtnetId:    types.Int64Value(int64(item.ExtnetId)),
+			FreeIPs:     types.Int64Value(int64(item.FreeIPs)),
+			ID:          types.Int64Value(int64(item.ID)),
+			Name:        types.StringValue(item.Name),
+			Network:     types.StringValue(item.Network),
+			PriVNFDevID: types.Int64Value(int64(item.PriVNFDevID)),
+			RGID:        types.Int64Value(int64(item.RGID)),
+			RGName:      types.StringValue(item.RGName),
+			Status:      types.StringValue(item.Status),
+			UpdatedBy:   types.StringValue(item.UpdatedBy),
+			UpdatedTime: types.Int64Value(int64(item.UpdatedTime)),
+		}
+
+		items = append(items, i)
+	}
+
+	state.Data = items
+
+	tflog.Info(ctx, "flattens.AccountVinsListDataSource: after flatten")
+
+	tflog.Info(ctx, "End flattens.AccountVinsListDataSource")
+	return nil
+}
diff --git a/internal/service/cloudapi/account/flattens/flatten_resource_account.go b/internal/service/cloudapi/account/flattens/flatten_resource_account.go
new file mode 100644
index 0000000..08801b4
--- /dev/null
+++ b/internal/service/cloudapi/account/flattens/flatten_resource_account.go
@@ -0,0 +1,139 @@
+package flattens
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+
+	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities"
+)
+
+// AccountResource flattens resource for account.
+// Return error in case data source is not found on the platform.
+// Flatten errors are added to tflog.
+func AccountResource(ctx context.Context, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.AccountResource")
+
+	diags := diag.Diagnostics{}
+
+	// Prefer the numeric account_id from state; fall back to parsing the
+	// string resource ID (set on import / first create).
+	accountId := uint64(state.AccountID.ValueInt64())
+	if accountId == 0 {
+		id, err := strconv.Atoi(state.Id.ValueString())
+		if err != nil {
+			diags.AddError(
+				"flattens.AccountResource: cannot parse resource ID from state",
+				err.Error())
+			return diags
+		}
+		accountId = uint64(id)
+	}
+
+	recordAccount, err := utilities.AccountResourceCheckPresence(ctx, accountId, c)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("flattens.AccountResource: Cannot get info about resource with ID %v", accountId), err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.AccountResource: before flatten", map[string]any{"account_id": accountId, "recordAccount": recordAccount})
+
+	*state = models.ResourceAccountModel{
+		// request fields (carried over from plan/state, not returned by the API)
+		AccountName:      types.StringValue(recordAccount.Name),
+		Username:         state.Username,
+		EmailAddress:     state.EmailAddress,
+		SendAccessEmails: state.SendAccessEmails,
+		Users:            state.Users,
+		Restore:          state.Restore,
+		Permanently:      state.Permanently,
+		Enable:           state.Enable,
+		ResourceLimits:   flattenResourceLimitsInAccountResource(ctx, recordAccount.ResourceLimits, state),
+		Timeouts:         state.Timeouts,
+
+		// response fields
+		Id:                     types.StringValue(strconv.Itoa(int(accountId))),
+		LastUpdated:            state.LastUpdated,
+		AccountID:              types.Int64Value(int64(recordAccount.ID)),
+		DCLocation:             types.StringValue(recordAccount.DCLocation),
+		CKey:                   types.StringValue(recordAccount.CKey),
+		ACL:                    flattenACLInAccount(ctx, recordAccount.ACL),
+		Company:                types.StringValue(recordAccount.Company),
+		CompanyURL:             types.StringValue(recordAccount.CompanyURL),
+		Computes:               flattenComputes(ctx, recordAccount.Computes),
+		CPUAllocationParameter: types.StringValue(recordAccount.CPUAllocationParameter),
+		CPUAllocationRatio:     types.Float64Value(recordAccount.CPUAllocationRatio),
+		CreatedBy:              types.StringValue(recordAccount.CreatedBy),
+		CreatedTime:            types.Int64Value(int64(recordAccount.CreatedTime)),
+		DeactivationTime:       types.Float64Value(recordAccount.DeactivationTime),
+		DeletedBy:              types.StringValue(recordAccount.DeletedBy),
+		DeletedTime:            types.Int64Value(int64(recordAccount.DeletedTime)),
+		DisplayName:            types.StringValue(recordAccount.DisplayName),
+		GUID:                   types.Int64Value(int64(recordAccount.GUID)),
+		Machines:               flattenMachines(ctx, recordAccount.Machines),
+		Status:                 types.StringValue(recordAccount.Status),
+		UpdatedTime:            types.Int64Value(int64(recordAccount.UpdatedTime)),
+		Version:                types.Int64Value(int64(recordAccount.Version)),
+		VINSes:                 types.Int64Value(int64(recordAccount.VINSes)),
+	}
+
+	// Fixed: list-conversion diagnostics were previously logged and then
+	// discarded (the function always returned nil, and the second assignment
+	// overwrote the first call's diags). Accumulate them and return diags.
+	var d diag.Diagnostics
+	state.VINS, d = types.ListValueFrom(ctx, types.Int64Type, recordAccount.VINS)
+	if d.HasError() {
+		tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.VINS to state.VINS", d))
+		diags.Append(d...)
+	}
+
+	state.ComputeFeatures, d = types.ListValueFrom(ctx, types.StringType, recordAccount.ComputeFeatures)
+	if d.HasError() {
+		tflog.Error(ctx, fmt.Sprint("flattens.AccountResource: cannot flatten recordAccount.ComputeFeatures to state.ComputeFeatures", d))
+		diags.Append(d...)
+	}
+
+	tflog.Info(ctx, "flattens.AccountResource: after flatten", map[string]any{"account_id": state.Id.ValueString()})
+
+	tflog.Info(ctx, "End flattens.AccountResource", map[string]any{"account_id": state.Id.ValueString()})
+	return diags
+}
+
+// flattenResourceLimitsInAccountResource merges the resource_limits object
+// from plan/state with the limits returned by the platform: any field left
+// at zero in the plan is filled from the API value.
+// NOTE(review): zero is treated as "unset" here — confirm a deliberate limit
+// of 0 is not a valid user input for these fields.
+func flattenResourceLimitsInAccountResource(ctx context.Context, limits account.ResourceLimits, state *models.ResourceAccountModel) types.Object {
+	tflog.Info(ctx, "Start flattenResourceLimitsInAccountResource")
+
+	diags := diag.Diagnostics{}
+
+	var resourceLimits models.ResourceLimitsInAccountResourceModel
+	diags.Append(state.ResourceLimits.As(ctx, &resourceLimits, basetypes.ObjectAsOptions{})...)
+	if diags.HasError() {
+		tflog.Error(ctx, "flattenResourceLimitsInAccountResource: cannot populate resourceLimits with plan.ResourceLimits object element")
+	}
+
+	if resourceLimits.CUC.ValueFloat64() == 0 {
+		resourceLimits.CUC = types.Float64Value(limits.CUC)
+	}
+	if resourceLimits.CUD.ValueFloat64() == 0 {
+		resourceLimits.CUD = types.Float64Value(limits.CUD)
+	}
+	if resourceLimits.CUI.ValueFloat64() == 0 {
+		resourceLimits.CUI = types.Float64Value(limits.CUI)
+	}
+	if resourceLimits.CUM.ValueFloat64() == 0 {
+		resourceLimits.CUM = types.Float64Value(limits.CUM)
+	}
+	if resourceLimits.CUNP.ValueFloat64() == 0 {
+		resourceLimits.CUNP = types.Float64Value(limits.CUNP)
+	}
+	if resourceLimits.GPUUnits.ValueFloat64() == 0 {
+		resourceLimits.GPUUnits = types.Float64Value(limits.GPUUnits)
+	}
+
+	// Fixed: ObjectValueFrom returns diag.Diagnostics, not error; it was
+	// named `err` and checked with `!= nil`, which is true even for an empty
+	// non-nil diagnostics value. Check HasError() instead.
+	res, objDiags := types.ObjectValueFrom(ctx, models.ItemResourceLimitsInAccountResource, resourceLimits)
+	if objDiags.HasError() {
+		tflog.Error(ctx, fmt.Sprint("Error flattenResourceLimitsInAccountResource struct to obj", objDiags))
+	}
+
+	tflog.Info(ctx, "End flattenResourceLimitsInAccountResource")
+	return res
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account.go b/internal/service/cloudapi/account/models/model_data_source_account.go
new file mode 100644
index 0000000..9b0ba78
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account.go
@@ -0,0 +1,101 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type DataSourceAccountModel struct {
+	// request fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	DCLocation types.String `tfsdk:"dc_location"`
+	CKey types.String `tfsdk:"ckey"`
+	ACL types.List `tfsdk:"acl"`
+	Company types.String 
`tfsdk:"company"`
+	CompanyURL types.String `tfsdk:"companyurl"`
+	ComputeFeatures types.List `tfsdk:"compute_features"`
+	Computes types.Object `tfsdk:"computes"`
+	CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
+	CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeactivationTime types.Float64 `tfsdk:"deactivation_time"`
+	DeletedBy types.String `tfsdk:"deleted_by"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	DisplayName types.String `tfsdk:"displayname"`
+	GUID types.Int64 `tfsdk:"guid"`
+	Machines types.Object `tfsdk:"machines"`
+	AccountName types.String `tfsdk:"account_name"`
+	ResourceLimits types.Object `tfsdk:"resource_limits"`
+	SendAccessEmails types.Bool `tfsdk:"send_access_emails"`
+	Status types.String `tfsdk:"status"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+	Version types.Int64 `tfsdk:"version"`
+	VINS types.List `tfsdk:"vins"`
+	VINSes types.Int64 `tfsdk:"vinses"`
+}
+
+// ResourceLimitsInAccountModel describes the nested resource_limits object
+// of the account data source (includes cu_dm, unlike the resource variant).
+type ResourceLimitsInAccountModel struct {
+	CUC types.Float64 `tfsdk:"cu_c"`
+	CUD types.Float64 `tfsdk:"cu_d"`
+	CUI types.Float64 `tfsdk:"cu_i"`
+	CUM types.Float64 `tfsdk:"cu_m"`
+	CUDM types.Float64 `tfsdk:"cu_dm"`
+	CUNP types.Float64 `tfsdk:"cu_np"`
+	GPUUnits types.Float64 `tfsdk:"gpu_units"`
+}
+
+// ACLInAccountModel is one element of the account's acl list.
+type ACLInAccountModel struct {
+	Explicit types.Bool `tfsdk:"explicit"`
+	GUID types.String `tfsdk:"guid"`
+	Right types.String `tfsdk:"right"`
+	Status types.String `tfsdk:"status"`
+	Type types.String `tfsdk:"type"`
+	UserGroupID types.String `tfsdk:"user_group_id"`
+	CanBeDeleted types.Bool `tfsdk:"can_be_deleted"`
+}
+
+type ComputesInAccountModel struct {
+	Started types.Int64 `tfsdk:"started"`
+	Stopped types.Int64 `tfsdk:"stopped"`
+}
+
+type MachinesInAccountModel struct {
+	Running types.Int64 `tfsdk:"running"`
+	Halted types.Int64 `tfsdk:"halted"`
+}
+
+// Attribute-type maps below are used with types.ObjectValueFrom /
+// types.ListValueFrom when flattening the nested objects above.
+var ItemResourceLimitsInAccount = map[string]attr.Type{
+	"cu_c": types.Float64Type,
+	"cu_d": types.Float64Type,
+	"cu_i": types.Float64Type,
+	"cu_m": types.Float64Type,
+	"cu_dm": types.Float64Type,
+	"cu_np": types.Float64Type,
+	"gpu_units": types.Float64Type,
+}
+
+var ItemACLInAccount = map[string]attr.Type{
+	"explicit": types.BoolType,
+	"guid": types.StringType,
+	"right": types.StringType,
+	"status": types.StringType,
+	"type": types.StringType,
+	"user_group_id": types.StringType,
+	"can_be_deleted": types.BoolType,
+}
+
+var ItemComputesInAccount = map[string]attr.Type{
+	"started": types.Int64Type,
+	"stopped": types.Int64Type,
+}
+
+var ItemMachinesInAccount = map[string]attr.Type{
+	"running": types.Int64Type,
+	"halted": types.Int64Type,
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_audits_list.go b/internal/service/cloudapi/account/models/model_data_source_account_audits_list.go
new file mode 100644
index 0000000..e26b0b3
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_audits_list.go
@@ -0,0 +1,24 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountAuditsListModel maps the account audits list data source.
+type DataSourceAccountAuditsListModel struct {
+	// request fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemAuditModel `tfsdk:"items"`
+}
+
+type ItemAuditModel struct {
+	Call types.String `tfsdk:"call"`
+	ResponseTime types.Float64 `tfsdk:"responsetime"`
+	StatusCode types.Int64 `tfsdk:"statuscode"`
+	Timestamp types.Float64 `tfsdk:"timestamp"`
+	User types.String `tfsdk:"user"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_computes_list.go b/internal/service/cloudapi/account/models/model_data_source_account_computes_list.go
new file mode 100644
index 0000000..3f78cba
--- /dev/null
+++ 
b/internal/service/cloudapi/account/models/model_data_source_account_computes_list.go
@@ -0,0 +1,53 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountComputesListModel maps the account computes list data source.
+type DataSourceAccountComputesListModel struct {
+	// required fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+
+	// optional fields
+	ComputeID types.Int64 `tfsdk:"compute_id"`
+	Name types.String `tfsdk:"name"`
+	RGName types.String `tfsdk:"rg_name"`
+	RGID types.Int64 `tfsdk:"rg_id"`
+	TechStatus types.String `tfsdk:"tech_status"`
+	IPAddress types.String `tfsdk:"ip_address"`
+	ExtNetName types.String `tfsdk:"extnet_name"`
+	ExtNetID types.Int64 `tfsdk:"extnet_id"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+	SortBy types.String `tfsdk:"sort_by"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemComputeModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemComputeModel struct {
+	AccountID types.Int64 `tfsdk:"account_id"`
+	AccountName types.String `tfsdk:"account_name"`
+	CPUs types.Int64 `tfsdk:"cpus"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeletedBy types.String `tfsdk:"deleted_by"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	ComputeID types.Int64 `tfsdk:"compute_id"`
+	ComputeName types.String `tfsdk:"compute_name"`
+	RAM types.Int64 `tfsdk:"ram"`
+	Registered types.Bool `tfsdk:"registered"`
+	RGID types.Int64 `tfsdk:"rg_id"`
+	RGName types.String `tfsdk:"rg_name"`
+	Status types.String `tfsdk:"status"`
+	TechStatus types.String `tfsdk:"tech_status"`
+	TotalDisksSize types.Int64 `tfsdk:"total_disks_size"`
+	UpdatedBy types.String `tfsdk:"updated_by"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+	UserManaged types.Bool `tfsdk:"user_managed"`
+	VINSConnected types.Int64 `tfsdk:"vins_connected"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_consumed_units.go b/internal/service/cloudapi/account/models/model_data_source_account_consumed_units.go
new file mode 100644
index 0000000..df21a63
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_consumed_units.go
@@ -0,0 +1,22 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountConsumedUnitsModel maps the account consumed units data source.
+type DataSourceAccountConsumedUnitsModel struct {
+	// request fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	CUC types.Float64 `tfsdk:"cu_c"`
+	CUD types.Float64 `tfsdk:"cu_d"`
+	CUDM types.Float64 `tfsdk:"cu_dm"`
+	CUI types.Float64 `tfsdk:"cu_i"`
+	CUM types.Float64 `tfsdk:"cu_m"`
+	CUNP types.Float64 `tfsdk:"cu_np"`
+	GPUUnits types.Float64 `tfsdk:"gpu_units"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_consumed_units_by_type.go b/internal/service/cloudapi/account/models/model_data_source_account_consumed_units_by_type.go
new file mode 100644
index 0000000..35bb75a
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_consumed_units_by_type.go
@@ -0,0 +1,17 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountConsumedUnitsByTypeModel maps the consumed-units-by-type data source.
+type DataSourceAccountConsumedUnitsByTypeModel struct {
+	// optional fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	CUType types.String `tfsdk:"cu_type"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	CUResult types.Float64 `tfsdk:"cu_result"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_disks_list.go b/internal/service/cloudapi/account/models/model_data_source_account_disks_list.go
new file mode 100644
index 0000000..d87f54a
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_disks_list.go
@@ -0,0 +1,36 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountDisksListModel maps the account disks list data source.
+type DataSourceAccountDisksListModel struct {
+	// required fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+
+	// optional fields
+	DiskID types.Int64 `tfsdk:"disk_id"`
+	Name types.String `tfsdk:"name"`
+	DiskMaxSize types.Int64 `tfsdk:"disk_max_size"`
+	Type types.String `tfsdk:"type"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+	SortBy types.String `tfsdk:"sort_by"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemDiskModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemDiskModel struct {
+	DiskID types.Int64 `tfsdk:"disk_id"`
+	DiskName types.String `tfsdk:"disk_name"`
+	Pool types.String `tfsdk:"pool"`
+	SEPID types.Int64 `tfsdk:"sep_id"`
+	Shareable types.Bool `tfsdk:"shareable"`
+	SizeMax types.Int64 `tfsdk:"size_max"`
+	Type types.String `tfsdk:"type"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go
new file mode 100644
index 0000000..b05e9c5
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_flipgroups_list.go
@@ -0,0 +1,49 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountFlipgroupsListModel maps the account flipgroups list data source.
+type DataSourceAccountFlipgroupsListModel struct {
+	// optional and required fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Name types.String `tfsdk:"name"`
+	VINSID types.Int64 `tfsdk:"vins_id"`
+	VINSName types.String `tfsdk:"vins_name"`
+	ExtNetID types.Int64 `tfsdk:"extnet_id"`
+	ByIP types.String `tfsdk:"by_ip"`
+	FLIPGroupID types.Int64 `tfsdk:"flipgroup_id"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemAccountFlipgroupModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemAccountFlipgroupModel struct {
+	AccountID types.Int64 `tfsdk:"account_id"`
+	ClientType types.String `tfsdk:"client_type"`
+	ConnType types.String `tfsdk:"conn_type"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DefaultGW types.String `tfsdk:"default_gw"`
+	DeletedBy types.String `tfsdk:"deleted_by"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	Description types.String `tfsdk:"desc"`
+	GID types.Int64 `tfsdk:"gid"`
+	GUID types.Int64 `tfsdk:"guid"`
+	ID types.Int64 `tfsdk:"fg_id"`
+	IP types.String `tfsdk:"ip"`
+	Milestones types.Int64 `tfsdk:"milestones"`
+	Name types.String `tfsdk:"fg_name"`
+	NetID types.Int64 `tfsdk:"net_id"`
+	NetType types.String `tfsdk:"net_type"`
+	NetMask types.Int64 `tfsdk:"netmask"`
+	Status types.String `tfsdk:"status"`
+	UpdatedBy types.String `tfsdk:"updated_by"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption.go b/internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption.go
new file mode 100644
index 0000000..f5343b3
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption.go
@@ -0,0 +1,45 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+type 
AccountGetResourceConsumptionModel struct {
+	// request fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Consumed *ResourceConsumptionModel `tfsdk:"consumed"`
+	Reserved *ResourceConsumptionModel `tfsdk:"reserved"`
+	Limits *ResourceConsumptionLimitsModel `tfsdk:"resource_limits"`
+}
+
+type ResourceConsumptionModel struct {
+	CPU types.Int64 `tfsdk:"cpu"`
+	DiskSize types.Float64 `tfsdk:"disk_size"`
+	DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
+	ExtIPs types.Int64 `tfsdk:"ext_ips"`
+	ExtTraffic types.Int64 `tfsdk:"ext_traffic"`
+	GPU types.Int64 `tfsdk:"gpu"`
+	RAM types.Int64 `tfsdk:"ram"`
+	SEPs []ResourceConsumptionSepModel `tfsdk:"seps"`
+}
+
+type ResourceConsumptionSepModel struct {
+	SepID types.String `tfsdk:"sep_id"`
+	// NOTE(review): PoolName is tagged "data_name" while the analogous
+	// SepModel in the rg_list models uses "pool_name" — confirm the schema
+	// really intends "data_name" here.
+	PoolName types.String `tfsdk:"data_name"`
+	DiskSize types.Float64 `tfsdk:"disk_size"`
+	DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
+}
+
+type ResourceConsumptionLimitsModel struct {
+	CUC types.Float64 `tfsdk:"cu_c"`
+	CUD types.Float64 `tfsdk:"cu_d"`
+	CUI types.Float64 `tfsdk:"cu_i"`
+	CUM types.Float64 `tfsdk:"cu_m"`
+	CUDM types.Float64 `tfsdk:"cu_dm"`
+	CUNP types.Float64 `tfsdk:"cu_np"`
+	GPUUnits types.Float64 `tfsdk:"gpu_units"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption_list.go b/internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption_list.go
new file mode 100644
index 0000000..6813c87
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_get_resource_consumption_list.go
@@ -0,0 +1,40 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// AccountGetResourceConsumptionListModel maps the resource consumption list data source.
+type AccountGetResourceConsumptionListModel struct {
+	// request fields
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Items []AccountGetResourceConsumptionListItemModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type AccountGetResourceConsumptionListItemModel struct {
+	// response fields
+	AccountId types.Int64 `tfsdk:"account_id"`
+	Consumed *ResourceConsumptionListModel `tfsdk:"consumed"`
+	Reserved *ResourceConsumptionListModel `tfsdk:"reserved"`
+}
+
+type ResourceConsumptionListModel struct {
+	CPU types.Int64 `tfsdk:"cpu"`
+	DiskSize types.Float64 `tfsdk:"disk_size"`
+	DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
+	ExtIPs types.Int64 `tfsdk:"ext_ips"`
+	ExtTraffic types.Int64 `tfsdk:"ext_traffic"`
+	GPU types.Int64 `tfsdk:"gpu"`
+	RAM types.Int64 `tfsdk:"ram"`
+	SEPs []ResourceConsumptionSepListModel `tfsdk:"seps"`
+}
+
+type ResourceConsumptionSepListModel struct {
+	SepID types.String `tfsdk:"sep_id"`
+	// NOTE(review): tagged "data_name" — see ResourceConsumptionSepModel; confirm.
+	PoolName types.String `tfsdk:"data_name"`
+	DiskSize types.Float64 `tfsdk:"disk_size"`
+	DiskSizeMax types.Float64 `tfsdk:"disk_size_max"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_list.go b/internal/service/cloudapi/account/models/model_data_source_account_list.go
new file mode 100644
index 0000000..dc8d258
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_list.go
@@ -0,0 +1,43 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountListModel maps the account list data source.
+type DataSourceAccountListModel struct {
+	// optional fields
+	ByID types.Int64 `tfsdk:"by_id"`
+	Name types.String `tfsdk:"name"`
+	ACL types.String `tfsdk:"acl"`
+	Status types.String `tfsdk:"status"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+	SortBy types.String `tfsdk:"sort_by"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemAccountListModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemAccountListModel struct {
+	ACL []RecordACLModel `tfsdk:"acl"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	AccountID types.Int64 `tfsdk:"account_id"`
+	AccountName types.String `tfsdk:"account_name"`
+	Status types.String `tfsdk:"status"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+	ComputeFeatures types.List `tfsdk:"compute_features"`
+}
+
+type RecordACLModel struct {
+	Explicit types.Bool `tfsdk:"explicit"`
+	GUID types.String `tfsdk:"guid"`
+	Right types.String `tfsdk:"right"`
+	Status types.String `tfsdk:"status"`
+	Type types.String `tfsdk:"type"`
+	UserGroupID types.String `tfsdk:"user_group_id"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_list_deleted.go b/internal/service/cloudapi/account/models/model_data_source_account_list_deleted.go
new file mode 100644
index 0000000..ba613d8
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_list_deleted.go
@@ -0,0 +1,33 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountListDeletedModel maps the deleted accounts list data source.
+type DataSourceAccountListDeletedModel struct {
+	// optional fields
+	ByID types.Int64 `tfsdk:"by_id"`
+	Name types.String `tfsdk:"name"`
+	ACL types.String `tfsdk:"acl"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+	SortBy types.String `tfsdk:"sort_by"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemAccountListDeletedModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemAccountListDeletedModel struct {
+	ACL []RecordACLModel `tfsdk:"acl"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	AccountID types.Int64 `tfsdk:"account_id"`
+	AccountName types.String `tfsdk:"account_name"`
+	Status types.String `tfsdk:"status"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+	ComputeFeatures types.List `tfsdk:"compute_features"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_reserved_units.go b/internal/service/cloudapi/account/models/model_data_source_account_reserved_units.go
new file mode 100644
index 0000000..e4b725a
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_reserved_units.go
@@ -0,0 +1,21 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountReservedUnitsModel maps the account reserved units data source.
+type DataSourceAccountReservedUnitsModel struct {
+	// optional and required fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	CUC types.Float64 `tfsdk:"cu_c"`
+	CUD types.Float64 `tfsdk:"cu_d"`
+	CUI types.Float64 `tfsdk:"cu_i"`
+	CUM types.Float64 `tfsdk:"cu_m"`
+	CUDM types.Float64 `tfsdk:"cu_dm"`
+	CUNP types.Float64 `tfsdk:"cu_np"`
+	GPUUnits types.Float64 `tfsdk:"gpu_units"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_rg_list.go b/internal/service/cloudapi/account/models/model_data_source_account_rg_list.go
new file mode 100644
index 0000000..2c9430d
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_rg_list.go
@@ -0,0 +1,81 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountRGListModel maps the account resource groups list data source.
+type DataSourceAccountRGListModel struct {
+	// optional and required fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	RGID types.Int64 `tfsdk:"rg_id"`
+	VinsID types.Int64 `tfsdk:"vins_id"`
+	VMID types.Int64 `tfsdk:"vm_id"`
+	Name types.String `tfsdk:"name"`
+	Status types.String `tfsdk:"status"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+	SortBy types.String `tfsdk:"sort_by"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemAccountRGModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemAccountRGModel struct {
+	Computes *RGComputesModel `tfsdk:"computes"`
+	Resources *RGResourcesModel `tfsdk:"resources"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeletedBy types.String `tfsdk:"deleted_by"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	RGID types.Int64 `tfsdk:"rg_id"`
+	Milestones types.Int64 `tfsdk:"milestones"`
+	RGName types.String `tfsdk:"rg_name"`
+	Status types.String `tfsdk:"status"`
+	UpdatedBy types.String `tfsdk:"updated_by"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+	VINSes types.Int64 `tfsdk:"vinses"`
+}
+
+type RGComputesModel struct {
+	Started types.Int64 `tfsdk:"started"`
+	Stopped types.Int64 `tfsdk:"stopped"`
+}
+
+type RGResourcesModel struct {
+	Consumed *ResourceModel `tfsdk:"consumed"`
+	Limits *LimitsRGModel `tfsdk:"limits"`
+	Reserved *ResourceModel `tfsdk:"reserved"`
+}
+
+type LimitsRGModel struct {
+	CPU types.Int64 `tfsdk:"cpu"`
+	DiskSize types.Int64 `tfsdk:"disksize"`
+	DiskSizeMax types.Int64 `tfsdk:"disksizemax"`
+	ExtIPs types.Int64 `tfsdk:"extips"`
+	ExtTraffic types.Int64 `tfsdk:"exttraffic"`
+	GPU types.Int64 `tfsdk:"gpu"`
+	RAM types.Int64 `tfsdk:"ram"`
+	SEPs types.Int64 `tfsdk:"seps"`
+}
+
+type ResourceModel struct {
+	CPU types.Int64 `tfsdk:"cpu"`
+	DiskSize types.Float64 `tfsdk:"disksize"`
+	DiskSizeMax types.Float64 `tfsdk:"disksizemax"`
+	ExtIPs types.Int64 `tfsdk:"extips"`
+	ExtTraffic types.Int64 `tfsdk:"exttraffic"`
+	GPU types.Int64 `tfsdk:"gpu"`
+	RAM types.Int64 `tfsdk:"ram"`
+	SEPs []SepModel `tfsdk:"seps"`
+}
+
+type SepModel struct {
+	SepID types.String `tfsdk:"sep_id"`
+	PoolName types.String `tfsdk:"pool_name"`
+	DiskSize types.Float64 `tfsdk:"disksize"`
+	DiskSizeMax types.Float64 `tfsdk:"disksizemax"`
+}
diff 
--git a/internal/service/cloudapi/account/models/model_data_source_account_templates_list.go b/internal/service/cloudapi/account/models/model_data_source_account_templates_list.go
new file mode 100644
index 0000000..45cb1d1
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_templates_list.go
@@ -0,0 +1,39 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountTemplatesListModel maps the account templates list data source.
+type DataSourceAccountTemplatesListModel struct {
+	// required fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+
+	// optional fields
+	IncludeDeleted types.Bool `tfsdk:"include_deleted"`
+	ImageID types.Int64 `tfsdk:"image_id"`
+	Name types.String `tfsdk:"name"`
+	Type types.String `tfsdk:"type"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+	SortBy types.String `tfsdk:"sort_by"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	Items []ItemTemplateModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemTemplateModel struct {
+	UNCPath types.String `tfsdk:"unc_path"`
+	AccountID types.Int64 `tfsdk:"account_id"`
+	Description types.String `tfsdk:"desc"`
+	ID types.Int64 `tfsdk:"template_id"`
+	Name types.String `tfsdk:"template_name"`
+	Public types.Bool `tfsdk:"public"`
+	Size types.Int64 `tfsdk:"size"`
+	Status types.String `tfsdk:"status"`
+	Type types.String `tfsdk:"type"`
+	Username types.String `tfsdk:"username"`
+}
diff --git a/internal/service/cloudapi/account/models/model_data_source_account_vins_list.go b/internal/service/cloudapi/account/models/model_data_source_account_vins_list.go
new file mode 100644
index 0000000..3daba9a
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_data_source_account_vins_list.go
@@ -0,0 +1,45 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// DataSourceAccountVinsListModel maps the account vins list data source.
+type DataSourceAccountVinsListModel struct {
+	// optional fields
+	AccountID types.Int64 `tfsdk:"account_id"`
+	VinsID types.Int64 `tfsdk:"vins_id"`
+	Name types.String `tfsdk:"name"`
+	RGID types.Int64 `tfsdk:"rg_id"`
+	ExtIp types.String `tfsdk:"ext_ip"`
+	SortBy types.String `tfsdk:"sort_by"`
+	Page types.Int64 `tfsdk:"page"`
+	Size types.Int64 `tfsdk:"size"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Data []ItemVINSModel `tfsdk:"items"`
+	EntryCount types.Int64 `tfsdk:"entry_count"`
+}
+
+type ItemVINSModel struct {
+	AccountID types.Int64 `tfsdk:"account_id"`
+	AccountName types.String `tfsdk:"account_name"`
+	Computes types.Int64 `tfsdk:"computes"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeletedBy types.String `tfsdk:"deleted_by"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	ExternalIP types.String `tfsdk:"external_ip"`
+	ExtnetId types.Int64 `tfsdk:"extnet_id"`
+	FreeIPs types.Int64 `tfsdk:"free_ips"`
+	// NOTE(review): tags are "vin_id"/"vin_name" (singular "vin") — other
+	// models use "vins_id"/"vins_name"; confirm the schema intends this.
+	ID types.Int64 `tfsdk:"vin_id"`
+	Name types.String `tfsdk:"vin_name"`
+	Network types.String `tfsdk:"network"`
+	PriVNFDevID types.Int64 `tfsdk:"pri_vnf_dev_id"`
+	RGID types.Int64 `tfsdk:"rg_id"`
+	RGName types.String `tfsdk:"rg_name"`
+	Status types.String `tfsdk:"status"`
+	UpdatedBy types.String `tfsdk:"updated_by"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+}
diff --git a/internal/service/cloudapi/account/models/model_resource_account.go b/internal/service/cloudapi/account/models/model_resource_account.go
new file mode 100644
index 0000000..b36b7ef
--- /dev/null
+++ b/internal/service/cloudapi/account/models/model_resource_account.go
@@ -0,0 +1,74 @@
+package models
+
+import (
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/attr"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// ResourceAccountModel maps the dynamix account resource.
+type ResourceAccountModel struct {
+	// request fields - required
+	AccountName types.String `tfsdk:"account_name"`
+	Username types.String `tfsdk:"username"`
+
+	// request fields - optional
+	EmailAddress types.String `tfsdk:"emailaddress"`
+	SendAccessEmails types.Bool `tfsdk:"send_access_emails"`
+	Users types.List `tfsdk:"users"`
+	Restore types.Bool `tfsdk:"restore"`
+	Permanently types.Bool `tfsdk:"permanently"`
+	Enable types.Bool `tfsdk:"enable"`
+	ResourceLimits types.Object `tfsdk:"resource_limits"`
+	Timeouts timeouts.Value `tfsdk:"timeouts"`
+
+	// response fields
+	Id types.String `tfsdk:"id"`
+	LastUpdated types.String `tfsdk:"last_updated"`
+	DCLocation types.String `tfsdk:"dc_location"`
+	AccountID types.Int64 `tfsdk:"account_id"`
+	CKey types.String `tfsdk:"ckey"`
+	ACL types.List `tfsdk:"acl"`
+	Company types.String `tfsdk:"company"`
+	CompanyURL types.String `tfsdk:"companyurl"`
+	ComputeFeatures types.List `tfsdk:"compute_features"`
+	Computes types.Object `tfsdk:"computes"`
+	CPUAllocationParameter types.String `tfsdk:"cpu_allocation_parameter"`
+	CPUAllocationRatio types.Float64 `tfsdk:"cpu_allocation_ratio"`
+	CreatedBy types.String `tfsdk:"created_by"`
+	CreatedTime types.Int64 `tfsdk:"created_time"`
+	DeactivationTime types.Float64 `tfsdk:"deactivation_time"`
+	DeletedBy types.String `tfsdk:"deleted_by"`
+	DeletedTime types.Int64 `tfsdk:"deleted_time"`
+	DisplayName types.String `tfsdk:"displayname"`
+	GUID types.Int64 `tfsdk:"guid"`
+	Machines types.Object `tfsdk:"machines"`
+	Status types.String `tfsdk:"status"`
+	UpdatedTime types.Int64 `tfsdk:"updated_time"`
+	Version types.Int64 `tfsdk:"version"`
+	VINS types.List `tfsdk:"vins"`
+	VINSes types.Int64 `tfsdk:"vinses"`
+}
+
+type UsersModel struct {
+	UserID types.String `tfsdk:"user_id"`
+	AccessType types.String `tfsdk:"access_type"`
+	RecursiveDelete types.Bool `tfsdk:"recursive_delete"`
+}
+
+// NOTE(review): unlike ResourceLimitsInAccountModel (data source), this
+// resource variant has no CUDM/"cu_dm" field — confirm the omission is
+// intentional for the resource schema.
+type ResourceLimitsInAccountResourceModel struct {
+	CUC types.Float64 `tfsdk:"cu_c"`
+	CUD types.Float64 `tfsdk:"cu_d"`
+	CUI types.Float64 `tfsdk:"cu_i"`
+	CUM types.Float64 `tfsdk:"cu_m"`
+	CUNP types.Float64 `tfsdk:"cu_np"`
+	GPUUnits types.Float64 `tfsdk:"gpu_units"`
+}
+
+var ItemResourceLimitsInAccountResource = map[string]attr.Type{
+	"cu_c": types.Float64Type,
+	"cu_d": types.Float64Type,
+	"cu_i": types.Float64Type,
+	"cu_m": types.Float64Type,
+	"cu_np": types.Float64Type,
+	"gpu_units": types.Float64Type,
+}
diff --git a/internal/service/cloudapi/account/resource_account.go b/internal/service/cloudapi/account/resource_account.go
new file mode 100644
index 0000000..6eb908e
--- /dev/null
+++ b/internal/service/cloudapi/account/resource_account.go
@@ -0,0 +1,253 @@
+package account
+
+import (
+	"context"
+	"strconv"
+	"time"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/path"
+	"github.com/hashicorp/terraform-plugin-framework/resource"
+	"github.com/hashicorp/terraform-plugin-framework/resource/schema"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/schemas"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/utilities"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var ( + _ resource.Resource = &resourceAccount{} + _ resource.ResourceWithImportState = &resourceAccount{} +) + +// NewResourceAccount is a helper function to simplify the provider implementation. +func NewResourceAccount() resource.Resource { + return &resourceAccount{} +} + +// resourceAccount is the resource implementation. +type resourceAccount struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceAccount) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + resp.Diagnostics.AddError( + "Only users with admin privileges are able to create accounts. Contact your platform administrator or import existing account.", + "Use 'terraform import basis_account. ' command to import existing account configuration", + ) + return +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceAccount) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceAccountModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error get state") + return + } + tflog.Info(ctx, "Read resourceAccount: got state successfully", map[string]any{"account_id": state.Id.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceAccount: set timeouts successfully", map[string]any{ + "account_id": state.Id.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.AccountReadStatus(ctx, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error reading status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.AccountResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceAccount: Error set state") + return + } + tflog.Info(ctx, "End read resourceAccount") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceAccount) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceAccountModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error receiving the plan") + return + } + + logMap := map[string]any{"account_id": plan.Id.ValueString()} + tflog.Info(ctx, "Update resourceAccount: got plan successfully", logMap) + + // Retrieve values from state + var state models.ResourceAccountModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceAccount: got state successfully", logMap) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceAccount: set timeouts successfully", map[string]any{ + "account_id": state.Id.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + accountId, err := strconv.Atoi(state.Id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceAccount: Cannot parse ID from state", err.Error()) + return + } + + // enable/disable account + if !plan.Enable.Equal(state.Enable) && !plan.Enable.IsNull() { + resp.Diagnostics.Append(utilities.EnableDisableAccount(ctx, uint64(accountId), plan.Enable.ValueBool(), r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error enabling/disabling account") + return + } + } + + // general update account + resp.Diagnostics.Append(utilities.UpdateAccount(ctx, uint64(accountId), &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error with general account update") + return + } + + // add/delete users + if !plan.Users.Equal(state.Users) { + resp.Diagnostics.Append(utilities.AddDeleteUsersAccount(ctx, uint64(accountId), &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceAccount: Error adding/deleting users to/from account") + return + } + } + + tflog.Info(ctx, "Update resourceAccount: account update is completed", logMap) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.AccountResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceAccount) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceAccountModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceAccount: Error get state") + return + } + tflog.Info(ctx, "Delete resourceAccount: got state successfully", map[string]any{"account_id": state.Id.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceAccount: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceAccount: set timeouts successfully", map[string]any{ + "account_id": state.Id.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + permanently := state.Permanently.ValueBool() + if state.Permanently.IsNull() { + permanently = true + } // default true + + // Delete existing resource group + delReq := account.DeleteRequest{ + AccountID: uint64(state.AccountID.ValueInt64()), + Permanently: permanently, + } + + tflog.Info(ctx, "Delete resourceAccount: before call CloudAPI().Account().Delete", map[string]any{"req": delReq}) + _, err := r.client.CloudAPI().Account().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceAccount: Error deleting account with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceAccount", map[string]any{"account_id": state.Id.ValueString()}) +} + +// Schema defines the schema for the resource. 
+func (r *resourceAccount) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceAccount(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceAccount) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_account" +} + +// Configure adds the provider configured client to the resource. +func (r *resourceAccount) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceAccount") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceAccount successfully") +} + +func (r *resourceAccount) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account.go b/internal/service/cloudapi/account/schemas/schema_data_source_account.go new file mode 100644 index 0000000..985ffd9 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account.go @@ -0,0 +1,162 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceAccount() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "dc_location": 
schema.StringAttribute{ + Computed: true, + }, + "ckey": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "can_be_deleted": schema.BoolAttribute{ + Computed: true, + }, + "explicit": schema.BoolAttribute{ + Computed: true, + }, + "guid": schema.StringAttribute{ + Computed: true, + }, + "right": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "user_group_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "company": schema.StringAttribute{ + Computed: true, + }, + "companyurl": schema.StringAttribute{ + Computed: true, + }, + "compute_features": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deactivation_time": schema.Float64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "displayname": schema.StringAttribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "resource_limits": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + "send_access_emails": schema.BoolAttribute{ + 
Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "version": schema.Int64Attribute{ + Computed: true, + }, + "vins": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "computes": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "started": schema.Int64Attribute{ + Computed: true, + }, + "stopped": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "machines": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "halted": schema.Int64Attribute{ + Computed: true, + }, + "running": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "vinses": schema.Int64Attribute{ + Computed: true, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_audits.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_audits.go new file mode 100644 index 0000000..ae570f3 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_audits.go @@ -0,0 +1,42 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountAuditsList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "call": schema.StringAttribute{ + Computed: true, + }, + "responsetime": schema.Float64Attribute{ + Computed: true, + }, + 
"statuscode": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Float64Attribute{ + Computed: true, + }, + "user": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_computes_list.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_computes_list.go new file mode 100644 index 0000000..d54e68f --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_computes_list.go @@ -0,0 +1,136 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountComputesList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // optional attributes + "compute_id": schema.Int64Attribute{ + Optional: true, + Description: "find by compute id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "rg_name": schema.StringAttribute{ + Optional: true, + Description: "find by resource group name", + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + Description: "find by resource group id", + }, + "tech_status": schema.StringAttribute{ + Optional: true, + Description: "find by tech status", + }, + "ip_address": schema.StringAttribute{ + Optional: true, + Description: "find by ip address", + }, + "extnet_name": schema.StringAttribute{ + Optional: true, + Description: "find by external network name", + }, + "extnet_id": schema.Int64Attribute{ + Optional: true, + Description: "find by external network id", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: 
true, + Description: "page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "cpus": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "compute_id": schema.Int64Attribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "registered": schema.BoolAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "total_disks_size": schema.Int64Attribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "vins_connected": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units.go new file mode 100644 index 0000000..a5fad98 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units.go @@ -0,0 +1,41 @@ +package schemas + +import ( + 
"github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountConsumedUnits() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units_by_type.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units_by_type.go new file mode 100644 index 0000000..bcc7148 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_consumed_units_by_type.go @@ -0,0 +1,27 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountConsumedUnitsByType() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + "cu_type": schema.StringAttribute{ + Required: true, + Description: "cloud unit resource type", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "cu_result": schema.Float64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_disks_list.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_disks_list.go new file mode 100644 index 
0000000..7931a2a --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_disks_list.go @@ -0,0 +1,81 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountDisksList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "account id", + }, + + // optional attributes + "disk_id": schema.Int64Attribute{ + Optional: true, + Description: "find by disk id", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + Description: "find by max size disk", + }, + "type": schema.StringAttribute{ + Optional: true, + Description: "find by type of the disks", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git 
a/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go new file mode 100644 index 0000000..df68540 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_flipgroups_list.go @@ -0,0 +1,127 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountFlipgroupsList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "account_id": schema.Int64Attribute{ + Required: true, + Description: "ID of the account", + }, + + // optional attributes + "name": schema.StringAttribute{ + Optional: true, + Description: "find by name", + }, + "vins_id": schema.Int64Attribute{ + Optional: true, + Description: "find by vins ID", + }, + "vins_name": schema.StringAttribute{ + Optional: true, + Description: "find by vins name", + }, + "extnet_id": schema.Int64Attribute{ + Optional: true, + Description: "find by extnet ID", + }, + "by_ip": schema.StringAttribute{ + Optional: true, + Description: "find by ip address", + }, + "flipgroup_id": schema.Int64Attribute{ + Optional: true, + Description: "find by flipgroup id", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "client_type": schema.StringAttribute{ + Computed: true, + }, + "conn_type": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: 
true, + }, + "default_gw": schema.StringAttribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "fg_id": schema.Int64Attribute{ + Computed: true, + }, + "ip": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "fg_name": schema.StringAttribute{ + Computed: true, + }, + "net_id": schema.Int64Attribute{ + Computed: true, + }, + "net_type": schema.StringAttribute{ + Computed: true, + }, + "netmask": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption.go new file mode 100644 index 0000000..a460318 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption.go @@ -0,0 +1,131 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountGetResourceConsumption() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ 
+ Computed: true, + }, + "ext_ips": schema.Int64Attribute{ + Computed: true, + }, + "ext_traffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + + "reserved": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "ext_ips": schema.Int64Attribute{ + Computed: true, + }, + "ext_traffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + + "resource_limits": schema.SingleNestedAttribute{ + Optional: true, + Attributes: map[string]schema.Attribute{ + "cu_c": schema.Float64Attribute{ + Computed: true, + }, + "cu_d": schema.Float64Attribute{ + Computed: true, + }, + "cu_dm": schema.Float64Attribute{ + Computed: true, + }, + "cu_i": schema.Float64Attribute{ + Computed: true, + }, + "cu_m": 
schema.Float64Attribute{ + Computed: true, + }, + "cu_np": schema.Float64Attribute{ + Computed: true, + }, + "gpu_units": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption_list.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption_list.go new file mode 100644 index 0000000..c6341ee --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_get_resource_consumption_list.go @@ -0,0 +1,113 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountGetResourceListConsumption() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "consumed": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + "ext_ips": schema.Int64Attribute{ + Computed: true, + }, + "ext_traffic": schema.Int64Attribute{ + Computed: true, + }, + "gpu": schema.Int64Attribute{ + Computed: true, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "seps": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "sep_id": schema.StringAttribute{ + Computed: true, + }, + "data_name": schema.StringAttribute{ + Computed: true, + }, + "disk_size": schema.Float64Attribute{ + Computed: true, + }, + "disk_size_max": schema.Float64Attribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + "reserved": 
// MakeSchemaDataSourceAccountList returns the schema for the account list data
// source: a set of optional request filters plus the computed result list.
func MakeSchemaDataSourceAccountList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes (request filters passed through to CloudAPI)
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "filter by id",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "filter by name",
		},
		"acl": schema.StringAttribute{
			Optional:    true,
			Description: "filter by acl",
		},
		"status": schema.StringAttribute{
			Optional:    true,
			Description: "filter by status",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "page size",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},

		// computed attributes (populated from the CloudAPI response)
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					// Access-control entries attached to the account.
					"acl": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"explicit": schema.BoolAttribute{
									Computed: true,
								},
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"right": schema.StringAttribute{
									Computed: true,
								},
								"status": schema.StringAttribute{
									Computed: true,
								},
								"type": schema.StringAttribute{
									Computed: true,
								},
								"user_group_id": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"compute_features": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		// Total number of accounts matching the filters.
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}
// MakeSchemaDataSourceAccountListDeleted returns the schema for the deleted
// account list data source. It mirrors the regular account list schema minus
// the status filter (all returned accounts are deleted).
func MakeSchemaDataSourceAccountListDeleted() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// optional attributes (request filters passed through to CloudAPI)
		"by_id": schema.Int64Attribute{
			Optional:    true,
			Description: "filter by id",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "filter by name",
		},
		"acl": schema.StringAttribute{
			Optional:    true,
			Description: "filter by acl",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "page size",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},

		// computed attributes (populated from the CloudAPI response)
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					// Access-control entries attached to the account.
					"acl": schema.ListNestedAttribute{
						Computed: true,
						NestedObject: schema.NestedAttributeObject{
							Attributes: map[string]schema.Attribute{
								"explicit": schema.BoolAttribute{
									Computed: true,
								},
								"guid": schema.StringAttribute{
									Computed: true,
								},
								"right": schema.StringAttribute{
									Computed: true,
								},
								"status": schema.StringAttribute{
									Computed: true,
								},
								"type": schema.StringAttribute{
									Computed: true,
								},
								"user_group_id": schema.StringAttribute{
									Computed: true,
								},
							},
						},
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
					"compute_features": schema.ListAttribute{
						Computed:    true,
						ElementType: types.StringType,
					},
				},
			},
		},
		// Total number of deleted accounts matching the filters.
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}
// MakeSchemaDataSourceAccountReservedUnits returns the schema for the account
// reserved-units data source: the account ID in, one computed cloud-unit
// counter per unit type out.
func MakeSchemaDataSourceAccountReservedUnits() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"account_id": schema.Int64Attribute{
			Required: true,
		},

		// computed attributes — one value per cloud-unit type
		// (cu_c/cu_d/cu_dm/cu_i/cu_m/cu_np/gpu_units).
		"cu_c": schema.Float64Attribute{
			Computed: true,
		},
		"cu_d": schema.Float64Attribute{
			Computed: true,
		},
		"cu_dm": schema.Float64Attribute{
			Computed: true,
		},
		"cu_i": schema.Float64Attribute{
			Computed: true,
		},
		"cu_m": schema.Float64Attribute{
			Computed: true,
		},
		"cu_np": schema.Float64Attribute{
			Computed: true,
		},
		"gpu_units": schema.Float64Attribute{
			Computed: true,
		},
	}
}
// MakeSchemaDataSourceAccountRGList returns the schema for the account
// resource-group list data source: request filters plus the computed list of
// resource groups with their consumed/limits/reserved resource breakdowns.
func MakeSchemaDataSourceAccountRGList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"account_id": schema.Int64Attribute{
			Required:    true,
			Description: "ID of the account",
		},

		// optional attributes (request filters passed through to CloudAPI)
		"rg_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by rg id",
		},
		"vins_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by vins id",
		},
		"vm_id": schema.Int64Attribute{
			Optional:    true,
			Description: "find by vm id",
		},
		"name": schema.StringAttribute{
			Optional:    true,
			Description: "find by name",
		},
		"status": schema.StringAttribute{
			Optional:    true,
			Description: "find by status",
		},
		"page": schema.Int64Attribute{
			Optional:    true,
			Description: "page number",
		},
		"size": schema.Int64Attribute{
			Optional:    true,
			Description: "page size",
		},
		"sort_by": schema.StringAttribute{
			Optional:    true,
			Description: "sort by one of supported fields, format +|-(field)",
		},

		// computed attributes (populated from the CloudAPI response)
		"id": schema.StringAttribute{
			Computed: true,
		},
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					// Count of started/stopped computes in this resource group.
					"computes": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							"started": schema.Int64Attribute{
								Computed: true,
							},
							"stopped": schema.Int64Attribute{
								Computed: true,
							},
						},
					},
					// Resource accounting: consumed vs configured limits vs reserved.
					"resources": schema.SingleNestedAttribute{
						Computed: true,
						Attributes: map[string]schema.Attribute{
							// NOTE(review): "consumed" exposes disksize/disksizemax as
							// Int64 while "reserved" uses Float64 — confirm against the
							// decort SDK response types.
							"consumed": schema.SingleNestedAttribute{
								Computed: true,
								Attributes: map[string]schema.Attribute{
									"cpu": schema.Int64Attribute{
										Computed: true,
									},
									"disksize": schema.Int64Attribute{
										Computed: true,
									},
									"disksizemax": schema.Int64Attribute{
										Computed: true,
									},
									"extips": schema.Int64Attribute{
										Computed: true,
									},
									"exttraffic": schema.Int64Attribute{
										Computed: true,
									},
									"gpu": schema.Int64Attribute{
										Computed: true,
									},
									"ram": schema.Int64Attribute{
										Computed: true,
									},
									"seps": schema.ListNestedAttribute{
										Computed: true,
										NestedObject: schema.NestedAttributeObject{
											Attributes: map[string]schema.Attribute{
												"sep_id": schema.StringAttribute{
													Computed: true,
												},
												"pool_name": schema.StringAttribute{
													Computed: true,
												},
												"disksize": schema.Float64Attribute{
													Computed: true,
												},
												"disksizemax": schema.Float64Attribute{
													Computed: true,
												},
											},
										},
									},
								},
							},
							// Configured quotas; "seps" here is a plain counter, not a list.
							"limits": schema.SingleNestedAttribute{
								Computed: true,
								Attributes: map[string]schema.Attribute{
									"cpu": schema.Int64Attribute{
										Computed: true,
									},
									"disksize": schema.Int64Attribute{
										Computed: true,
									},
									"disksizemax": schema.Int64Attribute{
										Computed: true,
									},
									"extips": schema.Int64Attribute{
										Computed: true,
									},
									"exttraffic": schema.Int64Attribute{
										Computed: true,
									},
									"gpu": schema.Int64Attribute{
										Computed: true,
									},
									"ram": schema.Int64Attribute{
										Computed: true,
									},
									"seps": schema.Int64Attribute{
										Computed: true,
									},
								},
							},
							"reserved": schema.SingleNestedAttribute{
								Computed: true,
								Attributes: map[string]schema.Attribute{
									"cpu": schema.Int64Attribute{
										Computed: true,
									},
									"disksize": schema.Float64Attribute{
										Computed: true,
									},
									"disksizemax": schema.Float64Attribute{
										Computed: true,
									},
									"extips": schema.Int64Attribute{
										Computed: true,
									},
									"exttraffic": schema.Int64Attribute{
										Computed: true,
									},
									"gpu": schema.Int64Attribute{
										Computed: true,
									},
									"ram": schema.Int64Attribute{
										Computed: true,
									},
									"seps": schema.ListNestedAttribute{
										Computed: true,
										NestedObject: schema.NestedAttributeObject{
											Attributes: map[string]schema.Attribute{
												"sep_id": schema.StringAttribute{
													Computed: true,
												},
												"pool_name": schema.StringAttribute{
													Computed: true,
												},
												"disksize": schema.Float64Attribute{
													Computed: true,
												},
												"disksizemax": schema.Float64Attribute{
													Computed: true,
												},
											},
										},
									},
								},
							},
						},
					},
					"created_by": schema.StringAttribute{
						Computed: true,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_by": schema.StringAttribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"rg_id": schema.Int64Attribute{
						Computed: true,
					},
					"milestones": schema.Int64Attribute{
						Computed: true,
					},
					"rg_name": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"updated_by": schema.StringAttribute{
						Computed: true,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
					"vinses": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		// Total number of resource groups matching the filters.
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}
Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "unc_path": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "template_id": schema.Int64Attribute{ + Computed: true, + }, + "template_name": schema.StringAttribute{ + Computed: true, + }, + "public": schema.BoolAttribute{ + Computed: true, + }, + "size": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "username": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/schemas/schema_data_source_account_vins_list.go b/internal/service/cloudapi/account/schemas/schema_data_source_account_vins_list.go new file mode 100644 index 0000000..b4ea245 --- /dev/null +++ b/internal/service/cloudapi/account/schemas/schema_data_source_account_vins_list.go @@ -0,0 +1,102 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceAccountVinsList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Required: true, + }, + "vins_id": schema.Int64Attribute{ + Optional: true, + }, + "name": schema.StringAttribute{ + Optional: true, + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + }, + "ext_ip": schema.StringAttribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: 
// MakeSchemaDataSourceAccountVinsList returns the schema for the account ViNS
// (virtual network segment) list data source: request filters plus the computed
// list of networks belonging to the account.
func MakeSchemaDataSourceAccountVinsList() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attribute — account whose ViNS are listed
		"account_id": schema.Int64Attribute{
			Required: true,
		},
		// optional request filters
		"vins_id": schema.Int64Attribute{
			Optional: true,
		},
		"name": schema.StringAttribute{
			Optional: true,
		},
		"rg_id": schema.Int64Attribute{
			Optional: true,
		},
		"ext_ip": schema.StringAttribute{
			Optional: true,
		},
		"sort_by": schema.StringAttribute{
			Optional: true,
		},
		"page": schema.Int64Attribute{
			Optional: true,
		},
		"size": schema.Int64Attribute{
			Optional: true,
		},
		// computed result list
		"items": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"account_id": schema.Int64Attribute{
						Computed: true,
					},
					"account_name": schema.StringAttribute{
						Computed: true,
					},
					"computes": schema.Int64Attribute{
						Computed: true,
					},
					"created_by": schema.StringAttribute{
						Computed: true,
					},
					"created_time": schema.Int64Attribute{
						Computed: true,
					},
					"deleted_by": schema.StringAttribute{
						Computed: true,
					},
					"deleted_time": schema.Int64Attribute{
						Computed: true,
					},
					"external_ip": schema.StringAttribute{
						Computed: true,
					},
					"extnet_id": schema.Int64Attribute{
						Computed: true,
					},
					"free_ips": schema.Int64Attribute{
						Computed: true,
					},
					"vin_id": schema.Int64Attribute{
						Computed: true,
					},
					"vin_name": schema.StringAttribute{
						Computed: true,
					},
					"network": schema.StringAttribute{
						Computed: true,
					},
					"pri_vnf_dev_id": schema.Int64Attribute{
						Computed: true,
					},
					"rg_id": schema.Int64Attribute{
						Computed: true,
					},
					"rg_name": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"updated_by": schema.StringAttribute{
						Computed: true,
					},
					"updated_time": schema.Int64Attribute{
						Computed: true,
					},
				},
			},
		},
		// Total number of ViNS matching the filters.
		"entry_count": schema.Int64Attribute{
			Computed: true,
		},
	}
}
// MakeSchemaResourceAccount returns the schema for the account resource:
// required identity attributes, optional management flags (users, restore,
// enable, limits) and the computed account state mirrored from CloudAPI.
func MakeSchemaResourceAccount() map[string]schema.Attribute {
	return map[string]schema.Attribute{
		// required attributes
		"account_name": schema.StringAttribute{
			Required:    true,
			Description: "name of the account",
		},
		"username": schema.StringAttribute{
			Required:    true,
			Description: "username of owner the account",
		},

		// optional attributes
		"emailaddress": schema.StringAttribute{
			Optional:    true,
			Description: "email",
		},
		"send_access_emails": schema.BoolAttribute{
			Optional:    true,
			Description: "if true send emails when a user is granted access to resources",
		},
		// Additional users to grant access to this account.
		"users": schema.ListNestedAttribute{
			Optional: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"user_id": schema.StringAttribute{
						Required: true,
					},
					"access_type": schema.StringAttribute{
						Required: true,
					},
					"recursive_delete": schema.BoolAttribute{
						Optional: true,
						// default is false
					},
				},
			},
		},
		"restore": schema.BoolAttribute{
			Optional:    true,
			Description: "restore a deleted account",
		},
		"permanently": schema.BoolAttribute{
			Optional:    true,
			Description: "whether to completely delete the account",
			// default is false
		},
		"enable": schema.BoolAttribute{
			Optional:    true,
			Description: "enable/disable account",
		},
		// Cloud-unit quotas; Optional+Computed so the server-side defaults are
		// reflected into state when the practitioner omits them.
		// NOTE(review): no "cu_dm" here, unlike the data-source schemas — confirm
		// whether that is intentional.
		"resource_limits": schema.SingleNestedAttribute{
			Optional: true,
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"cu_c": schema.Float64Attribute{
					Optional: true,
					Computed: true,
				},
				"cu_d": schema.Float64Attribute{
					Optional: true,
					Computed: true,
				},
				"cu_i": schema.Float64Attribute{
					Optional: true,
					Computed: true,
				},
				"cu_m": schema.Float64Attribute{
					Optional: true,
					Computed: true,
				},
				"cu_np": schema.Float64Attribute{
					Optional: true,
					Computed: true,
				},
				"gpu_units": schema.Float64Attribute{
					Optional: true,
					Computed: true,
				},
			},
		},

		// computed attributes
		// Terraform resource ID; kept stable across plans.
		"id": schema.StringAttribute{
			Computed: true,
			PlanModifiers: []planmodifier.String{
				stringplanmodifier.UseStateForUnknown(),
			},
		},
		"last_updated": schema.StringAttribute{
			Computed:    true,
			Description: "Timestamp of the last Terraform update of the disk resource.",
		},
		"account_id": schema.Int64Attribute{
			Computed: true,
		},
		"dc_location": schema.StringAttribute{
			Computed: true,
		},
		"ckey": schema.StringAttribute{
			Computed: true,
		},
		// Access-control entries attached to the account.
		"acl": schema.ListNestedAttribute{
			Computed: true,
			NestedObject: schema.NestedAttributeObject{
				Attributes: map[string]schema.Attribute{
					"can_be_deleted": schema.BoolAttribute{
						Computed: true,
					},
					"explicit": schema.BoolAttribute{
						Computed: true,
					},
					"guid": schema.StringAttribute{
						Computed: true,
					},
					"right": schema.StringAttribute{
						Computed: true,
					},
					"status": schema.StringAttribute{
						Computed: true,
					},
					"type": schema.StringAttribute{
						Computed: true,
					},
					"user_group_id": schema.StringAttribute{
						Computed: true,
					},
				},
			},
		},
		"company": schema.StringAttribute{
			Computed: true,
		},
		"companyurl": schema.StringAttribute{
			Computed: true,
		},
		"compute_features": schema.ListAttribute{
			Computed:    true,
			ElementType: types.StringType,
		},
		"created_by": schema.StringAttribute{
			Computed: true,
		},
		"created_time": schema.Int64Attribute{
			Computed: true,
		},
		"deactivation_time": schema.Float64Attribute{
			Computed: true,
		},
		"deleted_by": schema.StringAttribute{
			Computed: true,
		},
		"deleted_time": schema.Int64Attribute{
			Computed: true,
		},
		"displayname": schema.StringAttribute{
			Computed: true,
		},
		"guid": schema.Int64Attribute{
			Computed: true,
		},
		"status": schema.StringAttribute{
			Computed: true,
		},
		"updated_time": schema.Int64Attribute{
			Computed: true,
		},
		"version": schema.Int64Attribute{
			Computed: true,
		},
		// IDs of ViNS owned by the account.
		"vins": schema.ListAttribute{
			Computed:    true,
			ElementType: types.Int64Type,
		},
		"computes": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"started": schema.Int64Attribute{
					Computed: true,
				},
				"stopped": schema.Int64Attribute{
					Computed: true,
				},
			},
		},
		"machines": schema.SingleNestedAttribute{
			Computed: true,
			Attributes: map[string]schema.Attribute{
				"halted": schema.Int64Attribute{
					Computed: true,
				},
				"running": schema.Int64Attribute{
					Computed: true,
				},
			},
		},
		"vinses": schema.Int64Attribute{
			Computed: true,
		},
		"cpu_allocation_parameter": schema.StringAttribute{
			Computed: true,
		},
		"cpu_allocation_ratio": schema.Float64Attribute{
			Computed: true,
		},
	}
}
+ "started": schema.Int64Attribute{ + Computed: true, + }, + "stopped": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "machines": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "halted": schema.Int64Attribute{ + Computed: true, + }, + "running": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "vinses": schema.Int64Attribute{ + Computed: true, + }, + "cpu_allocation_parameter": schema.StringAttribute{ + Computed: true, + }, + "cpu_allocation_ratio": schema.Float64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account.go b/internal/service/cloudapi/account/utilities/utility_data_source_account.go new file mode 100644 index 0000000..8723b95 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" +) + +func AccountDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordAccount, error) { + tflog.Info(ctx, fmt.Sprintf("AccountDataSourceCheckPresence: Get info about account with ID - %v", accountId)) + + recordAccount, err := c.CloudAPI().Account().Get(ctx, account.GetRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about extnet with error: %w", err) + } + + tflog.Info(ctx, "AccountDataSourceCheckPresence: response from CloudAPI().Account().Get", + map[string]any{"account_id": accountId, "response": recordAccount}) + + return recordAccount, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_audits_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_audits_list.go new file mode 100644 index 
0000000..ad5fee3 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_audits_list.go @@ -0,0 +1,21 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" +) + +func AccountAuditsListDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.ListAudits, error) { + tflog.Info(ctx, fmt.Sprintf("AccountAuditsListDataSourceCheckPresence: Get info about account audits with account ID - %v", accountId)) + + auditsList, err := c.CloudAPI().Account().Audits(ctx, account.AuditsRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about account audits with error: %w", err) + } + + return &auditsList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_computes_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_computes_list.go new file mode 100644 index 0000000..9f79573 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_computes_list.go @@ -0,0 +1,63 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountComputesListCheckPresence(ctx context.Context, plan *models.DataSourceAccountComputesListModel, c *decort.DecortClient) (*account.ListComputes, error) { + tflog.Info(ctx, "AccountComputesListCheckPresence: Get info about account computes list") + + computesListReq := account.ListComputesRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + if 
!plan.ComputeID.IsNull() { + computesListReq.ComputeID = uint64(plan.ComputeID.ValueInt64()) + } + if !plan.Name.IsNull() { + computesListReq.Name = plan.Name.ValueString() + } + if !plan.RGName.IsNull() { + computesListReq.RGName = plan.RGName.ValueString() + } + if !plan.RGID.IsNull() { + computesListReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.TechStatus.IsNull() { + computesListReq.TechStatus = plan.TechStatus.ValueString() + } + if !plan.IPAddress.IsNull() { + computesListReq.IPAddress = plan.IPAddress.ValueString() + } + if !plan.ExtNetName.IsNull() { + computesListReq.ExtNetName = plan.ExtNetName.ValueString() + } + if !plan.ExtNetID.IsNull() { + computesListReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64()) + } + if !plan.Page.IsNull() { + computesListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.SortBy.IsNull() { + computesListReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Size.IsNull() { + computesListReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "AccountComputesListCheckPresence: before call CloudAPI().Account().ListComputes", map[string]any{"req": computesListReq}) + computesList, err := c.CloudAPI().Account().ListComputes(ctx, computesListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account computes list with error: %w", err) + } + + tflog.Info(ctx, "AccountComputesListCheckPresence: response from CloudAPI().Account().ListComputes") + + return computesList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units.go new file mode 100644 index 0000000..0563e62 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" +) + +func AccountConsumedUnitsDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.ResourceLimits, error) { + tflog.Info(ctx, fmt.Sprintf("AccountConsumedUnitsDataSourceCheckPresence: Get info about account with ID - %v", accountId)) + + limits, err := c.CloudAPI().Account().GetConsumedAccountUnits(ctx, account.GetConsumedAccountUnitsRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("cannot get info about account consumed units with error: %w", err) + } + + tflog.Info(ctx, "AccountConsumedUnitsDataSourceCheckPresence: response from CloudAPI().Account().GetConsumedAccountUnits", + map[string]any{"account_id": accountId, "response": limits}) + + return limits, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units_by_type.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units_by_type.go new file mode 100644 index 0000000..5ff2efa --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_consumed_units_by_type.go @@ -0,0 +1,32 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" +) + +func AccountConsumedUnitsByTypeDataSourceCheckPresence(ctx context.Context, accountId uint64, cuType string, c *decort.DecortClient) (float64, error) { + tflog.Info(ctx, fmt.Sprintf("AccountConsumedUnitsByTypeDataSourceCheckPresence: Get info about account with ID - %v", accountId)) + + req := account.GetConsumedCloudUnitsByTypeRequest{ + AccountID: accountId, + CUType: cuType, + } + + tflog.Info(ctx, "AccountConsumedUnitsByTypeDataSourceCheckPresence: before call to from 
CloudAPI().Account().GetConsumedCloudUnitsByType", + map[string]any{"account_id": accountId, "req": req}) + + res, err := c.CloudAPI().Account().GetConsumedCloudUnitsByType(ctx, req) + if err != nil { + return 0, fmt.Errorf("cannot get info about account consumed units by type with error: %w", err) + } + + tflog.Info(ctx, "AccountConsumedUnitsByTypeDataSourceCheckPresence: response from CloudAPI().Account().GetConsumedCloudUnitsByType", + map[string]any{"account_id": accountId, "response": res}) + + return res, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_disks_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_disks_list.go new file mode 100644 index 0000000..f924107 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_disks_list.go @@ -0,0 +1,51 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountDisksListCheckPresence(ctx context.Context, plan *models.DataSourceAccountDisksListModel, c *decort.DecortClient) (*account.ListDisks, error) { + tflog.Info(ctx, "AccountDisksListCheckPresence: Get info about account disks list") + + disksListReq := account.ListDisksRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + if !plan.DiskID.IsNull() { + disksListReq.DiskID = uint64(plan.DiskID.ValueInt64()) + } + if !plan.Name.IsNull() { + disksListReq.Name = plan.Name.ValueString() + } + if !plan.DiskMaxSize.IsNull() { + disksListReq.DiskMaxSize = uint64(plan.DiskMaxSize.ValueInt64()) + } + if !plan.Type.IsNull() { + disksListReq.Type = plan.Type.ValueString() + } + if !plan.Page.IsNull() { + disksListReq.Page = uint64(plan.Page.ValueInt64()) + } 
+ if !plan.Size.IsNull() { + disksListReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + disksListReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountDisksListCheckPresence: before call CloudAPI().Account().ListDisks", map[string]any{"req": disksListReq}) + disksList, err := c.CloudAPI().Account().ListDisks(ctx, disksListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account disks list with error: %w", err) + } + + tflog.Info(ctx, "AccountDisksListCheckPresence: response from CloudAPI().Account().ListDisks") + + return disksList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go new file mode 100644 index 0000000..78533c2 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_flipgroups_list.go @@ -0,0 +1,52 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountFlipgroupsListCheckPresence(ctx context.Context, plan *models.DataSourceAccountFlipgroupsListModel, c *decort.DecortClient) (*account.ListFLIPGroups, error) { + tflog.Info(ctx, "AccountFlipgroupsListCheckPresence: Get info about account flipgroups list") + + flipgroupsListReq := account.ListFLIPGroupsRequest{AccountID: uint64(plan.AccountID.ValueInt64())} + + if !plan.Name.IsNull() { + flipgroupsListReq.Name = plan.Name.ValueString() + } + if !plan.VINSID.IsNull() { + flipgroupsListReq.VINSID = uint64(plan.VINSID.ValueInt64()) + } + if !plan.VINSName.IsNull() { + flipgroupsListReq.VINSName = plan.VINSName.ValueString() + } + if !plan.ExtNetID.IsNull() { 
+ flipgroupsListReq.ExtNetID = uint64(plan.ExtNetID.ValueInt64()) + } + if !plan.ByIP.IsNull() { + flipgroupsListReq.ByIP = plan.ByIP.ValueString() + } + if !plan.FLIPGroupID.IsNull() { + flipgroupsListReq.FLIPGroupID = uint64(plan.FLIPGroupID.ValueInt64()) + } + if !plan.Page.IsNull() { + flipgroupsListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + flipgroupsListReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "AccountFlipgroupsListCheckPresence: before call CloudAPI().Account().ListFLIPGroups", map[string]any{"req": flipgroupsListReq}) + flipgroupsList, err := c.CloudAPI().Account().ListFLIPGroups(ctx, flipgroupsListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account flipgroups list with error: %w", err) + } + + tflog.Info(ctx, "AccountFlipgroupsListCheckPresence: response from CloudAPI().Account().ListFLIPGroups") + + return flipgroupsList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption.go new file mode 100644 index 0000000..2d0c6f3 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" +) + +func AccountGetResourceConsumptionDataSourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordResourceConsumption, error) { + tflog.Info(ctx, fmt.Sprintf("AccountGetResourceConsumptionDataSourceCheckPresence: Get info about account with ID - %v", accountId)) + + record, err := c.CloudAPI().Account().GetResourceConsumption(ctx, account.GetResourceConsumptionRequest{AccountID: accountId}) + if err != 
nil { + return nil, fmt.Errorf("cannot get info about resource with error: %w", err) + } + + tflog.Info(ctx, "AccountGetResourceConsumptionDataSourceCheckPresence: response from CloudAPI().Account().GetResourceConsumption", + map[string]any{"account_id": accountId, "response": record}) + + return record, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption_list.go new file mode 100644 index 0000000..be21124 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_get_resource_consumption_list.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" +) + +func AccountGetResourceConsumptionListDataSourceCheckPresence(ctx context.Context, c *decort.DecortClient) (*account.ListResourceConsumption, error) { + tflog.Info(ctx, "AccountGetResourceConsumptionListDataSourceCheckPresence: Get info about account resource consumption list") + + record, err := c.CloudAPI().Account().ListResourceConsumption(ctx) + if err != nil { + return nil, fmt.Errorf("cannot get info about resource with error: %w", err) + } + + tflog.Info(ctx, "AccountGetResourceConsumptionListDataSourceCheckPresence: response from CloudAPI().Account().ListResourceConsumption", + map[string]any{"response": record}) + + return record, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_list.go new file mode 100644 index 0000000..afcbdab --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_list.go @@ -0,0 +1,49 @@ +package utilities + +import 
( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountListCheckPresence(ctx context.Context, plan *models.DataSourceAccountListModel, c *decort.DecortClient) (*account.ListAccounts, error) { + tflog.Info(ctx, "AccountListCheckPresence: Get info about account list") + + accListReq := account.ListRequest{} + + if !plan.ByID.IsNull() { + accListReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + accListReq.Name = plan.Name.ValueString() + } + if !plan.ACL.IsNull() { + accListReq.ACL = plan.ACL.ValueString() + } + if !plan.Status.IsNull() { + accListReq.Status = plan.Status.ValueString() + } + if !plan.Page.IsNull() { + accListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + accListReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + accListReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountListCheckPresence: before call CloudAPI().Account().List", map[string]any{"req": accListReq}) + accList, err := c.CloudAPI().Account().List(ctx, accListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account with error: %w", err) + } + + tflog.Info(ctx, "AccountListCheckPresence: response from CloudAPI().Account().List") + + return accList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_list_deleted.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_list_deleted.go new file mode 100644 index 0000000..6494286 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_list_deleted.go @@ -0,0 +1,46 @@ +package utilities + +import ( + "context" + "fmt" + + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountListDeletedCheckPresence(ctx context.Context, plan *models.DataSourceAccountListDeletedModel, c *decort.DecortClient) (*account.ListAccounts, error) { + tflog.Info(ctx, "AccountListDeletedCheckPresence: Get info about account list deleted") + + accListDelReq := account.ListDeletedRequest{} + + if !plan.ByID.IsNull() { + accListDelReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + accListDelReq.Name = plan.Name.ValueString() + } + if !plan.ACL.IsNull() { + accListDelReq.ACL = plan.ACL.ValueString() + } + if !plan.Page.IsNull() { + accListDelReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + accListDelReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + accListDelReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountListDeletedCheckPresence: before call CloudAPI().Account().ListDeleted", map[string]any{"req": accListDelReq}) + accListDel, err := c.CloudAPI().Account().ListDeleted(ctx, accListDelReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account with error: %w", err) + } + + tflog.Info(ctx, "AccountListDeletedCheckPresence: response from CloudAPI().Account().ListDeleted") + + return accListDel, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_reserved_units.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_reserved_units.go new file mode 100644 index 0000000..9e6c0d1 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_reserved_units.go @@ -0,0 +1,29 @@ +package utilities + +import ( + "context" + "fmt" + + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountReservedUnitsCheck(ctx context.Context, plan *models.DataSourceAccountReservedUnitsModel, c *decort.DecortClient) (*account.ResourceLimits, error) { + tflog.Info(ctx, "AccountReservedUnitsCheck: Get info about account units") + + req := account.GetReservedAccountUnitsRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + tflog.Info(ctx, "AccountReservedUnitsCheck: before call CloudAPI().Account().GetReservedAccountUnits", map[string]any{"req": req}) + accountUnits, err := c.CloudAPI().Account().GetReservedAccountUnits(ctx, req) + if err != nil { + return nil, fmt.Errorf("cannot get info about account units: %w", err) + } + + tflog.Info(ctx, "AccountReservedUnitsCheck: response from CloudAPI().Account().GetReservedAccountUnits") + + return accountUnits, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_rg_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_rg_list.go new file mode 100644 index 0000000..1025567 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_rg_list.go @@ -0,0 +1,52 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountRGListCheckPresence(ctx context.Context, plan *models.DataSourceAccountRGListModel, c *decort.DecortClient) (*account.ListRG, error) { + tflog.Info(ctx, "AccountRGListCheckPresence: Get info about account rg 
list") + + rgListReq := account.ListRGRequest{AccountID: uint64(plan.AccountID.ValueInt64())} + + if !plan.RGID.IsNull() { + rgListReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.VinsID.IsNull() { + rgListReq.VINSID = uint64(plan.VinsID.ValueInt64()) + } + if !plan.VMID.IsNull() { + rgListReq.VMID = uint64(plan.VMID.ValueInt64()) + } + if !plan.Name.IsNull() { + rgListReq.Name = plan.Name.ValueString() + } + if !plan.Status.IsNull() { + rgListReq.Status = plan.Status.ValueString() + } + if !plan.Page.IsNull() { + rgListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + rgListReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + rgListReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountRGListCheckPresence: before call CloudAPI().Account().ListRG", map[string]any{"req": rgListReq}) + rgList, err := c.CloudAPI().Account().ListRG(ctx, rgListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account with error: %w", err) + } + + tflog.Info(ctx, "AccountRGListCheckPresence: response from CloudAPI().Account().ListRG") + + return rgList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_templates_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_templates_list.go new file mode 100644 index 0000000..f634f99 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_templates_list.go @@ -0,0 +1,51 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func AccountTemplatesListCheckPresence(ctx context.Context, plan *models.DataSourceAccountTemplatesListModel, c *decort.DecortClient) 
(*account.ListTemplates, error) { + tflog.Info(ctx, "AccountTemplatesListCheckPresence: Get info about account templates list") + + tempListReq := account.ListTemplatesRequest{ + AccountID: uint64(plan.AccountID.ValueInt64()), + } + + if !plan.IncludeDeleted.IsNull() { + tempListReq.IncludeDeleted = plan.IncludeDeleted.ValueBool() + } + if !plan.ImageID.IsNull() { + tempListReq.ImageID = uint64(plan.ImageID.ValueInt64()) + } + if !plan.Name.IsNull() { + tempListReq.Name = plan.Name.ValueString() + } + if !plan.Type.IsNull() { + tempListReq.Type = plan.Type.ValueString() + } + if !plan.Page.IsNull() { + tempListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + tempListReq.Size = uint64(plan.Size.ValueInt64()) + } + if !plan.SortBy.IsNull() { + tempListReq.SortBy = plan.SortBy.ValueString() + } + + tflog.Info(ctx, "AccountTemplatesListCheckPresence: before call CloudAPI().Account().ListTemplates", map[string]any{"req": tempListReq}) + tempList, err := c.CloudAPI().Account().ListTemplates(ctx, tempListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about account templates list with error: %w", err) + } + + tflog.Info(ctx, "AccountTemplatesListCheckPresence: response from CloudAPI().Account().ListTemplates") + + return tempList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_data_source_account_vins_list.go b/internal/service/cloudapi/account/utilities/utility_data_source_account_vins_list.go new file mode 100644 index 0000000..df27ae0 --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_data_source_account_vins_list.go @@ -0,0 +1,52 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" +) + +func 
AccountVinsListCheck(ctx context.Context, plan *models.DataSourceAccountVinsListModel, c *decort.DecortClient) (*account.ListVINS, error) { + tflog.Info(ctx, "AccountVinsListCheck: Get info about list vins") + + vinsListReq := account.ListVINSRequest{} + + if !plan.AccountID.IsNull() { + vinsListReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.VinsID.IsNull() { + vinsListReq.VINSID = uint64(plan.VinsID.ValueInt64()) + } + if !plan.Name.IsNull() { + vinsListReq.Name = plan.Name.ValueString() + } + if !plan.RGID.IsNull() { + vinsListReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.ExtIp.IsNull() { + vinsListReq.ExtIP = plan.ExtIp.ValueString() + } + if !plan.SortBy.IsNull() { + vinsListReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + vinsListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + vinsListReq.Size = uint64(plan.Size.ValueInt64()) + } + + tflog.Info(ctx, "AccountVinsListCheck: before call CloudAPI().Account().ListVINS", map[string]any{"req": vinsListReq}) + vinsList, err := c.CloudAPI().Account().ListVINS(ctx, vinsListReq) + if err != nil { + return nil, fmt.Errorf("cannot get info about vins list with error: %w", err) + } + + tflog.Info(ctx, "AccountVinsListCheck: response from CloudAPI().Account().ListVINS") + + return vinsList, err +} diff --git a/internal/service/cloudapi/account/utilities/utility_resource_account.go b/internal/service/cloudapi/account/utilities/utility_resource_account.go new file mode 100644 index 0000000..b38ea6e --- /dev/null +++ b/internal/service/cloudapi/account/utilities/utility_resource_account.go @@ -0,0 +1,392 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/account" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/account/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" +) + +// AccountResourceCheckPresence checks if account with accountId exists +func AccountResourceCheckPresence(ctx context.Context, accountId uint64, c *decort.DecortClient) (*account.RecordAccount, error) { + tflog.Info(ctx, fmt.Sprintf("AccountResourceCheckPresence: Get info about resource with ID - %v", accountId)) + + accountRecord, err := c.CloudAPI().Account().Get(ctx, account.GetRequest{AccountID: accountId}) + if err != nil { + return nil, fmt.Errorf("AccountResourceCheckPresence: cannot get info about resource with error: %w", err) + } + + tflog.Info(ctx, "AccountResourceCheckPresence: response from CloudAPI().Account().Get", map[string]any{"account_id": accountId, "response": accountRecord}) + + return accountRecord, err +} + +// AccountReadStatus loads account resource by its id, gets it current status. Performs restore and enable if needed for +// Deleted status. +// In case of failure returns errors. 
+func AccountReadStatus(ctx context.Context, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "AccountReadStatus: Read status resource with ID", map[string]any{"account_id": state.Id.ValueString()}) + + diags := diag.Diagnostics{} + + accountId, err := strconv.ParseUint(state.Id.ValueString(), 10, 64) + if err != nil { + diags.AddError("AccountReadStatus: Cannot parse resource ID from state", err.Error()) + return diags + } + + recordAccount, err := AccountResourceCheckPresence(ctx, accountId, c) + if err != nil { + diags.AddError("AccountReadStatus: Unable to Read account before status check", err.Error()) + return diags + } + + // check resource status + switch recordAccount.Status { + case status.Disabled: + tflog.Info(ctx, "The account is in status Disabled, troubles may occur with update. Please, enable account first.") + + case status.Deleted: + restore := state.Restore.ValueBool() + if state.Restore.IsNull() { + restore = true + } // default true + if restore { + // attempt to restore account + tflog.Info(ctx, "AccountReadStatus: account with status.Deleted is being read, attempt to restore it", map[string]any{ + "account_id": accountId, + "status": recordAccount.Status}) + diags.Append(RestoreAccount(ctx, accountId, c)...) 
+ if diags.HasError() { + tflog.Error(ctx, "AccountReadStatus: cannot restore account") + return diags + } + tflog.Info(ctx, "AccountReadStatus: account restored successfully", map[string]any{"account_id": accountId}) + state.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + } else { + tflog.Info(ctx, "AccountReadStatus: account is in status Deleted but restore is not specified") + } + + case status.Destroyed: + diags.AddError( + "AccountReadStatus: Account is in status Destroyed", + fmt.Sprintf("the resource with account_id %d cannot be read or updated because it has been destroyed", accountId), + ) + return diags + + case status.Destroying: + diags.AddError( + "AccountReadStatus: Account is in progress with status Destroying", + fmt.Sprintf("the resource with account_id %d cannot be read or updated because it is currently being destroyed", accountId), + ) + return diags + } + + return nil +} + +// RestoreAccount performs account Restore request. +// Returns error in case of failures. +func RestoreAccount(ctx context.Context, accountId uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + restoreReq := account.RestoreRequest{ + AccountID: accountId, + } + + tflog.Info(ctx, "RestoreAccount: before calling CloudAPI().Account().Restore", map[string]any{"account_id": accountId, "req": restoreReq}) + + res, err := c.CloudAPI().Account().Restore(ctx, restoreReq) + if err != nil { + diags.AddError( + "RestoreAccount: cannot restore account", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "RestoreAccount: response from CloudAPI().Account().Restore", map[string]any{"account_id": accountId, "response": res}) + + return nil +} + +// EnableDisableAccount performs account Enable/Disable request. +// Returns error in case of failures. 
+func EnableDisableAccount(ctx context.Context, accountId uint64, enable bool, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start EnableDisableAccount", map[string]any{"account_id": accountId}) + + diags := diag.Diagnostics{} + + if enable { + tflog.Info(ctx, "EnableDisableAccount: before calling CloudAPI().Account().Enable", map[string]any{"account_id": accountId}) + res, err := c.CloudAPI().Account().Enable(ctx, account.DisableEnableRequest{AccountID: accountId}) + if err != nil { + diags.AddError( + "EnableDisableAccount: cannot enable account", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableAccount: response from CloudAPI().Account().Enable", map[string]any{"account_id": accountId, "response": res}) + + return nil + } + + tflog.Info(ctx, "EnableDisableAccount: before calling CloudAPI().Account().Disable", map[string]any{"account_id": accountId}) + res, err := c.CloudAPI().Account().Disable(ctx, account.DisableEnableRequest{AccountID: accountId}) + if err != nil { + diags.AddError( + "EnableDisableAccount: cannot disable account", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableAccount: response from CloudAPI().Account().Disable", map[string]any{"account_id": accountId, "response": res}) + + return nil +} + +// UpdateAccount updates account data: account_name, resource_limits, send_access_emails. +// Returns error in case of failures. 
+func UpdateAccount(ctx context.Context, accountId uint64, plan, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start UpdateAccount", map[string]any{"account_id": accountId}) + + var diags diag.Diagnostics + var updateNeeded bool + + updateReq := account.UpdateRequest{ + AccountID: accountId, + } + + // check if account_name was changed + if !plan.AccountName.Equal(state.AccountName) { + updateReq.Name = plan.AccountName.ValueString() + updateNeeded = true + } + + // check if resource_limits were changed + if !plan.ResourceLimits.Equal(state.ResourceLimits) && !plan.ResourceLimits.IsUnknown() { + tflog.Info(ctx, "UpdateAccount: new ResourceLimits specified", map[string]any{"account_id": accountId}) + var resourceLimitsPlan models.ResourceLimitsInAccountResourceModel + diags.Append(plan.ResourceLimits.As(ctx, &resourceLimitsPlan, basetypes.ObjectAsOptions{})...) + if diags.HasError() { + tflog.Error(ctx, "UpdateAccount: cannot populate ResourceLimits with plan.ResourceLimits object element") + return diags + } + + if resourceLimitsPlan.CUM.ValueFloat64() == 0 { + updateReq.MaxMemoryCapacity = -1 + } else { + updateReq.MaxMemoryCapacity = int64(resourceLimitsPlan.CUM.ValueFloat64()) + } + + if resourceLimitsPlan.CUD.ValueFloat64() == 0 { + updateReq.MaxVDiskCapacity = -1 + } else { + updateReq.MaxVDiskCapacity = int64(resourceLimitsPlan.CUD.ValueFloat64()) + } + + if resourceLimitsPlan.CUC.ValueFloat64() == 0 { + updateReq.MaxCPUCapacity = -1 + } else { + updateReq.MaxCPUCapacity = int64(resourceLimitsPlan.CUC.ValueFloat64()) + } + + if resourceLimitsPlan.CUI.ValueFloat64() == 0 { + updateReq.MaxNumPublicIP = -1 + } else { + updateReq.MaxNumPublicIP = int64(resourceLimitsPlan.CUI.ValueFloat64()) + } + + if resourceLimitsPlan.CUNP.ValueFloat64() == 0 { + updateReq.MaxNetworkPeerTransfer = -1 + } else { + updateReq.MaxNetworkPeerTransfer = int64(resourceLimitsPlan.CUNP.ValueFloat64()) + } + + if 
resourceLimitsPlan.GPUUnits.ValueFloat64() == 0 { + updateReq.GPUUnits = -1 + } else { + updateReq.GPUUnits = int64(resourceLimitsPlan.GPUUnits.ValueFloat64()) + } + + updateNeeded = true + } + + // check if send_access_emails was changed + if !plan.SendAccessEmails.Equal(state.SendAccessEmails) && !plan.SendAccessEmails.IsNull() { + updateReq.SendAccessEmails = plan.SendAccessEmails.ValueBool() + updateNeeded = true + } + + if !updateNeeded { + tflog.Info(ctx, "UpdateAccount: no general account update is needed because neither account_name, nor resource_limits, nor send_access_emails were changed.", map[string]any{ + "account_id": plan.Id.ValueString(), + }) + return nil + } + + // perform account update + tflog.Info(ctx, "UpdateAccount: before calling CloudAPI().Account().Update", map[string]any{ + "account_id": accountId, + "req": updateReq, + }) + res, err := c.CloudAPI().Account().Update(ctx, updateReq) + if err != nil { + diags.AddError("UpdateAccount: Unable to update account", + err.Error()) + return diags + } + tflog.Info(ctx, "UpdateAccount: response from CloudAPI().Account().Update", map[string]any{ + "account_id": accountId, + "response": res}) + + return nil +} + +// AddDeleteUsersAccount adds/deletes users to/from account. +// In case of failure returns errors. +func AddDeleteUsersAccount(ctx context.Context, accountId uint64, plan, state *models.ResourceAccountModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start AddDeleteUsersAccount: new users specified", map[string]any{"account_id": accountId}) + diags := diag.Diagnostics{} + + usersPlan := make([]models.UsersModel, 0, len(plan.Users.Elements())) + diags.Append(plan.Users.ElementsAs(ctx, &usersPlan, true)...) 
+ if diags.HasError() { + tflog.Error(ctx, "AddDeleteUsersAccount: cannot populate usersPlan with plan.Users list elements") + return diags + } + + usersState := make([]models.UsersModel, 0, len(state.Users.Elements())) + diags.Append(state.Users.ElementsAs(ctx, &usersState, true)...) + if diags.HasError() { + tflog.Error(ctx, "AddDeleteUsersAccount: cannot populate usersState with state.Users list elements") + return diags + } + + // define users to be deleted, added and updated + var deletedUsers, addedUsers, updatedUsers []models.UsersModel + for _, user := range usersState { + if !containsUser(usersPlan, user) { + deletedUsers = append(deletedUsers, user) + } + } + for _, user := range usersPlan { + if !containsUser(usersState, user) { + addedUsers = append(addedUsers, user) + } else if isChangedUser(usersState, user) { + updatedUsers = append(updatedUsers, user) + } + } + + // delete users + if len(deletedUsers) == 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: no users need to be deleted", map[string]any{"account_id": accountId}) + } + if len(deletedUsers) > 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: users need to be deleted", map[string]any{ + "accountId": accountId, + "deletedUsers": deletedUsers}) + + for _, user := range deletedUsers { + delUserReq := account.DeleteUserRequest{ + AccountID: accountId, + UserID: user.UserID.ValueString(), + RecursiveDelete: user.RecursiveDelete.ValueBool(), // default false + } + + tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudAPI().Account().DeleteUser", map[string]any{"account_id": accountId, "req": delUserReq}) + res, err := c.CloudAPI().Account().DeleteUser(ctx, delUserReq) + tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudAPI().Account().DeleteUser", map[string]any{"account_id": accountId, "response": res}) + if err != nil { + diags.AddError( + "AddDeleteUsersAccount: can not delete user from account", + err.Error()) + } + } + } + + // add users + if len(addedUsers) == 0 { + tflog.Info(ctx, 
"AddDeleteUsersAccount: no users needs to be added", map[string]any{"account_id": accountId}) + } + if len(addedUsers) > 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: users need to be added", map[string]any{"account_id": accountId}) + + for _, user := range addedUsers { + addUserReq := account.AddUserRequest{ + AccountID: accountId, + UserID: user.UserID.ValueString(), + AccessType: user.AccessType.ValueString(), + } + + tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudAPI().Account().AddUser", map[string]any{ + "account_id": accountId, + "addUserReq": addUserReq}) + res, err := c.CloudAPI().Account().AddUser(ctx, addUserReq) + if err != nil { + diags.AddError("AddDeleteUsersAccount: Unable to add users to account", + err.Error()) + } + tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudAPI().Account().AddUser", map[string]any{ + "account_id": accountId, + "response": res}) + } + } + + // update users + if len(updatedUsers) == 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: no users needs to be updated", map[string]any{"account_id": accountId}) + } + if len(updatedUsers) > 0 { + tflog.Info(ctx, "AddDeleteUsersAccount: users need to be updated", map[string]any{"account_id": accountId}) + + for _, user := range updatedUsers { + updUserReq := account.UpdateUserRequest{ + AccountID: accountId, + UserID: user.UserID.ValueString(), + AccessType: user.AccessType.ValueString(), + } + + tflog.Info(ctx, "AddDeleteUsersAccount: before calling CloudAPI().Account().UpdateUser", map[string]any{ + "account_id": accountId, + "updatedUsers": updatedUsers}) + res, err := c.CloudAPI().Account().UpdateUser(ctx, updUserReq) + if err != nil { + diags.AddError("AddDeleteUsersAccount: Unable to update users", + err.Error()) + } + tflog.Info(ctx, "AddDeleteUsersAccount: response from CloudAPI().Account().UpdateUser", map[string]any{ + "account_id": accountId, + "response": res}) + } + } + + return diags +} + +func containsUser(users []models.UsersModel, target 
models.UsersModel) bool { + for _, user := range users { + if target.UserID == user.UserID { + return true + } + } + return false +} + +func isChangedUser(users []models.UsersModel, target models.UsersModel) bool { + for _, user := range users { + if user.UserID.Equal(target.UserID) && !user.AccessType.Equal(target.AccessType) { + return true + } + } + return false +} diff --git a/internal/service/cloudapi/bservice/data_source_bservice.go b/internal/service/cloudapi/bservice/data_source_bservice.go new file mode 100644 index 0000000..aba380e --- /dev/null +++ b/internal/service/cloudapi/bservice/data_source_bservice.go @@ -0,0 +1,91 @@ +package bservice + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceBService{} +) + +func NewDataSourceBService() datasource.DataSource { + return &dataSourceBService{} +} + +// dataSourceBService is the data source implementation. 
+type dataSourceBService struct { + client *decort.DecortClient +} + +func (d *dataSourceBService) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.RecordBasicServiceModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBService: Error get state") + return + } + bserviceId := uint64(state.ServiceId.ValueInt64()) + tflog.Info(ctx, "Read dataSourceBService: got state successfully", map[string]any{"service_id": bserviceId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBService: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceBService: set timeouts successfully", map[string]any{ + "service_id": bserviceId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.BServiceDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBService: Error flatten data source bservice") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBService: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceBService", map[string]any{"service_id": bserviceId}) +} + +func (d *dataSourceBService) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceBService(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceBService) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceBService) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceBService") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceBService successfully") +} diff --git a/internal/service/cloudapi/bservice/data_source_bservice__deleted_list.go b/internal/service/cloudapi/bservice/data_source_bservice__deleted_list.go new file mode 100644 index 0000000..01ff6bf --- /dev/null +++ b/internal/service/cloudapi/bservice/data_source_bservice__deleted_list.go @@ -0,0 +1,89 @@ +package bservice + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceBServiceDeletedList{} +) + +func NewDataSourceBServiceDeletedList() datasource.DataSource { + return &dataSourceBServiceDeletedList{} +} + +// dataSourceBServiceDeletedList is the data source implementation. +type dataSourceBServiceDeletedList struct { + client *decort.DecortClient +} + +func (d *dataSourceBServiceDeletedList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.ListBasicServicesDelModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceDeletedList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceBServiceDeletedList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceDeletedList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceBServiceDeletedList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.BServicesDeletedListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceDeletedList: Error flatten data source bservice list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceDeletedList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceBServiceDeletedList") +} + +func (d *dataSourceBServiceDeletedList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceBServiceDeletedList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceBServiceDeletedList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice_deleted_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceBServiceDeletedList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceBServiceDeletedList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceBServiceDeletedList successfully") +} diff --git a/internal/service/cloudapi/bservice/data_source_bservice_group.go b/internal/service/cloudapi/bservice/data_source_bservice_group.go new file mode 100644 index 0000000..cd8ec4c --- /dev/null +++ b/internal/service/cloudapi/bservice/data_source_bservice_group.go @@ -0,0 +1,91 @@ +package bservice + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceBServiceGroup{} +) + +func NewDataSourceBServiceGroup() datasource.DataSource { + return &dataSourceBServiceGroup{} +} + +// dataSourceBServiceGroup is the data source implementation. 
+type dataSourceBServiceGroup struct { + client *decort.DecortClient +} + +func (d *dataSourceBServiceGroup) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.RecordGroupModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceGroup: Error get state") + return + } + bserviceId := uint64(state.ServiceID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceBServiceGroup: got state successfully", map[string]any{"service_id": bserviceId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceGroup: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceBServiceGroup: set timeouts successfully", map[string]any{ + "service_id": bserviceId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.BServiceGroupDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceGroup: Error flatten data source bservice group") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceGroup: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceBServiceGroup", map[string]any{"service_id": bserviceId}) +} + +func (d *dataSourceBServiceGroup) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceBServiceGroup(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceBServiceGroup) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice_group" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceBServiceGroup) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceBServiceGroup") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceBServiceGroup successfully") +} diff --git a/internal/service/cloudapi/bservice/data_source_bservice_list.go b/internal/service/cloudapi/bservice/data_source_bservice_list.go new file mode 100644 index 0000000..be49478 --- /dev/null +++ b/internal/service/cloudapi/bservice/data_source_bservice_list.go @@ -0,0 +1,89 @@ +package bservice + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceBServiceList{} +) + +func NewDataSourceBServiceList() datasource.DataSource { + return &dataSourceBServiceList{} +} + +// dataSourceBServiceList is the data source implementation. +type dataSourceBServiceList struct { + client *decort.DecortClient +} + +func (d *dataSourceBServiceList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.ListBasicServicesModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceBServiceList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceBServiceList: set timeouts successfully", map[string]any{ + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.BServicesListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceList: Error flatten data source bservice list") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceBServiceList") +} + +func (d *dataSourceBServiceList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceBServiceList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceBServiceList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceBServiceList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceBServiceList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceBServiceList successfully") +} diff --git a/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go b/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go new file mode 100644 index 0000000..90fcc9c --- /dev/null +++ b/internal/service/cloudapi/bservice/data_source_bservice_snapshot_list.go @@ -0,0 +1,91 @@ +package bservice + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceBServiceSnapshotList{} +) + +func NewDataSourceBServiceSnapshotList() datasource.DataSource { + return &dataSourceBServiceSnapshotList{} +} + +// dataSourceBServiceSnapshotList is the data source implementation. +type dataSourceBServiceSnapshotList struct { + client *decort.DecortClient +} + +func (d *dataSourceBServiceSnapshotList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.ListInfoSnapshotsModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceSnapshotList: Error get state") + return + } + bserviceId := uint64(state.ServiceID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceBServiceSnapshotList: got state successfully", map[string]any{"service_id": bserviceId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceSnapshotList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceBServiceSnapshotList: set timeouts successfully", map[string]any{ + "service_id": bserviceId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.BServiceSnapshotListDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceSnapshotList: Error flatten data source bservice") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceBServiceSnapshotList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceBServiceSnapshotList", map[string]any{"service_id": bserviceId}) +} + +func (d *dataSourceBServiceSnapshotList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceBServiceSnapshotList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceBServiceSnapshotList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice_snapshot_list" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceBServiceSnapshotList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceBServiceSnapshotList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceBServiceSnapshotList successfully") +} diff --git a/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice.go b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice.go new file mode 100644 index 0000000..baffd79 --- /dev/null +++ b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice.go @@ -0,0 +1,116 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities" +) + +// BServiceDataSource flattens data source for bservice. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func BServiceDataSource(ctx context.Context, state *models.RecordBasicServiceModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.BServiceDataSource")
+
+	diags := diag.Diagnostics{}
+
+	// FIX: renamed misspelled local "serivceId" -> "serviceId" (matches the
+	// naming used by the sibling group flatten).
+	serviceId := uint64(state.ServiceId.ValueInt64())
+
+	record, err := utilities.BServiceDataSourceCheckPresence(ctx, serviceId, c)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("Cannot get info about bservice with ID %v", serviceId), err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.BServiceDataSource: before flatten", map[string]any{"service_id": serviceId, "record": record})
+
+	*state = models.RecordBasicServiceModel{
+		ServiceId:     state.ServiceId,
+		Timeouts:      state.Timeouts,
+		AccountID:     types.Int64Value(int64(record.AccountID)),
+		AccountName:   types.StringValue(record.AccountName),
+		BaseDomain:    types.StringValue(record.BaseDomain),
+		CPUTotal:      types.Int64Value(int64(record.CPUTotal)),
+		CreatedBy:     types.StringValue(record.CreatedBy),
+		CreatedTime:   types.Int64Value(int64(record.CreatedTime)),
+		DeletedBy:     types.StringValue(record.DeletedBy),
+		DeletedTime:   types.Int64Value(int64(record.DeletedTime)),
+		DiskTotal:     types.Int64Value(int64(record.DiskTotal)),
+		GID:           types.Int64Value(int64(record.GID)),
+		GUID:          types.Int64Value(int64(record.GUID)),
+		Milestones:    types.Int64Value(int64(record.Milestones)),
+		Name:          types.StringValue(record.Name),
+		ParentSrvID:   types.Int64Value(int64(record.ParentSrvID)),
+		ParentSrvType: types.StringValue(record.ParentSrvType),
+		RAMTotal:      types.Int64Value(int64(record.RAMTotal)),
+		RGID:          types.Int64Value(int64(record.RGID)),
+		RGName:        types.StringValue(record.RGName),
+		SSHKey:        types.StringValue(record.SSHKey),
+		SSHUser:       types.StringValue(record.SSHUser),
+		Status:        types.StringValue(record.Status),
+		TechStatus:    types.StringValue(record.TechStatus),
+		UpdatedBy:     types.StringValue(record.UpdatedBy),
+		UpdatedTime:   types.Int64Value(int64(record.UpdatedTime)),
+		UserManaged:   types.BoolValue(record.UserManaged),
+	}
+
+	computesList := make([]models.ItemComputeModel, 0, len(record.Computes))
+	for _, v := range record.Computes {
+		temp := models.ItemComputeModel{
+			AccountID:     types.Int64Value(int64(v.AccountID)),
+			Architecture:  types.StringValue(v.Architecture),
+			CompGroupID:   types.Int64Value(int64(v.CompGroupID)),
+			CompGroupName: types.StringValue(v.CompGroupName),
+			CompGroupRole: types.StringValue(v.CompGroupRole),
+			ID:            types.Int64Value(int64(v.ID)),
+			Name:          types.StringValue(v.Name),
+			RGID:          types.Int64Value(int64(v.RGID)),
+			StackID:       types.Int64Value(int64(v.StackID)),
+			Status:        types.StringValue(v.Status),
+			TechStatus:    types.StringValue(v.TechStatus),
+		}
+		computesList = append(computesList, temp)
+	}
+
+	state.Computes = computesList
+
+	groupsList := make([]models.ItemGroupModel, 0, len(record.Groups))
+	for _, v := range record.Groups {
+		temp := models.ItemGroupModel{
+			Computes:    types.Int64Value(int64(v.Computes)),
+			Consistency: types.BoolValue(v.Consistency),
+			ID:          types.Int64Value(int64(v.ID)),
+			Name:        types.StringValue(v.Name),
+			Status:      types.StringValue(v.Status),
+			TechStatus:  types.StringValue(v.TechStatus),
+		}
+		groupsList = append(groupsList, temp)
+	}
+
+	state.Groups = groupsList
+
+	snapshotsList := make([]models.ItemSnapshotModel, 0, len(record.Snapshots))
+	for _, v := range record.Snapshots {
+		temp := models.ItemSnapshotModel{
+			GUID:      types.StringValue(v.GUID),
+			Label:     types.StringValue(v.Label),
+			Timestamp: types.Int64Value(int64(v.Timestamp)),
+			Valid:     types.BoolValue(v.Valid),
+		}
+		snapshotsList = append(snapshotsList, temp)
+	}
+
+	state.Snapshots = snapshotsList
+
+	tflog.Info(ctx, "flattens.BServiceDataSource: after flatten", map[string]any{"service_id": state.ServiceId.ValueInt64()})
+
+	tflog.Info(ctx, "End flattens.BServiceDataSource", map[string]any{"service_id": state.ServiceId.ValueInt64()})
+	return nil
+}
diff --git
a/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_deleted_list.go b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_deleted_list.go
new file mode 100644
index 0000000..dca855f
--- /dev/null
+++ b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_deleted_list.go
@@ -0,0 +1,79 @@
+package flattens
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities"
+)
+
+// BServicesDeletedListDataSource flattens data source for a list of basic services.
+// Return error in case data source is not found on the platform.
+// Flatten errors are added to tflog.
+func BServicesDeletedListDataSource(ctx context.Context, state *models.ListBasicServicesDelModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.BServicesDeletedListDataSource")
+
+	diags := diag.Diagnostics{}
+
+	// Fetch the list of basic services from the API
+	recordList, err := utilities.BServiceDeletedListDataSourceCheckPresence(ctx, *state, c)
+	if err != nil {
+		diags.AddError("Cannot get list of basic services", err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.BServicesDeletedListDataSource: before flatten", map[string]any{"record_list": recordList})
+
+	// Flatten the fetched data into the state
+	var itemList []models.ItemBasicServiceDelModel
+	for _, record := range recordList.Data {
+		item := models.ItemBasicServiceDelModel{
+			AccountID:     types.Int64Value(int64(record.AccountID)),
+			AccountName:   types.StringValue(record.AccountName),
+			BaseDomain:    types.StringValue(record.BaseDomain),
+			CreatedBy:     types.StringValue(record.CreatedBy),
+			CreatedTime:   types.Int64Value(int64(record.CreatedTime)),
+			DeletedBy:     types.StringValue(record.DeletedBy),
+			DeletedTime:   types.Int64Value(int64(record.DeletedTime)),
+			GID:           types.Int64Value(int64(record.GID)),
+			GUID:          types.Int64Value(int64(record.GUID)),
+			ID:            types.Int64Value(int64(record.ID)),
+			Name:          types.StringValue(record.Name),
+			ParentSrvID:   types.Int64Value(int64(record.ParentSrvID)),
+			ParentSrvType: types.StringValue(record.ParentSrvType),
+			RGID:          types.Int64Value(int64(record.RGID)),
+			RGName:        types.StringValue(record.RGName),
+			SSHUser:       types.StringValue(record.SSHUser),
+			Status:        types.StringValue(record.Status),
+			TechStatus:    types.StringValue(record.TechStatus),
+			UpdatedBy:     types.StringValue(record.UpdatedBy),
+			UpdatedTime:   types.Int64Value(int64(record.UpdatedTime)),
+			UserManaged:   types.BoolValue(record.UserManaged),
+		}
+
+		// Handle groups as a list of int64 group IDs
+		groupList := make([]types.Int64, len(record.Groups))
+		for i, group := range record.Groups {
+			groupList[i] = types.Int64Value(int64(group))
+		}
+		// BUG FIX: groupList was built but never stored on the item, so the
+		// groups attribute was always left empty. NOTE(review): assumes
+		// models.ItemBasicServiceDelModel.Groups is []types.Int64 — confirm.
+		item.Groups = groupList
+
+		itemList = append(itemList, item)
+	}
+
+	// Update state with the flattened data
+	state.Data = itemList
+	state.EntryCount = types.Int64Value(int64(len(itemList)))
+
+	tflog.Info(ctx, "flattens.BServicesDeletedListDataSource: after flatten", map[string]any{"entry_count": state.EntryCount.ValueInt64()})
+	tflog.Info(ctx, "End flattens.BServicesDeletedListDataSource")
+
+	return nil
+}
diff --git a/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_group.go b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_group.go
new file mode 100644
index 0000000..fba9548
--- /dev/null
+++ b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_group.go
@@ -0,0 +1,118 @@
+package flattens
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities"
+)
+
+// BServiceGroupDataSource flattens data source for a group.
+// Return error in case data source is not found on the platform.
+// Flatten errors are added to tflog.
+func BServiceGroupDataSource(ctx context.Context, state *models.RecordGroupModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.BServiceGroupDataSource") + + diags := diag.Diagnostics{} + + serviceId := uint64(state.ServiceID.ValueInt64()) + + record, err := utilities.BServiceGroupDataSourceCheckPresence(ctx, *state, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about group with ID %v", serviceId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.BServiceGroupDataSource: before flatten", map[string]any{"service_id": serviceId, "record": record}) + + *state = models.RecordGroupModel{ + ServiceID: state.ServiceID, + ID: state.ID, + Timeouts: state.Timeouts, + AccountID: types.Int64Value(int64(record.AccountID)), + AccountName: types.StringValue(record.AccountName), + Consistency: types.BoolValue(record.Consistency), + CPU: types.Int64Value(int64(record.CPU)), + CreatedBy: types.StringValue(record.CreatedBy), + CreatedTime: types.Int64Value(int64(record.CreatedTime)), + DeletedBy: types.StringValue(record.DeletedBy), + DeletedTime: types.Int64Value(int64(record.DeletedTime)), + Disk: types.Int64Value(int64(record.Disk)), + Driver: types.StringValue(record.Driver), + GID: types.Int64Value(int64(record.GID)), + GUID: types.Int64Value(int64(record.GUID)), + ImageID: types.Int64Value(int64(record.ImageID)), + Milestones: types.Int64Value(int64(record.Milestones)), + Name: types.StringValue(record.Name), + RAM: types.Int64Value(int64(record.RAM)), + RGID: types.Int64Value(int64(record.RGID)), + RGName: types.StringValue(record.RGName), + Role: types.StringValue(record.Role), + SEPID: types.Int64Value(int64(record.SEPID)), + SeqNo: types.Int64Value(int64(record.SeqNo)), + Status: types.StringValue(record.Status), + TechStatus: types.StringValue(record.TechStatus), + TimeoutStart: types.Int64Value(int64(record.TimeoutStart)), + UpdatedBy: types.StringValue(record.UpdatedBy), + UpdatedTime: 
types.Int64Value(int64(record.UpdatedTime)), + } + + // Handle computes + computesList := make([]models.ItemGroupComputeModel, 0, len(record.Computes)) + for _, v := range record.Computes { + ipAddresses := make([]types.String, len(v.IPAddresses)) + for i, ip := range v.IPAddresses { + ipAddresses[i] = types.StringValue(ip) + } + + osUsers := make([]models.ItemOSUserModel, len(v.OSUsers)) + for j, user := range v.OSUsers { + osUsers[j] = models.ItemOSUserModel{ + Login: types.StringValue(user.Login), + Password: types.StringValue(user.Password), + } + } + + temp := models.ItemGroupComputeModel{ + ID: types.Int64Value(int64(v.ID)), + IPAddresses: flattens.FlattenSimpleTypeToList(ctx, types.StringType, ipAddresses), + Name: types.StringValue(v.Name), + OSUsers: osUsers, + } + computesList = append(computesList, temp) + } + + state.Computes = computesList + + // Handle ExtNets + extNetsList := make([]types.Int64, len(record.ExtNets)) + for i, extNet := range record.ExtNets { + extNetsList[i] = types.Int64Value(int64(extNet)) + } + state.ExtNets = flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, extNetsList) + + // Handle Parents + parentsList := make([]types.Int64, len(record.Parents)) + for i, parent := range record.Parents { + parentsList[i] = types.Int64Value(int64(parent)) + } + state.Parents = flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, parentsList) + + // Handle VINSes + vinsesList := make([]types.Int64, len(record.VINSes)) + for i, vins := range record.VINSes { + vinsesList[i] = types.Int64Value(int64(vins)) + } + state.VINSes = flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, vinsesList) + + tflog.Info(ctx, "flattens.BServiceGroupDataSource: after flatten", map[string]any{"service_id": state.ID.ValueInt64()}) + + tflog.Info(ctx, "End flattens.BServiceGroupDataSource", map[string]any{"service_id": state.ID.ValueInt64()}) + return nil +} diff --git a/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_list.go 
b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_list.go
new file mode 100644
index 0000000..9fa7c45
--- /dev/null
+++ b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_list.go
@@ -0,0 +1,79 @@
+package flattens
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities"
+)
+
+// BServicesListDataSource flattens data source for a list of basic services.
+// Return error in case data source is not found on the platform.
+// Flatten errors are added to tflog.
+func BServicesListDataSource(ctx context.Context, state *models.ListBasicServicesModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start flattens.BServicesListDataSource")
+
+	diags := diag.Diagnostics{}
+
+	// Fetch the list of basic services from the API
+	recordList, err := utilities.BServiceListDataSourceCheckPresence(ctx, *state, c)
+	if err != nil {
+		diags.AddError("Cannot get list of basic services", err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "flattens.BServicesListDataSource: before flatten", map[string]any{"record_list": recordList})
+
+	// Flatten the fetched data into the state
+	var itemList []models.ItemBasicServiceModel
+	for _, record := range recordList.Data {
+		item := models.ItemBasicServiceModel{
+			AccountID:     types.Int64Value(int64(record.AccountID)),
+			AccountName:   types.StringValue(record.AccountName),
+			BaseDomain:    types.StringValue(record.BaseDomain),
+			CreatedBy:     types.StringValue(record.CreatedBy),
+			CreatedTime:   types.Int64Value(int64(record.CreatedTime)),
+			DeletedBy:     types.StringValue(record.DeletedBy),
+			DeletedTime:   types.Int64Value(int64(record.DeletedTime)),
+			GID:           types.Int64Value(int64(record.GID)),
+			GUID:          types.Int64Value(int64(record.GUID)),
+			ID:            types.Int64Value(int64(record.ID)),
+			Name:          types.StringValue(record.Name),
+			ParentSrvID:   types.Int64Value(int64(record.ParentSrvID)),
+			ParentSrvType: types.StringValue(record.ParentSrvType),
+			RGID:          types.Int64Value(int64(record.RGID)),
+			RGName:        types.StringValue(record.RGName),
+			SSHUser:       types.StringValue(record.SSHUser),
+			Status:        types.StringValue(record.Status),
+			TechStatus:    types.StringValue(record.TechStatus),
+			UpdatedBy:     types.StringValue(record.UpdatedBy),
+			UpdatedTime:   types.Int64Value(int64(record.UpdatedTime)),
+			UserManaged:   types.BoolValue(record.UserManaged),
+		}
+
+		// Handle groups as a list of int64 group IDs
+		groupList := make([]types.Int64, len(record.Groups))
+		for i, group := range record.Groups {
+			groupList[i] = types.Int64Value(int64(group))
+		}
+		// BUG FIX: groupList was built but never stored on the item, so the
+		// groups attribute was always left empty. NOTE(review): assumes
+		// models.ItemBasicServiceModel.Groups is []types.Int64 — confirm.
+		item.Groups = groupList
+
+		itemList = append(itemList, item)
+	}
+
+	// Update state with the flattened data
+	state.Data = itemList
+	state.EntryCount = types.Int64Value(int64(len(itemList)))
+
+	tflog.Info(ctx, "flattens.BServicesListDataSource: after flatten", map[string]any{"entry_count": state.EntryCount.ValueInt64()})
+	tflog.Info(ctx, "End flattens.BServicesListDataSource")
+
+	return nil
+}
diff --git a/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_snapshot_list.go b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_snapshot_list.go
new file mode 100644
index 0000000..9b7ed7c
--- /dev/null
+++ b/internal/service/cloudapi/bservice/flattens/flatten_data_source_bservice_snapshot_list.go
@@ -0,0 +1,56 @@
+package flattens
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/hashicorp/terraform-plugin-framework/diag"
+	"github.com/hashicorp/terraform-plugin-framework/types"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities" +) + +// BServiceSnapshotListDataSource flattens data source for account. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func BServiceSnapshotListDataSource(ctx context.Context, state *models.ListInfoSnapshotsModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.BServiceSnapshotListDataSource") + + diags := diag.Diagnostics{} + + serivceId := uint64(state.ServiceID.ValueInt64()) + + record, err := utilities.BServiceSnapshotListDataSourceCheckPresence(ctx, serivceId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about bservice with ID %v", serivceId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.BServiceSnapshotListDataSource: before flatten", map[string]any{"service_id": serivceId, "record": record}) + + *state = models.ListInfoSnapshotsModel{ + ServiceID: state.ServiceID, + Timeouts: state.Timeouts, + EntryCount: types.Int64Value(int64(record.EntryCount)), + } + + dataList := make([]models.ItemSnapshotsModel, 0, len(record.Data)) + for _, v := range record.Data { + temp := models.ItemSnapshotsModel{ + GUID: types.StringValue(v.GUID), + Label: types.StringValue(v.Label), + Timestamp: types.Int64Value(int64(v.Timestamp)), + Valid: types.BoolValue(v.Valid), + } + dataList = append(dataList, temp) + } + + state.Data = dataList + + tflog.Info(ctx, "flattens.BServiceSnapshotListDataSource: after flatten", map[string]any{"service_id": state.ServiceID.ValueInt64()}) + + tflog.Info(ctx, "End flattens.BServiceSnapshotListDataSource", map[string]any{"service_id": state.ServiceID.ValueInt64()}) + return nil +} diff --git a/internal/service/cloudapi/bservice/flattens/flatten_resource_bservice.go 
b/internal/service/cloudapi/bservice/flattens/flatten_resource_bservice.go new file mode 100644 index 0000000..1eeef1d --- /dev/null +++ b/internal/service/cloudapi/bservice/flattens/flatten_resource_bservice.go @@ -0,0 +1,144 @@ +package flattens + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities" +) + +func BServiceResource(ctx context.Context, state *models.RecordBasicServiceResourceModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.BServiceResource") + + diags := diag.Diagnostics{} + + serviceId := uint64(state.ServiceId.ValueInt64()) + if serviceId == 0 { + id, err := strconv.Atoi(state.ID.ValueString()) + if err != nil { + diags.AddError( + "flattens.BServiceResource: cannot parse resource ID from state", + err.Error()) + return diags + } + serviceId = uint64(id) + } + + recordBService, err := utilities.BServiceResourceCheckPresence(ctx, serviceId, c) + if err != nil { + diags.AddError(fmt.Sprintf("flattens.BServiceResource: Cannot get info about resource with ID %v", serviceId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.BServiceResource: before flatten", map[string]any{"service_id": serviceId, "recordBService": recordBService}) + + *state = models.RecordBasicServiceResourceModel{ + Name: state.Name, + RGID: state.RGID, + Permanently: state.Permanently, + Enable: state.Enable, + Restore: state.Restore, + Start: state.Start, + Snapshots: state.Snapshots, + Timeouts: state.Timeouts, + SSHKey: 
types.StringValue(recordBService.SSHKey), + SSHUser: types.StringValue(recordBService.SSHUser), + ServiceId: types.Int64Value(int64(recordBService.ID)), + AccountID: types.Int64Value(int64(recordBService.AccountID)), + Computes: flattenComputes(ctx, recordBService.Computes), + Groups: flattenGroups(ctx, recordBService.Groups), + AccountName: types.StringValue(recordBService.Name), + BaseDomain: types.StringValue(recordBService.BaseDomain), + CPUTotal: types.Int64Value(int64(recordBService.CPUTotal)), + CreatedBy: types.StringValue(recordBService.CreatedBy), + CreatedTime: types.Int64Value(int64(recordBService.CreatedTime)), + DeletedBy: types.StringValue(recordBService.DeletedBy), + DeletedTime: types.Int64Value(int64(recordBService.DeletedTime)), + DiskTotal: types.Int64Value(int64(recordBService.DiskTotal)), + GID: types.Int64Value(int64(recordBService.GID)), + GUID: types.Int64Value(int64(recordBService.GUID)), + Milestones: types.Int64Value(int64(recordBService.Milestones)), + ParentSrvID: types.Int64Value(int64(recordBService.ParentSrvID)), + ParentSrvType: types.StringValue(recordBService.ParentSrvType), + RAMTotal: types.Int64Value(int64(recordBService.RAMTotal)), + RGName: types.StringValue(recordBService.RGName), + Status: types.StringValue(recordBService.Status), + TechStatus: types.StringValue(recordBService.TechStatus), + UpdatedBy: types.StringValue(recordBService.UpdatedBy), + UpdatedTime: types.Int64Value(int64(recordBService.UpdatedTime)), + UserManaged: types.BoolValue(recordBService.UserManaged), + ID: types.StringValue(strconv.Itoa(int(serviceId))), + } + + tflog.Info(ctx, "flattens.BServiceResource: after flatten", map[string]any{"service_id": state.ID.ValueString()}) + tflog.Info(ctx, "End flattens.BServiceResource", map[string]any{"service_id": state.ID.ValueString()}) + return nil +} + +func flattenComputes(ctx context.Context, items bservice.ListComputes) types.List { + tflog.Info(ctx, "Start flattenComputes") + tempSlice := 
make([]types.Object, 0, len(items)) + for _, v := range items { + temp := models.ItemComputeResourceModel{ + AccountID: types.Int64Value(int64(v.AccountID)), + Architecture: types.StringValue(v.Architecture), + CompGroupID: types.Int64Value(int64(v.CompGroupID)), + CompGroupName: types.StringValue(v.CompGroupName), + CompGroupRole: types.StringValue(v.CompGroupRole), + ID: types.Int64Value(int64(v.ID)), + Name: types.StringValue(v.Name), + RGID: types.Int64Value(int64(v.RGID)), + StackID: types.Int64Value(int64(v.StackID)), + Status: types.StringValue(v.Status), + TechStatus: types.StringValue(v.TechStatus), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemComputeResource, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenComputes struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemComputeResource}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenComputes", diags)) + } + + tflog.Info(ctx, "End flattenComputes") + return res +} + +func flattenGroups(ctx context.Context, items bservice.ListGroups) types.List { + tflog.Info(ctx, "Start flattenGroups") + tempSlice := make([]types.Object, 0, len(items)) + for _, v := range items { + temp := models.ItemGroupResourceModel{ + Computes: types.Int64Value(int64(v.Computes)), + Consistency: types.BoolValue(v.Consistency), + ID: types.Int64Value(int64(v.ID)), + Name: types.StringValue(v.Name), + Status: types.StringValue(v.Status), + TechStatus: types.StringValue(v.TechStatus), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemGroupResource, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenGroups struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemGroupResource}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenGroups", 
diags)) + } + + tflog.Info(ctx, "End flattenGroups") + return res +} diff --git a/internal/service/cloudapi/bservice/flattens/flatten_resource_bservice_group.go b/internal/service/cloudapi/bservice/flattens/flatten_resource_bservice_group.go new file mode 100644 index 0000000..38fa76b --- /dev/null +++ b/internal/service/cloudapi/bservice/flattens/flatten_resource_bservice_group.go @@ -0,0 +1,151 @@ +package flattens + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities" +) + +func BServiceGroupResource(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start BServiceGroupResource", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()}) + + diags := diag.Diagnostics{} + serviceID := plan.ServiceID.ValueInt64() + comgroupID := plan.CompgroupID.ValueInt64() + if serviceID == 0 { + id, err := strconv.Atoi(plan.SID.ValueString()) + if err != nil { + diags.AddError( + "flattens.BServiceGroupResource: cannot parse resource ID from state", + err.Error()) + return diags + } + serviceID = int64(id) + } + + if comgroupID == 0 { + id, err := strconv.Atoi(plan.ID.ValueString()) + if err != nil { + diags.AddError( + "flattens.BServiceGroupResource: cannot parse resource ID from state", + err.Error()) + return diags + } + comgroupID = int64(id) + } + + recordResourceGroup, err := 
utilities.BServiceGroupResourceCheckPresence(ctx, uint64(serviceID), uint64(comgroupID), c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about bservice group with service_id %d and compgroup_id %d", serviceID, comgroupID), err.Error()) + return diags + } + + *plan = models.ResourceRecordGroupModel{ + ServiceID: plan.ServiceID, + CompCount: plan.CompCount, + Name: plan.Name, + CPU: plan.CPU, + RAM: plan.RAM, + Disk: plan.Disk, + ImageID: plan.ImageID, + Driver: plan.Driver, + SEPID: types.Int64Value(int64(recordResourceGroup.SEPID)), + SepPool: types.StringValue(recordResourceGroup.PoolName), + CloudInit: plan.CloudInit, + Role: types.StringValue(recordResourceGroup.Role), + TimeoutStart: types.Int64Value(int64(recordResourceGroup.TimeoutStart)), + VINSes: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordResourceGroup.VINSes), + ExtNets: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordResourceGroup.ExtNets), + Mode: plan.Mode, + Start: plan.Start, + ForceStop: plan.ForceStop, + ForceUpdate: plan.ForceUpdate, + Parents: flattens.FlattenSimpleTypeToList(ctx, types.Int64Type, recordResourceGroup.Parents), + RemoveComputes: plan.RemoveComputes, + CompgroupID: plan.CompgroupID, + ID: types.StringValue(strconv.Itoa(int(plan.CompgroupID.ValueInt64()))), + SID: types.StringValue(strconv.Itoa(int(plan.ServiceID.ValueInt64()))), + Timeouts: plan.Timeouts, + AccountID: types.Int64Value(int64(recordResourceGroup.AccountID)), + AccountName: types.StringValue(recordResourceGroup.AccountName), + Computes: flattenGroupComputes(ctx, recordResourceGroup.Computes), + Consistency: types.BoolValue(recordResourceGroup.Consistency), + CreatedBy: types.StringValue(recordResourceGroup.CreatedBy), + CreatedTime: types.Int64Value(int64(recordResourceGroup.CreatedTime)), + DeletedBy: types.StringValue(recordResourceGroup.DeletedBy), + DeletedTime: types.Int64Value(int64(recordResourceGroup.DeletedTime)), + GID: 
types.Int64Value(int64(recordResourceGroup.GID)), + GUID: types.Int64Value(int64(recordResourceGroup.GUID)), + Milestones: types.Int64Value(int64(recordResourceGroup.Milestones)), + RGID: types.Int64Value(int64(recordResourceGroup.RGID)), + RGName: types.StringValue(recordResourceGroup.RGName), + SeqNo: types.Int64Value(int64(recordResourceGroup.SeqNo)), + Status: types.StringValue(recordResourceGroup.Status), + TechStatus: types.StringValue(recordResourceGroup.TechStatus), + UpdatedBy: types.StringValue(recordResourceGroup.UpdatedBy), + UpdatedTime: types.Int64Value(int64(recordResourceGroup.UpdatedTime)), + } + + tflog.Info(ctx, "End BServiceGroupResource", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()}) + return nil +} + +func flattenGroupComputes(ctx context.Context, items bservice.ListGroupComputes) types.List { + tflog.Info(ctx, "Start flattenGroupComputes") + tempSlice := make([]types.Object, 0, len(items)) + for _, v := range items { + temp := models.ResourceItemGroupComputeModel{ + + ID: types.Int64Value(int64(v.ID)), + Name: types.StringValue(v.Name), + IPAddresses: flattens.FlattenSimpleTypeToList(ctx, types.StringType, v.IPAddresses), + OSUsers: flattenOSuser(ctx, v.OSUsers), + } + obj, diags := types.ObjectValueFrom(ctx, models.ResourceItemGroupCompute, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenGroupComputes struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceItemGroupCompute}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenGroupComputes", diags)) + } + + tflog.Info(ctx, "End flattenGroupComputes") + return res +} + +func flattenOSuser(ctx context.Context, items bservice.ListOSUsers) types.List { + tflog.Info(ctx, "Start flattenOSuser") + tempSlice := make([]types.Object, 0, len(items)) + for _, v := range items { + temp := 
models.ResourceItemOSUserModel{ + Login: types.StringValue(v.Login), + Password: types.StringValue(v.Password), + } + obj, diags := types.ObjectValueFrom(ctx, models.ResourceItemOSUser, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenOSuser struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ResourceItemOSUser}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenOSuser", diags)) + } + + tflog.Info(ctx, "End flattenOSuser") + return res +} diff --git a/internal/service/cloudapi/bservice/models/model_data_source_bservice.go b/internal/service/cloudapi/bservice/models/model_data_source_bservice.go new file mode 100644 index 0000000..9359c0c --- /dev/null +++ b/internal/service/cloudapi/bservice/models/model_data_source_bservice.go @@ -0,0 +1,69 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type RecordBasicServiceModel struct { + ServiceId types.Int64 `tfsdk:"service_id"` + AccountID types.Int64 `tfsdk:"account_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + AccountName types.String `tfsdk:"account_name"` + BaseDomain types.String `tfsdk:"base_domain"` + Computes []ItemComputeModel `tfsdk:"computes"` + CPUTotal types.Int64 `tfsdk:"cpu_total"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DiskTotal types.Int64 `tfsdk:"disk_total"` + GID types.Int64 `tfsdk:"gid"` + Groups []ItemGroupModel `tfsdk:"groups"` + GUID types.Int64 `tfsdk:"guid"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"service_name"` + ParentSrvID types.Int64 `tfsdk:"parent_srv_id"` + ParentSrvType types.String `tfsdk:"parent_srv_type"` + RAMTotal types.Int64 
`tfsdk:"ram_total"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + Snapshots []ItemSnapshotModel `tfsdk:"snapshots"` + SSHKey types.String `tfsdk:"ssh_key"` + SSHUser types.String `tfsdk:"ssh_user"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` +} + +type ItemComputeModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + Architecture types.String `tfsdk:"architecture"` + CompGroupID types.Int64 `tfsdk:"compgroup_id"` + CompGroupName types.String `tfsdk:"compgroup_name"` + CompGroupRole types.String `tfsdk:"compgroup_role"` + ID types.Int64 `tfsdk:"id"` + Name types.String `tfsdk:"name"` + RGID types.Int64 `tfsdk:"rg_id"` + StackID types.Int64 `tfsdk:"stack_id"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` +} + +type ItemGroupModel struct { + Computes types.Int64 `tfsdk:"computes"` + Consistency types.Bool `tfsdk:"consistency"` + ID types.Int64 `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` +} + +type ItemSnapshotModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + Timestamp types.Int64 `tfsdk:"timestamp"` + Valid types.Bool `tfsdk:"valid"` +} diff --git a/internal/service/cloudapi/bservice/models/model_data_source_bservice_deleted_list.go b/internal/service/cloudapi/bservice/models/model_data_source_bservice_deleted_list.go new file mode 100644 index 0000000..f704ac5 --- /dev/null +++ b/internal/service/cloudapi/bservice/models/model_data_source_bservice_deleted_list.go @@ -0,0 +1,45 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ListBasicServicesDelModel 
struct { + //optional fields + AccountID types.Int64 `tfsdk:"account_id"` + RGID types.Int64 `tfsdk:"rg_id"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //computed fields + Data []ItemBasicServiceDelModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` +} + +type ItemBasicServiceDelModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + BaseDomain types.String `tfsdk:"base_domain"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + GID types.Int64 `tfsdk:"gid"` + Groups []uint64 `tfsdk:"groups"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"service_id"` + Name types.String `tfsdk:"service_name"` + ParentSrvID types.Int64 `tfsdk:"parent_srv_id"` + ParentSrvType types.String `tfsdk:"parent_srv_type"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + SSHUser types.String `tfsdk:"ssh_user"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` +} diff --git a/internal/service/cloudapi/bservice/models/model_data_source_bservice_group.go b/internal/service/cloudapi/bservice/models/model_data_source_bservice_group.go new file mode 100644 index 0000000..d4fd7c9 --- /dev/null +++ b/internal/service/cloudapi/bservice/models/model_data_source_bservice_group.go @@ -0,0 +1,57 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type RecordGroupModel struct { + //required and optional fields + ServiceID types.Int64 
`tfsdk:"service_id"` + ID types.Int64 `tfsdk:"compgroup_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //compute fields + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes []ItemGroupComputeModel `tfsdk:"computes"` + Consistency types.Bool `tfsdk:"consistency"` + CPU types.Int64 `tfsdk:"cpu"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + Disk types.Int64 `tfsdk:"disk"` + Driver types.String `tfsdk:"driver"` + ExtNets types.List `tfsdk:"extnets"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + ImageID types.Int64 `tfsdk:"image_id"` + Milestones types.Int64 `tfsdk:"milestones"` + Name types.String `tfsdk:"compgroup_name"` + Parents types.List `tfsdk:"parents"` + RAM types.Int64 `tfsdk:"ram"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + Role types.String `tfsdk:"role"` + SEPID types.Int64 `tfsdk:"sep_id"` + SeqNo types.Int64 `tfsdk:"seq_no"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + TimeoutStart types.Int64 `tfsdk:"timeout_start"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + VINSes types.List `tfsdk:"vinses"` +} + +type ItemGroupComputeModel struct { + ID types.Int64 `tfsdk:"id"` + IPAddresses types.List `tfsdk:"ip_addresses"` + Name types.String `tfsdk:"name"` + OSUsers []ItemOSUserModel `tfsdk:"os_users"` +} + +type ItemOSUserModel struct { + Login types.String `tfsdk:"login"` + Password types.String `tfsdk:"password"` +} diff --git a/internal/service/cloudapi/bservice/models/model_data_source_bservice_list.go b/internal/service/cloudapi/bservice/models/model_data_source_bservice_list.go new file mode 100644 index 0000000..ff27c4d --- /dev/null +++ 
b/internal/service/cloudapi/bservice/models/model_data_source_bservice_list.go @@ -0,0 +1,48 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ListBasicServicesModel struct { + ByID types.Int64 `tfsdk:"by_id"` + Name types.String `tfsdk:"name"` + RGName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + AccountName types.String `tfsdk:"account_name"` + AccountID types.Int64 `tfsdk:"account_id"` + RGID types.Int64 `tfsdk:"rg_id"` + SortBy types.String `tfsdk:"sort_by"` + Page types.Int64 `tfsdk:"page"` + Size types.Int64 `tfsdk:"size"` + Data []ItemBasicServiceModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +type ItemBasicServiceModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + BaseDomain types.String `tfsdk:"base_domain"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + GID types.Int64 `tfsdk:"gid"` + Groups []uint64 `tfsdk:"groups"` + GUID types.Int64 `tfsdk:"guid"` + ID types.Int64 `tfsdk:"service_id"` + Name types.String `tfsdk:"service_name"` + ParentSrvID types.Int64 `tfsdk:"parent_srv_id"` + ParentSrvType types.String `tfsdk:"parent_srv_type"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + SSHUser types.String `tfsdk:"ssh_user"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` +} diff --git a/internal/service/cloudapi/bservice/models/model_data_source_bservice_snapshot_list.go 
b/internal/service/cloudapi/bservice/models/model_data_source_bservice_snapshot_list.go new file mode 100644 index 0000000..d4294b5 --- /dev/null +++ b/internal/service/cloudapi/bservice/models/model_data_source_bservice_snapshot_list.go @@ -0,0 +1,23 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ListInfoSnapshotsModel struct { + //required fields + ServiceID types.Int64 `tfsdk:"service_id"` + + //computed fields + Data []ItemSnapshotsModel `tfsdk:"items"` + EntryCount types.Int64 `tfsdk:"entry_count"` + Timeouts timeouts.Value `tfsdk:"timeouts"` +} + +type ItemSnapshotsModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + Timestamp types.Int64 `tfsdk:"timestamp"` + Valid types.Bool `tfsdk:"valid"` +} diff --git a/internal/service/cloudapi/bservice/models/model_resource_bservice.go b/internal/service/cloudapi/bservice/models/model_resource_bservice.go new file mode 100644 index 0000000..b9ab5ab --- /dev/null +++ b/internal/service/cloudapi/bservice/models/model_resource_bservice.go @@ -0,0 +1,112 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type RecordBasicServiceResourceModel struct { + //required fields + Name types.String `tfsdk:"service_name"` + RGID types.Int64 `tfsdk:"rg_id"` + + //optional fields + SSHKey types.String `tfsdk:"ssh_key"` + SSHUser types.String `tfsdk:"ssh_user"` + Permanently types.Bool `tfsdk:"permanently"` + Enable types.Bool `tfsdk:"enable"` + Restore types.Bool `tfsdk:"restore"` + Start types.Bool `tfsdk:"start"` + ServiceId types.Int64 `tfsdk:"service_id"` + Snapshots types.List `tfsdk:"snapshots"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //computed fields + AccountID types.Int64 
`tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + BaseDomain types.String `tfsdk:"base_domain"` + Computes types.List `tfsdk:"computes"` + CPUTotal types.Int64 `tfsdk:"cpu_total"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DiskTotal types.Int64 `tfsdk:"disk_total"` + GID types.Int64 `tfsdk:"gid"` + Groups types.List `tfsdk:"groups"` + GUID types.Int64 `tfsdk:"guid"` + Milestones types.Int64 `tfsdk:"milestones"` + ParentSrvID types.Int64 `tfsdk:"parent_srv_id"` + ParentSrvType types.String `tfsdk:"parent_srv_type"` + RAMTotal types.Int64 `tfsdk:"ram_total"` + RGName types.String `tfsdk:"rg_name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + UserManaged types.Bool `tfsdk:"user_managed"` + ID types.String `tfsdk:"id"` +} + +type ItemComputeResourceModel struct { + AccountID types.Int64 `tfsdk:"account_id"` + Architecture types.String `tfsdk:"architecture"` + CompGroupID types.Int64 `tfsdk:"compgroup_id"` + CompGroupName types.String `tfsdk:"compgroup_name"` + CompGroupRole types.String `tfsdk:"compgroup_role"` + ID types.Int64 `tfsdk:"id"` + Name types.String `tfsdk:"name"` + RGID types.Int64 `tfsdk:"rg_id"` + StackID types.Int64 `tfsdk:"stack_id"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` +} + +var ItemComputeResource = map[string]attr.Type{ + "account_id": types.Int64Type, + "architecture": types.StringType, + "compgroup_id": types.Int64Type, + "compgroup_name": types.StringType, + "compgroup_role": types.StringType, + "id": types.Int64Type, + "name": types.StringType, + "rg_id": types.Int64Type, + "stack_id": types.Int64Type, + "status": types.StringType, + "tech_status": types.StringType, +} + +type 
ItemGroupResourceModel struct { + Computes types.Int64 `tfsdk:"computes"` + Consistency types.Bool `tfsdk:"consistency"` + ID types.Int64 `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` +} + +var ItemGroupResource = map[string]attr.Type{ + "computes": types.Int64Type, + "consistency": types.BoolType, + "id": types.Int64Type, + "name": types.StringType, + "status": types.StringType, + "tech_status": types.StringType, +} + +type ItemSnapshotResourceModel struct { + GUID types.String `tfsdk:"guid"` + Label types.String `tfsdk:"label"` + Rollback types.Bool `tfsdk:"rollback"` + Timestamp types.Int64 `tfsdk:"timestamp"` + Valid types.Bool `tfsdk:"valid"` +} + +var ItemSnapshotResource = map[string]attr.Type{ + "guid": types.StringType, + "label": types.StringType, + "rollback": types.BoolType, + "timestamp": types.Int64Type, + "valid": types.BoolType, +} diff --git a/internal/service/cloudapi/bservice/models/model_resource_bservice_group.go b/internal/service/cloudapi/bservice/models/model_resource_bservice_group.go new file mode 100644 index 0000000..3d53a27 --- /dev/null +++ b/internal/service/cloudapi/bservice/models/model_resource_bservice_group.go @@ -0,0 +1,82 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceRecordGroupModel struct { + //required fields + ServiceID types.Int64 `tfsdk:"service_id"` + CompCount types.Int64 `tfsdk:"comp_count"` + Name types.String `tfsdk:"compgroup_name"` + CPU types.Int64 `tfsdk:"cpu"` + RAM types.Int64 `tfsdk:"ram"` + Disk types.Int64 `tfsdk:"disk"` + ImageID types.Int64 `tfsdk:"image_id"` + Driver types.String `tfsdk:"driver"` + + //optional fields + SEPID types.Int64 `tfsdk:"sep_id"` + SepPool types.String `tfsdk:"sep_pool"` + CloudInit types.String 
`tfsdk:"cloud_init"` + Role types.String `tfsdk:"role"` + TimeoutStart types.Int64 `tfsdk:"timeout_start"` + VINSes types.List `tfsdk:"vinses"` + ExtNets types.List `tfsdk:"extnets"` + Mode types.String `tfsdk:"mode"` + Start types.Bool `tfsdk:"start"` + ForceStop types.Bool `tfsdk:"force_stop"` + ForceUpdate types.Bool `tfsdk:"force_update"` + Parents types.List `tfsdk:"parents"` + RemoveComputes types.List `tfsdk:"remove_computes"` + CompgroupID types.Int64 `tfsdk:"compgroup_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + //compute fields + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes types.List `tfsdk:"computes"` + Consistency types.Bool `tfsdk:"consistency"` + CreatedBy types.String `tfsdk:"created_by"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedBy types.String `tfsdk:"deleted_by"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + GID types.Int64 `tfsdk:"gid"` + GUID types.Int64 `tfsdk:"guid"` + Milestones types.Int64 `tfsdk:"milestones"` + RGID types.Int64 `tfsdk:"rg_id"` + RGName types.String `tfsdk:"rg_name"` + SeqNo types.Int64 `tfsdk:"seq_no"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + UpdatedBy types.String `tfsdk:"updated_by"` + UpdatedTime types.Int64 `tfsdk:"updated_time"` + ID types.String `tfsdk:"id"` + SID types.String `tfsdk:"sid"` +} + +type ResourceItemGroupComputeModel struct { + ID types.Int64 `tfsdk:"id"` + IPAddresses types.List `tfsdk:"ip_addresses"` + Name types.String `tfsdk:"name"` + OSUsers types.List `tfsdk:"os_users"` +} + +var ResourceItemGroupCompute = map[string]attr.Type{ + "id": types.Int64Type, + "ip_addresses": types.ListType{ElemType: types.StringType}, + "name": types.StringType, + "os_users": types.ListType{ElemType: types.ObjectType{AttrTypes: ResourceItemOSUser}}, +} + +type ResourceItemOSUserModel struct { + Login types.String `tfsdk:"login"` + Password types.String `tfsdk:"password"` +} + +var 
ResourceItemOSUser = map[string]attr.Type{ + "login": types.StringType, + "password": types.StringType, +} diff --git a/internal/service/cloudapi/bservice/resource_bservice.go b/internal/service/cloudapi/bservice/resource_bservice.go new file mode 100644 index 0000000..9de792f --- /dev/null +++ b/internal/service/cloudapi/bservice/resource_bservice.go @@ -0,0 +1,316 @@ +package bservice + +import ( + "context" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceBService{} + _ resource.ResourceWithImportState = &resourceBService{} +) + +// NewresourceBService is a helper function to simplify the provider implementation. +func NewResourceBService() resource.Resource { + return &resourceBService{} +} + +// resourceBService is the resource implementation. 
+type resourceBService struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceBService) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create resource group + var plan models.RecordBasicServiceResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceBService: Error receiving the plan") + return + } + + contextCreateMap := map[string]any{ + "name": plan.Name.ValueString(), + "rg_id": plan.RGID.ValueInt64(), + } + tflog.Info(ctx, "Create resourceBService: got plan successfully", contextCreateMap) + tflog.Info(ctx, "Create resourceBService: start creating", contextCreateMap) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceBService: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceBService: set timeouts successfully", map[string]any{ + "name": plan.Name.ValueString(), + "rg_id": plan.RGID.ValueInt64(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Make create request and get response + id, diags := utilities.BServiceResourceCreate(ctx, &plan, r.client) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + plan.ID = types.StringValue(strconv.Itoa(int(*id))) + tflog.Info(ctx, "BServiceResourceCreatee: BService created", map[string]any{"service_id": id}) + + tflog.Info(ctx, "BServiceResourceCreatee: resource creation is completed", map[string]any{"service_id": id}) + + currentSnapshots, diags := types.ListValue(plan.Snapshots.Type(ctx), plan.Snapshots.Elements()) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.BServiceResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + if !plan.Snapshots.Equal(currentSnapshots) && !currentSnapshots.IsNull() { + resp.Diagnostics.Append(utilities.SnapshotsBService(ctx, currentSnapshots, plan.Snapshots, uint64(plan.ServiceId.ValueInt64()), r.client)...) + + if resp.Diagnostics.HasError() { + tflog.Warn(ctx, "Create resourceBService: Error snapshosts bservice") + return + } + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } + +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceBService) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.RecordBasicServiceResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBService: Error get state") + return + } + tflog.Info(ctx, "Read resourceBService: got state successfully", map[string]any{"service_id": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBService: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceBService: set timeouts successfully", map[string]any{ + "service_id": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.BSerivceReadStatus(ctx, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBService: Error reading status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.BServiceResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBService: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBService: Error set state") + return + } + tflog.Info(ctx, "End read resourceBService") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceBService) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.RecordBasicServiceResourceModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBService: Error receiving the plan") + return + } + + logMap := map[string]any{"service_id": plan.ID.ValueString()} + tflog.Info(ctx, "Update resourceBService: got plan successfully", logMap) + + // Retrieve values from state + var state models.RecordBasicServiceResourceModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBService: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceBService: got state successfully", logMap) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBService: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceBService: set timeouts successfully", map[string]any{ + "service_id": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + _, err := strconv.Atoi(state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceBService: Cannot parse ID from state", err.Error()) + return + } + + // enable/disable bservice + if !plan.Enable.Equal(state.Enable) && !plan.Enable.IsNull() { + resp.Diagnostics.Append(utilities.EnableDisableBService(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBService: Error enabling/disabling bservice") + return + } + } + + // start/stop bservice + if !plan.Start.Equal(state.Start) && !plan.Start.IsNull() { + resp.Diagnostics.Append(utilities.StartStopBService(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBService: Error start/stop bservice") + return + } + } + + // SnapshotsBService bservice + if !plan.Snapshots.Equal(state.Snapshots) && !plan.Snapshots.IsNull() { + resp.Diagnostics.Append(utilities.SnapshotsBService(ctx, state.Snapshots, plan.Snapshots, uint64(plan.ServiceId.ValueInt64()), r.client)...) + + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBService: Error snapshosts bservice") + return + } + } + + tflog.Info(ctx, "Update resourceBService: bservice update is completed", logMap) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.BServiceResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
+ if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceBService) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.RecordBasicServiceResourceModel + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceBService: Error get state") + return + } + tflog.Info(ctx, "Delete resourceBService: got state successfully", map[string]any{"serice_id": state.ID.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceBService: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceBService: set timeouts successfully", map[string]any{ + "service_id": state.ID.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + permanently := state.Permanently.ValueBool() + if state.Permanently.IsNull() { + permanently = true + } // default true + + // Delete existing resource group + delReq := bservice.DeleteRequest{ + ServiceID: uint64(state.ServiceId.ValueInt64()), + Permanently: permanently, + } + + tflog.Info(ctx, "Delete resourceBService: before call CloudAPI().BService().Delete", map[string]any{"req": delReq}) + _, err := r.client.CloudAPI().BService().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceBService: Error deleting BService with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceBService", map[string]any{"service_id": state.ID.ValueString()}) +} + +// Schema defines the schema for the resource. 
+func (r *resourceBService) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceBService(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceBService) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice" +} + +// Configure adds the provider configured client to the resource. +func (r *resourceBService) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceBService") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceBService successfully") +} + +func (r *resourceBService) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudapi/bservice/resource_bservice_group.go b/internal/service/cloudapi/bservice/resource_bservice_group.go new file mode 100644 index 0000000..37ea950 --- /dev/null +++ b/internal/service/cloudapi/bservice/resource_bservice_group.go @@ -0,0 +1,347 @@ +package bservice + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceBServiceGroup{} + _ resource.ResourceWithImportState = &resourceBServiceGroup{} +) + +// NewResourceBServiceGroup is a helper function to simplify the provider implementation. +func NewResourceBServiceGroup() resource.Resource { + return &resourceBServiceGroup{} +} + +// resourceBServiceGroup is the resource implementation. +type resourceBServiceGroup struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceBServiceGroup) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create resource group + var plan models.ResourceRecordGroupModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceBServiceGroup: Error receiving the plan") + return + } + + contextCreateMap := map[string]any{ + "service_id": plan.ServiceID.ValueInt64(), + "compgroup_id": plan.CompgroupID.ValueInt64(), + } + tflog.Info(ctx, "Create resourceBServiceGroup: got plan successfully", contextCreateMap) + tflog.Info(ctx, "Create resourceBServiceGroup: start creating", contextCreateMap) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceBServiceGroup: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceBServiceGroup: set timeouts successfully", map[string]any{ + "service_id": plan.ServiceID.ValueInt64(), + "compgroup_id": plan.CompgroupID.ValueInt64(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Make create request and get response + diags = utilities.BServiceGroupResourceCreate(ctx, &plan, r.client) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + tflog.Info(ctx, "BServiceResourceCreatee: BService group created", map[string]any{"service_id": plan.ServiceID.ValueInt64()}) + + tflog.Info(ctx, "BServiceResourceCreatee: resource creation is completed", map[string]any{"service_id": plan.ServiceID.ValueInt64()}) + + currentParents, diags := types.ListValue(plan.Parents.Type(ctx), plan.Parents.Elements()) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.BServiceGroupResource(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + return + } + + if !plan.Parents.Equal(currentParents) && !currentParents.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupParents(ctx, plan.Parents, currentParents, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceBServiceGroup) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceRecordGroupModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBServiceGroup: Error get state") + return + } + tflog.Info(ctx, "Read resourceBServiceGroup: got state successfully", map[string]any{"service_id": state.ID.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBServiceGroup: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceBServiceGroup: set timeouts successfully", map[string]any{ + "service_id": state.ID.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.BServiceGroupReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBServiceGroup: Error reading status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.BServiceGroupResource(ctx, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBServiceGroup: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceBServiceGroup: Error set state") + return + } + tflog.Info(ctx, "End read resourceBServiceGroup") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceBServiceGroup) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceRecordGroupModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error receiving the plan") + return + } + + logMap := map[string]any{"service_id": plan.ID.ValueString()} + tflog.Info(ctx, "Update resourceBServiceGroup: got plan successfully", logMap) + + // Retrieve values from state + var state models.ResourceRecordGroupModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceBServiceGroup: got state successfully", logMap) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceBServiceGroup: set timeouts successfully", map[string]any{ + "service_id": state.ID.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + _, err := strconv.Atoi(state.ID.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceBServiceGroup: Cannot parse ID from state", err.Error()) + return + } + + if !plan.CompCount.Equal(state.CompCount) && !plan.CompCount.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupResize(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error enabling/disabling bservice") + return + } + } + + if !plan.Start.Equal(state.Start) && !plan.Start.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupStartStop(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error start/stop bservice") + return + } + } + + if !plan.Name.Equal(state.Name) || !plan.RAM.Equal(state.RAM) || !plan.CPU.Equal(state.CPU) || !plan.Disk.Equal(state.Disk) || !plan.Role.Equal(state.Role) { + resp.Diagnostics.Append(utilities.BServiceGroupUpdate(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error update bservice group") + return + } + } + + if !plan.ExtNets.Equal(state.ExtNets) && !plan.ExtNets.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupExtNet(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error update extnets bservice") + return + } + } + + if !plan.VINSes.Equal(state.VINSes) && !plan.VINSes.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupVinses(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error update vinses bservice") + return + } + } + + if !plan.Parents.Equal(state.Parents) && !plan.Parents.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupParents(ctx, plan.Parents, state.Parents, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error update parents bservice") + return + } + } + + if !plan.RemoveComputes.Equal(state.RemoveComputes) && !plan.RemoveComputes.IsNull() { + resp.Diagnostics.Append(utilities.BServiceGroupRemoveComputes(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceBServiceGroup: Error remove computes bservice") + return + } + } + + tflog.Info(ctx, "Update resourceBServiceGroup: bservice update is completed", logMap) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.BServiceGroupResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceBServiceGroup) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceRecordGroupModel + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceBServiceGroup: Error get state") + return + } + tflog.Info(ctx, "Delete resourceBServiceGroup: got state successfully", map[string]any{"serice_id": state.ID.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceBServiceGroup: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceBServiceGroup: set timeouts successfully", map[string]any{ + "service_id": state.ID.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + // Delete existing resource group + delReq := bservice.GroupRemoveRequest{ + ServiceID: uint64(state.ServiceID.ValueInt64()), + CompGroupID: uint64(state.CompgroupID.ValueInt64()), + } + + tflog.Info(ctx, "Delete resourceBServiceGroup: before call CloudAPI().BService().GroupRemove", map[string]any{"req": delReq}) + _, err := r.client.CloudAPI().BService().GroupRemove(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceBServiceGroup: Error deleting BService group with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceBServiceGroup", map[string]any{"service_id": state.ID.ValueString()}) +} + +// Schema defines the schema for the resource. +func (r *resourceBServiceGroup) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceBServiceGroup(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceBServiceGroup) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_bservice_group" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceBServiceGroup) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceBServiceGroup") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceBServiceGroup successfully") +} + +func (r *resourceBServiceGroup) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + idParts := strings.Split(req.ID, ",") + + if len(idParts) != 2 || idParts[0] == "" || idParts[1] == "" { + resp.Diagnostics.AddError( + "Unexpected Import Identifier", + fmt.Sprintf("Expected import identifier with format: service_id,compgroup_name. Got: %q", req.ID), + ) + return + } + + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("sid"), idParts[0])...) + resp.Diagnostics.Append(resp.State.SetAttribute(ctx, path.Root("id"), idParts[1])...) +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice.go b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice.go new file mode 100644 index 0000000..e13e835 --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice.go @@ -0,0 +1,172 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceBService() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "service_id": schema.Int64Attribute{ + Required: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "base_domain": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "architecture": schema.StringAttribute{ + Computed: true, + }, + "compgroup_id": 
schema.Int64Attribute{ + Computed: true, + }, + "compgroup_name": schema.StringAttribute{ + Computed: true, + }, + "compgroup_role": schema.StringAttribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "stack_id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "cpu_total": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "disk_total": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "groups": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "computes": schema.Int64Attribute{ + Computed: true, + }, + "consistency": schema.BoolAttribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "service_name": schema.StringAttribute{ + Computed: true, + }, + "parent_srv_id": schema.Int64Attribute{ + Computed: true, + }, + "parent_srv_type": schema.StringAttribute{ + Computed: true, + }, + "ram_total": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + 
"snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + "valid": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + "ssh_key": schema.StringAttribute{ + Computed: true, + }, + "ssh_user": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_deleted_list.go b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_deleted_list.go new file mode 100644 index 0000000..602d967 --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_deleted_list.go @@ -0,0 +1,103 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceBServiceDeletedList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Optional: true, + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": 
schema.StringAttribute{ + Computed: true, + }, + "base_domain": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "groups": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "service_id": schema.Int64Attribute{ + Computed: true, + }, + "service_name": schema.StringAttribute{ + Computed: true, + }, + "parent_srv_id": schema.Int64Attribute{ + Computed: true, + }, + "parent_srv_type": schema.StringAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "ssh_user": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_group.go b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_group.go new file mode 100644 index 0000000..802d727 --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_group.go @@ -0,0 +1,137 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceBServiceGroup() map[string]schema.Attribute { + return 
map[string]schema.Attribute{ + "service_id": schema.Int64Attribute{ + Required: true, + }, + "compgroup_id": schema.Int64Attribute{ + Required: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "ip_addresses": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "os_users": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "login": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "consistency": schema.BoolAttribute{ + Computed: true, + }, + "cpu": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "disk": schema.Int64Attribute{ + Computed: true, + }, + "driver": schema.StringAttribute{ + Computed: true, + }, + "extnets": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "compgroup_name": schema.StringAttribute{ + Computed: true, + }, + "parents": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "ram": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + 
Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "seq_no": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "timeout_start": schema.Int64Attribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "vinses": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + } +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_list.go b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_list.go new file mode 100644 index 0000000..84964e9 --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_list.go @@ -0,0 +1,121 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceBServiceList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "by_id": schema.Int64Attribute{ + Optional: true, + }, + "name": schema.StringAttribute{ + Optional: true, + }, + "rg_name": schema.StringAttribute{ + Optional: true, + }, + "status": schema.StringAttribute{ + Optional: true, + }, + "tech_status": schema.StringAttribute{ + Optional: true, + }, + "account_name": schema.StringAttribute{ + Optional: true, + }, + "account_id": schema.Int64Attribute{ + Optional: true, + }, + "rg_id": schema.Int64Attribute{ + Optional: true, + }, + "sort_by": schema.StringAttribute{ + Optional: true, + }, + "page": schema.Int64Attribute{ + Optional: true, + }, + "size": schema.Int64Attribute{ + Optional: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: 
schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "base_domain": schema.StringAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "groups": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "service_id": schema.Int64Attribute{ + Computed: true, + }, + "service_name": schema.StringAttribute{ + Computed: true, + }, + "parent_srv_id": schema.Int64Attribute{ + Computed: true, + }, + "parent_srv_type": schema.StringAttribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "ssh_user": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_snapshot_list.go b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_snapshot_list.go new file mode 100644 index 0000000..5eeff66 --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_data_source_bservice_snapshot_list.go @@ -0,0 +1,35 @@ +package schemas + +import ( + 
"github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceBServiceSnapshotList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "service_id": schema.Int64Attribute{ + Required: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + "valid": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_resource_bservice.go b/internal/service/cloudapi/bservice/schemas/schema_resource_bservice.go new file mode 100644 index 0000000..103ce5b --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_resource_bservice.go @@ -0,0 +1,199 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" +) + +func MakeSchemaResourceBService() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "service_name": schema.StringAttribute{ + Required: true, + }, + "rg_id": schema.Int64Attribute{ + Required: true, + }, + "ssh_key": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "ssh_user": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "permanently": schema.BoolAttribute{ + Optional: true, + }, + "enable": schema.BoolAttribute{ + Optional: true, + }, + "restore": schema.BoolAttribute{ + Optional: true, + }, + "start": schema.BoolAttribute{ + Optional: true, + }, + "service_id": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + 
"snapshots": schema.ListNestedAttribute{ + Optional: true, + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "rollback": schema.BoolAttribute{ + Optional: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + "valid": schema.BoolAttribute{ + Computed: true, + }, + }, + }, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "base_domain": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "architecture": schema.StringAttribute{ + Computed: true, + }, + "compgroup_id": schema.Int64Attribute{ + Computed: true, + }, + "compgroup_name": schema.StringAttribute{ + Computed: true, + }, + "compgroup_role": schema.StringAttribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "stack_id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "cpu_total": schema.Int64Attribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "disk_total": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "groups": schema.ListNestedAttribute{ + Computed: true, + 
NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "computes": schema.Int64Attribute{ + Computed: true, + }, + "consistency": schema.BoolAttribute{ + Computed: true, + }, + "id": schema.Int64Attribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "parent_srv_id": schema.Int64Attribute{ + Computed: true, + }, + "parent_srv_type": schema.StringAttribute{ + Computed: true, + }, + "ram_total": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, + }, + "user_managed": schema.BoolAttribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } +} diff --git a/internal/service/cloudapi/bservice/schemas/schema_resource_bservice_group.go b/internal/service/cloudapi/bservice/schemas/schema_resource_bservice_group.go new file mode 100644 index 0000000..8d3720c --- /dev/null +++ b/internal/service/cloudapi/bservice/schemas/schema_resource_bservice_group.go @@ -0,0 +1,196 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/validate" +) + +func MakeSchemaResourceBServiceGroup() map[string]schema.Attribute { + return map[string]schema.Attribute{ + "service_id": schema.Int64Attribute{ + Required: true, + }, + "compgroup_name": schema.StringAttribute{ + Required: true, + }, + "comp_count": schema.Int64Attribute{ + Required: true, + }, + "cpu": schema.Int64Attribute{ + Required: true, + }, + "ram": schema.Int64Attribute{ + Required: true, + Validators: []validator.Int64{ + validate.DivisibleBy(constants.DivisibleByRAM), + int64validator.AtLeast(constants.MinRamPerCompute), + }, + }, + "disk": schema.Int64Attribute{ + Required: true, + }, + "image_id": schema.Int64Attribute{ + Required: true, + }, + "driver": schema.StringAttribute{ + Required: true, + }, + "sep_id": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "sep_pool": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "cloud_init": schema.StringAttribute{ + Optional: true, + }, + "role": schema.StringAttribute{ + Optional: true, + Computed: true, + }, + "timeout_start": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "extnets": schema.ListAttribute{ + Optional: true, + Computed: true, + ElementType: types.Int64Type, + }, + "vinses": schema.ListAttribute{ + Optional: true, + Computed: true, + ElementType: types.Int64Type, + }, + "mode": schema.StringAttribute{ + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("RELATIVE", "ABSOLUTE"), + }, + }, + "start": schema.BoolAttribute{ + Optional: true, + }, + "force_stop": schema.BoolAttribute{ + Optional: true, + }, + "force_update": schema.BoolAttribute{ + 
Optional: true, + }, + "parents": schema.ListAttribute{ + Optional: true, + Computed: true, + ElementType: types.Int64Type, + }, + "remove_computes": schema.ListAttribute{ + Optional: true, + ElementType: types.Int64Type, + }, + "compgroup_id": schema.Int64Attribute{ + Optional: true, + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "id": schema.Int64Attribute{ + Computed: true, + }, + "ip_addresses": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "os_users": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "login": schema.StringAttribute{ + Computed: true, + }, + "password": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + }, + }, + }, + "consistency": schema.BoolAttribute{ + Computed: true, + }, + "created_by": schema.StringAttribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_by": schema.StringAttribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "rg_id": schema.Int64Attribute{ + Computed: true, + }, + "rg_name": schema.StringAttribute{ + Computed: true, + }, + "seq_no": schema.Int64Attribute{ + Computed: true, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "updated_by": schema.StringAttribute{ + Computed: true, + }, + "updated_time": schema.Int64Attribute{ + Computed: true, 
+ }, + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "sid": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + } +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice.go b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice.go new file mode 100644 index 0000000..a760bd8 --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice.go @@ -0,0 +1,22 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" +) + +func BServiceDataSourceCheckPresence(ctx context.Context, serviceId uint64, c *decort.DecortClient) (*bservice.RecordBasicService, error) { + tflog.Info(ctx, fmt.Sprintf("BServiceDataSourceCheckPresence: Get info about bserivce with ID - %v", serviceId)) + + record, err := c.CloudAPI().BService().Get(ctx, bservice.GetRequest{ServiceID: serviceId}) + + if err != nil { + return nil, fmt.Errorf("cannot get info about bservice with error: %w", err) + } + + return record, err +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_deleted_list.go b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_deleted_list.go new file mode 100644 index 0000000..7254d32 --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_deleted_list.go @@ -0,0 +1,41 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" +) + +func BServiceDeletedListDataSourceCheckPresence(ctx context.Context, plan models.ListBasicServicesDelModel, c *decort.DecortClient) (*bservice.ListBasicServices, error) { + tflog.Info(ctx, "BServiceListDataSourceCheckPresence: Get deleted list bserivce") + + accListDelReq := bservice.ListDeletedRequest{} + + if !plan.AccountID.IsNull() { + accListDelReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.RGID.IsNull() { + accListDelReq.RGID = uint64(plan.RGID.ValueInt64()) + } + if !plan.SortBy.IsNull() { + accListDelReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + accListDelReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + accListDelReq.Size = uint64(plan.Size.ValueInt64()) + } + + record, err := c.CloudAPI().BService().ListDeleted(ctx, accListDelReq) + + if err != nil { + return nil, fmt.Errorf("cannot get bservice deleted list with error: %w", err) + } + + return record, err +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_group.go b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_group.go new file mode 100644 index 0000000..3c8e209 --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_group.go @@ -0,0 +1,25 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" +) + +func BServiceGroupDataSourceCheckPresence(ctx context.Context, state models.RecordGroupModel, c *decort.DecortClient) (*bservice.RecordGroup, error) { + tflog.Info(ctx, fmt.Sprintf("BServiceGroupDataSourceCheckPresence: Get info about bserivce 
with ID - %v", state.ServiceID)) + + req := bservice.GroupGetRequest{ServiceID: uint64(state.ServiceID.ValueInt64()), CompGroupID: uint64(state.ID.ValueInt64())} + + record, err := c.CloudAPI().BService().GroupGet(ctx, req) + + if err != nil { + return nil, fmt.Errorf("cannot get info about bservice with error: %w", err) + } + + return record, err +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_list.go b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_list.go new file mode 100644 index 0000000..0618858 --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_list.go @@ -0,0 +1,59 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" +) + +func BServiceListDataSourceCheckPresence(ctx context.Context, plan models.ListBasicServicesModel, c *decort.DecortClient) (*bservice.ListBasicServices, error) { + tflog.Info(ctx, "BServiceListDataSourceCheckPresence: Get list bserivce") + + accListReq := bservice.ListRequest{} + + if !plan.ByID.IsNull() { + accListReq.ByID = uint64(plan.ByID.ValueInt64()) + } + if !plan.Name.IsNull() { + accListReq.Name = plan.Name.ValueString() + } + if !plan.RGName.IsNull() { + accListReq.RGName = plan.RGName.ValueString() + } + if !plan.Status.IsNull() { + accListReq.Status = plan.Status.ValueString() + } + if !plan.TechStatus.IsNull() { + accListReq.TechStatus = plan.TechStatus.ValueString() + } + if !plan.AccountName.IsNull() { + accListReq.AccountName = plan.AccountName.ValueString() + } + if !plan.AccountID.IsNull() { + accListReq.AccountID = uint64(plan.AccountID.ValueInt64()) + } + if !plan.RGID.IsNull() { + accListReq.RGID = 
uint64(plan.RGID.ValueInt64()) + } + if !plan.SortBy.IsNull() { + accListReq.SortBy = plan.SortBy.ValueString() + } + if !plan.Page.IsNull() { + accListReq.Page = uint64(plan.Page.ValueInt64()) + } + if !plan.Size.IsNull() { + accListReq.Size = uint64(plan.Size.ValueInt64()) + } + + record, err := c.CloudAPI().BService().List(ctx, accListReq) + + if err != nil { + return nil, fmt.Errorf("cannot get bservice list with error: %w", err) + } + + return record, err +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_snapshot_list.go b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_snapshot_list.go new file mode 100644 index 0000000..4ba6531 --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_data_source_bservice_snapshot_list.go @@ -0,0 +1,24 @@ +package utilities + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" +) + +func BServiceSnapshotListDataSourceCheckPresence(ctx context.Context, serviceID uint64, c *decort.DecortClient) (*bservice.ListInfoSnapshots, error) { + tflog.Info(ctx, "BServiceSnapshotListDataSourceCheckPresence: Get deleted list bserivce") + + req := bservice.SnapshotListRequest{ServiceID: serviceID} + + record, err := c.CloudAPI().BService().SnapshotList(ctx, req) + + if err != nil { + return nil, fmt.Errorf("cannot get bservice snapshots list with error: %w", err) + } + + return record, err +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_resource_bservice.go b/internal/service/cloudapi/bservice/utilities/utility_resource_bservice.go new file mode 100644 index 0000000..13d7bf3 --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_resource_bservice.go @@ -0,0 +1,386 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + + 
"github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/ic" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" +) + +// BServiceResourceCheckPresence checks if BService with serviceID exists +func BServiceResourceCheckPresence(ctx context.Context, serviceID uint64, c *decort.DecortClient) (*bservice.RecordBasicService, error) { + tflog.Info(ctx, fmt.Sprintf("BServiceResourceCheckPresence: Get info about service with ID - %v", serviceID)) + + record, err := c.CloudAPI().BService().Get(ctx, bservice.GetRequest{ServiceID: serviceID}) + if err != nil { + return nil, fmt.Errorf("BServiceResourceCheckPresence: cannot get info about resource with error: %w", err) + } + + tflog.Info(ctx, "BServiceResourceCheckPresence: response from CloudAPI().BService().Get", map[string]any{"service_id": serviceID, "response": record}) + + return record, err +} + +// BServiceReadStatus loads BService resource by its id, gets it current status. Performs restore and enable if needed for +// Deleted status. +// In case of failure returns errors. 
+func BSerivceReadStatus(ctx context.Context, state *models.RecordBasicServiceResourceModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "BSerivceReadStatus: Read status resource with ID", map[string]any{"resource_id": state.ID.ValueString()}) + + diags := diag.Diagnostics{} + + resourceId, err := strconv.ParseUint(state.ID.ValueString(), 10, 64) + if err != nil { + diags.AddError("BSerivceReadStatus: Cannot parse resource ID from state", err.Error()) + return diags + } + + record, err := BServiceResourceCheckPresence(ctx, resourceId, c) + if err != nil { + diags.AddError("BSerivceReadStatus: Unable to Read bservice before status check", err.Error()) + return diags + } + + // check resource status + switch record.Status { + case status.Disabled: + tflog.Info(ctx, "The BService is in status Disabled, troubles may occur with update. Please, enable BService first.") + case status.Modeled: + diags.AddError("The basic service is in status: %s, please, contact support for more information", record.Status) + case status.Disabling: + tflog.Info(ctx, fmt.Sprintf("The basic service is in status: %s, troubles can occur with the update.", record.Status)) + case status.Deleted: + restore := state.Restore.ValueBool() + if state.Restore.IsNull() { + restore = true + } // default true + if restore { + // attempt to restore bservice + tflog.Info(ctx, "BServiceReadStatus: BService with status.Deleted is being read, attempt to restore it", map[string]any{ + "BService": state.ID.ValueString(), + "status": record.Status}) + diags.Append(restoreBservice(ctx, resourceId, c)...) 
+ if diags.HasError() { + tflog.Error(ctx, "BServiceReadStatus: cannot restore BService") + return diags + } + tflog.Info(ctx, "BServiceReadStatus: BService restored successfully", map[string]any{"service_id": resourceId}) + } else { + tflog.Info(ctx, "BServiceReadStatus: BService is i status Deleted but restore is not specified") + } + + case status.Destroyed: + diags.AddError( + "BSerivceReadStatus: BService is in status Destroyed", + fmt.Sprintf("the resource with bservice_id %d cannot be read or updated because it has been destroyed", resourceId), + ) + return diags + + case status.Destroying: + diags.AddError( + "BSerivceReadStatus: BService is in progress with status Destroying", + fmt.Sprintf("the resource with bservice_id %d cannot be read or updated because it is currently being destroyed", resourceId), + ) + return diags + } + + return nil +} + +func BServiceResourceCreate(ctx context.Context, plan *models.RecordBasicServiceResourceModel, c *decort.DecortClient) (*uint64, diag.Diagnostics) { + tflog.Info(ctx, "Start BServiceResourceCreate", map[string]any{"service_id": plan.ServiceId.ValueInt64()}) + + diags := diag.Diagnostics{} + + err := ic.ExistRG(ctx, uint64(plan.RGID.ValueInt64()), c) + if err != nil { + diags.AddError(fmt.Sprintf("resourceBasicServiceCreate: can't create basic service because RGID %d is not allowed or does not exist", plan.RGID.ValueInt64()), err.Error()) + return nil, diags + } + + req := bservice.CreateRequest{} + + req.Name = plan.Name.ValueString() + req.RGID = uint64(plan.RGID.ValueInt64()) + + if !plan.SSHKey.IsNull() { + req.SSHKey = plan.SSHKey.ValueString() + } + + if !plan.SSHUser.IsNull() { + req.SSHUser = plan.SSHUser.ValueString() + } + + // Make request and get response + serviceId, err := c.CloudAPI().BService().Create(ctx, req) + if err != nil { + tflog.Error(ctx, "Error response for create bservice", map[string]any{"error": err.Error()}) + diags.AddError("Unable to Create bservice", err.Error()) + return nil, diags 
+ } + + plan.ID = types.StringValue(strconv.Itoa(int(serviceId))) + + enable := plan.Enable.ValueBool() + if enable && (plan.Status.ValueString() == status.Disabled || plan.Status.ValueString() == status.Created) { + tflog.Info(ctx, "resourceBasicServiceCreate: before calling CloudAPI().BService().Enable", map[string]any{"service_id": serviceId}) + res, err := c.CloudAPI().BService().Enable(ctx, bservice.EnableRequest{ServiceID: serviceId}) + if err != nil { + diags.AddWarning( + "resourceBasicServiceCreate: cannot enable BService", + err.Error(), + ) + return &serviceId, diags + } + tflog.Info(ctx, "resourceBasicServiceCreate: response from CloudAPI().BService().Enable", map[string]any{"service_id": serviceId, "response": res}) + return &serviceId, diags + } + + if plan.Start.ValueBool() { + if !enable { + diags.AddWarning( + "can not start bservice that is not enabled. Set enable = true and start = true to enable and start bservice", + fmt.Sprintf("service_id: %v", serviceId), + ) + return &serviceId, diags + } + _, err := c.CloudAPI().BService().Start(ctx, bservice.StartRequest{ + ServiceID: serviceId, + }) + if err != nil { + diags.AddWarning( + "resourceBasicServiceCreate: cannot start BService", + err.Error(), + ) + return &serviceId, diags + } + } + tflog.Info(ctx, "End resourceBasicServiceCreate", map[string]any{"service_id": serviceId}) + return &serviceId, diags +} + +// EnableDisableBService performs BService Enable/Disable request. +// Returns error in case of failures. 
+func EnableDisableBService(ctx context.Context, plan *models.RecordBasicServiceResourceModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start EnableDisableBService", map[string]any{"service_id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + serviceID, err := strconv.Atoi(plan.ID.ValueString()) + if err != nil { + diags.AddError("EnableDisableBService: Cannot parse ID from state", err.Error()) + return diags + } + + if plan.Enable.ValueBool() { + tflog.Info(ctx, "EnableDisableBService: before calling CloudAPI().BService().Enable", map[string]any{"service_id": serviceID}) + res, err := c.CloudAPI().BService().Enable(ctx, bservice.EnableRequest{ServiceID: uint64(serviceID)}) + if err != nil { + diags.AddError( + "EnableDisableBService: cannot enable BService", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableBService: response from CloudAPI().BService().Enable", map[string]any{"service_id": serviceID, "response": res}) + return nil + } else { + tflog.Info(ctx, "EnableDisableBService: before calling CloudAPI().BService().Disable", map[string]any{"service_id": serviceID}) + res, err := c.CloudAPI().BService().Disable(ctx, bservice.DisableRequest{ServiceID: uint64(serviceID)}) + if err != nil { + diags.AddError( + "EnableDisableBService: cannot disable BService", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "EnableDisableBService: response from CloudAPI().BService().Disable", map[string]any{"service_id": serviceID, "response": res}) + } + + return nil +} + +// StartStopBService performs BService Start/Stop request. +// Returns error in case of failures. 
+func StartStopBService(ctx context.Context, plan *models.RecordBasicServiceResourceModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start StartStopBService", map[string]any{"service_id": plan.ID.ValueString()}) + + diags := diag.Diagnostics{} + + serviceID, err := strconv.Atoi(plan.ID.ValueString()) + if err != nil { + diags.AddError("StartStopBService: Cannot parse ID from state", err.Error()) + return diags + } + + if plan.Start.ValueBool() { + tflog.Info(ctx, "StartStopBService: before calling CloudAPI().BService().Start", map[string]any{"service_id": serviceID}) + res, err := c.CloudAPI().BService().Start(ctx, bservice.StartRequest{ServiceID: uint64(serviceID)}) + if err != nil { + diags.AddError( + "StartStopBService: cannot start BService", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "StartStopBService: response from CloudAPI().BService().Start", map[string]any{"service_id": serviceID, "response": res}) + return nil + } else { + tflog.Info(ctx, "StartStopBService: before calling CloudAPI().BService().Stop", map[string]any{"service_id": serviceID}) + res, err := c.CloudAPI().BService().Stop(ctx, bservice.StopRequest{ServiceID: uint64(serviceID)}) + if err != nil { + diags.AddError( + "StartStopBService: cannot stop BService", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "StartStopBService: response from CloudAPI().BService().Stop", map[string]any{"service_id": serviceID, "response": res}) + } + + return nil +} + +func SnapshotsBService(ctx context.Context, oldSnapshots basetypes.ListValue, newSnapshots basetypes.ListValue, serviceID uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + // Handle snapshot changes in the plan + tflog.Info(ctx, "Start SnapshotsBService", map[string]any{"service_id": serviceID}) + + deletedSnapshots := make([]models.ItemSnapshotResourceModel, 0) + addedSnapshots := make([]models.ItemSnapshotResourceModel, 0) + updatedSnapshots := 
make([]models.ItemSnapshotResourceModel, 0) + + oldSnapshotsList := make([]models.ItemSnapshotResourceModel, 0, len(oldSnapshots.Elements())) + newSnapshotsList := make([]models.ItemSnapshotResourceModel, 0, len(newSnapshots.Elements())) + + diags.Append(oldSnapshots.ElementsAs(ctx, &oldSnapshotsList, true)...) + if diags.HasError() { + tflog.Error(ctx, "SnapshotsBService: cannot populate SnapshotsBService with plan.Snapshots object element") + return diags + } + + diags.Append(newSnapshots.ElementsAs(ctx, &newSnapshotsList, true)...) + if diags.HasError() { + tflog.Error(ctx, "SnapshotsBService: cannot populate SnapshotsBService with plan.Snapshots object element") + return diags + } + + for _, el := range oldSnapshotsList { + if !isContainsSnapshot(newSnapshotsList, el) { + deletedSnapshots = append(deletedSnapshots, el) + } + } + + for _, el := range newSnapshotsList { + if !isContainsSnapshot(oldSnapshotsList, el) { + addedSnapshots = append(addedSnapshots, el) + } else if isRollback(oldSnapshotsList, el) { + updatedSnapshots = append(updatedSnapshots, el) + } + } + + tflog.Debug(ctx, "SnapshotsBService: Snapshots to be deleted", map[string]any{"deleted_snapshots": deletedSnapshots}) + tflog.Debug(ctx, "SnapshotsBService: Snapshots to be added", map[string]any{"added_snapshots": addedSnapshots}) + tflog.Debug(ctx, "SnapshotsBService: Snapshots to be updated", map[string]any{"updated_snapshots": updatedSnapshots}) + + if len(deletedSnapshots) > 0 { + for _, snapshot := range deletedSnapshots { + req := bservice.SnapshotDeleteRequest{ + ServiceID: serviceID, + Label: snapshot.Label.ValueString(), + } + + _, err := c.CloudAPI().BService().SnapshotDelete(ctx, req) + if err != nil { + tflog.Error(ctx, "SnapshotsBService: Failed to delete snapshot") + return diags + } + tflog.Info(ctx, "Deleted snapshot", map[string]any{"service_id": serviceID, "label": snapshot.Label}) + } + } + + if len(addedSnapshots) > 0 { + for _, snapshot := range addedSnapshots { + req := 
bservice.SnapshotCreateRequest{ + ServiceID: serviceID, + Label: snapshot.Label.ValueString(), + } + + _, err := c.CloudAPI().BService().SnapshotCreate(ctx, req) + if err != nil { + tflog.Error(ctx, "SnapshotsBService: Failed to create snapshot") + return diags + } + tflog.Info(ctx, "Created snapshot", map[string]any{"service_id": serviceID, "label": snapshot.Label}) + } + } + + if len(updatedSnapshots) > 0 { + for _, snapshot := range updatedSnapshots { + req := bservice.SnapshotRollbackRequest{ + ServiceID: serviceID, + Label: snapshot.Label.ValueString(), + } + + _, err := c.CloudAPI().BService().SnapshotRollback(ctx, req) + if err != nil { + tflog.Error(ctx, "SnapshotsBService: Failed to rollback snapshot") + return diags + } + tflog.Info(ctx, "Rolled back snapshot", map[string]any{"service_id": serviceID, "label": snapshot.Label}) + } + } + return nil + +} + +func isContainsSnapshot(els []models.ItemSnapshotResourceModel, el models.ItemSnapshotResourceModel) bool { + for _, elOld := range els { + if elOld.GUID == el.GUID { + return true + } + } + return false +} + +func isRollback(els []models.ItemSnapshotResourceModel, el models.ItemSnapshotResourceModel) bool { + for _, elOld := range els { + if elOld.GUID == el.GUID && elOld.Rollback != el.Rollback && el.Rollback.ValueBool() { + return true + } + } + return false +} + +// restoreBservice performs BService Restore request. +// Returns error in case of failures. 
+func restoreBservice(ctx context.Context, serviceID uint64, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + restoreReq := bservice.RestoreRequest{ + ServiceID: serviceID, + } + + tflog.Info(ctx, "restoreBservice: before calling CloudAPI().BService().Restore", map[string]any{"service_id": serviceID, "req": restoreReq}) + + res, err := c.CloudAPI().BService().Restore(ctx, restoreReq) + if err != nil { + diags.AddError( + "restoreBservice: cannot restore BService", + err.Error(), + ) + return diags + } + tflog.Info(ctx, "restoreBservice: response from CloudAPI().BService().Restore", map[string]any{"service_id": serviceID, "response": res}) + + return nil +} diff --git a/internal/service/cloudapi/bservice/utilities/utility_resource_bservice_group.go b/internal/service/cloudapi/bservice/utilities/utility_resource_bservice_group.go new file mode 100644 index 0000000..c67ca1f --- /dev/null +++ b/internal/service/cloudapi/bservice/utilities/utility_resource_bservice_group.go @@ -0,0 +1,403 @@ +package utilities + +import ( + "context" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/bservice" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/bservice/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/status" +) + +func BServiceGroupResourceCheckPresence(ctx context.Context, serviceID uint64, compGroupID uint64, c *decort.DecortClient) (*bservice.RecordGroup, error) { + tflog.Info(ctx, fmt.Sprintf("BServiceGroupResourceCheckPresence: Get info about bserivce group with ID - %v", compGroupID)) + + req := bservice.GroupGetRequest{ServiceID: serviceID, 
CompGroupID: compGroupID} + record, err := c.CloudAPI().BService().GroupGet(ctx, req) + + if err != nil { + return nil, fmt.Errorf("cannot get info about bservice with error: %w", err) + } + + return record, err +} + +func BServiceGroupResourceCreate(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start BServiceGroupResourceCreate", map[string]any{"service": plan.ServiceID.ValueInt64()}) + + diags := diag.Diagnostics{} + + req := bservice.GroupAddRequest{ + ServiceID: uint64(plan.ServiceID.ValueInt64()), + Name: plan.Name.ValueString(), + Count: uint64(plan.CompCount.ValueInt64()), + CPU: uint64(plan.CPU.ValueInt64()), + RAM: uint64(plan.RAM.ValueInt64()), + Disk: uint64(plan.Disk.ValueInt64()), + ImageID: uint64(plan.ImageID.ValueInt64()), + Driver: plan.Driver.ValueString(), + } + + if !plan.SEPID.IsNull() { + req.SEPID = uint64(plan.SEPID.ValueInt64()) + } + + if !plan.SepPool.IsNull() { + req.SEPPool = plan.SepPool.ValueString() + } + + if !plan.Role.IsNull() { + req.Role = plan.Role.ValueString() + } + + if !plan.TimeoutStart.IsNull() { + req.TimeoutStart = uint64(plan.TimeoutStart.ValueInt64()) + } + + if !plan.VINSes.IsNull() { + result := make([]uint64, 0, len(plan.VINSes.Elements())) + + diags.Append(plan.VINSes.ElementsAs(ctx, &result, true)...) + if diags.HasError() { + tflog.Error(ctx, "BServiceGroupResourceCreate: cannot populate result with plan.VINSes object element") + return diags + } + + req.VINSes = result + } + + if !plan.ExtNets.IsNull() { + result := make([]uint64, 0, len(plan.ExtNets.Elements())) + + diags.Append(plan.ExtNets.ElementsAs(ctx, &result, true)...) 
+		if diags.HasError() {
+			tflog.Error(ctx, "BServiceGroupResourceCreate: cannot populate result with plan.ExtNets object element")
+			return diags
+		}
+
+		req.ExtNets = result
+	}
+
+	// Make request and get response
+	compgroupId, err := c.CloudAPI().BService().GroupAdd(ctx, req)
+	if err != nil {
+		tflog.Error(ctx, "Error response for group add", map[string]any{"error": err.Error()})
+		diags.AddError("Unable to add group", err.Error())
+		return diags
+	}
+	plan.ID = types.StringValue(strconv.Itoa(int(compgroupId)))
+	plan.SID = types.StringValue(strconv.Itoa(int(plan.ServiceID.ValueInt64())))
+
+	if plan.Start.ValueBool() {
+		tflog.Info(ctx, "BServiceGroupResourceCreate: start group", map[string]any{"service_id": plan.ServiceID.ValueInt64()})
+		_, err := c.CloudAPI().BService().GroupStart(ctx, bservice.GroupStartRequest{
+			ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+			CompGroupID: compgroupId,
+		})
+		if err != nil {
+			diags.AddWarning(
+				"BServiceGroupResourceCreate: cannot start BService",
+				err.Error(),
+			)
+			return diags
+		}
+
+	}
+	tflog.Info(ctx, "End BServiceGroupResourceCreate", map[string]any{"service_id": compgroupId})
+	return diags
+}
+
+// BServiceGroupResize performs a BService group resize request.
+func BServiceGroupResize(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "BServiceGroupResize: start.", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+
+	diags := diag.Diagnostics{}
+
+	req := bservice.GroupResizeRequest{
+		ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+		CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+		Count:       plan.CompCount.ValueInt64(),
+		Mode:        plan.Mode.ValueString(),
+	}
+
+	_, err := c.CloudAPI().BService().GroupResize(ctx, req)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("Cannot resize group with ServiceID - %d,CompgroupID - %d ", plan.ServiceID.ValueInt64(), plan.CompgroupID.ValueInt64()), err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "BServiceGroupResize: resize group successfully", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	return diags
+}
+
+// BServiceGroupUpdate performs a BService group update request (name/role/CPU/RAM/disk).
+func BServiceGroupUpdate(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "BServiceGroupUpdate: start.", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+
+	diags := diag.Diagnostics{}
+
+	req := bservice.GroupUpdateRequest{
+		ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+		CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+		Name:        plan.Name.ValueString(),
+		Role:        plan.Role.ValueString(),
+		CPU:         uint64(plan.CPU.ValueInt64()),
+		RAM:         uint64(plan.RAM.ValueInt64()),
+		Disk:        uint64(plan.Disk.ValueInt64()),
+		Force:       plan.ForceUpdate.ValueBool(),
+	}
+
+	_, err := c.CloudAPI().BService().GroupUpdate(ctx, req)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("Cannot update group with ServiceID - %d,CompgroupID - %d ", plan.ServiceID.ValueInt64(), plan.CompgroupID.ValueInt64()), err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "BServiceGroupUpdate: update group successfully", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	return diags
+}
+
+// BServiceGroupReadStatus checks that the bservice group is in an updatable status.
+func BServiceGroupReadStatus(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Read status BServiceGroupReadStatus with ID", map[string]any{"service_id": plan.ServiceID.ValueInt64()})
+
+	diags := diag.Diagnostics{}
+
+	serviceID, err := strconv.ParseUint(plan.SID.ValueString(), 10, 64)
+	if err != nil {
+		diags.AddError("BServiceGroupReadStatus: Cannot parse resource ID from state", err.Error())
+		return diags
+	}
+
+	compGroupID, err := strconv.ParseUint(plan.ID.ValueString(), 10, 64)
+	if err != nil {
+		diags.AddError("BServiceGroupReadStatus: Cannot parse resource ID from state", err.Error())
+		return diags
+	}
+
+	resource, err := BServiceGroupResourceCheckPresence(ctx, serviceID, compGroupID, c)
+	if err != nil {
+		diags.AddError("Cannot get info about bservice group ", err.Error())
+		return diags
+	}
+
+	switch resource.Status {
+	case status.Modeled:
+		diags.AddError("Error:", fmt.Sprintf("The bservice group is in status: %s, please, contact support for more information", resource.Status))
+		return diags
+	case status.Destroying:
+		diags.AddError("Error:", fmt.Sprintf("The bservice group is in progress with status: %s", resource.Status))
+		return diags
+	case status.Destroyed:
+		diags.AddError("Error:", "The resource cannot be updated because it has been destroyed")
+		return diags
+	}
+
+	tflog.Info(ctx, "Read status bservice group successfully", map[string]any{"compgroup_id": plan.ID.ValueString()})
+	return diags
+}
+
+// BServiceGroupStartStop starts or stops the bservice group depending on plan.Start.
+func BServiceGroupStartStop(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "Start/Stop bservice group", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+
+	diags := diag.Diagnostics{}
+
+	if plan.Start.ValueBool() {
+		req := bservice.GroupStartRequest{
+			ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+			CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+		}
+
+		_, err := c.CloudAPI().BService().GroupStart(ctx, req)
+		if err != nil {
+			diags.AddError(fmt.Sprintf("Cannot start bservice group with ServiceID - %d,CompgroupID - %d ", plan.ServiceID.ValueInt64(), plan.CompgroupID.ValueInt64()), err.Error())
+			return diags
+		}
+	} else {
+		req := bservice.GroupStopRequest{
+			ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+			CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+			Force:       plan.ForceStop.ValueBool(),
+		}
+		_, err := c.CloudAPI().BService().GroupStop(ctx, req)
+		if err != nil {
+			diags.AddError(fmt.Sprintf("Cannot stop bservice group with ServiceID - %d,CompgroupID - %d ", plan.ServiceID.ValueInt64(), plan.CompgroupID.ValueInt64()), err.Error())
+			return diags
+		}
+	}
+
+	tflog.Info(ctx, "Start/Stop bservice group successfully", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	return diags
+}
+
+// BServiceGroupExtNet replaces the group's external network list.
+func BServiceGroupExtNet(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "update ExtNets", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+
+	diags := diag.Diagnostics{}
+
+	extnetList := make([]uint64, 0, len(plan.ExtNets.Elements()))
+
+	diags.Append(plan.ExtNets.ElementsAs(ctx, &extnetList, true)...)
+	if diags.HasError() {
+		tflog.Error(ctx, "BServiceGroupExtNet: cannot populate result with plan.ExtNets object element")
+		return diags
+	}
+
+	req := bservice.GroupUpdateExtNetRequest{
+		ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+		CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+		ExtNets:     extnetList,
+	}
+
+	_, err := c.CloudAPI().BService().GroupUpdateExtNet(ctx, req)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("Cannot update bservice group extnets with ServiceID - %d,CompgroupID - %d ", plan.ServiceID.ValueInt64(), plan.CompgroupID.ValueInt64()), err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "BServiceGroupExtNet: update successfully", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	return diags
+}
+
+// BServiceGroupVinses replaces the group's VINS list.
+func BServiceGroupVinses(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	tflog.Info(ctx, "update Vinses", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+
+	diags := diag.Diagnostics{}
+
+	vinsesList := make([]uint64, 0, len(plan.VINSes.Elements()))
+
+	diags.Append(plan.VINSes.ElementsAs(ctx, &vinsesList, true)...)
+	if diags.HasError() {
+		tflog.Error(ctx, "BServiceGroupVinses: cannot populate result with plan.VINSes object element")
+		return diags
+	}
+
+	req := bservice.GroupUpdateVINSRequest{
+		ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+		CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+		VINSes:      vinsesList,
+	}
+
+	_, err := c.CloudAPI().BService().GroupUpdateVINS(ctx, req)
+	if err != nil {
+		diags.AddError(fmt.Sprintf("Cannot update bservice group vinses with ServiceID - %d,CompgroupID - %d ", plan.ServiceID.ValueInt64(), plan.CompgroupID.ValueInt64()), err.Error())
+		return diags
+	}
+
+	tflog.Info(ctx, "BServiceGroupVinses: update successfully", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	return diags
+}
+
+// BServiceGroupParents reconciles the group's parent list: removes parents present
+// only in oldParents and adds parents present only in newParents.
+func BServiceGroupParents(ctx context.Context, newParents basetypes.ListValue, oldParents basetypes.ListValue, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+	tflog.Info(ctx, "Start BServiceGroupParents", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+
+	deletedParents := make([]uint64, 0)
+	addedParents := make([]uint64, 0)
+
+	oldParentsList := make([]uint64, 0, len(oldParents.Elements()))
+	newParentsList := make([]uint64, 0, len(newParents.Elements()))
+
+	diags.Append(oldParents.ElementsAs(ctx, &oldParentsList, true)...)
+	if diags.HasError() {
+		tflog.Error(ctx, "BServiceGroupParents: cannot populate uint64 with plan.Parents object element")
+		return diags
+	}
+
+	diags.Append(newParents.ElementsAs(ctx, &newParentsList, true)...)
+	if diags.HasError() {
+		tflog.Error(ctx, "BServiceGroupParents: cannot populate uint64 with plan.Parents object element")
+		return diags
+	}
+
+	for _, el := range oldParentsList {
+		if !isContainsParent(newParentsList, el) {
+			deletedParents = append(deletedParents, el)
+		}
+	}
+
+	for _, el := range newParentsList {
+		if !isContainsParent(oldParentsList, el) {
+			addedParents = append(addedParents, el)
+		}
+	}
+
+	if len(deletedParents) > 0 {
+		for _, parent := range deletedParents {
+
+			req := bservice.GroupParentRemoveRequest{
+				ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+				CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+				ParentID:    parent,
+			}
+
+			_, err := c.CloudAPI().BService().GroupParentRemove(ctx, req)
+			if err != nil {
+				diags.AddError("BServiceGroupParents: Failed to remove parent", err.Error())
+				return diags
+			}
+		}
+		tflog.Info(ctx, "Deleted parents", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	}
+
+	if len(addedParents) > 0 {
+		for _, parent := range addedParents {
+
+			req := bservice.GroupParentAddRequest{
+				ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+				CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+				ParentID:    parent,
+			}
+
+			_, err := c.CloudAPI().BService().GroupParentAdd(ctx, req)
+			if err != nil {
+				diags.AddError("BServiceGroupParents: Failed to add parent", err.Error())
+				return diags
+			}
+		}
+		tflog.Info(ctx, "Added parents", map[string]any{"service_id": plan.ServiceID.ValueInt64(), "compgroup_id": plan.CompgroupID.ValueInt64()})
+	}
+
+	return diags
+}
+
+// BServiceGroupRemoveComputes removes the computes listed in plan.RemoveComputes from the group.
+func BServiceGroupRemoveComputes(ctx context.Context, plan *models.ResourceRecordGroupModel, c *decort.DecortClient) diag.Diagnostics {
+	diags := diag.Diagnostics{}
+	rcs := plan.RemoveComputes
+
+	rcsList := make([]uint64, 0, len(rcs.Elements()))
+
+	diags.Append(rcs.ElementsAs(ctx, &rcsList, true)...)
+	if diags.HasError() {
+		tflog.Error(ctx, "BServiceGroupRemoveComputes: cannot populate uint64 with plan.RemoveComputes object element")
+		return diags
+	}
+	if len(rcsList) > 0 {
+		for _, rc := range rcsList {
+			req := bservice.GroupComputeRemoveRequest{
+				ServiceID:   uint64(plan.ServiceID.ValueInt64()),
+				CompGroupID: uint64(plan.CompgroupID.ValueInt64()),
+				ComputeID:   rc,
+			}
+
+			_, err := c.CloudAPI().BService().GroupComputeRemove(ctx, req)
+			if err != nil {
+				diags.AddError("BServiceGroupRemoveComputes: Failed to remove compute", err.Error())
+				return diags
+			}
+		}
+	}
+	return diags
+}
+
+// isContainsParent reports whether el is present in els.
+func isContainsParent(els []uint64, el uint64) bool {
+	for _, elOld := range els {
+		if elOld == el {
+			return true
+		}
+	}
+	return false
+}
diff --git a/internal/service/cloudapi/disks/data_source_disk.go b/internal/service/cloudapi/disks/data_source_disk.go
new file mode 100644
index 0000000..a2d00fa
--- /dev/null
+++ b/internal/service/cloudapi/disks/data_source_disk.go
@@ -0,0 +1,91 @@
+package disks
+
+import (
+	"context"
+
+	"github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts"
+	"github.com/hashicorp/terraform-plugin-framework/datasource"
+	"github.com/hashicorp/terraform-plugin-framework/datasource/schema"
+	"github.com/hashicorp/terraform-plugin-log/tflog"
+	decort "repository.basistech.ru/BASIS/decort-golang-sdk"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models"
+	"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas"
+)
+
+// Ensure the implementation satisfies the expected interfaces.
+var ( + _ datasource.DataSource = &dataSourceDisk{} +) + +func NewDataSourceDisk() datasource.DataSource { + return &dataSourceDisk{} +} + +// dataSourceDisk is the data source implementation. +type dataSourceDisk struct { + client *decort.DecortClient +} + +func (d *dataSourceDisk) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error get state") + return + } + diskId := uint64(state.DiskID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceDisk: got state successfully", map[string]any{"disk_id": diskId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDisk: set timeouts successfully", map[string]any{ + "disk_id": diskId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error flatten data source disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDisk: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDisk", map[string]any{"disk_id": diskId}) +} + +func (d *dataSourceDisk) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDisk(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDisk) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDisk) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDisk") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDisk successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list.go b/internal/service/cloudapi/disks/data_source_disk_list.go new file mode 100644 index 0000000..b1b34b3 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list.go @@ -0,0 +1,88 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskList{} +) + +func NewDataSourceDiskList() datasource.DataSource { + return &dataSourceDiskList{} +} + +// dataSourceDisk is the data source implementation. +type dataSourceDiskList struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskList: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskList: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskList: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskList: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskList") +} + +func (d *dataSourceDiskList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskList successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list_deleted.go b/internal/service/cloudapi/disks/data_source_disk_list_deleted.go new file mode 100644 index 0000000..abbb767 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_deleted.go @@ -0,0 +1,88 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListDeleted{} +) + +func NewDataSourceDiskListDeleted() datasource.DataSource { + return &dataSourceDiskListDeleted{} +} + +// dataSourceDisk is the data source implementation. +type dataSourceDiskListDeleted struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListDeleted) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListDeletedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListDeleted: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListDeleted: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListDeletedDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListDeleted: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListDeleted") +} + +func (d *dataSourceDiskListDeleted) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListDeleted(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListDeleted) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_list_deleted" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskListDeleted) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListDeleted") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListDeleted successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list_types.go b/internal/service/cloudapi/disks/data_source_disk_list_types.go new file mode 100644 index 0000000..2cefb17 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_types.go @@ -0,0 +1,89 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListTypes{} +) + +func NewDataSourceDiskListTypes() datasource.DataSource { + return &dataSourceDiskListTypes{} +} + +// dataSourceDiskListTypes is the data source implementation. +type dataSourceDiskListTypes struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListTypes) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListTypesModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypes: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypes: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListTypesDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypes: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListTypes") +} + +func (d *dataSourceDiskListTypes) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListTypes(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListTypes) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_list_types" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskListTypes) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListTypes") + // Store the provider-configured client for subsequent Read calls. + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListTypes successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go b/internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go new file mode 100644 index 0000000..287670b --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_types_detailed.go @@ -0,0 +1,88 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListTypesDetailed{} +) + +func NewDataSourceDiskListTypesDetailed() datasource.DataSource { + return &dataSourceDiskListTypesDetailed{} +} + +// dataSourceDiskListTypesDetailed is the data source implementation. +type dataSourceDiskListTypesDetailed struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListTypesDetailed) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListTypesDetailedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListTypesDetailed: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListTypesDetailedDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListTypesDetailed: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListTypesDetailed") +} + +func (d *dataSourceDiskListTypesDetailed) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListTypesDetailed(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListTypesDetailed) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_list_types_detailed" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskListTypesDetailed) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListTypesDetailed") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListTypesDetailed successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_list_unattached.go b/internal/service/cloudapi/disks/data_source_disk_list_unattached.go new file mode 100644 index 0000000..4440751 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_list_unattached.go @@ -0,0 +1,88 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskListUnattached{} +) + +func NewDataSourceDiskListUnattached() datasource.DataSource { + return &dataSourceDiskListUnattached{} +} + +// dataSourceDiskListUnattached is the data source implementation. +type dataSourceDiskListUnattached struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskListUnattached) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskListUnattachedModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error get state") + return + } + tflog.Info(ctx, "Read dataSourceDiskListUnattached: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskListUnattached: set timeouts successfully", map[string]any{"readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskListUnattachedDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error flatten") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskListUnattached: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskListUnattached") +} + +func (d *dataSourceDiskListUnattached) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskListUnattached(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskListUnattached) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_list_unattached" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskListUnattached) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskListUnattached") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskListUnattached successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_replication.go b/internal/service/cloudapi/disks/data_source_disk_replication.go new file mode 100644 index 0000000..791eed7 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_replication.go @@ -0,0 +1,91 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskReplication{} +) + +func NewDataSourceDiskReplication() datasource.DataSource { + return &dataSourceDiskReplication{} +} + +// dataSourceDisk is the data source implementation. +type dataSourceDiskReplication struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskReplication) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.RecordDiskModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskReplication: Error get state") + return + } + diskId := uint64(state.DiskId.ValueInt64()) + tflog.Info(ctx, "Read dataSourceDiskReplication: got state successfully", map[string]any{"disk_id": diskId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskReplication: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskReplication: set timeouts successfully", map[string]any{ + "disk_id": diskId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskReplicationDataSource(ctx, &state, d.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskReplication: Error flatten data source disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskReplication: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskReplication", map[string]any{"disk_id": diskId}) +} + +func (d *dataSourceDiskReplication) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskReplication(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskReplication) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_replication" +} + +// Configure adds the provider configured client to the data source. 
+func (d *dataSourceDiskReplication) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskReplication") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskReplication successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_snapshot.go b/internal/service/cloudapi/disks/data_source_disk_snapshot.go new file mode 100644 index 0000000..fe7e93d --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_snapshot.go @@ -0,0 +1,96 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskSnapshot{} +) + +func NewDataSourceDiskSnapshot() datasource.DataSource { + return &dataSourceDiskSnapshot{} +} + +// dataSourceDiskSnapshot is the data source implementation. 
+type dataSourceDiskSnapshot struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskSnapshot) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskSnapshotModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error get state") + return + } + + ctxSnpsht := map[string]any{ + "disk_id": uint64(state.DiskID.ValueInt64()), + "label": state.Label.ValueString(), + } + tflog.Info(ctx, "Read dataSourceDiskSnapshot: got state successfully", ctxSnpsht) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskSnapshot: set timeouts successfully", map[string]any{ + "disk_id": uint64(state.DiskID.ValueInt64()), + "label": state.Label.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskSnapshotDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshot: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskSnapshot", ctxSnpsht) +} + +func (d *dataSourceDiskSnapshot) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskSnapshot(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskSnapshot) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_snapshot" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskSnapshot) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskSnapshot") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshot successfully") +} diff --git a/internal/service/cloudapi/disks/data_source_disk_snapshot_list.go b/internal/service/cloudapi/disks/data_source_disk_snapshot_list.go new file mode 100644 index 0000000..a5c7fd7 --- /dev/null +++ b/internal/service/cloudapi/disks/data_source_disk_snapshot_list.go @@ -0,0 +1,91 @@ +package disks + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ datasource.DataSource = &dataSourceDiskSnapshotList{} +) + +func NewDataSourceDiskSnapshotList() datasource.DataSource { + return &dataSourceDiskSnapshotList{} +} + +// dataSourceDiskSnapshotList is the data source implementation. +type dataSourceDiskSnapshotList struct { + client *decort.DecortClient +} + +func (d *dataSourceDiskSnapshotList) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + // Read Terraform configuration data into the model + var state models.DataSourceDiskSnapshotListModel + resp.Diagnostics.Append(req.Config.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error get state") + return + } + diskId := uint64(state.DiskID.ValueInt64()) + tflog.Info(ctx, "Read dataSourceDiskSnapshotList: got state successfully", map[string]any{"disk_id": diskId}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout30s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set timeout") + return + } + tflog.Info(ctx, "Read dataSourceDiskSnapshotList: set timeouts successfully", map[string]any{ + "disk_id": diskId, + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Map response body to schema + resp.Diagnostics.Append(flattens.DiskSnapshotListDataSource(ctx, &state, d.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error flatten data source") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read dataSourceDiskSnapshotList: Error set state") + return + } + tflog.Info(ctx, "End read dataSourceDiskSnapshotList", map[string]any{"disk_id": diskId}) +} + +func (d *dataSourceDiskSnapshotList) Schema(ctx context.Context, _ datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaDataSourceDiskSnapshotList(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx), + }, + } +} + +func (d *dataSourceDiskSnapshotList) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_snapshot_list" +} + +// Configure adds the provider configured client to the data source. +func (d *dataSourceDiskSnapshotList) Configure(ctx context.Context, req datasource.ConfigureRequest, resp *datasource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure dataSourceDiskSnapshotList") + d.client = client.DataSource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure dataSourceDiskSnapshotList successfully") +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk.go new file mode 100644 index 0000000..47baa91 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk.go @@ -0,0 +1,108 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskDataSource flattens data 
source for disk. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskDataSource(ctx context.Context, state *models.DataSourceDiskModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskDataSource") + + diags := diag.Diagnostics{} + + diskId := uint64(state.DiskID.ValueInt64()) + + recordDisk, err := utilities.DataSourceDiskCheckPresence(ctx, diskId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskDataSource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk}) + + id := uuid.New() + diskAcl, _ := json.Marshal(recordDisk.ACL) + *state = models.DataSourceDiskModel{ + DiskID: state.DiskID, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + Computes: flattenComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + GID: types.Int64Value(int64(recordDisk.GID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Name: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + ResID: types.StringValue(recordDisk.ResID), + ResName: 
types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepID: types.Int64Value(int64(recordDisk.SepID)), + SepType: types.StringValue(recordDisk.SepType), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + state.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.Images to state.Images", diags)) + } + state.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskDataSource: cannot flatten recordDisk.PresentTo to state.PresentTo", diags)) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: 
types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.DiskDataSource iotune struct to obj", diags)) + } + state.IOTune = obj + + tflog.Info(ctx, "flattens.DiskDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()}) + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list.go new file mode 100644 index 0000000..efb3a34 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list.go @@ -0,0 +1,128 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskListDataSource flattens data source for disk list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func DiskListDataSource(ctx context.Context, state *models.DataSourceDiskListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListDataSource") + + diags := diag.Diagnostics{} + + diskList, diags := utilities.DataSourceDiskListCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListModel{ + ByID: state.ByID, + Name: state.Name, + AccountName: state.AccountName, + DiskMaxSize: state.DiskMaxSize, + Status: state.Status, + Shared: state.Shared, + AccountID: state.AccountID, + Type: state.Type, + SEPID: state.SEPID, + PoolName: state.PoolName, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(diskList.EntryCount)), + } + + items := make([]models.ItemDiskModel, 0, diskList.EntryCount) + for _, recordDisk := range diskList.Data { + diskAcl, _ := json.Marshal(recordDisk.ACL) + d := models.ItemDiskModel{ + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + Computes: flattenComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + GID: types.Int64Value(int64(recordDisk.GID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + DiskId: types.Int64Value(int64(recordDisk.ID)), + DiskName: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: 
types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepID: types.Int64Value(int64(recordDisk.SepID)), + SepType: types.StringValue(recordDisk.SepType), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.Images to d.Images", diags)) + } + d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags)) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: 
types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListDataSource iotune struct to obj", diags)) + } + d.IOTune = obj + + items = append(items, d) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListDataSource: end flatten") + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_deleted.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_deleted.go new file mode 100644 index 0000000..1934994 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_deleted.go @@ -0,0 +1,125 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskListDeletedDataSource flattens data source for disk list deleted. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func DiskListDeletedDataSource(ctx context.Context, state *models.DataSourceDiskListDeletedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListDeletedDataSource") + + diags := diag.Diagnostics{} + + diskList, diags := utilities.DataSourceDiskListDeletedCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListDeletedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListDeletedModel{ + ByID: state.ByID, + Name: state.Name, + AccountName: state.AccountName, + DiskMaxSize: state.DiskMaxSize, + Shared: state.Shared, + AccountID: state.AccountID, + Type: state.Type, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(diskList.EntryCount)), + } + + items := make([]models.ItemDiskModel, 0, diskList.EntryCount) + for _, recordDisk := range diskList.Data { + diskAcl, _ := json.Marshal(recordDisk.ACL) + d := models.ItemDiskModel{ + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + Computes: flattenComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + GID: types.Int64Value(int64(recordDisk.GID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + DiskId: types.Int64Value(int64(recordDisk.ID)), + DiskName: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: 
types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepID: types.Int64Value(int64(recordDisk.SepID)), + SepType: types.StringValue(recordDisk.SepType), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.Images to d.Images", diags)) + } + d.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListDeletedDataSource: cannot flatten recordDisk.PresentTo to d.PresentTo", diags)) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + 
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListDeletedDataSource iotune struct to obj", diags)) + } + d.IOTune = obj + + items = append(items, d) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListDeletedDataSource: end flatten") + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types.go new file mode 100644 index 0000000..1b3de9c --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types.go @@ -0,0 +1,50 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskListTypesDataSource flattens data source for disk list types. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. 
+func DiskListTypesDataSource(ctx context.Context, state *models.DataSourceDiskListTypesModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListTypesDataSource") + + diags := diag.Diagnostics{} + + listTypes, diags := utilities.DataSourceDiskListTypesCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListTypesDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListTypesModel{ + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(listTypes.EntryCount)), + } + + state.Types, diags = types.ListValueFrom(ctx, types.StringType, listTypes.Data) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDataSource: cannot flatten listTypes.Data to state.Types", diags)) + } + + tflog.Info(ctx, "flattens.DiskListTypesDataSource: end flatten") + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types_detailed.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types_detailed.go new file mode 100644 index 0000000..4c38322 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_list_types_detailed.go @@ -0,0 +1,82 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskListTypesDetailedDataSource flattens data source for disk list types detailed. 
+// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskListTypesDetailedDataSource(ctx context.Context, state *models.DataSourceDiskListTypesDetailedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListTypesDetailedDataSource") + + diags := diag.Diagnostics{} + + listTypes, diags := utilities.DataSourceDiskListTypesDetailedCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListTypesDetailedModel{ + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(listTypes.EntryCount)), + } + + items := make([]models.ItemDiskTypeDetailedModel, 0, len(listTypes.Data)) + for _, typeListDetailed := range listTypes.Data { + typeMap := typeListDetailed.(map[string]interface{}) + + t := models.ItemDiskTypeDetailedModel{ + SepID: types.Int64Value(int64(typeMap["sepId"].(float64))), + SepName: types.StringValue(typeMap["sepName"].(string)), + } + + var pools []models.ItemPoolModel + poolsTemp := typeMap["pools"].([]interface{}) + for _, pool := range poolsTemp { + poolsMap := pool.(map[string]interface{}) + p := models.ItemPoolModel{ + Name: types.StringValue(poolsMap["name"].(string)), + System: types.StringValue(poolsMap["system"].(string)), + } + + p.Types, diags = types.ListValueFrom(ctx, types.StringType, flattenTypes(poolsMap["types"].([]interface{}))) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListTypesDetailedDataSource: cannot flatten poolsMap[\"types\"] to p.Types", diags)) + } + pools = append(pools, p) + } + t.Pools = pools + items = append(items, t) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListTypesDetailedDataSource: end flatten") + return nil +} + 
// flattenTypes converts a slice of untyped JSON values into a []string.
// Every element is expected to hold a string; the result is nil when the
// input is empty (a nil slice keeps the downstream list null, so the empty
// case must not be converted into a non-nil empty slice).
func flattenTypes(typesInterface []interface{}) []string {
	var result []string
	for i := range typesInterface {
		result = append(result, typesInterface[i].(string))
	}
	return result
}
+func DiskListUnattachedDataSource(ctx context.Context, state *models.DataSourceDiskListUnattachedModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskListUnattachedDataSource") + + diags := diag.Diagnostics{} + + diskList, diags := utilities.DataSourceDiskListUnattachedCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: before flatten") + + id := uuid.New() + *state = models.DataSourceDiskListUnattachedModel{ + ByID: state.ByID, + AccountName: state.AccountName, + DiskMaxSize: state.DiskMaxSize, + Status: state.Status, + AccountID: state.AccountID, + SepID: state.SepID, + PoolName: state.PoolName, + Type: state.Type, + SortBy: state.SortBy, + Page: state.Page, + Size: state.Size, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + EntryCount: types.Int64Value(int64(diskList.EntryCount)), + } + + items := make([]models.ItemDiskUnattachedModel, 0, diskList.EntryCount) + for _, recordDisk := range diskList.Data { + diskAcl, _ := json.Marshal(recordDisk.ACL) + d := models.ItemDiskUnattachedModel{ + CKey: types.StringValue(recordDisk.CKey), + Meta: flattens.Meta(ctx, recordDisk.Meta), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + BootPartition: types.Int64Value(int64(recordDisk.BootPartition)), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DiskPath: types.StringValue(recordDisk.DiskPath), + GID: types.Int64Value(int64(recordDisk.GID)), + GUID: types.Int64Value(int64(recordDisk.GUID)), + DiskId: types.Int64Value(int64(recordDisk.ID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + 
Iqn: types.StringValue(recordDisk.IQN), + Login: types.StringValue(recordDisk.Login), + Milestones: types.Int64Value(int64(recordDisk.Milestones)), + DiskName: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + Passwd: types.StringValue(recordDisk.Password), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PurgeAttempts: types.Int64Value(int64(recordDisk.PurgeAttempts)), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + RealityDeviceNumber: types.Int64Value(int64(recordDisk.RealityDeviceNumber)), + ReferenceID: types.StringValue(recordDisk.ReferenceID), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepID: types.Int64Value(int64(recordDisk.SEPID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + d.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskListUnattachedDataSource: cannot flatten recordDisk.Images to d.Images", diags)) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: 
types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.DiskListUnattachedDataSource iotune struct to obj", diags)) + } + d.IOTune = obj + + items = append(items, d) + } + state.Items = items + + tflog.Info(ctx, "flattens.DiskListUnattachedDataSource: end flatten") + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_replication.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_replication.go new file mode 100644 index 0000000..16209ee --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_replication.go @@ -0,0 +1,130 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskReplicationDataSource flattens 
data source for disk. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskReplicationDataSource(ctx context.Context, state *models.RecordDiskModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskReplicationDataSource") + + diags := diag.Diagnostics{} + + recordDisk, status, err := utilities.DataSourceDiskReplicationCheckPresence(ctx, state, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk"), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskReplicationDataSource: before flatten") + + diskAcl, _ := json.Marshal(recordDisk.ACL) + + *state = models.RecordDiskModel{ + DiskId: state.DiskId, + ID: state.ID, + Timeouts: state.Timeouts, + ACL: types.StringValue(string(diskAcl)), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + Computes: flattenDRComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + GID: types.Int64Value(int64(recordDisk.GID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Images: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images), + Name: types.StringValue(recordDisk.Name), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + Replication: 
&models.ItemReplicationModel{}, + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepType: types.StringValue(recordDisk.SepType), + SepID: types.Int64Value(int64(recordDisk.SepID)), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + StatusReplication: types.StringValue(*status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + iotune := models.DiskReplicationIOTune{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + state.IOTune = iotune + + itemReplication := models.ItemReplicationModel{ + DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)), + PoolID: types.StringValue(recordDisk.Replication.PoolID), + Role: 
types.StringValue(recordDisk.Replication.Role), + SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID), + StorageID: types.StringValue(recordDisk.Replication.StorageID), + VolumeID: types.StringValue(recordDisk.Replication.VolumeID), + } + + state.Replication = &itemReplication + + tflog.Info(ctx, "flattens.ReplicationDiskDataSource: end flatten") + return nil +} + +func flattenDRComputes(ctx context.Context, items map[string]string) types.List { + tflog.Info(ctx, "Start flattenDRComputes") + tempSlice := make([]types.Object, 0, len(items)) + for id, name := range items { + temp := models.ItemComputeModel{ + ComputeId: types.StringValue(id), + ComputeName: types.StringValue(name), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenDRComputes", diags)) + } + + tflog.Info(ctx, "End flattenDRComputes") + return res +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot.go new file mode 100644 index 0000000..9f69568 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot.go @@ -0,0 +1,50 @@ +package flattens + +import ( + "context" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskSnapshotDataSource flattens data source for disk snapshot. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskSnapshotDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskSnapshotDataSource") + + diskId := uint64(state.DiskID.ValueInt64()) + + item, diags := utilities.DataSourceDiskSnapshotCheckPresence(ctx, state, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskSnapshotDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshot": item}) + + id := uuid.New() + *state = models.DataSourceDiskSnapshotModel{ + DiskID: state.DiskID, + Label: state.Label, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + GUID: types.StringValue(item.GUID), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + TimeStamp: types.Int64Value(int64(item.TimeStamp)), + } + + tflog.Info(ctx, "flattens.DiskSnapshotDataSource: end flatten", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString(), + }) + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot_list.go b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot_list.go new file mode 100644 index 0000000..c4e0b6f --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_data_source_disk_snapshot_list.go @@ -0,0 +1,46 @@ +package flattens + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + 
decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskSnapshotListDataSource flattens data source for disk snapshot list. +// Return error in case data source is not found on the platform. +// Flatten errors are added to tflog. +func DiskSnapshotListDataSource(ctx context.Context, state *models.DataSourceDiskSnapshotListModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskSnapshotListDataSource") + + diags := diag.Diagnostics{} + + diskId := uint64(state.DiskID.ValueInt64()) + + snapshots, err := utilities.DiskSnapshotListCheckPresence(ctx, diskId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk snapshot list with disk ID %v", diskId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: before flatten", map[string]any{"disk_id": diskId, "snapshots": snapshots}) + + id := uuid.New() + *state = models.DataSourceDiskSnapshotListModel{ + DiskID: state.DiskID, + Timeouts: state.Timeouts, + + // computed fields + Id: types.StringValue(id.String()), + Items: flattenSnapshots(ctx, *snapshots), + } + + tflog.Info(ctx, "flattens.DiskSnapshotListDataSource: end flatten", map[string]any{"disk_id": state.DiskID.ValueInt64()}) + return nil +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go b/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go new file mode 100644 index 0000000..791e448 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_resource_disk.go @@ -0,0 +1,189 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + 
"github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskResource flattens resource for disk. +// Return error in case resource is not found on the platform. +// Flatten errors are added to tflog. +func DiskResource(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskResource") + + diags := diag.Diagnostics{} + + diskId, err := strconv.ParseUint(plan.Id.ValueString(), 10, 64) + if err != nil { + diags.AddError("flattens.DiskResource: Cannot parse disk ID from state", err.Error()) + return diags + } + + recordDisk, err := utilities.DiskCheckPresence(ctx, diskId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskResource: before flatten", map[string]any{"disk_id": diskId, "recordDisk": recordDisk}) + + diskAcl, _ := json.Marshal(recordDisk.ACL) + *plan = models.ResourceDiskModel{ + // required fields + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + DiskName: types.StringValue(recordDisk.Name), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + GID: types.Int64Value(int64(recordDisk.GID)), + + // optional fields + Description: plan.Description, + Pool: plan.Pool, + SEPID: plan.SEPID, + Type: plan.Type, + Detach: plan.Detach, + Permanently: plan.Permanently, + Reason: plan.Reason, + Shareable: plan.Shareable, + Timeouts: plan.Timeouts, + + // computed fields + LastUpdated: plan.LastUpdated, + Id: types.StringValue(strconv.Itoa(int(recordDisk.ID))), + DiskId: types.Int64Value(int64(recordDisk.ID)), + AccountName: 
types.StringValue(recordDisk.AccountName), + ACL: types.StringValue(string(diskAcl)), + Computes: flattenComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepType: types.StringValue(recordDisk.SepType), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + TechStatus: types.StringValue(recordDisk.TechStatus), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + plan.Images, diags = types.ListValueFrom(ctx, types.StringType, recordDisk.Images) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.Images to plan.Images", diags)) + } + plan.PresentTo, diags = types.ListValueFrom(ctx, types.Int64Type, recordDisk.PresentTo) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("flattens.DiskResource: cannot flatten recordDisk.PresentTo to plan.PresentTo", diags)) + } + + if plan.Description.IsUnknown() { + plan.Description = types.StringValue(recordDisk.Description) + } + if plan.Pool.IsUnknown() { + plan.Pool = types.StringValue(recordDisk.Pool) + } + if plan.SEPID.IsUnknown() { + plan.SEPID = types.Int64Value(int64(recordDisk.SepID)) + } + if plan.Shareable.IsUnknown() { + plan.Shareable = 
types.BoolValue(recordDisk.Shareable) + } + if plan.Type.IsUnknown() { + plan.Type = types.StringValue(recordDisk.Type) + } + + iotune := models.IOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.DiskResource iotune struct to obj", diags)) + } + plan.IOTune = obj + + tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{"disk_id": plan.Id.ValueString()}) + + tflog.Info(ctx, "End flattens.DiskResource") + return nil +} + +func flattenComputes(ctx context.Context, items map[string]string) types.List { + tflog.Info(ctx, "Start flattenComputes") + tempSlice := make([]types.Object, 0, len(items)) + for id, name := range items { + temp := models.ItemComputeModel{ + ComputeId: types.StringValue(id), + ComputeName: types.StringValue(name), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenComputes 
struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenComputes", diags)) + } + + tflog.Info(ctx, "End flattenComputes") + return res +} + +func flattenSnapshots(ctx context.Context, snapshots disks.ListSnapshots) types.List { + tflog.Info(ctx, "Start flattenSnapshots") + tempSlice := make([]types.Object, 0, len(snapshots)) + for _, item := range snapshots { + temp := models.ItemSnapshotModel{ + GUID: types.StringValue(item.GUID), + Label: types.StringValue(item.Label), + ResID: types.StringValue(item.ResID), + SnapSetGUID: types.StringValue(item.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(item.SnapSetTime)), + TimeStamp: types.Int64Value(int64(item.TimeStamp)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemSnapshot, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemSnapshot}, tempSlice) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenSnapshots", diags)) + } + + tflog.Info(ctx, "End flattenSnapshots") + return res +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go b/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go new file mode 100644 index 0000000..5c6e42e --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_resource_disk_replication.go @@ -0,0 +1,146 @@ +package flattens + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// ReplicationDiskresource flattens resource for disk. +// Return error in case resource is not found on the platform. +// Flatten errors are added to tflog. +func DiskReplicationResource(ctx context.Context, state *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskReplicationresource") + + diags := diag.Diagnostics{} + + recordDisk, status, err := utilities.ResourceDiskReplicationCheckPresence(ctx, state, c) + if err != nil { + diags.AddError("Cannot get info about disk", err.Error()) + return diags + } + + tflog.Info(ctx, "flattens.DiskReplicationresource: before flatten") + + diskAcl, _ := json.Marshal(recordDisk.ACL) + + *state = models.ResourceRecordDiskReplicationModel{ + DiskId: state.DiskId, + Name: state.Name, + SepID: state.SepID, + ReplicationId: state.ReplicationId, + Timeouts: state.Timeouts, + PoolName: state.PoolName, + Pause: state.Pause, + Reverse: state.Reverse, + Start: state.Start, + Detach: state.Detach, + Permanently: state.Permanently, + Reason: state.Reason, + Id: types.StringValue(strconv.Itoa(int(recordDisk.Replication.DiskID))), + ACL: types.StringValue(string(diskAcl)), + AccountID: types.Int64Value(int64(recordDisk.AccountID)), + AccountName: types.StringValue(recordDisk.AccountName), + Computes: flattenRComputes(ctx, recordDisk.Computes), + CreatedTime: types.Int64Value(int64(recordDisk.CreatedTime)), + DeletedTime: types.Int64Value(int64(recordDisk.DeletedTime)), + DeviceName: types.StringValue(recordDisk.DeviceName), + Description: types.StringValue(recordDisk.Description), + DestructionTime: types.Int64Value(int64(recordDisk.DestructionTime)), + GID: 
types.Int64Value(int64(recordDisk.GID)), + ImageID: types.Int64Value(int64(recordDisk.ImageID)), + Images: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.Images), + Order: types.Int64Value(int64(recordDisk.Order)), + Params: types.StringValue(recordDisk.Params), + ParentID: types.Int64Value(int64(recordDisk.ParentID)), + PCISlot: types.Int64Value(int64(recordDisk.PCISlot)), + Pool: types.StringValue(recordDisk.Pool), + PresentTo: flattens.FlattenSimpleTypeToList(ctx, types.StringType, recordDisk.PresentTo), + PurgeTime: types.Int64Value(int64(recordDisk.PurgeTime)), + ResID: types.StringValue(recordDisk.ResID), + ResName: types.StringValue(recordDisk.ResName), + Role: types.StringValue(recordDisk.Role), + SepType: types.StringValue(recordDisk.SepType), + Shareable: types.BoolValue(recordDisk.Shareable), + SizeMax: types.Int64Value(int64(recordDisk.SizeMax)), + SizeUsed: types.Float64Value(recordDisk.SizeUsed), + Snapshots: flattenSnapshots(ctx, recordDisk.Snapshots), + Status: types.StringValue(recordDisk.Status), + StatusReplication: types.StringValue(*status), + TechStatus: types.StringValue(recordDisk.TechStatus), + Type: types.StringValue(recordDisk.Type), + VMID: types.Int64Value(int64(recordDisk.VMID)), + } + + iotune := models.ResourceDiskReplicationIOTuneModel{ + ReadBytesSec: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSec)), + ReadBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadBytesSecMax)), + ReadIOPSSec: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSec)), + ReadIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.ReadIOPSSecMax)), + SizeIOPSSec: types.Int64Value(int64(recordDisk.IOTune.SizeIOPSSec)), + TotalBytesSec: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSec)), + TotalBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalBytesSecMax)), + TotalIOPSSec: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSec)), + TotalIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.TotalIOPSSecMax)), + 
WriteBytesSec: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSec)), + WriteBytesSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteBytesSecMax)), + WriteIOPSSec: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSec)), + WriteIOPSSecMax: types.Int64Value(int64(recordDisk.IOTune.WriteIOPSSecMax)), + } + obj, diags := types.ObjectValueFrom(ctx, models.ResourceDiskReplicationIOTune, iotune) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.ReplicationDiskresource iotune struct to obj", diags)) + } + state.IOTune = obj + + itemReplication := models.ResourceItemReplicationModel{ + DiskID: types.Int64Value(int64(recordDisk.Replication.DiskID)), + PoolID: types.StringValue(recordDisk.Replication.PoolID), + Role: types.StringValue(recordDisk.Replication.Role), + SelfVolumeID: types.StringValue(recordDisk.Replication.SelfVolumeID), + StorageID: types.StringValue(recordDisk.Replication.StorageID), + VolumeID: types.StringValue(recordDisk.Replication.VolumeID), + } + + obj, diags = types.ObjectValueFrom(ctx, models.ResourceItemReplication, itemReplication) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattens.ReplicationDiskresource Replication struct to obj", diags)) + } + state.Replication = obj + + tflog.Info(ctx, "flattens.ReplicationDiskresource: end flatten") + return nil +} + +func flattenRComputes(ctx context.Context, items map[string]string) types.List { + tflog.Info(ctx, "Start flattenRComputes") + tempSlice := make([]types.Object, 0, len(items)) + for id, name := range items { + temp := models.ItemComputeModel{ + ComputeId: types.StringValue(id), + ComputeName: types.StringValue(name), + } + obj, diags := types.ObjectValueFrom(ctx, models.ItemCompute, temp) + if diags != nil { + tflog.Error(ctx, fmt.Sprint("Error flattenRComputes struct to obj", diags)) + } + tempSlice = append(tempSlice, obj) + } + + res, diags := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: models.ItemCompute}, tempSlice) + if diags != nil { + 
tflog.Error(ctx, fmt.Sprint("Error flattenRComputes", diags)) + } + + tflog.Info(ctx, "End flattenRComputes") + return res +} diff --git a/internal/service/cloudapi/disks/flattens/flatten_resource_disk_snapshot.go b/internal/service/cloudapi/disks/flattens/flatten_resource_disk_snapshot.go new file mode 100644 index 0000000..10dd256 --- /dev/null +++ b/internal/service/cloudapi/disks/flattens/flatten_resource_disk_snapshot.go @@ -0,0 +1,65 @@ +package flattens + +import ( + "context" + "fmt" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// DiskSnapshotResource flattens resource for disk snapshot. +// Return error in case resource is not found on the platform. +// Flatten errors are added to tflog. 
+func DiskSnapshotResource(ctx context.Context, plan *models.ResourceDiskSnapshotModel, c *decort.DecortClient) diag.Diagnostics { + tflog.Info(ctx, "Start flattens.DiskSnapshotResource", map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString()}) + + recordSnapshot, diags := utilities.DiskSnapshotCheckPresence(ctx, plan, c) + if diags.HasError() { + return diags + } + + tflog.Info(ctx, "flattens.DiskSnapshotResource: before flatten", map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString(), + "recordDisk": recordSnapshot}) + + id := plan.Id + if !strings.Contains(id.ValueString(), "#") { + id = types.StringValue(fmt.Sprintf("%d#%s", plan.DiskID.ValueInt64(), plan.Label.ValueString())) + } + *plan = models.ResourceDiskSnapshotModel{ + // required fields + DiskID: plan.DiskID, + Label: types.StringValue(recordSnapshot.Label), + + // optional fields + Rollback: plan.Rollback, + TimeStamp: plan.TimeStamp, + Timeouts: plan.Timeouts, + + // computed fields + Id: id, + GUID: types.StringValue(recordSnapshot.GUID), + ResID: types.StringValue(recordSnapshot.ResID), + SnapSetGUID: types.StringValue(recordSnapshot.SnapSetGUID), + SnapSetTime: types.Int64Value(int64(recordSnapshot.SnapSetTime)), + } + + if plan.TimeStamp.IsUnknown() { + plan.TimeStamp = types.Int64Value(int64(recordSnapshot.TimeStamp)) + } + + tflog.Info(ctx, "flattens.DiskResource: after flatten", map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString()}) + + return nil +} diff --git a/internal/service/cloudapi/disks/input_checks.go b/internal/service/cloudapi/disks/input_checks.go new file mode 100644 index 0000000..42dc495 --- /dev/null +++ b/internal/service/cloudapi/disks/input_checks.go @@ -0,0 +1,99 @@ +package disks + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/ic" +) + +// resourceDiskCreateInputChecks checks if user provided account_id and gid exist on the platform during disk creation. +func resourceDiskCreateInputChecks(ctx context.Context, plan *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + accountId := uint64(plan.AccountID.ValueInt64()) + tflog.Info(ctx, "resourceDiskCreateInputChecks: exist account check", map[string]any{"account_id": accountId}) + err := ic.ExistAccount(ctx, accountId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about account with ID %v", accountId), err.Error()) + } + + gid := uint64(plan.GID.ValueInt64()) + tflog.Info(ctx, "resourceDiskCreateInputChecks: exist gid check", map[string]any{"gid": gid}) + err = ic.ExistGID(ctx, gid, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about GID %v", gid), err.Error()) + } + return diags +} + +// resourceDiskReplicationInputChecks checks if user provided disk_id exist on the platform during disk replication. +func resourceDiskReplicationInputChecks(ctx context.Context, plan *models.ResourceRecordDiskReplicationModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + diskId := uint64(plan.DiskId.ValueInt64()) + tflog.Info(ctx, "resourceDiskCreateInputChecks: exist disk check", map[string]any{"disk_id": diskId}) + err := ic.ExistDiskID(ctx, diskId, c) + if err != nil { + diags.AddError(fmt.Sprintf("Cannot get info about disk with ID %v", diskId), err.Error()) + } + + return diags + +} + +// resourceDiskUpdateInputChecks checks if user provided: +// account_id and gid exist on the platform during disk creation, +// description, pool, sep_id, type are not attempted to be changed. 
+func resourceDiskUpdateInputChecks(ctx context.Context, plan, state *models.ResourceDiskModel, c *decort.DecortClient) diag.Diagnostics { + diags := diag.Diagnostics{} + + // check accountId and gid + diags.Append(resourceDiskCreateInputChecks(ctx, plan, c)...) + + // check description + if !plan.Description.Equal(state.Description) && !plan.Description.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: description change is not allowed", + fmt.Sprintf("cannot change description from %s to %s for disk id %s", + state.Description.ValueString(), + plan.Description.ValueString(), + plan.Id.ValueString())) + } + + // check pool + if !plan.Pool.Equal(state.Pool) && !plan.Pool.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: pool change is not allowed", + fmt.Sprintf("cannot change pool from %s to %s for disk id %s", + state.Pool.ValueString(), + plan.Pool.ValueString(), + plan.Id.ValueString())) + } + + // check sep_id + if !plan.SEPID.Equal(state.SEPID) && !plan.SEPID.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: sep_id change is not allowed", + fmt.Sprintf("cannot change sep_id from %d to %d for disk id %s", + state.SEPID.ValueInt64(), + plan.SEPID.ValueInt64(), + plan.Id.ValueString())) + } + + // check type + if !plan.Type.Equal(state.Type) && !plan.Type.IsUnknown() { + diags.AddError( + "resourceDiskUpdateInputChecks: type change is not allowed", + fmt.Sprintf("cannot change type from %s to %s for disk id %s", + state.Type.ValueString(), + plan.Type.ValueString(), + plan.Id.ValueString())) + } + + return diags +} diff --git a/internal/service/cloudapi/disks/models/model_data_source_disk.go b/internal/service/cloudapi/disks/models/model_data_source_disk.go new file mode 100644 index 0000000..a52106b --- /dev/null +++ b/internal/service/cloudapi/disks/models/model_data_source_disk.go @@ -0,0 +1,49 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/datasource/timeouts" + 
"github.com/hashicorp/terraform-plugin-framework/types" +) + +type DataSourceDiskModel struct { + // request fields + DiskID types.Int64 `tfsdk:"disk_id"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + ACL types.String `tfsdk:"acl"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes types.List `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + DeviceName types.String `tfsdk:"devicename"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + GID types.Int64 `tfsdk:"gid"` + ImageID types.Int64 `tfsdk:"image_id"` + Images types.List `tfsdk:"images"` + IOTune types.Object `tfsdk:"iotune"` + Name types.String `tfsdk:"disk_name"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SepType types.String `tfsdk:"sep_type"` + SepID types.Int64 `tfsdk:"sep_id"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots types.List `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} diff --git a/internal/service/cloudapi/disks/models/model_data_source_disk_list.go b/internal/service/cloudapi/disks/models/model_data_source_disk_list.go new file mode 100644 index 0000000..c3585be --- /dev/null +++ b/internal/service/cloudapi/disks/models/model_data_source_disk_list.go @@ -0,0 +1,69 @@ +package models + +import ( + 
// DataSourceDiskListModel describes the Terraform state for the disk list
// data source; request fields are optional filters/pagination, response
// fields are computed.
type DataSourceDiskListModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	Name        types.String   `tfsdk:"name"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Status      types.String   `tfsdk:"status"`
	Shared      types.Bool     `tfsdk:"shared"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	Type        types.String   `tfsdk:"type"`
	SEPID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String    `tfsdk:"id"`
	Items      []ItemDiskModel `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}

// ItemDiskModel is a single disk entry in a disk list response; it is shared
// by the active-list and deleted-list data sources.
type ItemDiskModel struct {
	AccountID       types.Int64   `tfsdk:"account_id"`
	AccountName     types.String  `tfsdk:"account_name"`
	ACL             types.String  `tfsdk:"acl"`
	Computes        types.List    `tfsdk:"computes"`
	CreatedTime     types.Int64   `tfsdk:"created_time"`
	DeletedTime     types.Int64   `tfsdk:"deleted_time"`
	Description     types.String  `tfsdk:"desc"`
	DestructionTime types.Int64   `tfsdk:"destruction_time"`
	DeviceName      types.String  `tfsdk:"devicename"`
	GID             types.Int64   `tfsdk:"gid"`
	ImageID         types.Int64   `tfsdk:"image_id"`
	Images          types.List    `tfsdk:"images"`
	IOTune          types.Object  `tfsdk:"iotune"`
	MachineID       types.Int64   `tfsdk:"machine_id"`
	MachineName     types.String  `tfsdk:"machine_name"`
	DiskId          types.Int64   `tfsdk:"disk_id"`
	DiskName        types.String  `tfsdk:"disk_name"`
	Order           types.Int64   `tfsdk:"order"`
	Params          types.String  `tfsdk:"params"`
	ParentID        types.Int64   `tfsdk:"parent_id"`
	PCISlot         types.Int64   `tfsdk:"pci_slot"`
	Pool            types.String  `tfsdk:"pool"`
	PresentTo       types.List    `tfsdk:"present_to"`
	PurgeTime       types.Int64   `tfsdk:"purge_time"`
	ResID           types.String  `tfsdk:"res_id"`
	ResName         types.String  `tfsdk:"res_name"`
	Role            types.String  `tfsdk:"role"`
	SepID           types.Int64   `tfsdk:"sep_id"`
	SepType         types.String  `tfsdk:"sep_type"`
	Shareable       types.Bool    `tfsdk:"shareable"`
	SizeMax         types.Int64   `tfsdk:"size_max"`
	SizeUsed        types.Float64 `tfsdk:"size_used"`
	Snapshots       types.List    `tfsdk:"snapshots"`
	Status          types.String  `tfsdk:"status"`
	TechStatus      types.String  `tfsdk:"tech_status"`
	Type            types.String  `tfsdk:"type"`
	VMID            types.Int64   `tfsdk:"vmid"`
}

// DataSourceDiskListDeletedModel describes the Terraform state for the
// deleted-disk list data source; note there is no status/sep_id/pool filter
// (deleted disks are filtered server-side).
type DataSourceDiskListDeletedModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	Name        types.String   `tfsdk:"name"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Shared      types.Bool     `tfsdk:"shared"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String    `tfsdk:"id"`
	Items      []ItemDiskModel `tfsdk:"items"`
	EntryCount types.Int64     `tfsdk:"entry_count"`
}

// DataSourceDiskListTypesModel describes the Terraform state for the
// disk-types list data source (flat list of type names).
type DataSourceDiskListTypesModel struct {
	// request fields - optional
	SortBy   types.String   `tfsdk:"sort_by"`
	Page     types.Int64    `tfsdk:"page"`
	Size     types.Int64    `tfsdk:"size"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String `tfsdk:"id"`
	Types      types.List   `tfsdk:"types"`
	EntryCount types.Int64  `tfsdk:"entry_count"`
}

// DataSourceDiskListTypesDetailedModel describes the Terraform state for the
// detailed disk-types data source (per-SEP, per-pool breakdown).
type DataSourceDiskListTypesDetailedModel struct {
	// request fields - optional
	SortBy   types.String   `tfsdk:"sort_by"`
	Page     types.Int64    `tfsdk:"page"`
	Size     types.Int64    `tfsdk:"size"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String                `tfsdk:"id"`
	Items      []ItemDiskTypeDetailedModel `tfsdk:"items"`
	EntryCount types.Int64                 `tfsdk:"entry_count"`
}

// ItemDiskTypeDetailedModel is one SEP entry with its pools.
type ItemDiskTypeDetailedModel struct {
	Pools   []ItemPoolModel `tfsdk:"pools"`
	SepID   types.Int64     `tfsdk:"sep_id"`
	SepName types.String    `tfsdk:"sep_name"`
}

// ItemPoolModel is one storage pool with the disk types it supports.
type ItemPoolModel struct {
	Name   types.String `tfsdk:"name"`
	System types.String `tfsdk:"system"`
	Types  types.List   `tfsdk:"types"`
}
// DataSourceDiskListUnattachedModel describes the Terraform state for the
// unattached-disk list data source.
type DataSourceDiskListUnattachedModel struct {
	// request fields
	ByID        types.Int64    `tfsdk:"by_id"`
	AccountName types.String   `tfsdk:"account_name"`
	DiskMaxSize types.Int64    `tfsdk:"disk_max_size"`
	Status      types.String   `tfsdk:"status"`
	AccountID   types.Int64    `tfsdk:"account_id"`
	SepID       types.Int64    `tfsdk:"sep_id"`
	PoolName    types.String   `tfsdk:"pool_name"`
	Type        types.String   `tfsdk:"type"`
	SortBy      types.String   `tfsdk:"sort_by"`
	Page        types.Int64    `tfsdk:"page"`
	Size        types.Int64    `tfsdk:"size"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id         types.String              `tfsdk:"id"`
	Items      []ItemDiskUnattachedModel `tfsdk:"items"`
	EntryCount types.Int64               `tfsdk:"entry_count"`
}

// ItemDiskUnattachedModel is a single unattached disk entry; it carries extra
// low-level fields (ckey, iqn, login/passwd, etc.) not present on attached
// disk items.
type ItemDiskUnattachedModel struct {
	CKey                types.String  `tfsdk:"ckey"`
	Meta                types.List    `tfsdk:"meta"`
	AccountID           types.Int64   `tfsdk:"account_id"`
	AccountName         types.String  `tfsdk:"account_name"`
	ACL                 types.String  `tfsdk:"acl"`
	BootPartition       types.Int64   `tfsdk:"boot_partition"`
	CreatedTime         types.Int64   `tfsdk:"created_time"`
	DeletedTime         types.Int64   `tfsdk:"deleted_time"`
	Description         types.String  `tfsdk:"desc"`
	DestructionTime     types.Int64   `tfsdk:"destruction_time"`
	DiskPath            types.String  `tfsdk:"disk_path"`
	GID                 types.Int64   `tfsdk:"gid"`
	GUID                types.Int64   `tfsdk:"guid"`
	DiskId              types.Int64   `tfsdk:"disk_id"`
	ImageID             types.Int64   `tfsdk:"image_id"`
	Images              types.List    `tfsdk:"images"`
	IOTune              types.Object  `tfsdk:"iotune"`
	Iqn                 types.String  `tfsdk:"iqn"`
	Login               types.String  `tfsdk:"login"`
	Milestones          types.Int64   `tfsdk:"milestones"`
	DiskName            types.String  `tfsdk:"disk_name"`
	Order               types.Int64   `tfsdk:"order"`
	Params              types.String  `tfsdk:"params"`
	ParentID            types.Int64   `tfsdk:"parent_id"`
	Passwd              types.String  `tfsdk:"passwd"`
	PCISlot             types.Int64   `tfsdk:"pci_slot"`
	Pool                types.String  `tfsdk:"pool"`
	PurgeAttempts       types.Int64   `tfsdk:"purge_attempts"`
	PurgeTime           types.Int64   `tfsdk:"purge_time"`
	RealityDeviceNumber types.Int64   `tfsdk:"reality_device_number"`
	ReferenceID         types.String  `tfsdk:"reference_id"`
	ResID               types.String  `tfsdk:"res_id"`
	ResName             types.String  `tfsdk:"res_name"`
	Role                types.String  `tfsdk:"role"`
	SepID               types.Int64   `tfsdk:"sep_id"`
	Shareable           types.Bool    `tfsdk:"shareable"`
	SizeMax             types.Int64   `tfsdk:"size_max"`
	SizeUsed            types.Float64 `tfsdk:"size_used"`
	Snapshots           types.List    `tfsdk:"snapshots"`
	Status              types.String  `tfsdk:"status"`
	TechStatus          types.String  `tfsdk:"tech_status"`
	Type                types.String  `tfsdk:"type"`
	VMID                types.Int64   `tfsdk:"vmid"`
}

// RecordDiskModel describes the Terraform state for the disk replication data
// source: disk_id and replica_disk_id are supplied by the user, the rest is
// computed from the platform response.
type RecordDiskModel struct {
	// request fields - required
	DiskId   types.Int64    `tfsdk:"disk_id"`
	ID       types.Int64    `tfsdk:"replica_disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	ACL               types.String          `tfsdk:"acl"`
	AccountID         types.Int64           `tfsdk:"account_id"`
	AccountName       types.String          `tfsdk:"account_name"`
	Computes          types.List            `tfsdk:"computes"`
	CreatedTime       types.Int64           `tfsdk:"created_time"`
	DeletedTime       types.Int64           `tfsdk:"deleted_time"`
	DeviceName        types.String          `tfsdk:"devicename"`
	Description       types.String          `tfsdk:"desc"`
	DestructionTime   types.Int64           `tfsdk:"destruction_time"`
	GID               types.Int64           `tfsdk:"gid"`
	ImageID           types.Int64           `tfsdk:"image_id"`
	Images            types.List            `tfsdk:"images"`
	IOTune            DiskReplicationIOTune `tfsdk:"iotune"`
	Name              types.String          `tfsdk:"disk_name"`
	Order             types.Int64           `tfsdk:"order"`
	Params            types.String          `tfsdk:"params"`
	ParentID          types.Int64           `tfsdk:"parent_id"`
	PCISlot           types.Int64           `tfsdk:"pci_slot"`
	Pool              types.String          `tfsdk:"pool"`
	PresentTo         types.List            `tfsdk:"present_to"`
	PurgeTime         types.Int64           `tfsdk:"purge_time"`
	Replication       *ItemReplicationModel `tfsdk:"replication"`
	ResID             types.String          `tfsdk:"res_id"`
	ResName           types.String          `tfsdk:"res_name"`
	Role              types.String          `tfsdk:"role"`
	SepType           types.String          `tfsdk:"sep_type"`
	SepID             types.Int64           `tfsdk:"sep_id"`
	Shareable         types.Bool            `tfsdk:"shareable"`
	SizeMax           types.Int64           `tfsdk:"size_max"`
	SizeUsed          types.Float64         `tfsdk:"size_used"`
	Snapshots         types.List            `tfsdk:"snapshots"`
	Status            types.String          `tfsdk:"status"`
	StatusReplication types.String          `tfsdk:"status_replication"`
	TechStatus        types.String          `tfsdk:"tech_status"`
	Type              types.String          `tfsdk:"type"`
	VMID              types.Int64           `tfsdk:"vmid"`
}

// DiskReplicationIOTune mirrors the platform per-disk IO limits for the
// replication data source.
type DiskReplicationIOTune struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

// ItemReplicationModel holds the replication relationship details of a disk.
type ItemReplicationModel struct {
	DiskID       types.Int64  `tfsdk:"disk_id"`
	PoolID       types.String `tfsdk:"pool_id"`
	Role         types.String `tfsdk:"role"`
	SelfVolumeID types.String `tfsdk:"self_volume_id"`
	StorageID    types.String `tfsdk:"storage_id"`
	VolumeID     types.String `tfsdk:"volume_id"`
}

// DiskReplicationItemSnapshot is one snapshot entry in the replication data
// source (includes reference_id, unlike ItemSnapshotModel).
type DiskReplicationItemSnapshot struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ReferenceID types.String `tfsdk:"reference_id"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

// DataSourceDiskSnapshotModel describes the Terraform state for the single
// disk-snapshot data source, keyed by disk_id + label.
type DataSourceDiskSnapshotModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Label    types.String   `tfsdk:"label"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id          types.String `tfsdk:"id"`
	GUID        types.String `tfsdk:"guid"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

// DataSourceDiskSnapshotListModel describes the Terraform state for the
// disk-snapshot list data source.
type DataSourceDiskSnapshotListModel struct {
	// request fields
	DiskID   types.Int64    `tfsdk:"disk_id"`
	Timeouts timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id    types.String `tfsdk:"id"`
	Items types.List   `tfsdk:"items"`
}

// ResourceDiskModel describes the Terraform state for the disk resource:
// required creation inputs, optional tuning/deletion flags, and computed
// platform attributes.
type ResourceDiskModel struct {
	// request fields - required
	AccountID types.Int64  `tfsdk:"account_id"`
	DiskName  types.String `tfsdk:"disk_name"`
	SizeMax   types.Int64  `tfsdk:"size_max"`
	GID       types.Int64  `tfsdk:"gid"`

	// request fields - optional
	Description types.String   `tfsdk:"desc"`
	Pool        types.String   `tfsdk:"pool"`
	SEPID       types.Int64    `tfsdk:"sep_id"`
	Type        types.String   `tfsdk:"type"`
	Detach      types.Bool     `tfsdk:"detach"`
	Permanently types.Bool     `tfsdk:"permanently"`
	Reason      types.String   `tfsdk:"reason"`
	Shareable   types.Bool     `tfsdk:"shareable"`
	IOTune      types.Object   `tfsdk:"iotune"`
	Timeouts    timeouts.Value `tfsdk:"timeouts"`

	// response fields
	Id              types.String  `tfsdk:"id"`
	LastUpdated     types.String  `tfsdk:"last_updated"`
	ACL             types.String  `tfsdk:"acl"`
	AccountName     types.String  `tfsdk:"account_name"`
	Computes        types.List    `tfsdk:"computes"`
	CreatedTime     types.Int64   `tfsdk:"created_time"`
	DeletedTime     types.Int64   `tfsdk:"deleted_time"`
	DeviceName      types.String  `tfsdk:"devicename"`
	DestructionTime types.Int64   `tfsdk:"destruction_time"`
	DiskId          types.Int64   `tfsdk:"disk_id"`
	ImageID         types.Int64   `tfsdk:"image_id"`
	Images          types.List    `tfsdk:"images"`
	Order           types.Int64   `tfsdk:"order"`
	Params          types.String  `tfsdk:"params"`
	ParentID        types.Int64   `tfsdk:"parent_id"`
	PCISlot         types.Int64   `tfsdk:"pci_slot"`
	PresentTo       types.List    `tfsdk:"present_to"`
	PurgeTime       types.Int64   `tfsdk:"purge_time"`
	ResID           types.String  `tfsdk:"res_id"`
	ResName         types.String  `tfsdk:"res_name"`
	Role            types.String  `tfsdk:"role"`
	SepType         types.String  `tfsdk:"sep_type"`
	SizeUsed        types.Float64 `tfsdk:"size_used"`
	Snapshots       types.List    `tfsdk:"snapshots"`
	Status          types.String  `tfsdk:"status"`
	TechStatus      types.String  `tfsdk:"tech_status"`
	VMID            types.Int64   `tfsdk:"vmid"`
}

// IOTuneModel mirrors the platform per-disk IO limits for the disk resource.
type IOTuneModel struct {
	ReadBytesSec     types.Int64 `tfsdk:"read_bytes_sec"`
	ReadBytesSecMax  types.Int64 `tfsdk:"read_bytes_sec_max"`
	ReadIOPSSec      types.Int64 `tfsdk:"read_iops_sec"`
	ReadIOPSSecMax   types.Int64 `tfsdk:"read_iops_sec_max"`
	SizeIOPSSec      types.Int64 `tfsdk:"size_iops_sec"`
	TotalBytesSec    types.Int64 `tfsdk:"total_bytes_sec"`
	TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"`
	TotalIOPSSec     types.Int64 `tfsdk:"total_iops_sec"`
	TotalIOPSSecMax  types.Int64 `tfsdk:"total_iops_sec_max"`
	WriteBytesSec    types.Int64 `tfsdk:"write_bytes_sec"`
	WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"`
	WriteIOPSSec     types.Int64 `tfsdk:"write_iops_sec"`
	WriteIOPSSecMax  types.Int64 `tfsdk:"write_iops_sec_max"`
}

// ItemSnapshotModel is one snapshot entry of a disk.
type ItemSnapshotModel struct {
	GUID        types.String `tfsdk:"guid"`
	Label       types.String `tfsdk:"label"`
	ResID       types.String `tfsdk:"res_id"`
	SnapSetGUID types.String `tfsdk:"snap_set_guid"`
	SnapSetTime types.Int64  `tfsdk:"snap_set_time"`
	TimeStamp   types.Int64  `tfsdk:"timestamp"`
}

// ItemComputeModel is one compute the disk is attached to.
type ItemComputeModel struct {
	ComputeId   types.String `tfsdk:"compute_id"`
	ComputeName types.String `tfsdk:"compute_name"`
}

// ItemCompute maps ItemComputeModel fields to Terraform attribute types for
// types.ObjectValueFrom.
var ItemCompute = map[string]attr.Type{
	"compute_id":   types.StringType,
	"compute_name": types.StringType,
}

// ItemSnapshot maps ItemSnapshotModel fields to Terraform attribute types.
// NOTE(review): this declaration is truncated in the reviewed view; the
// remaining entries ("snap_set_time", "timestamp") are not visible here —
// confirm against the full file.
var ItemSnapshot = map[string]attr.Type{
	"guid":          types.StringType,
	"label":         types.StringType,
	"res_id":        types.StringType,
	"snap_set_guid": types.StringType,
"snap_set_time": types.Int64Type, + "timestamp": types.Int64Type, +} + +var ItemIOTune = map[string]attr.Type{ + "read_bytes_sec": types.Int64Type, + "read_bytes_sec_max": types.Int64Type, + "read_iops_sec": types.Int64Type, + "read_iops_sec_max": types.Int64Type, + "size_iops_sec": types.Int64Type, + "total_bytes_sec": types.Int64Type, + "total_bytes_sec_max": types.Int64Type, + "total_iops_sec": types.Int64Type, + "total_iops_sec_max": types.Int64Type, + "write_bytes_sec": types.Int64Type, + "write_bytes_sec_max": types.Int64Type, + "write_iops_sec": types.Int64Type, + "write_iops_sec_max": types.Int64Type, +} diff --git a/internal/service/cloudapi/disks/models/model_resource_disk_replication.go b/internal/service/cloudapi/disks/models/model_resource_disk_replication.go new file mode 100644 index 0000000..508ad84 --- /dev/null +++ b/internal/service/cloudapi/disks/models/model_resource_disk_replication.go @@ -0,0 +1,110 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceRecordDiskReplicationModel struct { + // request fields + DiskId types.Int64 `tfsdk:"disk_id"` + Name types.String `tfsdk:"disk_name"` + SepID types.Int64 `tfsdk:"sep_id"` + PoolName types.String `tfsdk:"pool_name"` + Pause types.Bool `tfsdk:"pause"` + Reverse types.Bool `tfsdk:"reverse"` + Start types.Bool `tfsdk:"start"` + Detach types.Bool `tfsdk:"detach"` + Permanently types.Bool `tfsdk:"permanently"` + Reason types.String `tfsdk:"reason"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String `tfsdk:"id"` + ACL types.String `tfsdk:"acl"` + AccountID types.Int64 `tfsdk:"account_id"` + AccountName types.String `tfsdk:"account_name"` + Computes types.List `tfsdk:"computes"` + CreatedTime types.Int64 `tfsdk:"created_time"` + DeletedTime types.Int64 `tfsdk:"deleted_time"` + 
DeviceName types.String `tfsdk:"devicename"` + Description types.String `tfsdk:"desc"` + DestructionTime types.Int64 `tfsdk:"destruction_time"` + GID types.Int64 `tfsdk:"gid"` + ImageID types.Int64 `tfsdk:"image_id"` + ReplicationId types.Int64 `tfsdk:"replica_disk_id"` + Images types.List `tfsdk:"images"` + IOTune types.Object `tfsdk:"iotune"` + Order types.Int64 `tfsdk:"order"` + Params types.String `tfsdk:"params"` + ParentID types.Int64 `tfsdk:"parent_id"` + PCISlot types.Int64 `tfsdk:"pci_slot"` + Pool types.String `tfsdk:"pool"` + PresentTo types.List `tfsdk:"present_to"` + PurgeTime types.Int64 `tfsdk:"purge_time"` + Replication types.Object `tfsdk:"replication"` + ResID types.String `tfsdk:"res_id"` + ResName types.String `tfsdk:"res_name"` + Role types.String `tfsdk:"role"` + SepType types.String `tfsdk:"sep_type"` + Shareable types.Bool `tfsdk:"shareable"` + SizeMax types.Int64 `tfsdk:"size_max"` + SizeUsed types.Float64 `tfsdk:"size_used"` + Snapshots types.List `tfsdk:"snapshots"` + Status types.String `tfsdk:"status"` + StatusReplication types.String `tfsdk:"status_replication"` + TechStatus types.String `tfsdk:"tech_status"` + Type types.String `tfsdk:"type"` + VMID types.Int64 `tfsdk:"vmid"` +} + +type ResourceDiskReplicationIOTuneModel struct { + ReadBytesSec types.Int64 `tfsdk:"read_bytes_sec"` + ReadBytesSecMax types.Int64 `tfsdk:"read_bytes_sec_max"` + ReadIOPSSec types.Int64 `tfsdk:"read_iops_sec"` + ReadIOPSSecMax types.Int64 `tfsdk:"read_iops_sec_max"` + SizeIOPSSec types.Int64 `tfsdk:"size_iops_sec"` + TotalBytesSec types.Int64 `tfsdk:"total_bytes_sec"` + TotalBytesSecMax types.Int64 `tfsdk:"total_bytes_sec_max"` + TotalIOPSSec types.Int64 `tfsdk:"total_iops_sec"` + TotalIOPSSecMax types.Int64 `tfsdk:"total_iops_sec_max"` + WriteBytesSec types.Int64 `tfsdk:"write_bytes_sec"` + WriteBytesSecMax types.Int64 `tfsdk:"write_bytes_sec_max"` + WriteIOPSSec types.Int64 `tfsdk:"write_iops_sec"` + WriteIOPSSecMax types.Int64 
`tfsdk:"write_iops_sec_max"` +} + +var ResourceDiskReplicationIOTune = map[string]attr.Type{ + "read_bytes_sec": types.Int64Type, + "read_bytes_sec_max": types.Int64Type, + "read_iops_sec": types.Int64Type, + "read_iops_sec_max": types.Int64Type, + "size_iops_sec": types.Int64Type, + "total_bytes_sec": types.Int64Type, + "total_bytes_sec_max": types.Int64Type, + "total_iops_sec": types.Int64Type, + "total_iops_sec_max": types.Int64Type, + "write_bytes_sec": types.Int64Type, + "write_bytes_sec_max": types.Int64Type, + "write_iops_sec": types.Int64Type, + "write_iops_sec_max": types.Int64Type, +} + +type ResourceItemReplicationModel struct { + DiskID types.Int64 `tfsdk:"disk_id"` + PoolID types.String `tfsdk:"pool_id"` + Role types.String `tfsdk:"role"` + SelfVolumeID types.String `tfsdk:"self_volume_id"` + StorageID types.String `tfsdk:"storage_id"` + VolumeID types.String `tfsdk:"volume_id"` +} + +var ResourceItemReplication = map[string]attr.Type{ + "disk_id": types.Int64Type, + "pool_id": types.StringType, + "role": types.StringType, + "self_volume_id": types.StringType, + "storage_id": types.StringType, + "volume_id": types.StringType, +} diff --git a/internal/service/cloudapi/disks/models/model_resource_disk_snapshot.go b/internal/service/cloudapi/disks/models/model_resource_disk_snapshot.go new file mode 100644 index 0000000..13a33ce --- /dev/null +++ b/internal/service/cloudapi/disks/models/model_resource_disk_snapshot.go @@ -0,0 +1,25 @@ +package models + +import ( + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ResourceDiskSnapshotModel struct { + // request fields - required + DiskID types.Int64 `tfsdk:"disk_id"` + Label types.String `tfsdk:"label"` + + // request fields - optional + Rollback types.Bool `tfsdk:"rollback"` + TimeStamp types.Int64 `tfsdk:"timestamp"` + Timeouts timeouts.Value `tfsdk:"timeouts"` + + // response fields + Id types.String 
`tfsdk:"id"` + LastUpdated types.String `tfsdk:"last_updated"` + GUID types.String `tfsdk:"guid"` + ResID types.String `tfsdk:"res_id"` + SnapSetGUID types.String `tfsdk:"snap_set_guid"` + SnapSetTime types.Int64 `tfsdk:"snap_set_time"` +} diff --git a/internal/service/cloudapi/disks/resource_disk.go b/internal/service/cloudapi/disks/resource_disk.go new file mode 100644 index 0000000..7b0b5d0 --- /dev/null +++ b/internal/service/cloudapi/disks/resource_disk.go @@ -0,0 +1,354 @@ +package disks + +import ( + "context" + "reflect" + "strconv" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceDisk{} + _ resource.ResourceWithImportState = &resourceDisk{} +) + +// NewResourceDisk is a helper function to simplify the provider implementation. 
+func NewResourceDisk() resource.Resource { + return &resourceDisk{} +} + +// resourceDisk is the resource implementation. +type resourceDisk struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceDisk) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create resource group + var plan models.ResourceDiskModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDisk: Error receiving the plan") + return + } + + contextCreateMap := map[string]any{ + "account_id": plan.AccountID.ValueInt64(), + "disk_name": plan.DiskName.ValueString(), + "size_max": plan.SizeMax.ValueInt64(), + "gid": plan.GID.ValueInt64(), + } + tflog.Info(ctx, "Create resourceDisk: got plan successfully", contextCreateMap) + tflog.Info(ctx, "Create resourceDisk: start creating", contextCreateMap) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceDisk: set timeouts successfully", map[string]any{ + "account_id": plan.AccountID.ValueInt64(), + "disk_name": plan.DiskName.ValueString(), + "size_max": plan.SizeMax.ValueInt64(), + "gid": plan.GID.ValueInt64(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceDisk: starting input checks", contextCreateMap) + resp.Diagnostics.Append(resourceDiskCreateInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDisk: Error input checks") + return + } + tflog.Info(ctx, "Create resourceDisk: input checks successful", contextCreateMap) + + // Make create request and get response + createReq := utilities.CreateRequestResourceDisk(ctx, &plan) + tflog.Info(ctx, "Create resourceDisk: before call CloudAPI().Disks().Create", map[string]any{"req": createReq}) + diskId, err := r.client.CloudAPI().Disks().Create(ctx, createReq) + if err != nil { + resp.Diagnostics.AddError( + "Create resourceDisk: unable to Create Disk", + err.Error(), + ) + return + } + plan.Id = types.StringValue(strconv.Itoa(int(diskId))) + tflog.Info(ctx, "Create resourceDisk: disk created", map[string]any{"diskId": diskId, "disk_name": plan.DiskName.ValueString()}) + + // additional settings after disk creation: in case of failures, warnings are added to resp.Diagnostics, + // because additional settings failure is not critical. If errors were added instead of warnings, terraform + // framework would mark resource as tainted and delete it, which would be unwanted behaviour. + + // sets io limits to disk if needed, warnings added to resp.Diagnostics in case of failure. + if !plan.IOTune.IsUnknown() { + resp.Diagnostics.Append(utilities.LimitIOCreateDisk(ctx, diskId, &plan, r.client)...) + } + + // share disk if needed, warnings added to resp.Diagnostics in case of failure. + if !plan.Shareable.IsUnknown() && plan.Shareable.ValueBool() { // if shareable = true + resp.Diagnostics.Append(utilities.ShareableCreateDisk(ctx, diskId, r.client)...) + } + + tflog.Info(ctx, "Create resourceDisk: resource creation is completed", map[string]any{"disk_id": diskId}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceDisk) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceDiskModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error get state") + return + } + tflog.Info(ctx, "Read resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()}) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceDisk: set timeouts successfully", map[string]any{ + "disk_id": state.Id.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.DiskReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error reading disk status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.DiskResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error flatten disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDisk: Error set state") + return + } + tflog.Info(ctx, "End read resourceDisk") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceDisk) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceDiskModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceDisk: got plan successfully", map[string]any{"disk_id": plan.Id.ValueString()}) + + // Retrieve values from state + var state models.ResourceDiskModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceDisk: set timeouts successfully", map[string]any{ + "disk_id": state.Id.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking if inputs are valid + tflog.Info(ctx, "Update resourceDisk: starting input checks", map[string]any{"disk_id": plan.Id.ValueString()}) + resp.Diagnostics.Append(resourceDiskUpdateInputChecks(ctx, &plan, &state, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error input checks") + return + } + tflog.Info(ctx, "Update resourceDisk: input checks successful", map[string]any{"disk_id": state.Id.ValueString()}) + + diskId, err := strconv.Atoi(state.Id.ValueString()) + if err != nil { + resp.Diagnostics.AddError("Update resourceDisk: Cannot parse disk ID from state", err.Error()) + return + } + + // resize disk + if !plan.SizeMax.Equal(state.SizeMax) { + resp.Diagnostics.Append(utilities.SizeMaxUpdateDisk(ctx, uint64(diskId), &plan, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error resizing disk") + return + } + } + + // rename disk + if !plan.DiskName.Equal(state.DiskName) { + resp.Diagnostics.Append(utilities.NameUpdateDisk(ctx, uint64(diskId), &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error renaming disk") + return + } + } + + // change io limits + if !reflect.DeepEqual(plan.IOTune, state.IOTune) && !plan.IOTune.IsUnknown() { + resp.Diagnostics.Append(utilities.LimitIOUpdateDisk(ctx, uint64(diskId), &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error setting new io limits to disk") + return + } + } + + // share/unshare disk + if !plan.Shareable.Equal(state.Shareable) && !plan.Shareable.IsUnknown() { + resp.Diagnostics.Append(utilities.ShareableUpdateDisk(ctx, uint64(diskId), plan.Shareable.ValueBool(), r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDisk: Error with disk share/unshare") + return + } + } + + tflog.Info(ctx, "Update resourceDisk: disk update is completed", map[string]any{"disk_id": plan.Id.ValueString()}) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskResource(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceDisk) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceDiskModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDisk: Error get state") + return + } + tflog.Info(ctx, "Delete resourceDisk: got state successfully", map[string]any{"disk_id": state.Id.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDisk: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceDisk: set timeouts successfully", map[string]any{ + "disk_id": state.Id.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + // Delete existing resource group + delReq := disks.DeleteRequest{ + DiskID: uint64(state.DiskId.ValueInt64()), + Detach: state.Detach.ValueBool(), // default false + Permanently: state.Permanently.ValueBool(), // default false + } + + if !state.Reason.IsNull() { + delReq.Reason = state.Reason.ValueString() + } + + tflog.Info(ctx, "Delete resourceDisk: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq}) + _, err := r.client.CloudAPI().Disks().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceDisk: Error deleting disk with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceDisk", map[string]any{"disk_id": 
state.Id.ValueString()}) +} + +// Schema defines the schema for the resource. +func (r *resourceDisk) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceDisk(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceDisk) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk" +} + +// Configure adds the provider configured client to the resource. +func (r *resourceDisk) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceDisk") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceDisk successfully") +} + +func (r *resourceDisk) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudapi/disks/resource_disk_replication.go b/internal/service/cloudapi/disks/resource_disk_replication.go new file mode 100644 index 0000000..3aa5e40 --- /dev/null +++ b/internal/service/cloudapi/disks/resource_disk_replication.go @@ -0,0 +1,326 @@ +package disks + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort "repository.basistech.ru/BASIS/decort-golang-sdk" + 
"repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceDiskReplication{} + _ resource.ResourceWithImportState = &resourceDiskReplication{} +) + +// NewResourceDiskReplications is a helper function to simplify the provider implementation. +func NewResourceDiskReplications() resource.Resource { + return &resourceDiskReplication{} +} + +// resourceDiskReplication is the resource implementation. +type resourceDiskReplication struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceDiskReplication) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create disk replication + var plan models.ResourceRecordDiskReplicationModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskReplication: Error receiving the plan") + return + } + + tflog.Info(ctx, "Create resourceDiskReplication: got plan successfully") + tflog.Info(ctx, "Create resourceDiskReplication: start creating") + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskReplication: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceDiskReplication: set timeouts successfully") + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + tflog.Info(ctx, "Create resourceDiskReplication: starting input checks") + resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskReplication: Error input checks") + return + } + tflog.Info(ctx, "Create resourceDiskReplication: input checks successful") + + reqCreate := disks.ReplicateRequest{ + DiskID: uint64(plan.DiskId.ValueInt64()), + Name: plan.Name.ValueString(), + SepID: uint64(plan.SepID.ValueInt64()), + PoolName: plan.PoolName.ValueString(), + } + + diskReplicaId, err := r.client.CloudAPI().Disks().Replicate(ctx, reqCreate) + if err != nil { + resp.Diagnostics.AddError( + "Create resourceDiskReplication: unable to replicate disk", + err.Error(), + ) + return + } + + tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: create replica complete for disk with ID: %d", plan.DiskId.ValueInt64())) + + start := plan.Start.ValueBool() + ok := !(plan.Start.IsNull() || plan.Start.IsUnknown()) + + if ok && !start { + tflog.Info(ctx, fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, try to stop", uint64(plan.DiskId.ValueInt64()), diskReplicaId)) + reqStop := disks.ReplicationStopRequest{ + DiskID: uint64(plan.DiskId.ValueInt64()), + } + _, err = r.client.CloudAPI().Disks().ReplicationStop(ctx, reqStop) + if err != nil { + resp.Diagnostics.AddError( + fmt.Sprintf("resourceDiskReplicationCreate: replication between disk with ID: %d and replica with ID: %d, stop failed", uint64(plan.DiskId.ValueInt64()), diskReplicaId), + err.Error(), + ) + return + } + } + + // Map response body to 
schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + return + } + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceDiskReplication) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceRecordDiskReplicationModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskReplication: Error get state") + return + } + tflog.Info(ctx, "Read resourceDiskReplication: got state successfully") + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskReplication: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceDiskReplication: set timeouts successfully", map[string]any{ + "disk_id": state.DiskId.ValueInt64(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // read status + resp.Diagnostics.Append(utilities.ReplicationDiskReadStatus(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskReplication: Error reading disk status") + return + } + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskReplication: Error flatten disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskReplication: Error set state") + return + } + tflog.Info(ctx, "End read resourceDiskReplication") +} + +// Update updates the resource and sets the updated Terraform state on success. +func (r *resourceDiskReplication) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceRecordDiskReplicationModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the plan") + return + } + tflog.Info(ctx, "Update resourceDiskReplication: got plan successfully", map[string]any{"disk_id": plan.DiskId.ValueInt64()}) + + // Retrieve values from state + var state models.ResourceRecordDiskReplicationModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()}) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceDiskReplication: set timeouts successfully", map[string]any{ + "disk_id": state.DiskId.ValueInt64(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + // Checking if inputs are valid + tflog.Info(ctx, "Update resourceDiskReplication: starting input checks", map[string]any{"disk_id": plan.DiskId.ValueInt64()}) + resp.Diagnostics.Append(resourceDiskReplicationInputChecks(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: Error input checks") + return + } + tflog.Info(ctx, "Update resourceDiskReplication: input checks successful", map[string]any{"disk_id": state.DiskId.ValueInt64()}) + + if !plan.Start.Equal(state.Start) { + resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateStartStop(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateStartStop") + return + } + } + + if !plan.Pause.Equal(state.Pause) { + resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdatePause(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdatePause") + return + } + } + + if !plan.Reverse.Equal(state.Reverse) { + resp.Diagnostics.Append(utilities.UtilityDiskReplicationUpdateReverse(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + // Fixed copy-paste: this branch calls UpdateReverse, not UpdatePause. + tflog.Error(ctx, "Update resourceDiskReplication: error with UtilityDiskReplicationUpdateReverse") + return + } + } + + tflog.Info(ctx, "Update resourceDiskReplication: disk update is completed", map[string]any{"disk_id": plan.DiskId.ValueInt64()}) + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.DiskReplicationResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: Error flatten disk") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskReplication: Error set state") + return + } + tflog.Info(ctx, "End update resourceDiskReplication") +} + +// Delete deletes the resource and removes the Terraform state on success. 
+func (r *resourceDiskReplication) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceRecordDiskReplicationModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDiskReplication: Error get state") + return + } + tflog.Info(ctx, "Delete resourceDiskReplication: got state successfully", map[string]any{"disk_id": state.DiskId.ValueInt64()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDiskReplication: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceDiskReplication: set timeouts successfully", map[string]any{ + "disk_id": state.DiskId.ValueInt64(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + // Delete existing resource group + delReq := disks.DeleteRequest{ + DiskID: uint64(state.DiskId.ValueInt64()), + Detach: state.Detach.ValueBool(), // default false + Permanently: state.Permanently.ValueBool(), // default false + } + + if !state.Reason.IsNull() { + delReq.Reason = state.Reason.ValueString() + } + + tflog.Info(ctx, "Delete resourceDiskReplication: before call CloudAPI().Disks().Delete", map[string]any{"req": delReq}) + _, err := r.client.CloudAPI().Disks().Delete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceDiskReplication: Error deleting disk with error: ", err.Error()) + return + } + + tflog.Info(ctx, "End delete resourceDiskReplication", map[string]any{"disk_id": state.DiskId.ValueInt64()}) +} + +// Schema defines the schema for the resource. 
+func (r *resourceDiskReplication) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceDiskReplication(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceDiskReplication) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_replication" +} + +// Configure adds the provider configured client to the resource. +func (r *resourceDiskReplication) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceDiskReplication") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceDiskReplication successfully") +} + +func (r *resourceDiskReplication) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudapi/disks/resource_disk_snapshot.go b/internal/service/cloudapi/disks/resource_disk_snapshot.go new file mode 100644 index 0000000..94d0064 --- /dev/null +++ b/internal/service/cloudapi/disks/resource_disk_snapshot.go @@ -0,0 +1,295 @@ +package disks + +import ( + "context" + "time" + + "github.com/hashicorp/terraform-plugin-framework-timeouts/resource/timeouts" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-log/tflog" + decort 
"repository.basistech.ru/BASIS/decort-golang-sdk" + "repository.basistech.ru/BASIS/decort-golang-sdk/pkg/cloudapi/disks" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/client" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/constants" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/flattens" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/models" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/schemas" + "repository.basistech.ru/BASIS/terraform-provider-dynamix/internal/service/cloudapi/disks/utilities" +) + +// Ensure the implementation satisfies the expected interfaces. +var ( + _ resource.Resource = &resourceDiskSnapshot{} + _ resource.ResourceWithImportState = &resourceDiskSnapshot{} +) + +// NewResourceDiskSnapshot is a helper function to simplify the provider implementation. +func NewResourceDiskSnapshot() resource.Resource { + return &resourceDiskSnapshot{} +} + +// resourceDiskSnapshot is the resource implementation. +type resourceDiskSnapshot struct { + client *decort.DecortClient +} + +// Create creates the resource and sets the initial Terraform state. +func (r *resourceDiskSnapshot) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + // Get plan to create resource group + var plan models.ResourceDiskSnapshotModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskSnapshot: Error receiving the plan") + return + } + + ctxCreateSnpsht := map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString(), + } + tflog.Info(ctx, "Create resourceDiskSnapshot: got plan successfully", ctxCreateSnpsht) + tflog.Info(ctx, "Create resourceDiskSnapshot: start creating", ctxCreateSnpsht) + + // Set timeouts + createTimeout, diags := plan.Timeouts.Create(ctx, constants.Timeout600s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskSnapshot: Error set timeout") + return + } + tflog.Info(ctx, "Create resourceDiskSnapshot: set timeouts successfully", map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString(), + "createTimeout": createTimeout}) + + ctx, cancel := context.WithTimeout(ctx, createTimeout) + defer cancel() + + // Check if input values are valid in the platform + _, diags = utilities.DiskSnapshotCheckPresence(ctx, &plan, r.client) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskSnapshot: disk snapshot does not exist") + return + } + tflog.Info(ctx, "Create resourceDiskSnapshot: snapshot successfully loaded", ctxCreateSnpsht) + + if plan.Rollback.ValueBool() { // default is false + resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Create resourceDiskSnapshot: Error rollback snapshot") + return + } + } + + tflog.Info(ctx, "Create resourceDiskSnapshot: resource creation is completed", ctxCreateSnpsht) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Read refreshes the Terraform state with the latest data. +func (r *resourceDiskSnapshot) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + // Get current state + var state models.ResourceDiskSnapshotModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskSnapshot: Error get state") + return + } + tflog.Info(ctx, "Read resourceDiskSnapshot: got state successfully", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString(), + }) + + // Set timeouts + readTimeout, diags := state.Timeouts.Read(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskSnapshot: Error set timeout") + return + } + tflog.Info(ctx, "Read resourceDiskSnapshot: set timeouts successfully", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString(), + "readTimeout": readTimeout}) + + ctx, cancel := context.WithTimeout(ctx, readTimeout) + defer cancel() + + // Overwrite items with refreshed state + resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &state, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskSnapshot: Error flatten disk snapshot") + return + } + + // Set refreshed state + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Read resourceDiskSnapshot: Error set state") + return + } + tflog.Info(ctx, "End read resourceDiskSnapshot") +} + +// Update updates the resource and sets the updated Terraform state on success. 
+func (r *resourceDiskSnapshot) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + // Retrieve values from plan + var plan models.ResourceDiskSnapshotModel + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the plan") + return + } + + ctxSnpsht := map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString(), + } + tflog.Info(ctx, "Update resourceDiskSnapshot: got plan successfully", ctxSnpsht) + + // Retrieve values from state + var state models.ResourceDiskSnapshotModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskSnapshot: Error receiving the state") + return + } + tflog.Info(ctx, "Update resourceDiskSnapshot: got state successfully", ctxSnpsht) + + // Set timeouts + updateTimeout, diags := plan.Timeouts.Update(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskSnapshot: Error set timeout") + return + } + tflog.Info(ctx, "Update resourceDiskSnapshot: set timeouts successfully", map[string]any{ + "disk_id": plan.DiskID.ValueInt64(), + "label": plan.Label.ValueString(), + "updateTimeout": updateTimeout}) + + ctx, cancel := context.WithTimeout(ctx, updateTimeout) + defer cancel() + + if !plan.Rollback.Equal(state.Rollback) && plan.Rollback.ValueBool() { + resp.Diagnostics.Append(utilities.RollbackDiskSnapshot(ctx, &plan, r.client)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Update resourceDiskSnapshot: Error rollback snapshot") + return + } + } + + tflog.Info(ctx, "Update resourceDiskSnapshot: disk snapshot update is completed", ctxSnpsht) + + // Map response body to schema and populate Computed attribute values + resp.Diagnostics.Append(flattens.DiskSnapshotResource(ctx, &plan, r.client)...) 
+ if resp.Diagnostics.HasError() { + return + } + + // Set data last update + plan.LastUpdated = types.StringValue(time.Now().Format(time.RFC850)) + + // Set state to fully populated data + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) + if resp.Diagnostics.HasError() { + return + } +} + +// Delete deletes the resource and removes the Terraform state on success. +func (r *resourceDiskSnapshot) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + // Get current state + var state models.ResourceDiskSnapshotModel + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDiskSnapshot: Error get state") + return + } + tflog.Info(ctx, "Delete resourceDiskSnapshot: got state successfully", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString()}) + + // Set timeouts + deleteTimeout, diags := state.Timeouts.Delete(ctx, constants.Timeout300s) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDiskSnapshot: Error set timeout") + return + } + tflog.Info(ctx, "Delete resourceDiskSnapshot: set timeouts successfully", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString(), + "deleteTimeout": deleteTimeout}) + + ctx, cancel := context.WithTimeout(ctx, deleteTimeout) + defer cancel() + + // Check if input values are valid in the platform + _, diags = utilities.DiskSnapshotCheckPresence(ctx, &state, r.client) + resp.Diagnostics.Append(diags...) 
+ if resp.Diagnostics.HasError() { + tflog.Error(ctx, "Delete resourceDiskSnapshot: disk snapshot does not exist") + return + } + + delReq := disks.SnapshotDeleteRequest{ + DiskID: uint64(state.DiskID.ValueInt64()), + Label: state.Label.ValueString(), + } + + tflog.Info(ctx, "Delete resourceDiskSnapshot: before call CloudAPI().Disks().SnapshotDelete", map[string]any{"req": delReq}) + _, err := r.client.CloudAPI().Disks().SnapshotDelete(ctx, delReq) + if err != nil { + resp.Diagnostics.AddError("Delete resourceDiskSnapshot: Error deleting disk with error: ", err.Error()) + return + } + + // Log the numeric disk ID, consistent with every other log entry in this method + // (previously logged state.Id.ValueString() under the "disk_id" key). + tflog.Info(ctx, "End delete resourceDiskSnapshot", map[string]any{ + "disk_id": state.DiskID.ValueInt64(), + "label": state.Label.ValueString()}) +} + +// Schema defines the schema for the resource. +func (r *resourceDiskSnapshot) Schema(ctx context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: schemas.MakeSchemaResourceDiskSnapshot(), + Blocks: map[string]schema.Block{ + "timeouts": timeouts.Block(ctx, timeouts.Opts{Create: true, Read: true, Update: true, Delete: true}), + }, + } +} + +// Metadata returns the resource type name. +func (r *resourceDiskSnapshot) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = req.ProviderTypeName + "_disk_snapshot" +} + +// Configure adds the provider configured client to the resource. 
+func (r *resourceDiskSnapshot) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) { + tflog.Info(ctx, "Get Configure resourceDiskSnapshot") + r.client = client.Resource(ctx, &req, resp) + tflog.Info(ctx, "Getting Configure resourceDiskSnapshot successfully") +} + +func (r *resourceDiskSnapshot) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + // Retrieve import ID and save to id attribute + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk.go new file mode 100644 index 0000000..0787900 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk.go @@ -0,0 +1,197 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDisk() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // required attributes + "disk_id": schema.Int64Attribute{ + Required: true, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ 
+ Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": 
schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list.go new file mode 100644 index 0000000..ee51429 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list.go @@ -0,0 +1,265 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskList() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "Find by ID", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "Find by name", + }, + "account_name": schema.StringAttribute{ + Optional: true, + Description: "Find by account name", + }, + "disk_max_size": schema.Int64Attribute{ + 
Optional: true, + Description: "Find by max disk size", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "Find by status", + }, + "shared": schema.BoolAttribute{ + Optional: true, + Description: "Find by shared field", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "ID of the account the disks belong to", + }, + "type": schema.StringAttribute{ + Optional: true, + Description: "type of the disks", + }, + "sep_id": schema.Int64Attribute{ + Optional: true, + Description: "find by sep ID", + }, + "pool_name": schema.StringAttribute{ + Optional: true, + Description: "find by pool name", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ 
+ Computed: true, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "machine_id": schema.Int64Attribute{ + Computed: true, + }, + "machine_name": schema.StringAttribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": 
schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_deleted.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_deleted.go new file mode 100644 index 0000000..c341c03 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_deleted.go @@ -0,0 +1,253 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListDeleted() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "Find by ID", + }, + "name": schema.StringAttribute{ + Optional: true, + Description: "Find by 
name", + }, + "account_name": schema.StringAttribute{ + Optional: true, + Description: "Find by account name", + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + Description: "Find by max disk size", + }, + "shared": schema.BoolAttribute{ + Optional: true, + Description: "Find by shared field", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "ID of the account the disks belong to", + }, + "type": schema.StringAttribute{ + Optional: true, + Description: "type of the disks", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + 
Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "machine_id": schema.Int64Attribute{ + Computed: true, + }, + "machine_name": schema.StringAttribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": 
schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types.go new file mode 100644 index 0000000..c5a68c7 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types.go @@ -0,0 +1,36 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListTypes() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page 
size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types_detailed.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types_detailed.go new file mode 100644 index 0000000..82be58a --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_types_detailed.go @@ -0,0 +1,62 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListTypesDetailed() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "pools": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "name": schema.StringAttribute{ + Computed: true, + }, + "system": schema.StringAttribute{ + Computed: true, + }, + "types": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + }, + }, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "entry_count": schema.Int64Attribute{ + Computed: true, 
+ }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_unattached.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_unattached.go new file mode 100644 index 0000000..b6feb97 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_list_unattached.go @@ -0,0 +1,265 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaDataSourceDiskListUnattached() map[string]schema.Attribute { + return map[string]schema.Attribute{ + // optional attributes + "by_id": schema.Int64Attribute{ + Optional: true, + Description: "Find by ID", + }, + "account_name": schema.StringAttribute{ + Optional: true, + Description: "Find by account name", + }, + "disk_max_size": schema.Int64Attribute{ + Optional: true, + Description: "Find by max disk size", + }, + "status": schema.StringAttribute{ + Optional: true, + Description: "Find by status", + }, + "account_id": schema.Int64Attribute{ + Optional: true, + Description: "ID of the account the disks belong to", + }, + "sep_id": schema.Int64Attribute{ + Optional: true, + Description: "find by sep ID", + }, + "pool_name": schema.StringAttribute{ + Optional: true, + Description: "find by pool name", + }, + "type": schema.StringAttribute{ + Optional: true, + Description: "type of the disks", + }, + "sort_by": schema.StringAttribute{ + Optional: true, + Description: "sort by one of supported fields, format +|-(field)", + }, + "page": schema.Int64Attribute{ + Optional: true, + Description: "Page number", + }, + "size": schema.Int64Attribute{ + Optional: true, + Description: "Page size", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + }, + "items": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "ckey": 
schema.StringAttribute{ + Computed: true, + }, + "meta": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "boot_partition": schema.Int64Attribute{ + Computed: true, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "disk_path": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "guid": schema.Int64Attribute{ + Computed: true, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, 
+ }, + }, + }, + "iqn": schema.StringAttribute{ + Computed: true, + }, + "login": schema.StringAttribute{ + Computed: true, + }, + "milestones": schema.Int64Attribute{ + Computed: true, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "passwd": schema.StringAttribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "purge_attempts": schema.Int64Attribute{ + Computed: true, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "reality_device_number": schema.Int64Attribute{ + Computed: true, + }, + "reference_id": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + 
Description: "The name of the subscriber ('account') to whom this disk belongs",
true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "disk_name": schema.StringAttribute{ + Computed: true, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: 
true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_id": schema.Int64Attribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": schema.Int64Attribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "type": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_data_source_disk_snapshot.go b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_snapshot.go new file mode 100644 index 0000000..55f0b24 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_data_source_disk_snapshot.go @@ -0,0 +1,39 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" +) + +func MakeSchemaDataSourceDiskSnapshot() map[string]schema.Attribute { + return 
Description: "The unique ID of the disk",
Description: "name of disk",
the disk from compute", + // default is false + }, + "permanently": schema.BoolAttribute{ + Optional: true, + Description: "Whether to completely delete the disk, works only with non attached disks", + // default is false + }, + "reason": schema.StringAttribute{ + Optional: true, + Description: "Reason for deletion", + }, + "shareable": schema.BoolAttribute{ + Optional: true, + Computed: true, + Description: "share disk", + }, + "iotune": schema.SingleNestedAttribute{ + Optional: true, + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of bytes to read per second", + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of bytes to read", + }, + "read_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of io read operations per second", + }, + "read_iops_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of io read operations", + }, + "size_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Size of io operations", + }, + "total_bytes_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Total size bytes per second", + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum total size of bytes per second", + }, + "total_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Total number of io operations per second", + }, + "total_iops_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum total number of io operations per second", + }, + "write_bytes_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of bytes to write per second", + }, + "write_bytes_sec_max": schema.Int64Attribute{ + 
Optional: true, + Computed: true, + Description: "Maximum number of bytes to write per second", + }, + "write_iops_sec": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Number of write operations per second", + }, + "write_iops_sec_max": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Maximum number of write operations per second", + }, + }, + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "last_updated": schema.StringAttribute{ + Computed: true, + Description: "Timestamp of the last Terraform update of the disk resource.", + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + 
"res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "size_used": schema.Float64Attribute{ + Computed: true, + }, + "snapshots": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "guid": schema.StringAttribute{ + Computed: true, + }, + "label": schema.StringAttribute{ + Computed: true, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "snap_set_guid": schema.StringAttribute{ + Computed: true, + }, + "snap_set_time": schema.Int64Attribute{ + Computed: true, + }, + "timestamp": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + }, + "status": schema.StringAttribute{ + Computed: true, + }, + "tech_status": schema.StringAttribute{ + Computed: true, + }, + "vmid": schema.Int64Attribute{ + Computed: true, + }, + } +} diff --git a/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go b/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go new file mode 100644 index 0000000..96fc861 --- /dev/null +++ b/internal/service/cloudapi/disks/schemas/schema_resource_disk_replication.go @@ -0,0 +1,253 @@ +package schemas + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func MakeSchemaResourceDiskReplication() map[string]schema.Attribute { + // required attributes + return map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Required: true, + }, + "disk_name": schema.StringAttribute{ + Required: true, + }, + "sep_id": schema.Int64Attribute{ + Required: true, + }, + "pool_name": 
schema.StringAttribute{ + Required: true, + }, + + // optional attributes + "pause": schema.BoolAttribute{ + Optional: true, + }, + "reverse": schema.BoolAttribute{ + Optional: true, + }, + "start": schema.BoolAttribute{ + Optional: true, + }, + "detach": schema.BoolAttribute{ + Optional: true, + }, + "permanently": schema.BoolAttribute{ + Optional: true, + }, + "reason": schema.StringAttribute{ + Optional: true, + }, + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "replica_disk_id": schema.Int64Attribute{ + Computed: true, + }, + "status_replication": schema.StringAttribute{ + Computed: true, + }, + "account_id": schema.Int64Attribute{ + Computed: true, + }, + "account_name": schema.StringAttribute{ + Computed: true, + }, + "acl": schema.StringAttribute{ + Computed: true, + }, + "computes": schema.ListNestedAttribute{ + Computed: true, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "compute_id": schema.StringAttribute{ + Computed: true, + }, + "compute_name": schema.StringAttribute{ + Computed: true, + }, + }, + }, + }, + "created_time": schema.Int64Attribute{ + Computed: true, + }, + "deleted_time": schema.Int64Attribute{ + Computed: true, + }, + "desc": schema.StringAttribute{ + Computed: true, + }, + "destruction_time": schema.Int64Attribute{ + Computed: true, + }, + "devicename": schema.StringAttribute{ + Computed: true, + }, + "gid": schema.Int64Attribute{ + Computed: true, + }, + "image_id": schema.Int64Attribute{ + Computed: true, + }, + "images": schema.ListAttribute{ + Computed: true, + ElementType: types.StringType, + }, + "iotune": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "read_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "read_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec": 
schema.Int64Attribute{ + Computed: true, + }, + "read_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "size_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "total_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_bytes_sec_max": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec": schema.Int64Attribute{ + Computed: true, + }, + "write_iops_sec_max": schema.Int64Attribute{ + Computed: true, + }, + }, + }, + "order": schema.Int64Attribute{ + Computed: true, + }, + "params": schema.StringAttribute{ + Computed: true, + }, + "parent_id": schema.Int64Attribute{ + Computed: true, + }, + "pci_slot": schema.Int64Attribute{ + Computed: true, + }, + "pool": schema.StringAttribute{ + Computed: true, + }, + "present_to": schema.ListAttribute{ + Computed: true, + ElementType: types.Int64Type, + }, + "purge_time": schema.Int64Attribute{ + Computed: true, + }, + "replication": schema.SingleNestedAttribute{ + Computed: true, + Attributes: map[string]schema.Attribute{ + "disk_id": schema.Int64Attribute{ + Computed: true, + }, + "pool_id": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "self_volume_id": schema.StringAttribute{ + Computed: true, + }, + "storage_id": schema.StringAttribute{ + Computed: true, + }, + "volume_id": schema.StringAttribute{ + Computed: true, + }, + }, + }, + "res_id": schema.StringAttribute{ + Computed: true, + }, + "res_name": schema.StringAttribute{ + Computed: true, + }, + "role": schema.StringAttribute{ + Computed: true, + }, + "sep_type": schema.StringAttribute{ + Computed: true, + }, + "shareable": schema.BoolAttribute{ + Computed: true, + }, + "size_max": 
Description: "The unique ID of the disk",
"timestamp": schema.Int64Attribute{ + Optional: true, + Computed: true, + Description: "Snapshot time", + }, + + // computed attributes + "id": schema.StringAttribute{ + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + // id is generated as "#