parent 71ddaa3345
commit 31be0a0b54
| @@ -0,0 +1,10 @@ | ||||
| FROM docker.io/hashicorp/terraform:latest | ||||
| 
 | ||||
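| # Pre-initialize a Terraform working directory with the DECORT provider binary from | ||||
| # the build context, placed in the local plugin path that terraform init will pick up. | ||||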
| WORKDIR /opt/decort/tf/ | ||||
| COPY provider.tf ./ | ||||
| COPY terraform-provider-decort ./terraform.d/plugins/digitalenergy.online/decort/decort/3.1.1/linux_amd64/ | ||||
| RUN terraform init | ||||
| 
 | ||||
| WORKDIR /tf | ||||
| COPY entrypoint.sh / | ||||
| ENTRYPOINT ["/entrypoint.sh", "/bin/terraform"] | ||||
| @@ -0,0 +1,4 @@ | ||||
| #!/bin/sh | ||||
| 
 | ||||
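| # Copy the pre-initialized working directory (including dotfiles such as .terraform) | ||||
| # into the mounted workspace, then exec the requested command (/bin/terraform plus any arguments). | ||||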
| cp -aL /opt/decort/tf/* /opt/decort/tf/.* ./ | ||||
| exec "$@" | ||||
| @@ -0,0 +1,82 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 
 | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| ) | ||||
| 
 | ||||
| func dataSourceDiskListTypesRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	listTypes, err := utilityDiskListTypesCheckPresence(ctx, d, m) | ||||
| 	if err != nil { | ||||
| 		return diag.FromErr(err) | ||||
| 	} | ||||
| 
 | ||||
| 	id := uuid.New() | ||||
| 	d.SetId(id.String()) | ||||
| 	d.Set("types", listTypes) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskListTypesSchemaMake() map[string]*schema.Schema { | ||||
| 	res := map[string]*schema.Schema{ | ||||
| 		"types": { | ||||
| 			Type:     schema.TypeList, | ||||
| 			Computed: true, | ||||
| 			Elem: &schema.Schema{ | ||||
| 				Type: schema.TypeString, | ||||
| 			}, | ||||
| 			Description: "The types of disks in terms of their role in compute: 'B=Boot, D=Data, T=Temp'", | ||||
| 		}, | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
| 
 | ||||
| func DataSourceDiskListTypes() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 		ReadContext:   dataSourceDiskListTypesRead, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Read:    &constants.Timeout30s, | ||||
| 			Default: &constants.Timeout60s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: dataSourceDiskListTypesSchemaMake(), | ||||
| 	} | ||||
| } | ||||
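For reference, a minimal Terraform usage sketch for this data source. The registered name decort_disk_list_types is an assumption inferred from the DataSourceDiskListTypes constructor and the provider's naming convention, not something confirmed by this diff.

data "decort_disk_list_types" "all" {}

output "disk_types" {
  # list of type strings such as "B", "D", "T"
  value = data.decort_disk_list_types.all.types
}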
| @@ -0,0 +1,133 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 
 | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| ) | ||||
| 
 | ||||
| func flattenDiskListTypesDetailed(tld TypesDetailedList) []map[string]interface{} { | ||||
| 	res := make([]map[string]interface{}, 0) | ||||
| 	for _, typeListDetailed := range tld { | ||||
| 		temp := map[string]interface{}{ | ||||
| 			"pools":  flattenListTypesDetailedPools(typeListDetailed.Pools), | ||||
| 			"sep_id": typeListDetailed.SepID, | ||||
| 		} | ||||
| 		res = append(res, temp) | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
| 
 | ||||
| func flattenListTypesDetailedPools(pools PoolList) []interface{} { | ||||
| 	res := make([]interface{}, 0) | ||||
| 	for _, pool := range pools { | ||||
| 		temp := map[string]interface{}{ | ||||
| 			"name":  pool.Name, | ||||
| 			"types": pool.Types, | ||||
| 		} | ||||
| 		res = append(res, temp) | ||||
| 	} | ||||
| 
 | ||||
| 	return res | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskListTypesDetailedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	listTypesDetailed, err := utilityDiskListTypesDetailedCheckPresence(ctx, d, m) | ||||
| 	if err != nil { | ||||
| 		return diag.FromErr(err) | ||||
| 	} | ||||
| 
 | ||||
| 	id := uuid.New() | ||||
| 	d.SetId(id.String()) | ||||
| 	d.Set("items", flattenDiskListTypesDetailed(listTypesDetailed)) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskListTypesDetailedSchemaMake() map[string]*schema.Schema { | ||||
| 	res := map[string]*schema.Schema{ | ||||
| 		"items": { | ||||
| 			Type:     schema.TypeList, | ||||
| 			Computed: true, | ||||
| 			Elem: &schema.Resource{ | ||||
| 				Schema: map[string]*schema.Schema{ | ||||
| 					"pools": { | ||||
| 						Type:     schema.TypeList, | ||||
| 						Computed: true, | ||||
| 						Elem: &schema.Resource{ | ||||
| 							Schema: map[string]*schema.Schema{ | ||||
| 								"name": { | ||||
| 									Type:        schema.TypeString, | ||||
| 									Computed:    true, | ||||
| 									Description: "Pool name", | ||||
| 								}, | ||||
| 								"types": { | ||||
| 									Type:     schema.TypeList, | ||||
| 									Computed: true, | ||||
| 									Elem: &schema.Schema{ | ||||
| 										Type: schema.TypeString, | ||||
| 									}, | ||||
| 									Description: "The types of disks in terms of their role in compute: 'B=Boot, D=Data, T=Temp'", | ||||
| 								}, | ||||
| 							}, | ||||
| 						}, | ||||
| 					}, | ||||
| 					"sep_id": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Storage endpoint provider ID to create disk", | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
| 
 | ||||
| func DataSourceDiskListTypesDetailed() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 		ReadContext:   dataSourceDiskListTypesDetailedRead, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Read:    &constants.Timeout30s, | ||||
| 			Default: &constants.Timeout60s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: dataSourceDiskListTypesDetailedSchemaMake(), | ||||
| 	} | ||||
| } | ||||
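A similar hedged sketch for the detailed variant; the data source name decort_disk_list_types_detailed is again assumed from the constructor name.

data "decort_disk_list_types_detailed" "all" {}

output "pools_per_sep" {
  # each item carries a sep_id plus the pools (name and allowed types) behind it
  value = [
    for item in data.decort_disk_list_types_detailed.all.items : {
      sep_id = item.sep_id
      pools  = [for pool in item.pools : pool.name]
    }
  ]
}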
| @@ -0,0 +1,485 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
| 	"net/url" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/controller" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/flattens" | ||||
| 	log "github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| func utilityDiskListUnattachedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (UnattachedList, error) { | ||||
| 	unattachedList := UnattachedList{} | ||||
| 	c := m.(*controller.ControllerCfg) | ||||
| 	urlValues := &url.Values{} | ||||
| 	if accountId, ok := d.GetOk("account_id"); ok { | ||||
| 		urlValues.Add("accountId", strconv.Itoa(accountId.(int))) | ||||
| 	} | ||||
| 
 | ||||
| 	log.Debugf("utilityDiskListUnattachedCheckPresence: load disk Unattached list") | ||||
| 	unattachedListRaw, err := c.DecortAPICall(ctx, "POST", disksListUnattachedAPI, urlValues) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	err = json.Unmarshal([]byte(unattachedListRaw), &unattachedList) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return unattachedList, nil | ||||
| } | ||||
| 
 | ||||
| func flattenDiskListUnattached(ul UnattachedList) []map[string]interface{} { | ||||
| 	res := make([]map[string]interface{}, 0) | ||||
| 	for _, unattachedDisk := range ul { | ||||
| 		unattachedDiskAcl, _ := json.Marshal(unattachedDisk.Acl) | ||||
| 		tmp := map[string]interface{}{ | ||||
| 			"_ckey":                 unattachedDisk.Ckey, | ||||
| 			"_meta":                 flattens.FlattenMeta(unattachedDisk.Meta), | ||||
| 			"account_id":            unattachedDisk.AccountID, | ||||
| 			"account_name":          unattachedDisk.AccountName, | ||||
| 			"acl":                   string(unattachedDiskAcl), | ||||
| 			"boot_partition":        unattachedDisk.BootPartition, | ||||
| 			"created_time":          unattachedDisk.CreatedTime, | ||||
| 			"deleted_time":          unattachedDisk.DeletedTime, | ||||
| 			"desc":                  unattachedDisk.Desc, | ||||
| 			"destruction_time":      unattachedDisk.DestructionTime, | ||||
| 			"disk_path":             unattachedDisk.DiskPath, | ||||
| 			"gid":                   unattachedDisk.GridID, | ||||
| 			"guid":                  unattachedDisk.GUID, | ||||
| 			"disk_id":               unattachedDisk.ID, | ||||
| 			"image_id":              unattachedDisk.ImageID, | ||||
| 			"images":                unattachedDisk.Images, | ||||
| 			"iotune":                flattenIOTune(unattachedDisk.IOTune), | ||||
| 			"iqn":                   unattachedDisk.IQN, | ||||
| 			"login":                 unattachedDisk.Login, | ||||
| 			"milestones":            unattachedDisk.Milestones, | ||||
| 			"disk_name":             unattachedDisk.Name, | ||||
| 			"order":                 unattachedDisk.Order, | ||||
| 			"params":                unattachedDisk.Params, | ||||
| 			"parent_id":             unattachedDisk.ParentID, | ||||
| 			"passwd":                unattachedDisk.Passwd, | ||||
| 			"pci_slot":              unattachedDisk.PciSlot, | ||||
| 			"pool":                  unattachedDisk.Pool, | ||||
| 			"purge_attempts":        unattachedDisk.PurgeAttempts, | ||||
| 			"purge_time":            unattachedDisk.PurgeTime, | ||||
| 			"reality_device_number": unattachedDisk.RealityDeviceNumber, | ||||
| 			"reference_id":          unattachedDisk.ReferenceID, | ||||
| 			"res_id":                unattachedDisk.ResID, | ||||
| 			"res_name":              unattachedDisk.ResName, | ||||
| 			"role":                  unattachedDisk.Role, | ||||
| 			"sep_id":                unattachedDisk.SepID, | ||||
| 			"size_max":              unattachedDisk.SizeMax, | ||||
| 			"size_used":             unattachedDisk.SizeUsed, | ||||
| 			"snapshots":             flattenDiskSnapshotList(unattachedDisk.Snapshots), | ||||
| 			"status":                unattachedDisk.Status, | ||||
| 			"tech_status":           unattachedDisk.TechStatus, | ||||
| 			"type":                  unattachedDisk.Type, | ||||
| 			"vmid":                  unattachedDisk.VMID, | ||||
| 		} | ||||
| 		res = append(res, tmp) | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskListUnattachedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	diskListUnattached, err := utilityDiskListUnattachedCheckPresence(ctx, d, m) | ||||
| 	if err != nil { | ||||
| 		return diag.FromErr(err) | ||||
| 	} | ||||
| 
 | ||||
| 	id := uuid.New() | ||||
| 	d.SetId(id.String()) | ||||
| 	d.Set("items", flattenDiskListUnattached(diskListUnattached)) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func DataSourceDiskListUnattached() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 
 | ||||
| 		ReadContext: dataSourceDiskListUnattachedRead, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Read:    &constants.Timeout30s, | ||||
| 			Default: &constants.Timeout60s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: dataSourceDiskListUnattachedSchemaMake(), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskListUnattachedSchemaMake() map[string]*schema.Schema { | ||||
| 	res := map[string]*schema.Schema{ | ||||
| 		"account_id": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Optional:    true, | ||||
| 			Description: "ID of the account the disks belong to", | ||||
| 		}, | ||||
| 
 | ||||
| 		"items": { | ||||
| 			Type:     schema.TypeList, | ||||
| 			Computed: true, | ||||
| 			Elem: &schema.Resource{ | ||||
| 				Schema: map[string]*schema.Schema{ | ||||
| 					"_ckey": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "CKey", | ||||
| 					}, | ||||
| 					"_meta": { | ||||
| 						Type:     schema.TypeList, | ||||
| 						Computed: true, | ||||
| 						Elem: &schema.Schema{ | ||||
| 							Type: schema.TypeString, | ||||
| 						}, | ||||
| 						Description: "Meta parameters", | ||||
| 					}, | ||||
| 					"account_id": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the account the disks belong to", | ||||
| 					}, | ||||
| 					"account_name": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "The name of the subscriber (account) to whom this disk belongs", | ||||
| 					}, | ||||
| 					"acl": { | ||||
| 						Type:     schema.TypeString, | ||||
| 						Computed: true, | ||||
| 					}, | ||||
| 					"boot_partition": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Number of disk partitions", | ||||
| 					}, | ||||
| 					"created_time": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Created time", | ||||
| 					}, | ||||
| 					"deleted_time": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Deleted time", | ||||
| 					}, | ||||
| 					"desc": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Description of disk", | ||||
| 					}, | ||||
| 					"destruction_time": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Time of final deletion", | ||||
| 					}, | ||||
| 					"disk_path": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk path", | ||||
| 					}, | ||||
| 					"gid": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the grid (platform)", | ||||
| 					}, | ||||
| 					"guid": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk ID on the storage side", | ||||
| 					}, | ||||
| 					"disk_id": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the disk", | ||||
| 					}, | ||||
| 					"image_id": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Image ID", | ||||
| 					}, | ||||
| 					"images": { | ||||
| 						Type:     schema.TypeList, | ||||
| 						Computed: true, | ||||
| 						Elem: &schema.Schema{ | ||||
| 							Type: schema.TypeString, | ||||
| 						}, | ||||
| 						Description: "IDs of images using the disk", | ||||
| 					}, | ||||
| 					"iotune": { | ||||
| 						Type:     schema.TypeList, | ||||
| 						Computed: true, | ||||
| 						Elem: &schema.Resource{ | ||||
| 							Schema: map[string]*schema.Schema{ | ||||
| 								"read_bytes_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Number of bytes to read per second", | ||||
| 								}, | ||||
| 								"read_bytes_sec_max": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Maximum number of bytes to read", | ||||
| 								}, | ||||
| 								"read_iops_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Number of io read operations per second", | ||||
| 								}, | ||||
| 								"read_iops_sec_max": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Maximum number of io read operations", | ||||
| 								}, | ||||
| 								"size_iops_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Size of io operations", | ||||
| 								}, | ||||
| 								"total_bytes_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Total size bytes per second", | ||||
| 								}, | ||||
| 								"total_bytes_sec_max": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Maximum total size of bytes per second", | ||||
| 								}, | ||||
| 								"total_iops_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Total number of io operations per second", | ||||
| 								}, | ||||
| 								"total_iops_sec_max": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Maximum total number of io operations per second", | ||||
| 								}, | ||||
| 								"write_bytes_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Number of bytes to write per second", | ||||
| 								}, | ||||
| 								"write_bytes_sec_max": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Maximum number of bytes to write per second", | ||||
| 								}, | ||||
| 								"write_iops_sec": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Number of write operations per second", | ||||
| 								}, | ||||
| 								"write_iops_sec_max": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Maximum number of write operations per second", | ||||
| 								}, | ||||
| 							}, | ||||
| 						}, | ||||
| 					}, | ||||
| 					"iqn": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk IQN", | ||||
| 					}, | ||||
| 					"login": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Login to access the disk", | ||||
| 					}, | ||||
| 					"milestones": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Milestones", | ||||
| 					}, | ||||
| 					"disk_name": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Name of disk", | ||||
| 					}, | ||||
| 					"order": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk order", | ||||
| 					}, | ||||
| 					"params": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk params", | ||||
| 					}, | ||||
| 					"parent_id": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the parent disk", | ||||
| 					}, | ||||
| 					"passwd": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Password to access the disk", | ||||
| 					}, | ||||
| 					"pci_slot": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the pci slot to which the disk is connected", | ||||
| 					}, | ||||
| 					"pool": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Pool for disk location", | ||||
| 					}, | ||||
| 					"purge_attempts": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Number of deletion attempts", | ||||
| 					}, | ||||
| 					"purge_time": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Time of the last deletion attempt", | ||||
| 					}, | ||||
| 					"reality_device_number": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Reality device number", | ||||
| 					}, | ||||
| 					"reference_id": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the reference to the disk", | ||||
| 					}, | ||||
| 					"res_id": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Resource ID", | ||||
| 					}, | ||||
| 					"res_name": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Name of the resource", | ||||
| 					}, | ||||
| 					"role": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk role", | ||||
| 					}, | ||||
| 					"sep_id": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Storage endpoint provider ID to create disk", | ||||
| 					}, | ||||
| 					"size_max": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Size in GB", | ||||
| 					}, | ||||
| 					"size_used": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Amount of used space, in GB", | ||||
| 					}, | ||||
| 					"snapshots": { | ||||
| 						Type:     schema.TypeList, | ||||
| 						Computed: true, | ||||
| 						Elem: &schema.Resource{ | ||||
| 							Schema: map[string]*schema.Schema{ | ||||
| 								"guid": { | ||||
| 									Type:        schema.TypeString, | ||||
| 									Computed:    true, | ||||
| 									Description: "ID of the snapshot", | ||||
| 								}, | ||||
| 								"label": { | ||||
| 									Type:        schema.TypeString, | ||||
| 									Computed:    true, | ||||
| 									Description: "Name of the snapshot", | ||||
| 								}, | ||||
| 								"res_id": { | ||||
| 									Type:        schema.TypeString, | ||||
| 									Computed:    true, | ||||
| 									Description: "Reference to the snapshot", | ||||
| 								}, | ||||
| 								"snap_set_guid": { | ||||
| 									Type:        schema.TypeString, | ||||
| 									Computed:    true, | ||||
| 									Description: "The set snapshot ID", | ||||
| 								}, | ||||
| 								"snap_set_time": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "The set time of the snapshot", | ||||
| 								}, | ||||
| 								"timestamp": { | ||||
| 									Type:        schema.TypeInt, | ||||
| 									Computed:    true, | ||||
| 									Description: "Snapshot time", | ||||
| 								}, | ||||
| 							}, | ||||
| 						}, | ||||
| 					}, | ||||
| 					"status": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Disk status", | ||||
| 					}, | ||||
| 					"tech_status": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Technical status of the disk", | ||||
| 					}, | ||||
| 					"type": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "The type of disk in terms of its role in compute: 'B=Boot, D=Data, T=Temp'", | ||||
| 					}, | ||||
| 					"vmid": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Virtual Machine ID (Deprecated)", | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	return res | ||||
| } | ||||
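A hedged usage sketch for the unattached-disk list; the data source name decort_disk_list_unattached and the account ID below are illustrative assumptions.

data "decort_disk_list_unattached" "free" {
  account_id = 12345 # optional filter, hypothetical account ID
}

output "unattached_disk_ids" {
  value = [for disk in data.decort_disk_list_unattached.free.items : disk.disk_id]
}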
| @@ -0,0 +1,129 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 
 | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| ) | ||||
| 
 | ||||
| func dataSourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	disk, err := utilityDiskCheckPresence(ctx, d, m) | ||||
| 	if disk == nil { | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	snapshots := disk.Snapshots | ||||
| 	snapshot := Snapshot{} | ||||
| 	label := d.Get("label").(string) | ||||
| 	for _, sn := range snapshots { | ||||
| 		if label == sn.Label { | ||||
| 			snapshot = sn | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if label != snapshot.Label { | ||||
| 		return diag.Errorf("Snapshot with label \"%v\" not found", label) | ||||
| 	} | ||||
| 
 | ||||
| 	id := uuid.New() | ||||
| 	d.SetId(id.String()) | ||||
| 	d.Set("timestamp", snapshot.TimeStamp) | ||||
| 	d.Set("guid", snapshot.Guid) | ||||
| 	d.Set("res_id", snapshot.ResId) | ||||
| 	d.Set("snap_set_guid", snapshot.SnapSetGuid) | ||||
| 	d.Set("snap_set_time", snapshot.SnapSetTime) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func DataSourceDiskSnapshot() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 
 | ||||
| 		ReadContext: dataSourceDiskSnapshotRead, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Read:    &constants.Timeout30s, | ||||
| 			Default: &constants.Timeout60s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: dataSourceDiskSnapshotSchemaMake(), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskSnapshotSchemaMake() map[string]*schema.Schema { | ||||
| 	rets := map[string]*schema.Schema{ | ||||
| 		"disk_id": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Required:    true, | ||||
| 			Description: "ID of the disk", | ||||
| 		}, | ||||
| 		"label": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Required:    true, | ||||
| 			Description: "Name of the snapshot", | ||||
| 		}, | ||||
| 		"guid": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Computed:    true, | ||||
| 			Description: "ID of the snapshot", | ||||
| 		}, | ||||
| 		"timestamp": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Computed:    true, | ||||
| 			Description: "Snapshot time", | ||||
| 		}, | ||||
| 		"res_id": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Computed:    true, | ||||
| 			Description: "Reference to the snapshot", | ||||
| 		}, | ||||
| 		"snap_set_guid": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Computed:    true, | ||||
| 			Description: "The set snapshot ID", | ||||
| 		}, | ||||
| 		"snap_set_time": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Computed:    true, | ||||
| 			Description: "The set time of the snapshot", | ||||
| 		}, | ||||
| 	} | ||||
| 	return rets | ||||
| } | ||||
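A hedged usage sketch; the data source name decort_disk_snapshot and the example values are assumptions. Per the read function above, the lookup errors out if no snapshot on the disk carries the given label.

data "decort_disk_snapshot" "daily" {
  disk_id = 20100   # hypothetical disk ID
  label   = "daily" # label of an existing snapshot
}

output "daily_snapshot_timestamp" {
  value = data.decort_disk_snapshot.daily.timestamp
}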
| @@ -0,0 +1,121 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 
 | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| ) | ||||
| 
 | ||||
| func dataSourceDiskSnapshotListRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	disk, err := utilityDiskCheckPresence(ctx, d, m) | ||||
| 	if disk == nil { | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	id := uuid.New() | ||||
| 	d.SetId(id.String()) | ||||
| 	d.Set("items", flattenDiskSnapshotList(disk.Snapshots)) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func DataSourceDiskSnapshotList() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 
 | ||||
| 		ReadContext: dataSourceDiskSnapshotListRead, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Read:    &constants.Timeout30s, | ||||
| 			Default: &constants.Timeout60s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: dataSourceDiskSnapshotListSchemaMake(), | ||||
| 	} | ||||
| } | ||||
| 
 | ||||
| func dataSourceDiskSnapshotListSchemaMake() map[string]*schema.Schema { | ||||
| 	rets := map[string]*schema.Schema{ | ||||
| 		"disk_id": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Required:    true, | ||||
| 			Description: "ID of the disk", | ||||
| 		}, | ||||
| 		"items": { | ||||
| 			Type:     schema.TypeList, | ||||
| 			Computed: true, | ||||
| 			Elem: &schema.Resource{ | ||||
| 				Schema: map[string]*schema.Schema{ | ||||
| 					"label": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Name of the snapshot", | ||||
| 					}, | ||||
| 					"guid": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "ID of the snapshot", | ||||
| 					}, | ||||
| 					"timestamp": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "Snapshot time", | ||||
| 					}, | ||||
| 					"res_id": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "Reference to the snapshot", | ||||
| 					}, | ||||
| 					"snap_set_guid": { | ||||
| 						Type:        schema.TypeString, | ||||
| 						Computed:    true, | ||||
| 						Description: "The set snapshot ID", | ||||
| 					}, | ||||
| 					"snap_set_time": { | ||||
| 						Type:        schema.TypeInt, | ||||
| 						Computed:    true, | ||||
| 						Description: "The set time of the snapshot", | ||||
| 					}, | ||||
| 				}, | ||||
| 			}, | ||||
| 		}, | ||||
| 	} | ||||
| 	return rets | ||||
| } | ||||
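A hedged sketch for listing every snapshot of a disk; the data source name decort_disk_snapshot_list and the disk ID are assumptions.

data "decort_disk_snapshot_list" "all" {
  disk_id = 20100 # hypothetical disk ID
}

output "snapshot_labels" {
  value = [for snap in data.decort_disk_snapshot_list.all.items : snap.label]
}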
| @@ -0,0 +1,69 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 
 | ||||
| 	"github.com/google/uuid" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| ) | ||||
| 
 | ||||
| func dataSourceDiskListDeletedRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	diskList, err := utilityDiskListCheckPresence(ctx, d, m, disksListDeletedAPI) | ||||
| 	if err != nil { | ||||
| 		return diag.FromErr(err) | ||||
| 	} | ||||
| 
 | ||||
| 	id := uuid.New() | ||||
| 	d.SetId(id.String()) | ||||
| 	d.Set("items", flattenDiskList(diskList)) | ||||
| 
 | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func DataSourceDiskListDeleted() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 		ReadContext:   dataSourceDiskListDeletedRead, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Read:    &constants.Timeout30s, | ||||
| 			Default: &constants.Timeout60s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: dataSourceDiskListSchemaMake(), | ||||
| 	} | ||||
| } | ||||
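A hedged sketch for the deleted-disk list. The schema comes from dataSourceDiskListSchemaMake(), which is not part of this diff, so any filter arguments are omitted here; the data source name decort_disk_list_deleted is assumed from the constructor.

data "decort_disk_list_deleted" "trash" {}

output "deleted_disks" {
  value = data.decort_disk_list_deleted.trash.items
}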
| @@ -0,0 +1,246 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"net/url" | ||||
| 	"strconv" | ||||
| 
 | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/diag" | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/constants" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/controller" | ||||
| 	log "github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| func resourceDiskSnapshotCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	urlValues := &url.Values{} | ||||
| 	c := m.(*controller.ControllerCfg) | ||||
| 	disk, err := utilityDiskCheckPresence(ctx, d, m) | ||||
| 	if disk == nil { | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	snapshots := disk.Snapshots | ||||
| 	snapshot := Snapshot{} | ||||
| 	label := d.Get("label").(string) | ||||
| 	for _, sn := range snapshots { | ||||
| 		if label == sn.Label { | ||||
| 			snapshot = sn | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if label != snapshot.Label { | ||||
| 		return diag.Errorf("Snapshot with label \"%v\" not found", label) | ||||
| 	} | ||||
| 	if rollback := d.Get("rollback").(bool); rollback { | ||||
| 		urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int))) | ||||
| 		urlValues.Add("label", label) | ||||
| 		urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int))) | ||||
| 		log.Debugf("resourceDiskSnapshotCreate: snapshot rollback with label %s", label) | ||||
| 		_, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues) | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		urlValues = &url.Values{} | ||||
| 	} | ||||
| 	return resourceDiskSnapshotRead(ctx, d, m) | ||||
| } | ||||
| 
 | ||||
| func resourceDiskSnapshotRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	disk, err := utilityDiskCheckPresence(ctx, d, m) | ||||
| 	if disk == nil { | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	snapshots := disk.Snapshots | ||||
| 	snapshot := Snapshot{} | ||||
| 	label := d.Get("label").(string) | ||||
| 	for _, sn := range snapshots { | ||||
| 		if label == sn.Label { | ||||
| 			snapshot = sn | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if label != snapshot.Label { | ||||
| 		return diag.Errorf("Snapshot with label \"%v\" not found", label) | ||||
| 	} | ||||
| 
 | ||||
| 	d.SetId(d.Get("label").(string)) | ||||
| 	d.Set("timestamp", snapshot.TimeStamp) | ||||
| 	d.Set("guid", snapshot.Guid) | ||||
| 	d.Set("res_id", snapshot.ResId) | ||||
| 	d.Set("snap_set_guid", snapshot.SnapSetGuid) | ||||
| 	d.Set("snap_set_time", snapshot.SnapSetTime) | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func resourceDiskSnapshotUpdate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	urlValues := &url.Values{} | ||||
| 	c := m.(*controller.ControllerCfg) | ||||
| 	disk, err := utilityDiskCheckPresence(ctx, d, m) | ||||
| 	if disk == nil { | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	snapshots := disk.Snapshots | ||||
| 	snapshot := Snapshot{} | ||||
| 	label := d.Get("label").(string) | ||||
| 	for _, sn := range snapshots { | ||||
| 		if label == sn.Label { | ||||
| 			snapshot = sn | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if label != snapshot.Label { | ||||
| 		return diag.Errorf("Snapshot with label \"%v\" not found", label) | ||||
| 	} | ||||
| 	if d.HasChange("rollback") && d.Get("rollback").(bool) { | ||||
| 		urlValues.Add("diskId", strconv.Itoa(d.Get("disk_id").(int))) | ||||
| 		urlValues.Add("label", label) | ||||
| 		urlValues.Add("timestamp", strconv.Itoa(d.Get("timestamp").(int))) | ||||
| 		log.Debugf("resourceDiskSnapshotUpdate: snapshot rollback with label %s", label) | ||||
| 		_, err := c.DecortAPICall(ctx, "POST", disksSnapshotRollbackAPI, urlValues) | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		urlValues = &url.Values{} | ||||
| 	} | ||||
| 
 | ||||
| 	return resourceDiskSnapshotRead(ctx, d, m) | ||||
| } | ||||
| 
 | ||||
| func resourceDiskSnapshotDelete(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics { | ||||
| 	c := m.(*controller.ControllerCfg) | ||||
| 
 | ||||
| 	disk, err := utilityDiskCheckPresence(ctx, d, m) | ||||
| 	if disk == nil { // if the disk does not exist, we can't call snapshot delete | ||||
| 		d.SetId("") | ||||
| 		if err != nil { | ||||
| 			return diag.FromErr(err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 
 | ||||
| 	params := &url.Values{} | ||||
| 	params.Add("diskId", strconv.Itoa(d.Get("disk_id").(int))) | ||||
| 	params.Add("label", d.Get("label").(string)) | ||||
| 
 | ||||
| 	_, err = c.DecortAPICall(ctx, "POST", disksSnapshotDeleteAPI, params) | ||||
| 	if err != nil { | ||||
| 		return diag.FromErr(err) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
| 
 | ||||
| func resourceDiskSnapshotSchemaMake() map[string]*schema.Schema { | ||||
| 	rets := map[string]*schema.Schema{ | ||||
| 		"disk_id": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Required:    true, | ||||
| 			ForceNew:    true, | ||||
| 			Description: "ID of the disk", | ||||
| 		}, | ||||
| 		"label": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Required:    true, | ||||
| 			ForceNew:    true, | ||||
| 			Description: "Name of the snapshot", | ||||
| 		}, | ||||
| 		"rollback": { | ||||
| 			Type:        schema.TypeBool, | ||||
| 			Optional:    true, | ||||
| 			Default:     false, | ||||
| 			Description: "Set to true to roll the disk back to this snapshot", | ||||
| 		}, | ||||
| 		"guid": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Computed:    true, | ||||
| 			Description: "ID of the snapshot", | ||||
| 		}, | ||||
| 		"timestamp": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Optional:    true, | ||||
| 			Computed:    true, | ||||
| 			Description: "Snapshot time", | ||||
| 		}, | ||||
| 		"res_id": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Computed:    true, | ||||
| 			Description: "Reference to the snapshot", | ||||
| 		}, | ||||
| 		"snap_set_guid": { | ||||
| 			Type:        schema.TypeString, | ||||
| 			Computed:    true, | ||||
| 			Description: "The set snapshot ID", | ||||
| 		}, | ||||
| 		"snap_set_time": { | ||||
| 			Type:        schema.TypeInt, | ||||
| 			Computed:    true, | ||||
| 			Description: "The set time of the snapshot", | ||||
| 		}, | ||||
| 	} | ||||
| 	return rets | ||||
| } | ||||
| 
 | ||||
| func ResourceDiskSnapshot() *schema.Resource { | ||||
| 	return &schema.Resource{ | ||||
| 		SchemaVersion: 1, | ||||
| 
 | ||||
| 		CreateContext: resourceDiskSnapshotCreate, | ||||
| 		ReadContext:   resourceDiskSnapshotRead, | ||||
| 		UpdateContext: resourceDiskSnapshotUpdate, | ||||
| 		DeleteContext: resourceDiskSnapshotDelete, | ||||
| 
 | ||||
| 		Importer: &schema.ResourceImporter{ | ||||
| 			StateContext: schema.ImportStatePassthroughContext, | ||||
| 		}, | ||||
| 
 | ||||
| 		Timeouts: &schema.ResourceTimeout{ | ||||
| 			Create:  &constants.Timeout600s, | ||||
| 			Read:    &constants.Timeout300s, | ||||
| 			Update:  &constants.Timeout300s, | ||||
| 			Delete:  &constants.Timeout300s, | ||||
| 			Default: &constants.Timeout300s, | ||||
| 		}, | ||||
| 
 | ||||
| 		Schema: resourceDiskSnapshotSchemaMake(), | ||||
| 	} | ||||
| } | ||||
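Worth noting: Create does not issue a snapshot-create call; it only looks up an existing snapshot on the disk by label and, when rollback is true, invokes the rollback endpoint, while Delete removes the snapshot. A hedged usage sketch follows; the resource name decort_disk_snapshot and the values are assumptions.

resource "decort_disk_snapshot" "before_upgrade" {
  disk_id  = 20100            # hypothetical disk ID
  label    = "before-upgrade" # must match an existing snapshot label
  rollback = false            # set to true to roll the disk back to this snapshot
}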
| @@ -0,0 +1,62 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
| 	"net/url" | ||||
| 
 | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/controller" | ||||
| 	log "github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| func utilityDiskListTypesDetailedCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesDetailedList, error) { | ||||
| 	listTypesDetailed := TypesDetailedList{} | ||||
| 	c := m.(*controller.ControllerCfg) | ||||
| 	urlValues := &url.Values{} | ||||
| 	urlValues.Add("detailed", "true") | ||||
| 	log.Debugf("utilityDiskListTypesDetailedCheckPresence: load disk list Types Detailed") | ||||
| 	diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	err = json.Unmarshal([]byte(diskListRaw), &listTypesDetailed) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return listTypesDetailed, nil | ||||
| } | ||||
| @@ -0,0 +1,62 @@ | ||||
| /* | ||||
| Copyright (c) 2019-2022 Digital Energy Cloud Solutions LLC. All Rights Reserved. | ||||
| Authors: | ||||
| Petr Krutov, <petr.krutov@digitalenergy.online> | ||||
| Stanislav Solovev, <spsolovev@digitalenergy.online> | ||||
| Kasim Baybikov, <kmbaybikov@basistech.ru> | ||||
| 
 | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
| 
 | ||||
|     http://www.apache.org/licenses/LICENSE-2.0
 | ||||
| 
 | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
| 
 | ||||
| /* | ||||
| Terraform DECORT provider - manage resources provided by DECORT (Digital Energy Cloud | ||||
| Orchestration Technology) with Terraform by Hashicorp. | ||||
| 
 | ||||
| Source code: https://github.com/rudecs/terraform-provider-decort
 | ||||
| 
 | ||||
| Please see README.md to learn where to place source code so that it | ||||
| builds seamlessly. | ||||
| 
 | ||||
| Documentation: https://github.com/rudecs/terraform-provider-decort/wiki
 | ||||
| */ | ||||
| 
 | ||||
| package disks | ||||
| 
 | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/json" | ||||
| 	"net/url" | ||||
| 
 | ||||
| 	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" | ||||
| 	"github.com/rudecs/terraform-provider-decort/internal/controller" | ||||
| 	log "github.com/sirupsen/logrus" | ||||
| ) | ||||
| 
 | ||||
| func utilityDiskListTypesCheckPresence(ctx context.Context, d *schema.ResourceData, m interface{}) (TypesList, error) { | ||||
| 	typesList := TypesList{} | ||||
| 	c := m.(*controller.ControllerCfg) | ||||
| 	urlValues := &url.Values{} | ||||
| 	urlValues.Add("detailed", "false") | ||||
| 	log.Debugf("utilityDiskListTypesCheckPresence: load disk types list") | ||||
| 	diskListRaw, err := c.DecortAPICall(ctx, "POST", disksListTypesAPI, urlValues) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	err = json.Unmarshal([]byte(diskListRaw), &typesList) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 
 | ||||
| 	return typesList, nil | ||||
| } | ||||
| @@ -0,0 +1,32 @@ | ||||
| package status | ||||
| 
 | ||||
| type Status = string | ||||
| 
 | ||||
| var ( | ||||
| 	// The disk is attached to a Compute | ||||
| 	Assigned Status = "ASSIGNED" | ||||
| 
 | ||||
| 	//An object model has been created in the database
 | ||||
| 	Modeled Status = "MODELED" | ||||
| 
 | ||||
| 	//In the process of creation
 | ||||
| 	Creating Status = "CREATING" | ||||
| 
 | ||||
| 	// The object has been created | ||||
| 	Created Status = "CREATED" | ||||
| 
 | ||||
| 	//Physical resources are allocated for the object
 | ||||
| 	Allocated Status = "ALLOCATED" | ||||
| 
 | ||||
| 	//The object has released (returned to the platform) the physical resources that it occupied
 | ||||
| 	Unallocated Status = "UNALLOCATED" | ||||
| 
 | ||||
| 	//Permanently deleted
 | ||||
| 	Destroyed Status = "DESTROYED" | ||||
| 
 | ||||
| 	//Deleted to Trash
 | ||||
| 	Deleted Status = "DELETED" | ||||
| 
 | ||||
| 	//Deleted from storage
 | ||||
| 	Purged Status = "PURGED" | ||||
| ) | ||||
| @@ -0,0 +1,9 @@ | ||||
| terraform { | ||||
|   required_providers { | ||||
|     decort = { | ||||
|       source  = "digitalenergy.online/decort/decort" | ||||
|       version = "3.1.1" | ||||
|     } | ||||
|   } | ||||
| } | ||||
| 
 | ||||