This commit is contained in:
2025-11-14 12:44:02 +03:00
parent 06336697a6
commit becbe65993
16 changed files with 3050 additions and 537 deletions

View File

@@ -166,34 +166,36 @@ class decort_k8s(DecortController):
if wg[param] is None:
wg[param] = default_value
k8s_id = self.k8s_provision(self.amodule.params['name'],
self.amodule.params['k8ci_id'],
self.amodule.params['rg_id'],
self.amodule.params['vins_id'],
self.amodule.params['network_plugin'],
self.amodule.params['master_count'],
self.amodule.params['master_cpu'],
self.amodule.params['master_ram'],
self.amodule.params['master_disk'],
self.amodule.params['master_sepid'],
self.amodule.params['master_pool'],
target_wgs[0],
self.amodule.params['extnet_id'],
self.amodule.params['with_lb'],
self.amodule.params['ha_lb'],
self.amodule.params['additionalSANs'],
self.amodule.params['init_conf'],
self.amodule.params['cluster_conf'],
self.amodule.params['kublet_conf'],
self.amodule.params['kubeproxy_conf'],
self.amodule.params['join_conf'],
self.amodule.params['oidc_cert'],
self.amodule.params['description'],
self.amodule.params['extnet_only'],
master_chipset,
lb_sysctl=self.amodule.params['lb_sysctl'],
zone_id=self.aparams['zone_id'],
)
k8s_id = self.k8s_provision(
self.amodule.params['name'],
self.amodule.params['k8ci_id'],
self.amodule.params['rg_id'],
self.amodule.params['vins_id'],
self.amodule.params['network_plugin'],
self.amodule.params['master_count'],
self.amodule.params['master_cpu'],
self.amodule.params['master_ram'],
self.amodule.params['master_disk'],
self.amodule.params['master_sepid'],
self.amodule.params['master_pool'],
target_wgs[0],
self.amodule.params['extnet_id'],
self.amodule.params['with_lb'],
self.amodule.params['ha_lb'],
self.amodule.params['additionalSANs'],
self.amodule.params['init_conf'],
self.amodule.params['cluster_conf'],
self.amodule.params['kublet_conf'],
self.amodule.params['kubeproxy_conf'],
self.amodule.params['join_conf'],
self.amodule.params['oidc_cert'],
self.amodule.params['description'],
self.amodule.params['extnet_only'],
master_chipset=master_chipset,
lb_sysctl=self.amodule.params['lb_sysctl'],
zone_id=self.aparams['zone_id'],
storage_policy_id=self.aparams['storage_policy_id'],
)
if not k8s_id:
if k8s_id == 0:
@@ -241,6 +243,12 @@ class decort_k8s(DecortController):
)
self.exit(fail=True)
if (
self.aparams['name'] is not None
and self.aparams['name'] != self.k8s_info['name']
):
self.k8s_update(id=self.k8s_id, name=self.aparams['name'])
if preupdate:
# K8s info updating
self.k8s_info = self.k8s_get_by_id(k8s_id=self.k8s_id)
@@ -275,9 +283,6 @@ class decort_k8s(DecortController):
type='str',
default='',
),
quotas=dict(
type='dict',
),
state=dict(
type='str',
default='present',
@@ -448,6 +453,9 @@ class decort_k8s(DecortController):
zone_id=dict(
type='int',
),
storage_policy_id=dict(
type='int',
),
),
supports_check_mode=True,
required_one_of=[
@@ -499,6 +507,32 @@ class decort_k8s(DecortController):
'K8s cluster must be stopped to migrate to a zone.'
)
aparam_storage_policy_id = self.aparams['storage_policy_id']
if aparam_storage_policy_id is not None:
computes_ids = []
for master_node in self.k8s_info['k8sGroups']['masters'][
'detailedInfo'
]:
computes_ids.append(master_node['id'])
for wg in self.k8s_info['k8sGroups']['workers']:
workers_ids = [
worker['id'] for worker in wg['detailedInfo']
]
computes_ids.extend(workers_ids)
for compute_id in computes_ids:
_, compute_info, _ = self._compute_get_by_id(
comp_id=compute_id
)
for disk in compute_info['disks']:
if aparam_storage_policy_id != disk['storage_policy_id']:
check_errors = True
self.message(
msg='Check for parameter "storage_policy_id" '
'failed: storage_policy_id can not be changed '
f'for k8s cluster ID {self.k8s_id} compute ID '
                            f'{compute_id} disk ID {disk["id"]}'
)
if check_errors:
self.exit(fail=True)
@@ -540,6 +574,26 @@ class decort_k8s(DecortController):
if self.check_aparam_zone_id() is False:
check_errors = True
aparam_storage_policy_id = self.aparams['storage_policy_id']
if aparam_storage_policy_id is None:
check_errors = True
self.message(
msg='Check for parameter "storage_policy_id" failed: '
'storage_policy_id must be specified when creating '
'a new cluster'
)
elif (
aparam_storage_policy_id
not in self.rg_info['storage_policy_ids']
):
check_errors = True
self.message(
msg='Check for parameter "storage_policy_id" failed: '
f'RG ID {self.rg_id} does not have access to '
f'storage_policy_id {aparam_storage_policy_id}'
)
if check_errors:
self.exit(fail=True)