
Commit 37d584c

Synchronization with 8.2 LINSTOR before a stable release
Signed-off-by: Ronan Abhamon <[email protected]>

1 parent 006a372 commit 37d584c

9 files changed: 498 additions, 200 deletions

dev_requirements.txt

Lines changed: 1 addition & 0 deletions
@@ -2,3 +2,4 @@ coverage
 astroid==2.3.3
 pylint==2.4.4
 bitarray
+python-linstor

drivers/LinstorSR.py

Lines changed: 37 additions & 58 deletions
@@ -362,9 +362,6 @@ def load(self, sr_uuid):
         self._linstor = None  # Ensure that LINSTOR attribute exists.
         self._journaler = None
 
-        self._is_master = False
-        if 'SRmaster' in self.dconf and self.dconf['SRmaster'] == 'true':
-            self._is_master = True
         self._group_name = self.dconf['group-name']
 
         self._vdi_shared_time = 0
@@ -437,7 +434,7 @@ def connect():
 
             return wrapped_method(self, *args, **kwargs)
 
-        if not self._is_master:
+        if not self.is_master():
             if self.cmd in [
                 'sr_create', 'sr_delete', 'sr_update', 'sr_probe',
                 'sr_scan', 'vdi_create', 'vdi_delete', 'vdi_resize',
@@ -472,7 +469,7 @@ def connect():
 
         # Ensure we use a non-locked volume when vhdutil is called.
         if (
-            self._is_master and self.cmd.startswith('vdi_') and
+            self.is_master() and self.cmd.startswith('vdi_') and
             self.cmd != 'vdi_create'
         ):
             self._linstor.ensure_volume_is_not_locked(
@@ -487,7 +484,7 @@ def connect():
         #
         # If the command is a SR command we want at least to remove
         # resourceless volumes.
-        if self._is_master and self.cmd not in [
+        if self.is_master() and self.cmd not in [
             'vdi_attach', 'vdi_detach',
             'vdi_activate', 'vdi_deactivate',
             'vdi_epoch_begin', 'vdi_epoch_end',
@@ -650,17 +647,17 @@ def delete(self, uuid):
                 opterr='Cannot get controller node name'
             )
 
-        host = None
+        host_ref = None
         if node_name == 'localhost':
-            host = util.get_this_host_ref(self.session)
+            host_ref = util.get_this_host_ref(self.session)
         else:
             for slave in util.get_all_slaves(self.session):
                 r_name = self.session.xenapi.host.get_record(slave)['hostname']
                 if r_name == node_name:
-                    host = slave
+                    host_ref = slave
                     break
 
-        if not host:
+        if not host_ref:
             raise xs_errors.XenError(
                 'LinstorSRDelete',
                 opterr='Failed to find host with hostname: {}'.format(
@@ -677,7 +674,7 @@ def delete(self, uuid):
                 'groupName': self._group_name,
             }
             self._exec_manager_command(
-                host, 'destroy', args, 'LinstorSRDelete'
+                host_ref, 'destroy', args, 'LinstorSRDelete'
             )
         except Exception as e:
             try:
@@ -766,22 +763,31 @@ def scan(self, uuid):
         # is started without a shared and mounted /var/lib/linstor path.
         try:
             self._linstor.get_database_path()
-        except Exception:
+        except Exception as e:
             # Failed to get database path, ensure we don't have
             # VDIs in the XAPI database...
             if self.session.xenapi.SR.get_VDIs(
                 self.session.xenapi.SR.get_by_uuid(self.uuid)
             ):
                 raise xs_errors.XenError(
                     'SRUnavailable',
-                    opterr='Database is not mounted'
+                    opterr='Database is not mounted or node name is invalid ({})'.format(e)
                 )
 
         # Update the database before the restart of the GC to avoid
         # bad sync in the process if new VDIs have been introduced.
         super(LinstorSR, self).scan(self.uuid)
         self._kick_gc()
 
+    def is_master(self):
+        if not hasattr(self, '_is_master'):
+            if 'SRmaster' not in self.dconf:
+                self._is_master = self.session is not None and util.is_master(self.session)
+            else:
+                self._is_master = self.dconf['SRmaster'] == 'true'
+
+        return self._is_master
+
     @_locked_load
     def vdi(self, uuid):
         return LinstorVDI(self, uuid)
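
The new is_master() helper replaces the _is_master flag that load() used to read eagerly from dconf: the result is now computed on first use and cached, and when 'SRmaster' is absent from dconf it falls back to asking XAPI through the session. A minimal standalone sketch of that caching pattern follows (not part of the commit; dconf and pool_says_master are illustrative stand-ins for self.dconf and util.is_master):

class MasterFlagSketch:
    """Mimic the lazily cached is_master() check added above."""

    def __init__(self, dconf, session=None, pool_says_master=lambda session: False):
        self.dconf = dconf
        self.session = session
        self._pool_says_master = pool_says_master

    def is_master(self):
        # Resolve once, cache the result on the instance, reuse afterwards.
        if not hasattr(self, '_is_master'):
            if 'SRmaster' not in self.dconf:
                # No explicit flag: ask the pool, which requires a session.
                self._is_master = (
                    self.session is not None and self._pool_says_master(self.session)
                )
            else:
                self._is_master = self.dconf['SRmaster'] == 'true'
        return self._is_master

assert MasterFlagSketch({'SRmaster': 'true'}).is_master()
assert not MasterFlagSketch({}).is_master()  # no flag and no session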
@@ -967,7 +973,7 @@ def _synchronize_metadata_and_xapi(self):
         )
 
     def _synchronize_metadata(self):
-        if not self._is_master:
+        if not self.is_master():
             return
 
         util.SMlog('Synchronize metadata...')
@@ -1014,7 +1020,7 @@ def _load_vdis(self):
         if self._vdis_loaded:
             return
 
-        assert self._is_master
+        assert self.is_master()
 
         # We use a cache to avoid repeated JSON parsing.
         # The performance gain is not big but we can still
@@ -1492,7 +1498,7 @@ def _reconnect(self):
             controller_uri,
             self._group_name,
             repair=(
-                self._is_master and
+                self.is_master() and
                 self.srcmd.cmd in self.ops_exclusive
             ),
             logger=util.SMlog
@@ -1660,8 +1666,11 @@ def create(self, sr_uuid, vdi_uuid, size):
             volume_name = REDO_LOG_VOLUME_NAME
 
         self._linstor.create_volume(
-            self.uuid, volume_size, persistent=False,
-            volume_name=volume_name
+            self.uuid,
+            volume_size,
+            persistent=False,
+            volume_name=volume_name,
+            high_availability=volume_name is not None
         )
         volume_info = self._linstor.get_volume_info(self.uuid)
 
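The only behavioural change in this hunk is the new high_availability argument: it is true exactly when volume_name was set, which only happens for the specially named volumes (HA statefile and redo log) handled just above. A trivial illustration of that derivation (the constant value below is illustrative, not taken from the driver):

REDO_LOG_VOLUME_NAME = 'redo-log'  # illustrative value only

def wants_high_availability(volume_name):
    # Only volumes created with an explicit name request HA placement.
    return volume_name is not None

assert wants_high_availability(REDO_LOG_VOLUME_NAME)
assert not wants_high_availability(None)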

@@ -1792,6 +1801,7 @@ def attach(self, sr_uuid, vdi_uuid):
         writable = 'args' not in self.sr.srcmd.params or \
             self.sr.srcmd.params['args'][0] == 'true'
 
+        if not attach_from_config or self.sr.is_master():
             # We need to inflate the volume if we don't have enough place
             # to mount the VHD image. I.e. the volume capacity must be greater
             # than the VHD size + bitmap size.
@@ -1825,7 +1835,7 @@ def attach(self, sr_uuid, vdi_uuid):
             return self._attach_using_http_nbd()
 
         # Ensure we have a path...
-        self._create_chain_paths(self.uuid)
+        self.sr._vhdutil.create_chain_paths(self.uuid, readonly=not writable)
 
         self.attached = True
         return VDI.VDI.attach(self, self.sr.uuid, self.uuid)
@@ -1873,7 +1883,7 @@ def detach(self, sr_uuid, vdi_uuid):
             )
 
         # We remove only on slaves because the volume can be used by the GC.
-        if self.sr._is_master:
+        if self.sr.is_master():
             return
 
         while vdi_uuid:
@@ -1894,7 +1904,7 @@ def detach(self, sr_uuid, vdi_uuid):
 
     def resize(self, sr_uuid, vdi_uuid, size):
         util.SMlog('LinstorVDI.resize for {}'.format(self.uuid))
-        if not self.sr._is_master:
+        if not self.sr.is_master():
             raise xs_errors.XenError(
                 'VDISize',
                 opterr='resize on slave not allowed'
@@ -2153,7 +2163,7 @@ def update(self, sr_uuid, vdi_uuid):
     # --------------------------------------------------------------------------
 
     def _prepare_thin(self, attach):
-        if self.sr._is_master:
+        if self.sr.is_master():
             if attach:
                 attach_thin(
                     self.session, self.sr._journaler, self._linstor,
@@ -2352,7 +2362,7 @@ def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
             raise xs_errors.XenError('SnapshotChainTooLong')
 
         # Ensure we have a valid path if we don't have a local diskful.
-        self._create_chain_paths(self.uuid)
+        self.sr._vhdutil.create_chain_paths(self.uuid, readonly=True)
 
         volume_path = self.path
         if not util.pathexists(volume_path):
@@ -2499,10 +2509,10 @@ def _snapshot(self, snap_type, cbtlog=None, cbt_consistency=None):
                     active_uuid, clone_info, force_undo=True
                 )
                 self.sr._journaler.remove(LinstorJournaler.CLONE, active_uuid)
-            except Exception as e:
+            except Exception as clean_error:
                 util.SMlog(
                     'WARNING: Failed to clean up failed snapshot: {}'
-                    .format(e)
+                    .format(clean_error)
                 )
             raise xs_errors.XenError('VDIClone', opterr=str(e))
 
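Renaming the inner exception variable is a real fix, not cosmetics: the cleanup handler used to rebind e, so the final str(e) reported the cleanup failure instead of the original snapshot error, and under Python 3 the inner except block would even delete e and turn the last line into a NameError. A small sketch of the shadowing problem, independent of the driver:

def report_original_error():
    try:
        raise ValueError('original snapshot failure')
    except Exception as e:
        try:
            raise RuntimeError('cleanup also failed')
        except Exception as clean_error:
            # A distinct name keeps `e` bound to the original failure.
            print('WARNING: cleanup failed: {}'.format(clean_error))
        return str(e)

assert report_original_error() == 'original snapshot failure'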

@@ -2739,7 +2749,7 @@ def _attach_using_http_nbd(self):
 
         # 0. Fetch drbd path.
         must_get_device_path = True
-        if not self.sr._is_master:
+        if not self.sr.is_master():
            # We are on a slave, we must try to find a diskful locally.
            try:
                volume_info = self._linstor.get_volume_info(self.uuid)
@@ -2754,7 +2764,7 @@ def _attach_using_http_nbd(self):
             must_get_device_path = hostname in volume_info.diskful
 
         drbd_path = None
-        if must_get_device_path or self.sr._is_master:
+        if must_get_device_path or self.sr.is_master():
             # If we are master, we must ensure we have a diskless
             # or diskful available to init HA.
             # It also avoid this error in xensource.log
@@ -2812,37 +2822,6 @@ def _detach_using_http_nbd(self):
         self._kill_persistent_nbd_server(volume_name)
         self._kill_persistent_http_server(volume_name)
 
-    def _create_chain_paths(self, vdi_uuid):
-        # OPTIMIZE: Add a limit_to_first_allocated_block param to limit vhdutil calls.
-        # Useful for the snapshot code algorithm.
-
-        while vdi_uuid:
-            path = self._linstor.get_device_path(vdi_uuid)
-            if not util.pathexists(path):
-                raise xs_errors.XenError(
-                    'VDIUnavailable', opterr='Could not find: {}'.format(path)
-                )
-
-            # Diskless path can be created on the fly, ensure we can open it.
-            def check_volume_usable():
-                while True:
-                    try:
-                        with open(path, 'r+'):
-                            pass
-                    except IOError as e:
-                        if e.errno == errno.ENODATA:
-                            time.sleep(2)
-                            continue
-                        if e.errno == errno.EROFS:
-                            util.SMlog('Volume not attachable because RO. Openers: {}'.format(
-                                self.sr._linstor.get_volume_openers(vdi_uuid)
-                            ))
-                        raise
-                    break
-            util.retry(check_volume_usable, 15, 2)
-
-            vdi_uuid = self.sr._vhdutil.get_vhd_info(vdi_uuid).parentUuid
-
 # ------------------------------------------------------------------------------
 
 
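The per-VDI _create_chain_paths() helper removed here is superseded by self.sr._vhdutil.create_chain_paths(vdi_uuid, readonly=...), called from attach() and _snapshot() above; its new implementation lives in the vhdutil wrapper and is not part of this diff. A rough sketch of what the relocated helper is expected to do, reconstructed from the removed method and assuming the readonly flag selects read-only versus read-write probing:

import errno
import os
import time

def create_chain_paths(linstor, vhdutil, vdi_uuid, readonly=False):
    """Walk the VHD chain and make sure every device path exists and opens."""
    while vdi_uuid:
        path = linstor.get_device_path(vdi_uuid)
        if not os.path.exists(path):
            raise Exception('Could not find: {}'.format(path))

        # Diskless paths can be created on the fly; retry until openable.
        mode = 'r' if readonly else 'r+'
        for _ in range(15):
            try:
                with open(path, mode):
                    pass
                break
            except IOError as e:
                if e.errno == errno.ENODATA:
                    time.sleep(2)
                    continue
                # The removed method also logged the volume openers on EROFS.
                raise

        vdi_uuid = vhdutil.get_vhd_info(vdi_uuid).parentUuid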
