Skip to content

Commit 2fd9d96

Browse files
authored
Merge pull request #9239 from tangledbytes/utkarsh/backport/5.20/dbs3/1
[Backport 5.20 | DBS3] PR 9142, PR 9236
2 parents 0cce309 + 0e73144 commit 2fd9d96

File tree

4 files changed

+220
-20
lines changed

4 files changed

+220
-20
lines changed

src/sdk/glacier.js

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -300,6 +300,32 @@ class Glacier {
300300
return restore_status.state === Glacier.RESTORE_STATUS_CAN_RESTORE;
301301
}
302302

303+
/**
304+
* encode_log takes in data intended for the backend and encodes
305+
* it.
306+
*
307+
* This method must be overwritten for all the backends if they need
308+
* different encodings for their logs.
309+
* @param {string} data
310+
* @returns {string}
311+
*/
312+
encode_log(data) {
313+
return data;
314+
}
315+
316+
/**
317+
* decode_log takes in data intended for the backend and decodes
318+
* it.
319+
*
320+
* This method must be overwritten for all the backends if they need
321+
* different encodings for their logs.
322+
* @param {string} data
323+
* @returns {string}
324+
*/
325+
decode_log(data) {
326+
return data;
327+
}
328+
303329
/**
304330
* get_restore_status returns status of the object at the given
305331
* file_path

src/sdk/glacier_tapecloud.js

Lines changed: 68 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -191,6 +191,8 @@ class TapeCloudUtils {
191191
}
192192

193193
class TapeCloudGlacier extends Glacier {
194+
static LOG_DELIM = ' -- ';
195+
194196
/**
195197
* @param {nb.NativeFSContext} fs_context
196198
* @param {LogFile} log_file
@@ -200,8 +202,14 @@ class TapeCloudGlacier extends Glacier {
200202
async stage_migrate(fs_context, log_file, failure_recorder) {
201203
dbg.log2('TapeCloudGlacier.stage_migrate starting for', log_file.log_path);
202204

205+
// Wrap failure recorder to make sure we correctly encode the entries
206+
// before appending them to the failure log
207+
const encoded_failure_recorder = async failure => failure_recorder(this.encode_log(failure));
208+
203209
try {
204210
await log_file.collect(Glacier.MIGRATE_STAGE_WAL_NAME, async (entry, batch_recorder) => {
211+
entry = this.decode_log(entry);
212+
205213
let entry_fh;
206214
let should_migrate = true;
207215
try {
@@ -230,7 +238,7 @@ class TapeCloudGlacier extends Glacier {
230238
// Can't really do anything if this fails - provider
231239
// needs to make sure that appropriate error handling
232240
// is being done there
233-
await failure_recorder(entry);
241+
await encoded_failure_recorder(entry);
234242
return;
235243
}
236244

@@ -240,14 +248,14 @@ class TapeCloudGlacier extends Glacier {
240248
// Mark the file staged
241249
try {
242250
await entry_fh.replacexattr(fs_context, { [Glacier.XATTR_STAGE_MIGRATE]: Date.now().toString() });
243-
await batch_recorder(entry);
251+
await batch_recorder(this.encode_log(entry));
244252
} catch (error) {
245253
dbg.error('failed to mark the entry migrate staged', error);
246254

247255
// Can't really do anything if this fails - provider
248256
// needs to make sure that appropriate error handling
249257
// is being done there
250-
await failure_recorder(entry);
258+
await encoded_failure_recorder(entry);
251259
} finally {
252260
entry_fh?.close(fs_context);
253261
}
@@ -268,16 +276,23 @@ class TapeCloudGlacier extends Glacier {
268276
*/
269277
async migrate(fs_context, log_file, failure_recorder) {
270278
dbg.log2('TapeCloudGlacier.migrate starting for', log_file.log_path);
279+
280+
// Wrap failure recorder to make sure we correctly encode the entries
281+
// before appending them to the failure log
282+
const encoded_failure_recorder = async failure => failure_recorder(this.encode_log(failure));
283+
271284
try {
272285
// This will throw error only if our eeadm error handler
273286
// panics as well and at that point it's okay to
274287
// not handle the error and rather keep the log file around
275-
await this._migrate(log_file.log_path, failure_recorder);
288+
await this._migrate(log_file.log_path, encoded_failure_recorder);
276289

277290
// Un-stage all the files - We don't need to deal with the cases
278291
// where some files have migrated and some have not as that is
279292
// not important for staging/un-staging.
280293
await log_file.collect_and_process(async entry => {
294+
entry = this.decode_log(entry);
295+
281296
let fh;
282297
try {
283298
fh = await nb_native().fs.open(fs_context, entry);
@@ -293,7 +308,7 @@ class TapeCloudGlacier extends Glacier {
293308
// Add the entry to the failure log - This could be wasteful as it might
294309
// add entries which have already been migrated but this is a better
295310
// retry.
296-
await failure_recorder(entry);
311+
await encoded_failure_recorder(entry);
297312
} finally {
298313
await fh?.close(fs_context);
299314
}
@@ -315,8 +330,14 @@ class TapeCloudGlacier extends Glacier {
315330
async stage_restore(fs_context, log_file, failure_recorder) {
316331
dbg.log2('TapeCloudGlacier.stage_restore starting for', log_file.log_path);
317332

333+
// Wrap failure recorder to make sure we correctly encode the entries
334+
// before appending them to the failure log
335+
const encoded_failure_recorder = async failure => failure_recorder(this.encode_log(failure));
336+
318337
try {
319338
await log_file.collect(Glacier.RESTORE_STAGE_WAL_NAME, async (entry, batch_recorder) => {
339+
entry = this.decode_log(entry);
340+
320341
let fh;
321342
try {
322343
fh = await nb_native().fs.open(fs_context, entry);
@@ -343,9 +364,9 @@ class TapeCloudGlacier extends Glacier {
343364
// 3. If we read corrupt value then either the file is getting staged or is
344365
// getting un-staged - In either case we must requeue.
345366
if (stat.xattr[Glacier.XATTR_STAGE_MIGRATE]) {
346-
await failure_recorder(entry);
367+
await encoded_failure_recorder(entry);
347368
} else {
348-
await batch_recorder(entry);
369+
await batch_recorder(this.encode_log(entry));
349370
}
350371
} catch (error) {
351372
if (error.code === 'ENOENT') {
@@ -357,7 +378,7 @@ class TapeCloudGlacier extends Glacier {
357378
'adding log entry', entry,
358379
'to failure recorder due to error', error,
359380
);
360-
await failure_recorder(entry);
381+
await encoded_failure_recorder(entry);
361382
} finally {
362383
await fh?.close(fs_context);
363384
}
@@ -379,25 +400,32 @@ class TapeCloudGlacier extends Glacier {
379400
async restore(fs_context, log_file, failure_recorder) {
380401
dbg.log2('TapeCloudGlacier.restore starting for', log_file.log_path);
381402

403+
// Wrap failure recorder to make sure we correctly encode the entries
404+
// before appending them to the failure log
405+
const encoded_failure_recorder = async failure => failure_recorder(this.encode_log(failure));
406+
382407
try {
383408
const success = await this._recall(
384409
log_file.log_path,
385410
async entry_path => {
411+
entry_path = this.decode_log(entry_path);
386412
dbg.log2('TapeCloudGlacier.restore.partial_failure - entry:', entry_path);
387-
await failure_recorder(entry_path);
413+
await encoded_failure_recorder(entry_path);
388414
},
389415
async entry_path => {
416+
entry_path = this.decode_log(entry_path);
390417
dbg.log2('TapeCloudGlacier.restore.partial_success - entry:', entry_path);
391-
await this._finalize_restore(fs_context, entry_path, failure_recorder);
418+
await this._finalize_restore(fs_context, entry_path, encoded_failure_recorder);
392419
}
393420
);
394421

395422
// We will iterate through the entire log file iff we get a success message from
396423
// the recall call.
397424
if (success) {
398425
await log_file.collect_and_process(async (entry_path, batch_recorder) => {
426+
entry_path = this.decode_log(entry_path);
399427
dbg.log2('TapeCloudGlacier.restore.batch - entry:', entry_path);
400-
await this._finalize_restore(fs_context, entry_path, failure_recorder);
428+
await this._finalize_restore(fs_context, entry_path, encoded_failure_recorder);
401429
});
402430
}
403431

@@ -421,6 +449,32 @@ class TapeCloudGlacier extends Glacier {
421449
return result.toLowerCase().trim() === 'true';
422450
}
423451

452+
/**
453+
* encode_log takes string of data and escapes all the backslash and newline
454+
* characters
455+
* @example
456+
* // /Users/noobaa/data/buc/obj\nfile => /Users/noobaa/data/buc/obj\\nfile
457+
* // /Users/noobaa/data/buc/obj\file => /Users/noobaa/data/buc/obj\\file
458+
* @param {string} data
459+
* @returns {string}
460+
*/
461+
encode_log(data) {
462+
const encoded = data.replace(/\\/g, '\\\\').replace(/\n/g, '\\n');
463+
return `${TapeCloudGlacier.LOG_DELIM}${encoded}`;
464+
}
465+
466+
/**
467+
*
468+
* @param {string} data
469+
* @returns {string}
470+
*/
471+
decode_log(data) {
472+
if (!data.startsWith(TapeCloudGlacier.LOG_DELIM)) return data;
473+
return data.substring(TapeCloudGlacier.LOG_DELIM.length)
474+
.replace(/\\n/g, '\n')
475+
.replace(/\\\\/g, '\\');
476+
}
477+
424478
// ============= PRIVATE FUNCTIONS =============
425479

426480
/**
@@ -482,11 +536,9 @@ class TapeCloudGlacier extends Glacier {
482536
throw error;
483537
}
484538

485-
const xattr_get_keys = [Glacier.XATTR_RESTORE_REQUEST];
486-
if (fs_context.use_dmapi) {
487-
xattr_get_keys.push(Glacier.GPFS_DMAPI_XATTR_TAPE_PREMIG);
488-
}
489-
const stat = await fh.stat(fs_context, { xattr_get_keys });
539+
// stat will by default read GPFS_DMAPI_XATTR_TAPE_PREMIG and
540+
// user.noobaa.restore.request
541+
const stat = await fh.stat(fs_context, {});
490542

491543
// This is a hacky solution and would work only if
492544
// config.NSFS_GLACIER_DMAPI_ENABLE is enabled. This prevents

src/sdk/namespace_fs.js

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3702,13 +3702,13 @@ class NamespaceFS {
37023702
async append_to_migrate_wal(entry) {
37033703
if (!config.NSFS_GLACIER_LOGS_ENABLED) return;
37043704

3705-
await NamespaceFS.migrate_wal.append(entry);
3705+
await NamespaceFS.migrate_wal.append(Glacier.getBackend().encode_log(entry));
37063706
}
37073707

37083708
async append_to_restore_wal(entry) {
37093709
if (!config.NSFS_GLACIER_LOGS_ENABLED) return;
37103710

3711-
await NamespaceFS.restore_wal.append(entry);
3711+
await NamespaceFS.restore_wal.append(Glacier.getBackend().encode_log(entry));
37123712
}
37133713

37143714
static get migrate_wal() {

0 commit comments

Comments
 (0)