diff --git a/ocaml/libs/uuid/uuidx.ml b/ocaml/libs/uuid/uuidx.ml index de471da0e1b..01dbda46899 100644 --- a/ocaml/libs/uuid/uuidx.ml +++ b/ocaml/libs/uuid/uuidx.ml @@ -83,3 +83,15 @@ let string_of_uuid = to_string let uuid_of_int_array = of_int_array let int_array_of_uuid = to_int_array + +module Hash = struct + (** Derive a deterministic UUID from a string: the same + string maps to the same UUID. We are using our own namespace; the + namespace is not a secret *) + + let namespace = + let ns = "e93e0639-2bdb-4a59-8b46-352b3f408c19" in + Uuidm.(of_string ns |> Option.get) + + let string str = Uuidm.v5 namespace str +end diff --git a/ocaml/libs/uuid/uuidx.mli b/ocaml/libs/uuid/uuidx.mli index 57b4058b8ca..618235b4ae6 100644 --- a/ocaml/libs/uuid/uuidx.mli +++ b/ocaml/libs/uuid/uuidx.mli @@ -81,3 +81,11 @@ val make_cookie : unit -> cookie val cookie_of_string : string -> cookie val string_of_cookie : cookie -> string + +module Hash : sig + (** hash a string (deterministically) into a UUID. This uses + namespace UUID e93e0639-2bdb-4a59-8b46-352b3f408c19. *) + + (* UUID Version 5 derived from argument string and namespace UUID *) + val string : string -> 'a t +end diff --git a/ocaml/message-switch/cli/dune b/ocaml/message-switch/cli/dune index beb3741dc85..c0741e71348 100644 --- a/ocaml/message-switch/cli/dune +++ b/ocaml/message-switch/cli/dune @@ -5,6 +5,7 @@ cmdliner message-switch-core message-switch-unix + mtime rpclib.core rpclib.json threads.posix diff --git a/ocaml/message-switch/cli/main.ml b/ocaml/message-switch/cli/main.ml index 197061a17ea..19324a5a25f 100644 --- a/ocaml/message-switch/cli/main.ml +++ b/ocaml/message-switch/cli/main.ml @@ -76,23 +76,36 @@ let help = ; `P (Printf.sprintf "Check bug reports at %s" project_url) ] +(* Durations, in nanoseconds *) +let second = 1_000_000_000L + +let minute = 60_000_000_000L + +let hour = 3600_000_000_000L + +let day = 86400_000_000_000L + (* Commands *) let diagnostics common_opts = Client.connect ~switch:common_opts.Common.path () >>|= fun t -> Client.diagnostics ~t () >>|= fun d -> let open Message_switch_core.Protocol in - let in_the_past = Int64.sub d.Diagnostics.current_time in + let in_the_past ts = + if d.Diagnostics.current_time < ts then + 0L + else + Int64.sub d.Diagnostics.current_time ts + in let time f x = - let open Int64 in - let secs = div (f x) 1_000_000_000L in - let secs' = rem secs 60L in - let mins = div secs 60L in - let mins' = rem mins 60L in - let hours = div mins 60L in - let hours' = rem hours 24L in - let days = div hours 24L in - let fragment name = function + let timespan = f x in + let ( // ) = Int64.div in + let ( %% ) = Int64.rem in + let secs = timespan %% minute // second in + let mins = timespan %% hour // minute in + let hours = timespan %% day // hour in + let days = timespan // day in + let format name = function | 0L -> [] | 1L -> @@ -101,11 +114,10 @@ let diagnostics common_opts = [Printf.sprintf "%Ld %ss" n name] in let bits = - fragment "day" days - @ fragment "hour" hours' - @ fragment "min" mins' - @ fragment "second" secs' - @ [] + format "day" days + @ format "hour" hours + @ format "min" mins + @ format "second" secs in let length = List.length bits in let _, rev_bits = @@ -122,7 +134,16 @@ let diagnostics common_opts = ) (0, []) bits in - String.concat "" (List.rev rev_bits) ^ "ago" + let format_secs ts = + Mtime.Span.(Format.asprintf "%a " pp (of_uint64_ns ts)) + in + let timestrings = + if rev_bits = [] then + [format_secs (timespan %% minute)] + else + List.rev rev_bits + in + 
String.concat "" timestrings ^ "ago" in let origin = function | Anonymous id -> diff --git a/ocaml/quicktest/qt.ml b/ocaml/quicktest/qt.ml index 1764f12ce8f..d390f0dfc38 100644 --- a/ocaml/quicktest/qt.ml +++ b/ocaml/quicktest/qt.ml @@ -132,26 +132,32 @@ module VM = struct Some x end - let install rpc session_id ~template ~name = + let install rpc session_id ~template ~name ?sr () = let template_uuid = Client.Client.VM.get_uuid ~rpc ~session_id ~self:template in - let newvm_uuid = - cli_cmd - [ - "vm-install" - ; "template-uuid=" ^ template_uuid - ; "new-name-label=" ^ name - ] + let cmd = + ["vm-install"; "template-uuid=" ^ template_uuid; "new-name-label=" ^ name] in + let sr_uuid = + Option.map + (fun sr -> Client.Client.SR.get_uuid ~rpc ~session_id ~self:sr) + sr + in + let cmd = + cmd @ Option.fold ~none:[] ~some:(fun x -> ["sr-uuid=" ^ x]) sr_uuid + in + let newvm_uuid = cli_cmd cmd in Client.Client.VM.get_by_uuid ~rpc ~session_id ~uuid:newvm_uuid let uninstall rpc session_id vm = let uuid = Client.Client.VM.get_uuid ~rpc ~session_id ~self:vm in cli_cmd ["vm-uninstall"; "uuid=" ^ uuid; "--force"] |> ignore - let with_new rpc session_id ~template f = - let vm = install rpc session_id ~template ~name:"temp_quicktest_vm" in + let with_new rpc session_id ~template ?sr f = + let vm = + install rpc session_id ~template ~name:"temp_quicktest_vm" ?sr () + in Xapi_stdext_pervasives.Pervasiveext.finally (fun () -> f vm) (fun () -> uninstall rpc session_id vm) diff --git a/ocaml/quicktest/qt.mli b/ocaml/quicktest/qt.mli index f0edde13a56..15dbb785f28 100644 --- a/ocaml/quicktest/qt.mli +++ b/ocaml/quicktest/qt.mli @@ -50,7 +50,12 @@ module VM : sig end val with_new : - rpc -> API.ref_session -> template:API.ref_VM -> (API.ref_VM -> 'a) -> 'a + rpc + -> API.ref_session + -> template:API.ref_VM + -> ?sr:API.ref_SR + -> (API.ref_VM -> 'a) + -> 'a val dom0_of_host : rpc -> API.ref_session -> API.ref_host -> API.ref_VM (** Return a host's domain zero *) diff --git a/ocaml/quicktest/quicktest_vm_lifecycle.ml b/ocaml/quicktest/quicktest_vm_lifecycle.ml index 88fd9b8d664..b3de6b5b309 100644 --- a/ocaml/quicktest/quicktest_vm_lifecycle.ml +++ b/ocaml/quicktest/quicktest_vm_lifecycle.ml @@ -91,12 +91,18 @@ let one rpc session_id vm test = | Halted -> wait_for_domid (fun domid' -> domid' = -1L) -let test rpc session_id vm_template () = - Qt.VM.with_new rpc session_id ~template:vm_template (fun vm -> +let test rpc session_id sr_info vm_template () = + let sr = sr_info.Qt.sr in + Qt.VM.with_new rpc session_id ~template:vm_template ~sr (fun vm -> List.iter (one rpc session_id vm) all_possible_tests ) let tests () = let open Qt_filter in - [[("VM lifecycle tests", `Slow, test)] |> conn |> vm_template "CoreOS"] + [ + [("VM lifecycle tests", `Slow, test)] + |> conn + |> sr SR.(all |> allowed_operations [`vdi_create]) + |> vm_template "CoreOS" + ] |> List.concat diff --git a/ocaml/xapi/sm.ml b/ocaml/xapi/sm.ml index 2526aa79b6c..0ae899f63f0 100644 --- a/ocaml/xapi/sm.ml +++ b/ocaml/xapi/sm.ml @@ -152,7 +152,7 @@ let sr_update ~dbg dconf driver sr = let call = Sm_exec.make_call ~sr_ref:sr dconf "sr_update" [] in Sm_exec.parse_unit (Sm_exec.exec_xmlrpc ~dbg (driver_filename driver) call) -let vdi_create ~dbg dconf driver sr sm_config vdi_type size name_label +let vdi_create ~dbg ?vdi_uuid dconf driver sr sm_config vdi_type size name_label name_description metadata_of_pool is_a_snapshot snapshot_time snapshot_of read_only = with_dbg ~dbg ~name:"vdi_create" @@ fun di -> @@ -164,8 +164,8 @@ let vdi_create ~dbg dconf 
driver sr sm_config vdi_type size name_label ) ; srmaster_only dconf ; let call = - Sm_exec.make_call ~sr_ref:sr ~vdi_sm_config:sm_config ~vdi_type dconf - "vdi_create" + Sm_exec.make_call ?vdi_uuid ~sr_ref:sr ~vdi_sm_config:sm_config ~vdi_type + dconf "vdi_create" [ sprintf "%Lu" size ; name_label diff --git a/ocaml/xapi/sm_exec.ml b/ocaml/xapi/sm_exec.ml index a6d0d231ee2..a55b61d72f9 100644 --- a/ocaml/xapi/sm_exec.ml +++ b/ocaml/xapi/sm_exec.ml @@ -69,8 +69,8 @@ type call = { } let make_call ?driver_params ?sr_sm_config ?vdi_sm_config ?vdi_type - ?vdi_location ?new_uuid ?sr_ref ?vdi_ref (subtask_of, device_config) cmd - args = + ?vdi_location ?new_uuid ?sr_ref ?vdi_ref ?vdi_uuid + (subtask_of, device_config) cmd args = Server_helpers.exec_with_new_task "sm_exec" (fun __context -> (* Only allow a subset of calls if the SR has been introduced by a DR task. *) Option.iter @@ -117,7 +117,22 @@ let make_call ?driver_params ?sr_sm_config ?vdi_sm_config ?vdi_type Option.map (fun self -> Db.VDI.get_location ~__context ~self) vdi_ref in let vdi_uuid = - Option.map (fun self -> Db.VDI.get_uuid ~__context ~self) vdi_ref + match (cmd, vdi_ref, vdi_uuid) with + | "vdi_create", None, (Some x as uuid) -> + debug "%s: cmd=%s vdi_uuid=%s" __FUNCTION__ cmd x ; + uuid + (* when creating a VDI we sometimes want to provide the UUID + rather than letting the backend pick one. This is to + support backup VDIs CP-46179. So in that case, use the + provided UUID but not for other commands *) + | _, None, Some uuid -> + warn "%s: cmd=%s vdi_uuid=%s - should not happen" __FUNCTION__ cmd + uuid ; + None + | _, Some self, _ -> + Db.VDI.get_uuid ~__context ~self |> Option.some + | _, None, None -> + None in let vdi_on_boot = Option.map diff --git a/ocaml/xapi/storage_smapiv1.ml b/ocaml/xapi/storage_smapiv1.ml index 9ca3660eeb6..c7bdd772a28 100644 --- a/ocaml/xapi/storage_smapiv1.ml +++ b/ocaml/xapi/storage_smapiv1.ml @@ -691,19 +691,33 @@ module SMAPIv1 : Server_impl = struct let uuid = require_uuid vi in vdi_info_from_db ~__context (Db.VDI.get_by_uuid ~__context ~uuid) - let create _context ~dbg ~sr ~vdi_info = + let create _context ~dbg ~sr ~(vdi_info : Storage_interface.vdi_info) = with_dbg ~name:"VDI.create" ~dbg @@ fun di -> let dbg = Debuginfo.to_string di in try Server_helpers.exec_with_new_task "VDI.create" ~subtask_of:(Ref.of_string dbg) (fun __context -> - let sr = Db.SR.get_by_uuid ~__context ~uuid:(s_of_sr sr) in + let sr_uuid = s_of_sr sr in + let sr = Db.SR.get_by_uuid ~__context ~uuid:sr_uuid in let vi = + (* we want to set vdi_uuid when creating a backup VDI with + a specific UUID. SM picks up vdi_uuid instead of creating + a new random UUID; Cf. 
Xapi_vdi.create *) + let vdi_uuid = + match vdi_info.uuid with + | Some uuid when uuid = Uuidx.(Hash.string sr_uuid |> to_string) + -> + info "%s: creating a backup VDI %s" __FUNCTION__ uuid ; + vdi_info.uuid + | _ -> + None + in Sm.call_sm_functions ~__context ~sR:sr (fun device_config _type -> - Sm.vdi_create ~dbg device_config _type sr vdi_info.sm_config - vdi_info.ty vdi_info.virtual_size vdi_info.name_label - vdi_info.name_description vdi_info.metadata_of_pool - vdi_info.is_a_snapshot vdi_info.snapshot_time + Sm.vdi_create ~dbg ?vdi_uuid device_config _type sr + vdi_info.sm_config vdi_info.ty vdi_info.virtual_size + vdi_info.name_label vdi_info.name_description + vdi_info.metadata_of_pool vdi_info.is_a_snapshot + vdi_info.snapshot_time (s_of_vdi vdi_info.snapshot_of) vdi_info.read_only ) diff --git a/ocaml/xapi/xapi_vdi.ml b/ocaml/xapi/xapi_vdi.ml index ac989551b85..bc7bafb23d0 100644 --- a/ocaml/xapi/xapi_vdi.ml +++ b/ocaml/xapi/xapi_vdi.ml @@ -625,13 +625,27 @@ let create ~__context ~name_label ~name_description ~sR ~virtual_size ~_type | `cbt_metadata -> "cbt_metadata" in + (* special case: we want to use a specific UUID for Pool Meta Data + Backup *) + let uuid_ = + match (_type, name_label) with + | `user, "Pool Metadata Backup" -> + let sr = Db.SR.get_uuid ~__context ~self:sR in + let uuid = Uuidx.(Hash.string sr |> to_string) in + info "%s: using deterministic UUID for '%s' VDI: %s" __FUNCTION__ + name_label uuid ; + Some uuid + | _ -> + None + in let open Storage_access in let task = Context.get_task_id __context in let open Storage_interface in let vdi_info = { Storage_interface.default_vdi_info with - name_label + uuid= uuid_ + ; name_label ; name_description ; ty= vdi_type ; read_only diff --git a/scripts/plugins/perfmon b/scripts/plugins/perfmon index 2186c938938..e3dc2452691 100644 --- a/scripts/plugins/perfmon +++ b/scripts/plugins/perfmon @@ -14,16 +14,17 @@ def send_perfmon_cmd(cmd): "Return True for success, or ERROR_%d: otherwise" if len(cmd) >= cmdmaxlen: return "ERROR_0: command too long" + cmd_bytes = cmd.encode() try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) - rc = sock.sendto(cmd, cmdsockname) + rc = sock.sendto(cmd_bytes, cmdsockname) except socket.error as e: err, msg = e.args return "ERROR_%d: %s" % (err, msg) except Exception: return "ERROR_1: unknown error" - return str(rc == len(cmd)) + return str(rc == len(cmd_bytes)) def stop(session, args): diff --git a/scripts/xe-backup-metadata b/scripts/xe-backup-metadata index 4aa09b7f74c..47b21108b9d 100755 --- a/scripts/xe-backup-metadata +++ b/scripts/xe-backup-metadata @@ -39,6 +39,7 @@ function usage { echo " -k: Number of older backups to preserve (default: ${history_kept})" echo " -n: Just try to find a backup VDI and stop the script after that" echo " -f Force backup even when less than 10% free capacity is left on the backup VDI" + echo " -y: Assume non-interactive mode and yes to all questions" echo " -v: Verbose output" echo echo @@ -48,10 +49,33 @@ function usage { exit 1 } +function uuid5 { + # could use a modern uuidgen but it's not on XS 8 + # should work with Python 2 and 3 + python -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" +} + +function validate_vdi_uuid { + # we check that vdi has the expected UUID which depends on the UUID of + # the SR. This is a deterministic hash of the SR UUID and the + # namespace UUID $NS. This UUID must match what Xapi's Uuidx module is using. 
+ local NS="e93e0639-2bdb-4a59-8b46-352b3f408c19" + local sr="$1" + local vdi="$2" + local uuid + + uuid=$(uuid5 "$NS" "$sr") + if [ "$vdi" != "$uuid" ]; then + return 1 + else + return 0 + fi +} + function test_sr { sr_uuid_found=$(${XE} sr-list uuid="$1" --minimal) if [ "${sr_uuid_found}" != "$1" ]; then - echo Invalid SR UUID specified: $1 + echo "Invalid SR UUID specified: $1" usage fi } @@ -63,7 +87,8 @@ just_find_vdi=0 fs_uninitialised=0 usage_alert=90 force_backup=0 -while getopts "hvink:u:dcf" opt ; do +yes=0 +while getopts "yhvink:u:dcf" opt ; do case $opt in h) usage ;; c) create_vdi=1 ; fs_uninitialised=1 ;; @@ -73,6 +98,7 @@ while getopts "hvink:u:dcf" opt ; do d) leave_mounted=1 ;; n) just_find_vdi=1 ;; v) debug="" ;; + y) yes=1 ;; f) force_backup=1 ;; *) echo "Invalid option"; usage ;; esac @@ -89,32 +115,32 @@ fi # determine if the SR UUID is vaid if [ -z "${sr_uuid}" ]; then # use the default-SR from the pool - sr_uuid=$(${XE} pool-param-get uuid=${pool_uuid} param-name=default-SR) + sr_uuid=$(${XE} pool-param-get uuid="${pool_uuid}" param-name=default-SR) fi test_sr "${sr_uuid}" -sr_name=$(${XE} sr-param-get uuid=${sr_uuid} param-name=name-label) +sr_name=$(${XE} sr-param-get uuid="${sr_uuid}" param-name=name-label) # see if a backup VDI already exists on the selected SR -vdi_uuid=$(${XE} vdi-list other-config:ctxs-pool-backup=true sr-uuid=${sr_uuid} params=uuid --minimal) +vdi_uuid=$(${XE} vdi-list other-config:ctxs-pool-backup=true sr-uuid="${sr_uuid}" params=uuid --minimal) mnt= function cleanup { trap "" TERM INT cd / if [ ! -z "${mnt}" ]; then - umount ${mnt} >/dev/null 2>&1 - rmdir ${mnt} + umount "${mnt}" >/dev/null 2>&1 + rmdir "${mnt}" fi if [ ! -z "${vbd_uuid}" ]; then ${debug} echo -n "Unplugging VBD: " - ${XE} vbd-unplug uuid=${vbd_uuid} timeout=20 + ${XE} vbd-unplug uuid="${vbd_uuid}" timeout=20 # poll for the device to go away if we know its name if [ "${device}" != "" ]; then device_gone=0 for ((i=0; i<10; i++)); do ${debug} echo -n "." - if [ ! -b ${device} ]; then + if [ ! -b "${device}" ]; then ${debug} echo " done" device_gone=1 break @@ -123,22 +149,35 @@ function cleanup { done if [ ${device_gone} -eq 0 ]; then ${debug} echo " failed" - echo Please destroy VBD ${vbd_uuid} manually. + echo "Please destroy VBD ${vbd_uuid} manually." else - ${XE} vbd-destroy uuid=${vbd_uuid} + ${XE} vbd-destroy uuid="${vbd_uuid}" fi fi fi if [ ${fs_uninitialised} -eq 1 -a -n "${vdi_uuid}" ] ; then - ${XE} vdi-destroy uuid=${vdi_uuid} + ${XE} vdi-destroy uuid="${vdi_uuid}" fi } -echo Using SR: ${sr_name} +# if we can't validate the UUID of the VDI, prompt the user +if [ -n "${vdi_uuid}" ]; then + if ! validate_vdi_uuid "${sr_uuid}" "${vdi_uuid}" && [ "$yes" -eq 0 ]; then + echo "Backup VDI $vdi_uuid was most likely created by an earlier" + echo "version of this code. Make sure this is a VDI that you" + echo "created as we can't validate it without mounting it." + read -p "Continue? [Y/N]" -n 1 -r; echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi + fi +fi + +echo "Using SR: ${sr_name}" if [ -z "${vdi_uuid}" ]; then if [ "${create_vdi}" -gt 0 ]; then echo -n "Creating new backup VDI: " - vdi_uuid=$(${XE} vdi-create virtual-size=500MiB sr-uuid=${sr_uuid} type=user name-label="Pool Metadata Backup") + label="Pool Metadata Backup" + # the label must match what xapi_vdi.ml is using for backup VDIs + vdi_uuid=$(${XE} vdi-create virtual-size=500MiB sr-uuid="${sr_uuid}" type=user name-label="${label}") init_fs=1 if [ $? 
-ne 0 ]; then echo failed @@ -148,8 +187,8 @@ if [ -z "${vdi_uuid}" ]; then echo "Backup VDI not found, aborting. You can initialise one using the '-c' flag." exit 3 fi - echo ${vdi_uuid} - ${XE} vdi-param-set uuid=${vdi_uuid} other-config:ctxs-pool-backup=true + echo "${vdi_uuid}" + ${XE} vdi-param-set uuid="${vdi_uuid}" other-config:ctxs-pool-backup=true else ${debug} echo "Using existing backup VDI: ${vdi_uuid}" fs_uninitialised=0 @@ -160,110 +199,110 @@ if [ ${just_find_vdi} -gt 0 ]; then fi ${debug} echo -n "Creating VBD: " -vbd_uuid=$(${XE} vbd-create vm-uuid=${CONTROL_DOMAIN_UUID} vdi-uuid=${vdi_uuid} device=autodetect) -${debug} echo ${vbd_uuid} +vbd_uuid=$(${XE} vbd-create vm-uuid="${CONTROL_DOMAIN_UUID}" vdi-uuid="${vdi_uuid}" device=autodetect) +${debug} echo "${vbd_uuid}" if [ $? -ne 0 -o -z "${vbd_uuid}" ]; then - echo error creating VBD + echo "error creating VBD" cleanup exit 1 fi ${debug} echo -n "Plugging VBD: " -${XE} vbd-plug uuid=${vbd_uuid} -device=/dev/$(${XE} vbd-param-get uuid=${vbd_uuid} param-name=device) +${XE} vbd-plug uuid="${vbd_uuid}" +device=/dev/$(${XE} vbd-param-get uuid="${vbd_uuid}" param-name=device) -if [ ! -b ${device} ]; then - ${debug} echo ${device}: not a block special +if [ ! -b "${device}" ]; then + ${debug} echo "${device}: not a block special" cleanup exit 1 fi -${debug} echo ${device} +${debug} echo "${device}" -if [ $init_fs -eq 1 ]; then +if [ "$init_fs" -eq 1 ]; then ${debug} echo -n "Creating filesystem: " - mkfs.ext3 -j -F ${device} > /dev/null 2>&1 + mkfs.ext3 -j -F "${device}" > /dev/null 2>&1 ${debug} echo "done" fs_uninitialised=0 fi ${debug} echo -n "Mounting filesystem: " -mnt=/var/run/pool-backup-${vdi_uuid} -mkdir -p ${mnt} +mnt="/var/run/pool-backup-${vdi_uuid}" +mkdir -p "${mnt}" -/sbin/fsck -a ${device} >/dev/null 2>&1 +/sbin/fsck -a "${device}" >/dev/null 2>&1 if [ $? -ne 0 ]; then ${debug} fsck failed. Please correct manually cleanup exit 1 fi -mount ${device} ${mnt} > /dev/null 2>&1 +mount "${device}" "${mnt}" > /dev/null 2>&1 if [ $? -ne 0 ]; then ${debug} echo failed cleanup exit 1 fi -${debug} echo ${mnt} +${debug} echo "${mnt}" if [ ${leave_mounted} -eq 0 ]; then - lrconf=${mnt}/conf/${vdi_uuid} - if [ ! -f ${lrconf} ]; then + lrconf="${mnt}/conf/${vdi_uuid}" + if [ ! -f "${lrconf}" ]; then ${debug} echo -n "Initialising rotation: " - mkdir -p ${mnt}/conf/ - echo "${mnt}/${pool_uuid}.db {" >> ${lrconf} - echo " rotate ${history_kept}" >> ${lrconf} - echo " missingok" >> ${lrconf} - echo "}" >> ${lrconf} - echo done - echo ${metadata_version} >> ${mnt}/.ctxs-metadata-backup + mkdir -p "${mnt}/conf/" + echo "${mnt}/${pool_uuid}.db {" >> "${lrconf}" + echo " rotate ${history_kept}" >> "${lrconf}" + echo " missingok" >> "${lrconf}" + echo "}" >> "${lrconf}" + echo "done" + echo "${metadata_version}" >> "${mnt}/.ctxs-metadata-backup" fi # check the usage of the backup VDI - usage=`cd ${mnt} && df . | sed -n "2p" | awk '{ print $5 }' | tr -d '%'` + usage=`cd "${mnt}" && df . | sed -n "2p" | awk '{ print $5 }' | tr -d '%'` echo "Checking backup VDI space usage: $usage%" - if [ $usage -gt $usage_alert ] && [ ${force_backup} -eq 0 ]; then - echo "Running out of space, you can use "-d" option to attach VDI and free more space, exit now." + if [ "$usage" -gt "$usage_alert" ] && [ "${force_backup}" -eq 0 ]; then + echo "Running out of space, you can use '-d' option to attach VDI and free more space, exit now." 
cleanup exit 1 fi # invoke logrotate to rotate over old pool db backups echo -n "Rotating old backups: " - logrotate -f ${lrconf} - num_found=$(find ${mnt} -name \*.db\.* | wc -l) - echo found ${num_found} + logrotate -f "${lrconf}" + num_found=$(find "${mnt}" -name '*.db.*' | wc -l) + echo "found ${num_found}" # perform the pool database dump echo -n "Backing up pool database: " - ${XE} pool-dump-database file-name=${mnt}/${pool_uuid}.db + ${XE} pool-dump-database file-name="${mnt}/${pool_uuid}.db" echo done # backup the VM metadata for each VM in the pool into a dated directory datetime=$(date +%F-%H-%M-%S) - metadir=${mnt}/metadata/${datetime} - mkdir -p ${metadir} + metadir="${mnt}/metadata/${datetime}" + mkdir -p "${metadir}" echo -n "Cleaning old VM metadata: " IFS=" " - todelete=$(cd ${mnt}/metadata && ls -1 |sort -n | head -n -${history_kept} | xargs echo) + todelete=$(cd "${mnt}/metadata" && ls -1 |sort -n | head -n -${history_kept} | xargs echo) for dir in ${todelete}; do - rm -rf ${mnt}/metadata/${dir} + rm -rf "${mnt}/metadata/${dir}" done echo done IFS="," echo -n "Backing up SR metadata: " - mkdir -p ${metadir} - "@LIBEXECDIR@/backup-sr-metadata.py" -f ${metadir}/SRMETA.xml + mkdir -p "${metadir}" + "@LIBEXECDIR@/backup-sr-metadata.py" -f "${metadir}/SRMETA.xml" echo "done" echo -n "Backing up VM metadata: " ${debug} echo "" - mkdir -p ${metadir}/all + mkdir -p "${metadir}/all" for vmuuid in $(${XE} vm-list params=uuid is-control-domain=false --minimal); do ${debug} echo -n . - ${XE} vm-export --metadata uuid=${vmuuid} filename=${metadir}/all/${vmuuid}.vmmeta >/dev/null 2>&1 + ${XE} vm-export --metadata uuid="${vmuuid}" filename="${metadir}/all/${vmuuid}.vmmeta" >/dev/null 2>&1 done echo "done" echo -n "Backing up Template metadata: " @@ -271,13 +310,13 @@ if [ ${leave_mounted} -eq 0 ]; then template_uuids=$("@LIBEXECDIR@/print-custom-templates") if [ $? 
-eq 0 ]; then for tmpl_uuid in ${template_uuids}; do - ${XE} template-export --metadata template-uuid=${tmpl_uuid} filename=${metadir}/all/${tmpl_uuid}.vmmeta >/dev/null 2>&1 + ${XE} template-export --metadata template-uuid="${tmpl_uuid}" filename="${metadir}/all/${tmpl_uuid}.vmmeta" >/dev/null 2>&1 done fi echo "done" - "@LIBEXECDIR@/link-vms-by-sr.py" -d ${metadir} + "@LIBEXECDIR@/link-vms-by-sr.py" -d "${metadir}" else - cd ${mnt} + cd "${mnt}" env PS1="Mounted backup VDI on: ${mnt}\nPress ^D to exit shell and safely detach it.\n\n[\u@\h \W]\$ " bash fi diff --git a/scripts/xe-restore-metadata b/scripts/xe-restore-metadata index 81beb51b704..093cd772192 100755 --- a/scripts/xe-restore-metadata +++ b/scripts/xe-restore-metadata @@ -47,11 +47,18 @@ function usage { function test_sr { sr_uuid_found=$(${XE} sr-list uuid="$1" --minimal) if [ "${sr_uuid_found}" != "$1" ]; then - echo Invalid SR UUID specified: $1 + echo "Invalid SR UUID specified: $1" usage fi } +# name space to hash SRs for a deterministic VDI UUID +NS="e93e0639-2bdb-4a59-8b46-352b3f408c19" +function uuid5 { + # could use a modern uuidgen but it's not on XS 8 + python -c "import uuid; print (uuid.uuid5(uuid.UUID('$1'), '$2'))" +} + dry_run=0 sr_uuid= yes=0 @@ -94,35 +101,42 @@ fi # determine if the SR UUID is vaid if [ -z "${sr_uuid}" ]; then # use the default-SR from the pool - sr_uuid=$(${XE} pool-param-get uuid=${pool_uuid} param-name=default-SR) + sr_uuid=$(${XE} pool-param-get uuid="${pool_uuid}" param-name=default-SR) fi test_sr "${sr_uuid}" -sr_name=$(${XE} sr-param-get uuid=${sr_uuid} param-name=name-label) +sr_name=$(${XE} sr-param-get uuid="${sr_uuid}" param-name=name-label) + +# probe first for a VDI with known UUID derived from the SR to avoid +# scanning for a VDI +backup_vdi=$(uuid5 "${NS}" "${sr_uuid}") +if [ -z "${vdis}" ]; then + vdis=$(${XE} vdi-list uuid="${backup_vdi}" sr-uuid="${sr_uuid}" read-only=false --minimal) +fi # get a list of all VDIs if an override has not been provided on the cmd line if [ -z "${vdis}" ]; then - vdis=$(${XE} vdi-list params=uuid sr-uuid=${sr_uuid} read-only=false --minimal) + vdis=$(${XE} vdi-list params=uuid sr-uuid="${sr_uuid}" read-only=false --minimal) fi mnt= function cleanup { cd / if [ ! -z "${mnt}" ]; then - umount ${mnt} >/dev/null 2>&1 - rmdir ${mnt} + umount "${mnt}" >/dev/null 2>&1 + rmdir "${mnt}" mnt="" fi if [ ! -z "${vbd_uuid}" ]; then ${debug} echo -n "Unplugging VBD: " >&2 - ${XE} vbd-unplug uuid=${vbd_uuid} timeout=20 + ${XE} vbd-unplug uuid="${vbd_uuid}" timeout=20 # poll for the device to go away if we know its name if [ "${device}" != "" ]; then device_gone=0 for ((i=0; i<10; i++)); do ${debug} echo -n "." >&2 - if [ ! -b ${device} ]; then + if [ ! -b "${device}" ]; then ${debug} echo " done" >&2 device_gone=1 break @@ -131,9 +145,9 @@ function cleanup { done if [ ${device_gone} -eq 0 ]; then ${debug} echo " failed" >&2 - ${debug} echo Please destroy VBD ${vbd_uuid} manually. >&2 + ${debug} echo "Please destroy VBD ${vbd_uuid} manually." >&2 else - ${XE} vbd-destroy uuid=${vbd_uuid} + ${XE} vbd-destroy uuid="${vbd_uuid}" vbd_uuid="" fi fi @@ -142,88 +156,96 @@ function cleanup { } if [ -z "${vdis}" ]; then - echo No VDIs found on SR. >&2 + echo "No VDIs found on SR." >&2 exit 0 fi trap cleanup SIGINT ERR for vdi_uuid in ${vdis}; do + if [ "${vdi_uuid}" != "${backup_vdi}" ] && [ "$yes" -eq 0 ]; then + echo "Probing VDI ${vdi_uuid}." + echo "This VDI was created with a prior version of this code." 
+ echo "Its validity can't be checked without mounting it first." + read -p "Continue? [Y/N]" -n 1 -r; echo + if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi + fi + ${debug} echo -n "Creating VBD: " >&2 - vbd_uuid=$(${XE} vbd-create vm-uuid=${CONTROL_DOMAIN_UUID} vdi-uuid=${vdi_uuid} device=autodetect 2>/dev/null) + vbd_uuid=$(${XE} vbd-create vm-uuid="${CONTROL_DOMAIN_UUID}" vdi-uuid="${vdi_uuid}" device=autodetect 2>/dev/null) if [ $? -ne 0 -o -z "${vbd_uuid}" ]; then - ${debug} echo error creating VBD for VDI ${vdi_uuid} >&2 + ${debug} echo "error creating VBD for VDI ${vdi_uuid}" >&2 cleanup continue fi - ${debug} echo ${vbd_uuid} >&2 + ${debug} echo "${vbd_uuid}" >&2 ${debug} echo -n "Plugging VBD: " >&2 - ${XE} vbd-plug uuid=${vbd_uuid} - device=/dev/$(${XE} vbd-param-get uuid=${vbd_uuid} param-name=device) + ${XE} vbd-plug uuid="${vbd_uuid}" + device=/dev/$(${XE} vbd-param-get uuid="${vbd_uuid}" param-name=device) - if [ ! -b ${device} ]; then - ${debug} echo ${device}: not a block special >&2 + if [ ! -b "${device}" ]; then + ${debug} echo "${device}: not a block special" >&2 cleanup continue fi - ${debug} echo ${device} >&2 + ${debug} echo "${device}" >&2 ${debug} echo -n "Probing device: " >&2 probecmd="@LIBEXECDIR@/probe-device-for-file" metadata_stamp="/.ctxs-metadata-backup" mnt= - ${probecmd} ${device} ${metadata_stamp} + ${probecmd} "${device}" "${metadata_stamp}" if [ $? -eq 0 ]; then - ${debug} echo found metadata backup >&2 + ${debug} echo "found metadata backup" >&2 ${debug} echo -n "Mounting filesystem: " >&2 - mnt=/var/run/pool-backup-${vdi_uuid} - mkdir -p ${mnt} - /sbin/fsck -a ${device} >/dev/null 2>&1 + mnt="/var/run/pool-backup-${vdi_uuid}" + mkdir -p "${mnt}" + /sbin/fsck -a "${device}" >/dev/null 2>&1 if [ $? -ne 0 ]; then - echo File system integrity error. Please correct manually. >&2 + echo "File system integrity error. Please correct manually." >&2 cleanup continue fi - mount ${device} ${mnt} >/dev/null 2>&1 + mount "${device}" "${mnt}" >/dev/null 2>&1 if [ $? -ne 0 ]; then ${debug} echo failed >&2 cleanup else if [ -e "${mnt}/.ctxs-metadata-backup" ]; then - ${debug} echo Found backup metadata on VDI: ${vdi_uuid} >&2 - xe vdi-param-set uuid=${vdi_uuid} other-config:ctxs-pool-backup=true + ${debug} echo "Found backup metadata on VDI: ${vdi_uuid}" >&2 + xe vdi-param-set uuid="${vdi_uuid}" other-config:ctxs-pool-backup=true break fi fi else - ${debug} echo backup metadata not found >&2 + ${debug} echo "backup metadata not found" >&2 fi cleanup done if [ $just_probe -gt 0 ]; then - echo ${vdi_uuid} + echo "${vdi_uuid}" cleanup exit 0 fi -cd ${mnt} +cd "${mnt}" ${debug} echo "" >&2 -if [ ! -d ${mnt}/metadata ]; then - echo Metadata backups not found. >&2 +if [ ! -d "${mnt}/metadata" ]; then + echo "Metadata backups not found." >&2 cleanup exit 1 fi -cd ${mnt}/metadata +cd "${mnt}/metadata" -if [ $just_list_dates -gt 0 ]; then - ls -1r ${mnt}/metadata +if [ "$just_list_dates" -gt 0 ]; then + ls -1r "${mnt}/metadata" cleanup exit 0 fi @@ -231,54 +253,54 @@ fi if [ -z "${chosen_date}" ]; then chosen_metadata_dir=$(ls | sort -n | tail -1) if [ -z "${chosen_metadata_dir}" ]; then - echo No VM metadata backups found in ${mnt}/metadata >&2 + echo "No VM metadata backups found in ${mnt}/metadata" >&2 cleanup exit 1 fi else - if [ ! -d ${mnt}/metadata/${chosen_date} ]; then - echo Date directory "${chosen_date}" not found >&2 + if [ ! 
-d "${mnt}/metadata/${chosen_date}" ]; then + echo "Date directory ${chosen_date} not found" >&2 cleanup exit 1 fi - chosen_metadata_dir=${chosen_date} + chosen_metadata_dir="${chosen_date}" fi case ${restore_mode} in sr) - full_dir=${mnt}/metadata/${chosen_metadata_dir}/by-sr/${sr_uuid} + full_dir="${mnt}/metadata/${chosen_metadata_dir}/by-sr/${sr_uuid}" ;; all) - full_dir=${mnt}/metadata/${chosen_metadata_dir}/all + full_dir="${mnt}/metadata/${chosen_metadata_dir}/all" ;; esac -if [ ! -d ${full_dir} ]; then - echo No VM metadata exports were found for the selected SR >&2 +if [ ! -d "${full_dir}" ]; then + echo "No VM metadata exports were found for the selected SR" >&2 cleanup exit 1 fi -${debug} echo Selected: ${full_dir} +${debug} echo "Selected: ${full_dir}" -cd ${full_dir} +cd "${full_dir}" ${debug} echo "" >&2 -${debug} echo Latest VM metadata found is: >&2 +${debug} echo "Latest VM metadata found is": >&2 ${debug} ls >&2 -if [ $yes -eq 0 ]; then +if [ "$yes" -eq 0 ]; then echo "Do you wish to reimport all VM metadata?" - echo "Please type in "yes" and to continue." + echo "Please type in 'yes' and to continue." read response if [ "$response" != "yes" ]; then - echo Aborting metadata restore. + echo "Aborting metadata restore." cleanup exit 1 fi fi ${debug} echo "" >&2 -${debug} echo Restoring VM metadata: >&2 +${debug} echo "Restoring VM metadata:" >&2 trap - ERR @@ -297,8 +319,8 @@ else fi shopt -s nullglob for meta in *.vmmeta; do - echo xe vm-import filename=${meta} sr-uuid=${sr_uuid} --metadata --preserve${force_flag}${dry_run_flag} - "@OPTDIR@/bin/xe" vm-import filename="${full_dir}/${meta}" sr-uuid=${sr_uuid} --metadata --preserve${force_flag}${dry_run_flag} + echo xe vm-import filename="${meta}" sr-uuid="${sr_uuid}" --metadata --preserve"${force_flag}""${dry_run_flag}" + "@OPTDIR@/bin/xe" vm-import filename="${full_dir}/${meta}" sr-uuid="${sr_uuid}" --metadata --preserve"${force_flag}""${dry_run_flag}" if [ $? -gt 0 ]; then error_count=$(( $error_count + 1 )) else @@ -306,16 +328,16 @@ for meta in *.vmmeta; do fi done -smmeta_file=${mnt}/metadata/${chosen_metadata_dir}/SRMETA.xml +smmeta_file="${mnt}/metadata/${chosen_metadata_dir}/SRMETA.xml" if [ "$restore_mode" == "all" ]; then cmd="@LIBEXECDIR@/restore-sr-metadata.py -f ${smmeta_file}" else cmd="@LIBEXECDIR@/restore-sr-metadata.py -u ${sr_uuid} -f ${smmeta_file}" fi -if [ -e ${smmeta_file} ]; then - if [ ${dry_run} -gt 0 ]; then - echo ${cmd} +if [ -e "${smmeta_file}" ]; then + if [ "${dry_run}" -gt 0 ]; then + echo "${cmd}" else ${cmd} fi @@ -323,4 +345,4 @@ fi echo "Restored ${good_count} VM(s), and ${error_count} error(s)" cleanup -exit ${error_count} +exit "${error_count}"