diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..9003fbb
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,42 @@
+[flake8]
+ignore =
+    # Module level import not at top of file
+    E402
+    # Whitespace before ':'; Removed per Black documentation
+    E203
+    # Invalid escape sequence
+    W605
+    # Python3.7+ compatibility checks
+    W606
+    # Ambiguous variable name l
+    E741
+    # Line break occurred before a binary operator
+    W503
+    # Missing docstring in public module
+    D100
+    # Missing docstring in public class
+    D101
+    # Missing docstring in public method
+    D102
+    # Missing docstring in public function
+    D103
+    # Missing docstring in public package
+    D104
+    # Missing docstring in magic method
+    D105
+    # Missing docstring in __init__
+    D107
+    # One-line docstring should fit on one line with quotes
+    D200
+    # No blank lines allowed after function docstring
+    D202
+    # 1 blank line required between summary line and description
+    D205
+    # First line should end with a period
+    D400
+    # First line should be in imperative mood
+    D401
+max-line-length = 79
+max-complexity = 18
+select = B,C,D,E,F,W,T4,B9
+extend-ignore = E203
diff --git a/.isort.cfg b/.isort.cfg
new file mode 100644
index 0000000..6b2fa0c
--- /dev/null
+++ b/.isort.cfg
@@ -0,0 +1,18 @@
+#
+# Copyright (c) 2022 by Delphix. All rights reserved.
+#
+
+[settings]
+default_section=THIRDPARTY
+
+extra_standard_library=posixpath,ntpath,Queue
+
+# Every import should try its best to be on one line
+force_single_line=True
+
+# Settings needed to be compatible with black
+multi_line_output=3
+include_trailing_comma=True
+force_grid_wrap=0
+use_parentheses=True
+line_length=79
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..75238fc
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,39 @@
+repos:
+- repo: https://github.com/psf/black
+  rev: 22.3.0
+  hooks:
+  - id: black
+    exclude: >
+      (?x)^(
+      )$
+    args: [--line-length=79]
+- repo: https://github.com/pycqa/flake8
+  rev: 3.9.2
+  hooks:
+  - id: flake8
+    exclude: >
+      (?x)^(
+      )$
+- repo: local
+  hooks:
+  - id: copyright
+    name: copyright
+    entry: copyright.sh
+    language: script
+    types: [text]
+    exclude: >
+      (?x)^(
+      .flake8|
+      .pre-commit-config.yaml|
+      pyproject.toml|
+      schema.json|
+      .*__init__.py|
+      src/templates/service_file_template.txt|
+      src/config/logger_conf.ini|
+      README.md|
+      .github/workflows/codeql.yml|
+      .github/CODEOWNERS|
+      .isort.cfg|
+      LICENSE|
+      .gitignore|
+      )$
diff --git a/copyright.sh b/copyright.sh
new file mode 100755
index 0000000..e9c7812
--- /dev/null
+++ b/copyright.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+#
+# Copyright (c) 2022, 2023 by Delphix. All rights reserved.
+#
+
+function verify_copyright() {
+    file=$1
+    current_year=$(date +%Y)
+    if [[ $(grep -e "Copyright (c).*$current_year .*Delphix. All rights reserved." "$file") ]] ; then
+        return 0
+    else
+        echo "Copyright check failed for file: $file"
+        return 1
+    fi
+
+}
+
+code=0
+for file in "$@" ; do
+    verify_copyright "$file"
+    code=$(($? + $code))
+done
+exit $code
diff --git a/src/controller/couchbase_lib/_bucket.py b/src/controller/couchbase_lib/_bucket.py
index e22ef1f..f88727d 100644
--- a/src/controller/couchbase_lib/_bucket.py
+++ b/src/controller/couchbase_lib/_bucket.py
@@ -1,30 +1,31 @@
 #
-# Copyright (c) 2020 by Delphix. All rights reserved.
+# Copyright (c) 2020-2023 by Delphix. All rights reserved.
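The year-range bump in the header above is exactly what the new
copyright.sh hook enforces: every checked file must carry a Delphix
copyright line naming the current year. A rough Python equivalent of the
hook's grep, handy for testing the pattern outside pre-commit (the
function and CLI are illustrative, not part of this change set):

    import re
    import sys
    from datetime import date

    def has_current_copyright(path):
        """Check a file for a Delphix copyright naming the current year."""
        pattern = r"Copyright \(c\).*{} .*Delphix\. All rights reserved\.".format(
            date.today().year
        )
        with open(path, errors="ignore") as handle:
            return bool(re.search(pattern, handle.read()))

    if __name__ == "__main__":
        # Mirror copyright.sh: the exit code counts the failing files.
        sys.exit(sum(1 for f in sys.argv[1:] if not has_current_copyright(f)))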
# -####################################################################################################################### +############################################################################## """ - This class contains methods for all bucket related operations .This is child class of Resource and parent class - of CouchbaseOperation + This class contains methods for all bucket related operations . + This is child class of Resource and parent class + of CouchbaseOperation. """ -####################################################################################################################### -import logging -from utils import utilities import json + +############################################################################## +import logging from os.path import join -from internal_exceptions.database_exceptions import BucketOperationError + from controller import helper_lib -from controller.helper_lib import remap_bucket_json from controller.couchbase_lib._mixin_interface import MixinInterface from controller.resource_builder import Resource from db_commands.commands import CommandFactory -from db_commands.constants import ENV_VAR_KEY, EVICTION_POLICY +from db_commands.constants import ENV_VAR_KEY +from internal_exceptions.database_exceptions import BucketOperationError +from utils import utilities logger = logging.getLogger(__name__) class _BucketMixin(Resource, MixinInterface): - def __init__(self, builder): super(_BucketMixin, self).__init__(builder) @@ -39,31 +40,35 @@ def bucket_edit(self, bucket_name, flush_value=1): logger.debug("Editing bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) env = _BucketMixin.generate_environment_map(self) - kwargs = {ENV_VAR_KEY: { - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env.update(kwargs[ENV_VAR_KEY]) - # command = CommandFactory.bucket_edit(bucket_name=bucket_name, flush_value=flush_value, **env) - command, env_vars = CommandFactory.bucket_edit_expect(bucket_name=bucket_name, flush_value=flush_value, **env) + command, env_vars = CommandFactory.bucket_edit_expect( + bucket_name=bucket_name, flush_value=flush_value, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) logger.debug("edit bucket {}".format(command)) return utilities.execute_expect(self.connection, command, **kwargs) def bucket_edit_ramquota(self, bucket_name, _ramsize): """ - :param bucket_name: Required bucket_name on which edit operation will run + :param bucket_name: Required bucket_name on which edit operation will + run :param _ramsize: :return: """ # It requires the before bucket delete logger.debug("Editing bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) - kwargs = {ENV_VAR_KEY: { - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env = _BucketMixin.generate_environment_map(self) env.update(kwargs[ENV_VAR_KEY]) - # command = CommandFactory.bucket_edit_ramquota(bucket_name=bucket_name, ramsize=_ramsize, **env) - command, env_vars = CommandFactory.bucket_edit_ramquota_expect(bucket_name=bucket_name, - ramsize=_ramsize, **env) + command, env_vars = CommandFactory.bucket_edit_ramquota_expect( + bucket_name=bucket_name, ramsize=_ramsize, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) logger.debug("edit ram bucket {}".format(command)) return utilities.execute_expect(self.connection, command, **kwargs) @@ -73,11 +78,13 @@ def 
bucket_delete(self, bucket_name): logger.debug("Deleting bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) env = _BucketMixin.generate_environment_map(self) - kwargs = {ENV_VAR_KEY: { - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env.update(kwargs[ENV_VAR_KEY]) - # command = CommandFactory.bucket_delete(bucket_name=bucket_name, **env) - command, env_vars = CommandFactory.bucket_delete_expect(bucket_name=bucket_name, **env) + command, env_vars = CommandFactory.bucket_delete_expect( + bucket_name=bucket_name, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) logger.debug("delete bucket {}".format(command)) return utilities.execute_expect(self.connection, command, **kwargs) @@ -87,12 +94,13 @@ def bucket_flush(self, bucket_name): logger.debug("Flushing bucket: {} ".format(bucket_name)) self.__validate_bucket_name(bucket_name) env = _BucketMixin.generate_environment_map(self) - kwargs = {ENV_VAR_KEY: { - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env.update(kwargs[ENV_VAR_KEY]) - # command, env_vars = CommandFactory.bucket_flush(bucket_name=bucket_name, **env) command, env_vars = CommandFactory.bucket_flush_expect( - bucket_name=bucket_name, **env) + bucket_name=bucket_name, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) logger.debug("flush bucket {}".format(command)) return utilities.execute_expect(self.connection, command, **kwargs) @@ -105,126 +113,176 @@ def bucket_remove(self, bucket_name): self.bucket_delete(bucket_name) helper_lib.sleepForSecond(2) - def bucket_create(self, bucket_name, ram_size, bucket_type, bucket_compression): + def bucket_create( + self, bucket_name, ram_size, bucket_type, bucket_compression + ): logger.debug("Creating bucket: {} ".format(bucket_name)) # To create the bucket with given ram size self.__validate_bucket_name(bucket_name) if ram_size is None: - logger.debug("Needed ramsize for bucket_create. Currently it is: {}".format(ram_size)) + logger.debug( + "Needed ramsize for bucket_create. 
Currently it is: {}".format( + ram_size + ) + ) return - - if bucket_type == 'membase': + if bucket_type == "membase": # API return different type - bucket_type = 'couchbase' + bucket_type = "couchbase" if bucket_compression is not None: - bucket_compression = '--compression-mode {}'.format(bucket_compression) + bucket_compression = "--compression-mode {}".format( + bucket_compression + ) else: - bucket_compression = '' + bucket_compression = "" policy = self.parameters.bucket_eviction_policy env = _BucketMixin.generate_environment_map(self) - kwargs = {ENV_VAR_KEY: { - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env.update(kwargs[ENV_VAR_KEY]) - # command = CommandFactory.bucket_create(bucket_name=bucket_name, ramsize=ram_size, evictionpolicy=policy, bucket_type=bucket_type, bucket_compression=bucket_compression, **env) - command, env_vars = CommandFactory.bucket_create_expect(bucket_name=bucket_name, ramsize=ram_size, evictionpolicy=policy, bucket_type=bucket_type, bucket_compression=bucket_compression, **env) + command, env_vars = CommandFactory.bucket_create_expect( + bucket_name=bucket_name, + ramsize=ram_size, + evictionpolicy=policy, + bucket_type=bucket_type, + bucket_compression=bucket_compression, + **env + ) logger.debug("create bucket {}".format(command)) kwargs[ENV_VAR_KEY].update(env_vars) - output, error, exit_code = utilities.execute_expect(self.connection, command, **kwargs) - logger.debug("create bucket output: {} {} {}".format(output, error, exit_code)) + output, error, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) + logger.debug( + "create bucket output: {} {} {}".format(output, error, exit_code) + ) helper_lib.sleepForSecond(2) - def bucket_list(self, return_type=list): - # See the all bucket. + # See the all bucket. 
# It will return also other information like ramused, ramsize etc logger.debug("Finding staged bucket list") env = _BucketMixin.generate_environment_map(self) - kwargs = {ENV_VAR_KEY: { - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } env.update(kwargs[ENV_VAR_KEY]) # command = CommandFactory.bucket_list(**env) command, env_vars = CommandFactory.bucket_list_expect(**env) kwargs[ENV_VAR_KEY].update(env_vars) logger.debug("list bucket {}".format(command)) - # bucket_list, error, exit_code = utilities.execute_bash(self.connection, command, **kwargs) - bucket_list, error, exit_code = utilities.execute_expect(self.connection, - command, - **kwargs) + bucket_list, error, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) logger.debug("list bucket output{}".format(bucket_list)) if return_type == list: - #bucket_list = bucket_list.split("\n") + # bucket_list = bucket_list.split("\n") if bucket_list == "[]" or bucket_list is None: logger.debug("empty list") return [] else: logger.debug("clean up json") - bucket_list = bucket_list.replace("u'","'") - bucket_list = bucket_list.replace("'", "\"") - bucket_list = bucket_list.replace("True", "\"True\"") - bucket_list = bucket_list.replace("False", "\"False\"") + bucket_list = bucket_list.replace("u'", "'") + bucket_list = bucket_list.replace("'", '"') + bucket_list = bucket_list.replace("True", '"True"') + bucket_list = bucket_list.replace("False", '"False"') logger.debug("parse json") bucket_list_dict = json.loads(bucket_list) logger.debug("remap json") - bucket_list_dict = list(map(helper_lib.remap_bucket_json, bucket_list_dict)) - logger.debug("Bucket details in staged environment: {}".format(bucket_list)) + bucket_list_dict = list( + map(helper_lib.remap_bucket_json, bucket_list_dict) + ) + logger.debug( + "Bucket details in staged environment: {}".format(bucket_list) + ) return bucket_list_dict def move_bucket(self, bucket_name, direction): logger.debug("Rename folder") - - - if direction == 'save': - src = join(self.virtual_source.parameters.mount_path,'data',bucket_name) - dst = join(self.virtual_source.parameters.mount_path,'data',".{}.delphix".format(bucket_name)) + if direction == "save": + src = join( + self.virtual_source.parameters.mount_path, "data", bucket_name + ) + dst = join( + self.virtual_source.parameters.mount_path, + "data", + ".{}.delphix".format(bucket_name), + ) command = CommandFactory.os_mv(src, dst, self.need_sudo, self.uid) - logger.debug("rename command: {}".format(command)) - stdout, error, exit_code = utilities.execute_bash(self.connection, command) - elif direction == 'restore': - dst = join(self.virtual_source.parameters.mount_path,'data',bucket_name) - src = join(self.virtual_source.parameters.mount_path,'data',".{}.delphix".format(bucket_name)) + logger.debug("rename command: {}".format(command)) + utilities.execute_bash(self.connection, command) + elif direction == "restore": + dst = join( + self.virtual_source.parameters.mount_path, "data", bucket_name + ) + src = join( + self.virtual_source.parameters.mount_path, + "data", + ".{}.delphix".format(bucket_name), + ) command = CommandFactory.delete_dir(dst, self.need_sudo, self.uid) - logger.debug("delete command: {}".format(command)) - stdout, error, exit_code = utilities.execute_bash(self.connection, command) + logger.debug("delete command: {}".format(command)) + utilities.execute_bash(self.connection, command) command = 
CommandFactory.os_mv(src, dst, self.need_sudo, self.uid) - logger.debug("rename command: {}".format(command)) - stdout, error, exit_code = utilities.execute_bash(self.connection, command) - - - + logger.debug("rename command: {}".format(command)) + utilities.execute_bash(self.connection, command) def monitor_bucket(self, bucket_name, staging_UUID): # To monitor the replication - logger.debug("Monitoring the replication for bucket {} ".format(bucket_name)) - kwargs = {ENV_VAR_KEY: {'password': self.staged_source.parameters.xdcr_admin_password}} + logger.debug( + "Monitoring the replication for bucket {} ".format(bucket_name) + ) + kwargs = { + ENV_VAR_KEY: { + "password": self.staged_source.parameters.xdcr_admin_password + } + } env = kwargs[ENV_VAR_KEY] - command, env_vars = CommandFactory.monitor_replication_expect(source_username=self.staged_source.parameters.xdcr_admin, - source_hostname=self.source_config.couchbase_src_host, - source_port=self.source_config.couchbase_src_port, - bucket_name=bucket_name, uuid=staging_UUID, - **env) + command, env_vars = CommandFactory.monitor_replication_expect( + source_username=self.staged_source.parameters.xdcr_admin, + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + bucket_name=bucket_name, + uuid=staging_UUID, + **env + ) kwargs[ENV_VAR_KEY].update(env_vars) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, command, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) logger.debug("stdout: {}".format(stdout)) content = json.loads(stdout) - pending_docs = self._get_last_value_of_node_stats(list(content["nodeStats"].values())[0]) + pending_docs = self._get_last_value_of_node_stats( + list(content["nodeStats"].values())[0] + ) while pending_docs != 0: - logger.debug("Documents pending for replication: {}".format(pending_docs)) + logger.debug( + "Documents pending for replication: {}".format(pending_docs) + ) helper_lib.sleepForSecond(30) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, command, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, command, **kwargs + ) content = json.loads(stdout) - pending_docs = self._get_last_value_of_node_stats(list(content["nodeStats"].values())[0]) + pending_docs = self._get_last_value_of_node_stats( + list(content["nodeStats"].values())[0] + ) else: - logger.debug("Replication for bucket {} completed".format(bucket_name)) + logger.debug( + "Replication for bucket {} completed".format(bucket_name) + ) @staticmethod def _get_last_value_of_node_stats(content_list): """ :param content_list: - :return: last node value, if the list is defined. it the list is empty return 0 + :return: last node value, if the list is defined. 
it the list is empty + return 0 """ value = 0 if len(content_list) > 0: @@ -242,7 +300,11 @@ def __validate_bucket_name(name): @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, 'hostname': self.connection.environment.host.name, - 'port': self.parameters.couchbase_port, 'username': self.parameters.couchbase_admin} + env = { + "shell_path": self.repository.cb_shell_path, + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + } # MixinInterface.read_map(env) return env diff --git a/src/controller/couchbase_lib/_cb_backup.py b/src/controller/couchbase_lib/_cb_backup.py index 8fd3b27..613e749 100644 --- a/src/controller/couchbase_lib/_cb_backup.py +++ b/src/controller/couchbase_lib/_cb_backup.py @@ -1,76 +1,66 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """This class contains methods for all cb backup manager. -This is child class of Resource and parent class of CouchbaseOperation +This is child class of Resource and parent class of CouchbaseOperation. """ import json -####################################################################################################################### + +############################################################################## import logging -from utils import utilities + from controller import helper_lib from controller.couchbase_lib._mixin_interface import MixinInterface from controller.resource_builder import Resource -from db_commands.constants import ENV_VAR_KEY -from db_commands.commands import CommandFactory from dlpx.virtualization.platform.exceptions import UserError logger = logging.getLogger(__name__) class _CBBackupMixin(Resource, MixinInterface): - def __init__(self, builder): super(_CBBackupMixin, self).__init__(builder) @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'base_path': helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path), - 'hostname': self.connection.environment.host.name, 'port': self.parameters.couchbase_port, - 'username': self.parameters.couchbase_admin - } + env = { + "base_path": helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + } # MixinInterface.read_map(env) return env - - def cb_backup_full(self, csv_bucket): logger.debug("Starting Restore via Backup file...") logger.debug("csv_bucket_list: {}".format(csv_bucket)) - skip = '--disable-analytics' - - if self.parameters.fts_service != True: - skip = skip + ' {} {} '.format('--disable-ft-indexes','--disable-ft-alias') + skip = "--disable-analytics" - if self.parameters.eventing_service != True: - skip = skip + ' {} '.format('--disable-eventing') + if not self.parameters.fts_service: + skip = skip + " {} {} ".format( + "--disable-ft-indexes", "--disable-ft-alias" + ) + if not self.parameters.eventing_service: + skip = skip + " {} ".format("--disable-eventing") logger.debug("skip backup is set to: {}".format(skip)) - - # kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - # env = 
_CBBackupMixin.generate_environment_map(self) - - # cmd = CommandFactory.cb_backup_full(backup_location=self.parameters.couchbase_bak_loc, - # csv_bucket_list=csv_bucket, - # backup_repo=self.parameters.couchbase_bak_repo, - # need_sudo=self.need_sudo, uid=self.uid, - # skip=skip, - # **env) - # logger.debug("Backup restore: {}".format(cmd)) - # utilities.execute_bash(self.connection, cmd, **kwargs) map_data_list = [] if int(self.repository.version.split(".")[0]) >= 7: for bucket_name in csv_bucket.split(","): logger.debug(f"bucket_name: {bucket_name}") stdout, _, _ = self.run_couchbase_command( - couchbase_command='get_scope_list_expect', + couchbase_command="get_scope_list_expect", base_path=helper_lib.get_base_directory_of_given_path( - self.repository.cb_shell_path), - bucket_name=bucket_name + self.repository.cb_shell_path + ), + bucket_name=bucket_name, ) json_scope_data = json.loads(stdout) for s in json_scope_data["scopes"]: @@ -82,18 +72,29 @@ def cb_backup_full(self, csv_bucket): collection_name = c["name"] if collection_name == "_default": continue - map_data_list.append(f"{bucket_name}.{scope_name}.{collection_name}={bucket_name}.{scope_name}.{collection_name}") - - stdout, stderr, exit_code = self.run_couchbase_command(couchbase_command='cb_backup_full', - backup_location=self.parameters.couchbase_bak_loc, - csv_bucket_list=csv_bucket, - backup_repo=self.parameters.couchbase_bak_repo, - skip=skip, - base_path=helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path), - map_data=",".join(map_data_list), - repo_version = self.repository.version - ) + map_data_list.append( + f"{bucket_name}.{scope_name}.{collection_name}=" + f"{bucket_name}.{scope_name}.{collection_name}" + ) + + stdout, stderr, exit_code = self.run_couchbase_command( + couchbase_command="cb_backup_full", + backup_location=self.parameters.couchbase_bak_loc, + csv_bucket_list=csv_bucket, + backup_repo=self.parameters.couchbase_bak_repo, + skip=skip, + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + map_data=",".join(map_data_list), + repo_version=self.repository.version, + ) if exit_code != 0: - raise UserError("Problem with restoring backup using cbbackupmgr", "Check if repo and all privileges are correct", - "stdout: {}, stderr: {}, exit_code: {}".format(stdout, stderr, exit_code)) + raise UserError( + "Problem with restoring backup using cbbackupmgr", + "Check if repo and all privileges are correct", + "stdout: {}, stderr: {}, exit_code: {}".format( + stdout, stderr, exit_code + ), + ) diff --git a/src/controller/couchbase_lib/_cluster.py b/src/controller/couchbase_lib/_cluster.py index 0ef3774..d12066a 100644 --- a/src/controller/couchbase_lib/_cluster.py +++ b/src/controller/couchbase_lib/_cluster.py @@ -1,43 +1,48 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ This class contains methods for cluster related operations -This is child class of Resource and parent class of CouchbaseOperation +This is child class of Resource and parent class of CouchbaseOperation. 
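+
+For example, the services string that cluster_init assembles looks like
+this (values illustrative; "query" is always present, the others depend
+on the fts_service and eventing_service parameters):
+
+    >>> additional_service = "query"
+    >>> additional_service += ",fts"       # fts_service enabled
+    >>> additional_service += ",eventing"  # eventing_service enabled
+    >>> additional_service
+    'query,fts,eventing'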
""" -####################################################################################################################### +############################################################################## import logging import re -from utils import utilities + +from controller.couchbase_lib._mixin_interface import MixinInterface from controller.helper_lib import sleepForSecond +from controller.resource_builder import Resource from db_commands.commands import CommandFactory -from controller.couchbase_lib._mixin_interface import MixinInterface from db_commands.constants import ENV_VAR_KEY -from controller.resource_builder import Resource +from utils import utilities logger = logging.getLogger(__name__) # Error string on which we have to skip without raising the Exception -ALREADY_CLUSTER_INIT = "Cluster is already initialized, use setting-cluster to change settings" +ALREADY_CLUSTER_INIT = ( + "Cluster is already initialized, use setting-cluster to change settings" +) class _ClusterMixin(Resource, MixinInterface): - def __init__(self, builder): super(_ClusterMixin, self).__init__(builder) @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, 'hostname': self.connection.environment.host.name, - 'port': self.parameters.couchbase_port, 'username': self.parameters.couchbase_admin, - 'cluster_ramsize': self.parameters.cluster_ram_size, - 'cluster_index_ramsize': self.parameters.cluster_index_ram_size, - 'cluster_fts_ramsize': self.parameters.cluster_ftsram_size, - 'cluster_eventing_ramsize': self.parameters.cluster_eventing_ram_size, - 'cluster_analytics_ramsize': self.parameters.cluster_analytics_ram_size - } + env = { + "shell_path": self.repository.cb_shell_path, + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + "cluster_ramsize": self.parameters.cluster_ram_size, + "cluster_index_ramsize": self.parameters.cluster_index_ram_size, + "cluster_fts_ramsize": self.parameters.cluster_ftsram_size, + "cluster_eventing_ramsize": self.parameters.cluster_eventing_ram_size, # noqa E501 + "cluster_analytics_ramsize": self.parameters.cluster_analytics_ram_size, # noqa E501 + } # MixinInterface.read_map(env) return env @@ -53,22 +58,28 @@ def cluster_init(self): # Cluster initialization logger.debug("Cluster Initialization started") fts_service = self.parameters.fts_service - #analytics_service = self.parameters.analytics_service + # analytics_service = self.parameters.analytics_service eventing_service = self.parameters.eventing_service cluster_name = self._get_cluster_name() - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } additional_service = "query" - if fts_service == True: + if fts_service: additional_service = additional_service + ",fts" # if analytics_service: # additional_service = additional_service + ",analytics" - if eventing_service == True: + if eventing_service: additional_service = additional_service + ",eventing" logger.debug("additional services : {}".format(additional_service)) - lambda_expr = lambda output: bool(re.search(ALREADY_CLUSTER_INIT, output)) + + def f(): + return lambda output: bool(re.search(ALREADY_CLUSTER_INIT, output)) + + lambda_expr = f env = _ClusterMixin.generate_environment_map(self) - env['additional_services'] = additional_service + env["additional_services"] = additional_service if 
int(self.repository.version.split(".")[0]) >= 7: env.update(kwargs[ENV_VAR_KEY]) if "(CE)" in self.repository.version: @@ -77,18 +88,28 @@ def cluster_init(self): env["indexerStorageMode"] = "forestdb" else: env["indexerStorageMode"] = "plasma" - cmd, env_vars = CommandFactory.cluster_init_rest_expect(cluster_name=cluster_name, **env) + cmd, env_vars = CommandFactory.cluster_init_rest_expect( + cluster_name=cluster_name, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, - cmd, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) else: cmd = CommandFactory.cluster_init(cluster_name=cluster_name, **env) logger.debug("Cluster init: {}".format(cmd)) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, command_name=cmd, callback_func=lambda_expr, - **kwargs) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, + command_name=cmd, + callback_func=lambda_expr, + **kwargs + ) if re.search(r"ERROR", str(stdout)): if re.search(r"ERROR: Cluster is already initialized", stdout): - logger.debug("Performing cluster setting as cluster is already initialized") + logger.debug( + "Performing cluster setting as cluster is already " + "initialized" + ) self.cluster_setting() else: logger.error("Cluster init failed. Throwing exception") @@ -96,22 +117,25 @@ def cluster_init(self): else: logger.debug("Cluster init succeeded") - # here we should wait for indexer to start + # here we should wait for indexer to start sleepForSecond(10) return [stdout, stderr, exit_code] def cluster_setting(self): logger.debug("Cluster setting process has started") - kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: {"password": self.parameters.couchbase_admin_password} + } cluster_name = self._get_cluster_name() env = _ClusterMixin.generate_environment_map(self) env.update(kwargs[ENV_VAR_KEY]) - # cmd = CommandFactory.cluster_setting(cluster_name=cluster_name, **env) - cmd, env_vars = CommandFactory.cluster_setting_expect(cluster_name=cluster_name, **env) - # stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd, **kwargs) + cmd, env_vars = CommandFactory.cluster_setting_expect( + cluster_name=cluster_name, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, - cmd, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) if re.search(r"ERROR", str(stdout)): logger.error("Cluster modification failed, killing the execution") raise Exception(stdout) diff --git a/src/controller/couchbase_lib/_mixin_interface.py b/src/controller/couchbase_lib/_mixin_interface.py index 44059f2..6cc9a35 100644 --- a/src/controller/couchbase_lib/_mixin_interface.py +++ b/src/controller/couchbase_lib/_mixin_interface.py @@ -1,28 +1,30 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -# This is interface which must be included by all child class of Resource +# This is interface which must be included by all child class of Resource. 
 # This is child class of Resource and parent class of CouchbaseOperation
 # Therefore child class has to implement the method generate_environment_map
-# Mixin class(Class which is implementing this interface) created only in two cases:
+# A mixin class (a class implementing this interface) is created in only
+# two cases:
 # 1-> Bunch of methods belonging to one group
 # 2-> Environment data is common for all the commands
 # For case #1, it's about practice we should follow in software development
-# For case #2, if such kind of cases are there in which common env data is required in execution of multiple commands
-# then we club them in one class. Implement 'generate_environment_map' method and let it used by all methods defined in
+# For case #2, when the same environment data is required in the
+# execution of multiple commands, we group those commands in one class:
+# implement the 'generate_environment_map' method and let it be used by
+# all methods defined in the
 # class.
-# Other benefits are: Can call read_map to read each env data, Handling of attribute error while generating the env data
+# Other benefits: read_map can be called to inspect each env entry, and
+# attribute errors raised while generating the env data are handled.

 import logging

-
 logger = logging.getLogger(__name__)


 class MixinInterface(object):
-
     def generate_environment_map(self):
         raise Exception("You need to implement this method in child class")

@@ -36,6 +38,10 @@ def inner(*args, **kwargs):
             try:
                 return function(*args, **kwargs)
             except AttributeError as AE:
-                logger.debug("Failed to read value from schema objects. Error: {}".format(str(AE)))
+                logger.debug(
+                    "Failed to read value from schema objects. "
+                    "Error: {}".format(str(AE))
+                )
                 raise
+
         return inner
diff --git a/src/controller/couchbase_lib/_xdcr.py b/src/controller/couchbase_lib/_xdcr.py
index e26c7a2..1da19bb 100644
--- a/src/controller/couchbase_lib/_xdcr.py
+++ b/src/controller/couchbase_lib/_xdcr.py
@@ -1,58 +1,68 @@
 #
-# Copyright (c) 2020 by Delphix. All rights reserved.
+# Copyright (c) 2020-2023 by Delphix. All rights reserved.
 #
-#######################################################################################################################
+##############################################################################
 """
-This class contains methods for XDCR related operations
+This class contains methods for XDCR related operations.
This is child class of Resource and parent class of CouchbaseOperation """ import json -####################################################################################################################### + +############################################################################## import logging -from utils import utilities import re + from controller import helper_lib -from db_commands.commands import CommandFactory from controller.couchbase_lib._mixin_interface import MixinInterface from controller.resource_builder import Resource -from dlpx.virtualization.platform.exceptions import UserError +from db_commands.commands import CommandFactory from db_commands.constants import ENV_VAR_KEY +from dlpx.virtualization.platform.exceptions import UserError +from utils import utilities logger = logging.getLogger(__name__) class _XDCrMixin(Resource, MixinInterface): - def __init__(self, builder): super(_XDCrMixin, self).__init__(builder) @MixinInterface.check_attribute_error def generate_environment_map(self): - env = {'shell_path': self.repository.cb_shell_path, - 'source_hostname': self.source_config.couchbase_src_host, - 'source_port': self.source_config.couchbase_src_port, - 'source_username': self.parameters.xdcr_admin, - 'hostname': self.connection.environment.host.name, - 'port': self.parameters.couchbase_port, - 'username': self.parameters.couchbase_admin - } + env = { + "shell_path": self.repository.cb_shell_path, + "source_hostname": self.source_config.couchbase_src_host, + "source_port": self.source_config.couchbase_src_port, + "source_username": self.parameters.xdcr_admin, + "hostname": self.connection.environment.host.name, + "port": self.parameters.couchbase_port, + "username": self.parameters.couchbase_admin, + } # MixinInterface.read_map(env) return env def xdcr_delete(self, cluster_name): - logger.debug("XDCR deletion for cluster_name {} has started ".format( - cluster_name)) - kwargs = {ENV_VAR_KEY: { - 'source_password': self.parameters.xdcr_admin_password, - 'password': self.parameters.couchbase_admin_password}} + logger.debug( + "XDCR deletion for cluster_name {} has started ".format( + cluster_name + ) + ) + kwargs = { + ENV_VAR_KEY: { + "source_password": self.parameters.xdcr_admin_password, + "password": self.parameters.couchbase_admin_password, + } + } env = _XDCrMixin.generate_environment_map(self) env.update(kwargs[ENV_VAR_KEY]) cmd, env_vars = CommandFactory.xdcr_delete_expect( - cluster_name=cluster_name, **env) + cluster_name=cluster_name, **env + ) kwargs[ENV_VAR_KEY].update(env_vars) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, - cmd, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) if exit_code != 0: logger.error("XDCR Setup deletion failed") if stdout: @@ -65,29 +75,41 @@ def xdcr_delete(self, cluster_name): def xdcr_setup(self): logger.debug("Started XDCR set up ...") - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password, - 'password': self.parameters.couchbase_admin_password}} + kwargs = { + ENV_VAR_KEY: { + "source_password": self.parameters.xdcr_admin_password, + "password": self.parameters.couchbase_admin_password, + } + } env = _XDCrMixin.generate_environment_map(self) env.update(kwargs[ENV_VAR_KEY]) - cmd, env_vars = CommandFactory.xdcr_setup_expect(cluster_name=self.parameters.stg_cluster_name, **env) + cmd, env_vars = CommandFactory.xdcr_setup_expect( + cluster_name=self.parameters.stg_cluster_name, **env + ) 
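+        # A note on the *_expect factories used throughout this module:
+        # each returns the command string plus a dict of secrets, roughly
+        # (shape illustrative, values come from the parameters above):
+        #     cmd      -> the couchbase-cli invocation to run
+        #     env_vars -> {"password": ..., "source_password": ...}
+        # The secrets are merged into kwargs and handed to execute_expect
+        # as environment variables instead of being embedded in the
+        # command line.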
kwargs[ENV_VAR_KEY].update(env_vars) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, cmd, **kwargs) + utilities.execute_expect(self.connection, cmd, **kwargs) helper_lib.sleepForSecond(3) def xdcr_replicate(self, src, tgt): try: logger.debug("Started XDCR replication for bucket {}".format(src)) - kwargs = {ENV_VAR_KEY: {'source_password': self.parameters.xdcr_admin_password}} + kwargs = { + ENV_VAR_KEY: { + "source_password": self.parameters.xdcr_admin_password + } + } env = _XDCrMixin.generate_environment_map(self) env.update(kwargs[ENV_VAR_KEY]) cmd, env_vars = CommandFactory.xdcr_replicate_expect( source_bucket_name=src, target_bucket_name=tgt, cluster_name=self.parameters.stg_cluster_name, - **env + **env, ) kwargs[ENV_VAR_KEY].update(env_vars) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, cmd, **kwargs) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, cmd, **kwargs + ) if exit_code != 0: logger.debug("XDCR replication create failed") raise Exception(stdout) @@ -96,21 +118,18 @@ def xdcr_replicate(self, src, tgt): except Exception as e: logger.debug("XDCR error {}".format(str(e))) - def get_replication_uuid(self): # False for string logger.debug("Finding the replication uuid through host name") - is_ip_or_string = False - kwargs = {ENV_VAR_KEY: {}} cluster_name = self.parameters.stg_cluster_name - stdout, stderr, exit_code = self.run_couchbase_command('get_replication_uuid', - source_hostname=self.source_config.couchbase_src_host, - source_port=self.source_config.couchbase_src_port, - source_username=self.parameters.xdcr_admin, - source_password=self.parameters.xdcr_admin_password - ) - + stdout, stderr, exit_code = self.run_couchbase_command( + "get_replication_uuid", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.parameters.xdcr_admin, + source_password=self.parameters.xdcr_admin_password, + ) if exit_code != 0 or stdout is None or stdout == "": logger.debug("No Replication ID identified") @@ -132,26 +151,35 @@ def get_replication_uuid(self): if g: xdrc_cluster_name = g.group(1) uuid = re.match(r"\s*uuid:\s(\S*)", l.pop(0)).group(1) - hostname = re.match(r"\s*host name:\s(\S*):(\d*)", l.pop(0)).group(1) - user_name = l.pop(0) - uri = l.pop(0) + hostname = re.match( + r"\s*host name:\s(\S*):(\d*)", l.pop(0) + ).group(1) clusters[xdrc_cluster_name.lower()] = { "hostname": hostname, - "uuid": uuid + "uuid": uuid, } - # check if a cluster name is really connected to staging - just in case + # check if a cluster name is really connected to staging - + # just in case if cluster_name.lower() in clusters: - logger.debug("Cluster {} found in xdrc-setup output".format(cluster_name)) - # check if hostname returned from source match hostname or IP's of staging server + logger.debug( + "Cluster {} found in xdrc-setup output".format( + cluster_name + ) + ) + # check if hostname returned from source match hostname or + # IP's of staging server logger.debug(stg_hostname) logger.debug(clusters[cluster_name.lower()]["hostname"]) if stg_hostname == clusters[cluster_name.lower()]["hostname"]: # hostname matched - logger.debug("Cluster {} hostname {} is matching staging server hostname".format(cluster_name, stg_hostname)) + logger.debug( + "Cluster {} hostname {} is matching staging server " + "hostname".format(cluster_name, stg_hostname) + ) uuid = clusters[cluster_name.lower()]["uuid"] else: # check for IP's @@ -159,21 +187,38 @@ def 
get_replication_uuid(self): logger.debug(clusters[cluster_name.lower()]) - if clusters[cluster_name.lower()]["hostname"] in host_ips: # ip matched - logger.debug("Cluster {} IP {} is matching staging server IPs {}".format(cluster_name, clusters[cluster_name.lower()]["hostname"], host_ips)) + logger.debug( + "Cluster {} IP {} is matching staging server IPs " + "{}".format( + cluster_name, + clusters[cluster_name.lower()]["hostname"], + host_ips, + ) + ) uuid = clusters[cluster_name.lower()]["uuid"] else: - logger.debug("Can't confirm that xdrc-setup is matching staging") - raise UserError("XDRC Remote cluster {} on the source server is not pointed to staging server".format(cluster_name), - "Please check and delete remote cluster definition", clusters[cluster_name.lower()]) + logger.debug( + "Can't confirm that xdrc-setup is matching staging" + ) + raise UserError( + "XDRC Remote cluster {} on the source server " + "is not pointed to staging server".format( + cluster_name + ), + "Please check and delete remote cluster " + "definition", + clusters[cluster_name.lower()], + ) else: - logger.debug("Cluster {} configuration not found in XDCR of source".format(cluster_name)) + logger.debug( + "Cluster {} configuration not found in XDCR of " + "source".format(cluster_name) + ) return None - logger.debug("uuid for {} cluster : {}".format(uuid, cluster_name)) return uuid @@ -184,21 +229,21 @@ def get_replication_uuid(self): logger.warn("UUID is None. Not able to find any cluster") return None - def get_stream_id(self): logger.debug("Finding the stream id for provided cluster name") uuid = self.get_replication_uuid() if uuid is None: return None cluster_name = self.parameters.stg_cluster_name - - stdout, stderr, exit_code = self.run_couchbase_command('get_stream_id', - source_hostname=self.source_config.couchbase_src_host, - source_port=self.source_config.couchbase_src_port, - source_username=self.parameters.xdcr_admin, - source_password=self.parameters.xdcr_admin_password, - cluster_name=cluster_name - ) + + stdout, stderr, exit_code = self.run_couchbase_command( + "get_stream_id", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.parameters.xdcr_admin, + source_password=self.parameters.xdcr_admin_password, + cluster_name=cluster_name, + ) logger.debug(stdout) logger.debug(uuid) @@ -206,29 +251,35 @@ def get_stream_id(self): logger.debug("No stream ID identified") return None else: - stream_id = re.findall(r"(?<=stream id:\s){}.*".format(uuid), stdout) + stream_id = re.findall( + r"(?<=stream id:\s){}.*".format(uuid), stdout + ) logger.debug("Stream id found: {}".format(stream_id)) return stream_id - def delete_replication(self): logger.debug("Deleting replication...") stream_id = self.get_stream_id() cluster_name = self.parameters.stg_cluster_name - logger.debug("stream_id: {} and cluster_name : {}".format(stream_id, cluster_name)) + logger.debug( + "stream_id: {} and cluster_name : {}".format( + stream_id, cluster_name + ) + ) if stream_id is None or stream_id == "": logger.debug("No Replication is found to delete.") return False, cluster_name for id in stream_id: - stdout, stderr, exit_code = self.run_couchbase_command('delete_replication', - source_hostname=self.source_config.couchbase_src_host, - source_port=self.source_config.couchbase_src_port, - source_username=self.parameters.xdcr_admin, - source_password=self.parameters.xdcr_admin_password, - cluster_name=cluster_name, - id=id - ) + stdout, stderr, exit_code 
= self.run_couchbase_command( + "delete_replication", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.parameters.xdcr_admin, + source_password=self.parameters.xdcr_admin_password, + cluster_name=cluster_name, + id=id, + ) if exit_code != 0: logger.warn("stream_id: {} deletion failed".format(id)) @@ -238,33 +289,41 @@ def delete_replication(self): def get_ip(self): cmd = CommandFactory.get_ip_of_hostname() - stdout, stderr, exit_code = utilities.execute_bash(self.connection, cmd) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, cmd + ) logger.debug("IP is {}".format(stdout)) return stdout.split() - def setup_replication(self): uuid = self.get_replication_uuid() - + if uuid is None: logger.info("Setting up XDRC remote cluster") self.xdcr_setup() - streams_id = self.get_stream_id() if streams_id is not None: - alredy_replicated_buckets = [ m.group(1) for m in ( re.match(r'\S*/(\S*)/\S*', x) for x in streams_id ) if m ] + alredy_replicated_buckets = [ + m.group(1) + for m in (re.match(r"\S*/(\S*)/\S*", x) for x in streams_id) + if m + ] else: alredy_replicated_buckets = [] config_setting = self.staged_source.parameters.config_settings_prov if len(config_setting) > 0: - bucket_list = [config_bucket["bucketName"] for config_bucket in config_setting] + bucket_list = [ + config_bucket["bucketName"] for config_bucket in config_setting + ] else: bucket_details_source = self.source_bucket_list() - bucket_list = helper_lib.filter_bucket_name_from_json(bucket_details_source) + bucket_list = helper_lib.filter_bucket_name_from_json( + bucket_details_source + ) logger.debug("Bucket list to create replication for") logger.debug(bucket_list) @@ -276,14 +335,15 @@ def setup_replication(self): if int(self.repository.version.split(".")[0]) >= 7: logger.debug(f"bucket_name: {bkt_name}") stdout, _, _ = self.run_couchbase_command( - couchbase_command='get_scope_list_expect', + couchbase_command="get_scope_list_expect", base_path=helper_lib.get_base_directory_of_given_path( - self.repository.cb_shell_path), + self.repository.cb_shell_path + ), hostname=self.source_config.couchbase_src_host, port=self.source_config.couchbase_src_port, username=self.staged_source.parameters.xdcr_admin, - password=self.staged_source.parameters.xdcr_admin_password, - bucket_name=bkt_name + password=self.staged_source.parameters.xdcr_admin_password, # noqa E501 + bucket_name=bkt_name, ) json_scope_data = json.loads(stdout) logger.debug(f"json_scope_data={json_scope_data}") @@ -294,9 +354,11 @@ def setup_replication(self): # create scope self.run_couchbase_command( couchbase_command="create_scope_expect", - base_path=helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path), + base_path=helper_lib.get_base_directory_of_given_path( # noqa E501 + self.repository.cb_shell_path + ), scope_name=scope_name, - bucket_name=bkt_name + bucket_name=bkt_name, ) collection_list = s["collections"] for c in collection_list: @@ -306,13 +368,17 @@ def setup_replication(self): # create collection self.run_couchbase_command( couchbase_command="create_collection_expect", - base_path=helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path), + base_path=helper_lib.get_base_directory_of_given_path( # noqa E501 + self.repository.cb_shell_path + ), scope_name=scope_name, bucket_name=bkt_name, - collection_name=collection_name + collection_name=collection_name, ) logger.debug("Creating replication for 
{}".format(bkt_name)) self.xdcr_replicate(bkt_name, bkt_name) else: - logger.debug("Bucket {} replication already configured".format(bkt_name)) \ No newline at end of file + logger.debug( + "Bucket {} replication already configured".format(bkt_name) + ) diff --git a/src/controller/couchbase_operation.py b/src/controller/couchbase_operation.py index 64f1a2a..f9eea72 100644 --- a/src/controller/couchbase_operation.py +++ b/src/controller/couchbase_operation.py @@ -1,52 +1,59 @@ # -# Copyright (c) 2020,2021 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This class defines methods for couchbase operations. Parent classes are: _BucketMixin, _ClusterMixin, - _XDCrMixin, _CBBackupMixin. Modules name is explaining about the operations for which module is created for. -The constructor of this class expects a `builder` on which each database operation will be performed -Commands are defined for each method in module commands.py. To perform any delphix operation we need to create -the object of this class. This class is single connector between other modules and `controller` package. +This class defines methods for couchbase operations. Parent classes are: +_BucketMixin, _ClusterMixin, + _XDCrMixin, _CBBackupMixin. Modules name is explaining about the operations + for which module is created for. +The constructor of this class expects a `builder` on which each database +operation will be performed +Commands are defined for each method in module commands.py. To perform any +delphix operation we need to create +the object of this class. 
This class is single connector between other modules +and `controller` package """ -####################################################################################################################### +############################################################################## -import re +import json import logging import os -import sys -import json -import inspect - -from dlpx.virtualization.platform import Status +import re +import time -from internal_exceptions.database_exceptions import CouchbaseServicesError -from utils import utilities -from controller.resource_builder import Resource from controller import helper_lib from controller.couchbase_lib._bucket import _BucketMixin +from controller.couchbase_lib._cb_backup import _CBBackupMixin from controller.couchbase_lib._cluster import _ClusterMixin from controller.couchbase_lib._xdcr import _XDCrMixin -from controller.couchbase_lib._cb_backup import _CBBackupMixin -from db_commands.commands import CommandFactory -from db_commands.constants import ENV_VAR_KEY, StatusIsActive, DELPHIX_HIDDEN_FOLDER, CONFIG_FILE_NAME from controller.helper_lib import remap_bucket_json -import time +from controller.resource_builder import Resource from db_commands import constants - +from db_commands.commands import CommandFactory +from db_commands.constants import CONFIG_FILE_NAME +from db_commands.constants import DELPHIX_HIDDEN_FOLDER +from db_commands.constants import StatusIsActive +from dlpx.virtualization.platform import Status from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.database_exceptions import CouchbaseServicesError +from utils import utilities logger = logging.getLogger(__name__) -class CouchbaseOperation(_BucketMixin, _ClusterMixin, _XDCrMixin, _CBBackupMixin): - +class CouchbaseOperation( + _BucketMixin, _ClusterMixin, _XDCrMixin, _CBBackupMixin +): def __init__(self, builder, node_connection=None): """ - Main class through which other modules can run databases operations on provided parameters - :param builder: builder object which contains all necessary parameters on which db methods will be executed - :param node_connection: connection to node, if this is not a default one + Main class through which other modules can run databases operations on + provided parameters + :param builder: builder object which contains all necessary parameters + on which db methods will be executed + :param node_connection: connection to node, if this is not a default + one """ logger.debug("Object initialization") @@ -56,11 +63,12 @@ def __init__(self, builder, node_connection=None): if node_connection is not None: self.connection = node_connection - self.__need_sudo = helper_lib.need_sudo(self.connection, self.repository.uid, self.repository.gid) + self.__need_sudo = helper_lib.need_sudo( + self.connection, self.repository.uid, self.repository.gid + ) self.__uid = self.repository.uid self.__gid = self.repository.gid - @property def need_sudo(self): return self.__need_sudo @@ -74,21 +82,21 @@ def gid(self): return self.__gid def run_couchbase_command(self, couchbase_command, **kwargs): - logger.debug('run_couchbase_command') - logger.debug('couchbase_command: {}'.format(couchbase_command)) + logger.debug("run_couchbase_command") + logger.debug("couchbase_command: {}".format(couchbase_command)) if "password" in kwargs: - password = kwargs.get('password') + password = kwargs.get("password") else: password = self.parameters.couchbase_admin_password kwargs["password"] = password if "username" in kwargs: - username = 
kwargs.pop('username') + username = kwargs.pop("username") else: username = self.parameters.couchbase_admin if "hostname" in kwargs: - hostname = kwargs.pop('hostname') + hostname = kwargs.pop("hostname") else: hostname = self.connection.environment.host.name @@ -101,89 +109,104 @@ def run_couchbase_command(self, couchbase_command, **kwargs): if "newpass" in kwargs: # for setting a new password - env["newpass"] = kwargs.get('newpass') + env["newpass"] = kwargs.get("newpass") if "source_password" in kwargs: - env["source_password"] = kwargs.get('source_password') - - autoparams = [ "shell_path", "install_path", "username", "port", "sudo", "uid", "hostname"] + env["source_password"] = kwargs.get("source_password") + + autoparams = [ + "shell_path", + "install_path", + "username", + "port", + "sudo", + "uid", + "hostname", + ] new_kwargs = {k: v for k, v in kwargs.items() if k not in autoparams} - if couchbase_command not in ["get_server_list", - "couchbase_server_info", - "cb_backup_full", - "build_index", - "check_index_build", - "get_source_bucket_list", - "get_replication_uuid", - "get_stream_id", - "delete_replication", - "node_init", - "get_indexes_name", - "rename_cluster", - "server_add", - "rebalance", - "get_scope_list_expect", - "change_cluster_password", - "create_scope_expect", - "create_collection_expect"]: + if couchbase_command not in [ + "get_server_list", + "couchbase_server_info", + "cb_backup_full", + "build_index", + "check_index_build", + "get_source_bucket_list", + "get_replication_uuid", + "get_stream_id", + "delete_replication", + "node_init", + "get_indexes_name", + "rename_cluster", + "server_add", + "rebalance", + "get_scope_list_expect", + "change_cluster_password", + "create_scope_expect", + "create_collection_expect", + ]: method_to_call = getattr(CommandFactory, couchbase_command) - command = method_to_call(shell_path=self.repository.cb_shell_path, - install_path=self.repository.cb_install_path, - username=username, - port=port, - sudo=self.need_sudo, - uid=self.uid, - hostname=hostname, - **new_kwargs) + command = method_to_call( + shell_path=self.repository.cb_shell_path, + install_path=self.repository.cb_install_path, + username=username, + port=port, + sudo=self.need_sudo, + uid=self.uid, + hostname=hostname, + **new_kwargs, + ) logger.debug("couchbase command to run: {}".format(command)) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, command, environment_vars=env) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, command, environment_vars=env + ) else: - couchbase_command = couchbase_command+"_expect" if not couchbase_command.endswith("_expect") else couchbase_command - logger.debug('new_couchbase_command: {}'.format(couchbase_command)) + couchbase_command = ( + couchbase_command + "_expect" + if not couchbase_command.endswith("_expect") + else couchbase_command + ) + logger.debug("new_couchbase_command: {}".format(couchbase_command)) method_to_call = getattr(CommandFactory, couchbase_command) - command, env_vars = method_to_call(shell_path=self.repository.cb_shell_path, - install_path=self.repository.cb_install_path, - username=username, - port=port, - sudo=self.need_sudo, - uid=self.uid, - hostname=hostname, - **new_kwargs - ) + command, env_vars = method_to_call( + shell_path=self.repository.cb_shell_path, + install_path=self.repository.cb_install_path, + username=username, + port=port, + sudo=self.need_sudo, + uid=self.uid, + hostname=hostname, + **new_kwargs, + ) env.update(env_vars) 
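+            # Dispatch summary for this method: command names in the list
+            # above are resolved to CommandFactory.<name>_expect, which also
+            # returns env_vars for secrets, and run via execute_expect
+            # (presumably to drive interactive prompts); every other command
+            # uses the plain CommandFactory.<name> form and execute_bash.
+            # Both paths pass credentials through environment_vars rather
+            # than the command string.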
logger.debug("couchbase command to run: {}".format(command)) - stdout, stderr, exit_code = utilities.execute_expect(self.connection, - command, - environment_vars=env) + stdout, stderr, exit_code = utilities.execute_expect( + self.connection, command, environment_vars=env + ) return [stdout, stderr, exit_code] - def run_os_command(self, os_command, **kwargs): - - method_to_call = getattr(CommandFactory, os_command) - command = method_to_call(sudo=self.need_sudo, - uid=self.uid, - **kwargs) + command = method_to_call(sudo=self.need_sudo, uid=self.uid, **kwargs) logger.debug("os command to run: {}".format(command)) - stdout, stderr, exit_code = utilities.execute_bash(self.connection, command) + stdout, stderr, exit_code = utilities.execute_bash( + self.connection, command + ) return [stdout, stderr, exit_code] - def restart_couchbase(self, provision=False): """stop the couchbase service and then start again""" self.stop_couchbase() self.start_couchbase(provision) def start_couchbase(self, provision=False, no_wait=False): - """ start the couchbase service""" + """start the couchbase service""" logger.debug("Starting couchbase services") - self.run_couchbase_command('start_couchbase') + self.run_couchbase_command("start_couchbase") server_status = Status.INACTIVE helper_lib.sleepForSecond(10) @@ -192,26 +215,29 @@ def start_couchbase(self, provision=False, no_wait=False): logger.debug("no wait - leaving start procedure") return - #Waiting for one minute to start the server + # Waiting for one minute to start the server # for prox to investigate end_time = time.time() + 3660 - #break the loop either end_time is exceeding from 1 minute or server is successfully started + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started while time.time() < end_time and server_status == Status.INACTIVE: - helper_lib.sleepForSecond(1) # waiting for 1 second - server_status = self.status(provision) # fetching status + helper_lib.sleepForSecond(1) # waiting for 1 second + server_status = self.status(provision) # fetching status logger.debug("server status {}".format(server_status)) - # if the server is not running even in 60 seconds, then stop the further execution + # if the server is not running even in 60 seconds, then stop the + # further execution if server_status == Status.INACTIVE: - raise CouchbaseServicesError("Have failed to start couchbase server") - + raise CouchbaseServicesError( + "Have failed to start couchbase server" + ) def stop_couchbase(self): - """ stop the couchbase service""" + """stop the couchbase service""" try: logger.debug("Stopping couchbase services") - self.run_couchbase_command('stop_couchbase') + self.run_couchbase_command("stop_couchbase") end_time = time.time() + 60 server_status = Status.ACTIVE @@ -219,92 +245,86 @@ def stop_couchbase(self): helper_lib.sleepForSecond(1) # waiting for 1 second server_status = self.status() # fetching status - - logger.debug("Leaving stop loop") + logger.debug("Leaving stop loop") if server_status == Status.ACTIVE: - logger.debug("Have failed to stop couchbase server") - raise CouchbaseServicesError("Have failed to stop couchbase server") + logger.debug("Have failed to stop couchbase server") + raise CouchbaseServicesError( + "Have failed to stop couchbase server" + ) except CouchbaseServicesError as err: - logger.debug("Error: {}".format(err)) + logger.debug("Error: {}".format(err)) raise err except Exception as err: - logger.debug("Exception Error: {}".format(err)) + logger.debug("Exception Error: 
{}".format(err)) if self.status() == Status.INACTIVE: - logger.debug("Seems like couchbase service is not running. {}".format(str(err))) + logger.debug( + "Seems like couchbase service is not running. {}".format( + str(err) + ) + ) else: raise CouchbaseServicesError(str(err)) - def ip_file_name(self): - - ip_file = "{}/../var/lib/couchbase/ip".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) - - # check_file_command = CommandFactory.check_file(ip_file, sudo=self.need_sudo, uid=self.uid) - # check_ip_file, check_ip_file_err, exit_code = utilities.execute_bash(self.connection, check_file_command, callback_func=self.ignore_err) + ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) check_ip_file, check_ip_file_err, exit_code = self.run_os_command( - os_command='check_file', - file_path=ip_file - ) - - if not (exit_code == 0 and "Found" in check_ip_file): - ip_file = "{}/../var/lib/couchbase/ip_start".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + os_command="check_file", file_path=ip_file + ) + if not (exit_code == 0 and "Found" in check_ip_file): + ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) logger.debug("IP file is {}".format(ip_file)) return ip_file - def staging_bootstrap_status(self): logger.debug("staging_bootstrap_status") try: - server_info_out, std_err, exit_code = self.run_couchbase_command( - couchbase_command='couchbase_server_info', - hostname='127.0.0.1' - ) - - - # kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - # username = self.parameters.couchbase_admin - + couchbase_command="couchbase_server_info", hostname="127.0.0.1" + ) - # command = CommandFactory.server_info(self.repository.cb_shell_path, '127.0.0.1', - # self.parameters.couchbase_port, username) - # logger.debug("Status command {}".format(command)) - # server_info, std_err, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + # logger.debug("Status output: {}".format(server_info)) - #logger.debug("Status output: {}".format(server_info)) - - status = helper_lib.get_value_of_key_from_json(server_info_out, 'status') + status = helper_lib.get_value_of_key_from_json( + server_info_out, "status" + ) if status.strip() == StatusIsActive: logger.debug("Server status is: {}".format("ACTIVE")) return Status.ACTIVE else: logger.debug("Server status is: {}".format("INACTIVE")) return Status.INACTIVE - - except Exception as error: # TODO - # rewrite it + # rewrite it logger.debug("Exception: {}".format(str(error))) if re.search("Unable to connect to host at", str(error)): logger.debug("Couchbase service is not running") return Status.INACTIVE def status(self, provision=False): - """Check the server status. Healthy or Warmup could be one status if the server is running""" - + """Check the server status. 
Healthy or Warmup could be one status + if the server is running""" + logger.debug("checking status") logger.debug(self.connection) try: - if provision==True: + if provision: username = self.snapshot.couchbase_admin password = self.snapshot.couchbase_admin_password @@ -312,51 +332,51 @@ def status(self, provision=False): password = self.parameters.couchbase_admin_password username = self.parameters.couchbase_admin - - #TODO - # Check if there is a mount point - even a started Couchbase without mountpoint means VDB - # is down or corrupted - # Couchbase with config file can start and recreate empty buckets if there is no mount point - # for future version - maybe whole /opt/couchbase/var directory should be virtualized like for Docker - # to avoid problems + # TODO + # Check if there is a mount point - even a started Couchbase + # without mountpoint means VDB + # is down or corrupted + # Couchbase with config file can start and recreate empty buckets + # if there is no mount point + # for future version - maybe whole /opt/couchbase/var directory + # should be virtualized like for Docker + # to avoid problems logger.debug("Checking for mount points") - mount_point_state = helper_lib.check_server_is_used(self.connection, self.parameters.mount_path) + mount_point_state = helper_lib.check_server_is_used( + self.connection, self.parameters.mount_path + ) logger.debug("Status of mount point {}".format(mount_point_state)) if mount_point_state == Status.INACTIVE: - logger.error("There is no mount point VDB is down regardless Couchbase status") + logger.error( + "There is no mount point VDB is down regardless " + "Couchbase status" + ) return Status.INACTIVE - ip_file = self.ip_file_name() - # read_file_command = CommandFactory.cat(ip_file, sudo=self.need_sudo, uid=self.uid) - # logger.debug("read file command {}".format(read_file_command)) - - # read_ip_file, std_err, exit_code = utilities.execute_bash(self.connection, read_file_command) - # logger.debug("read file {}".format(read_ip_file)) - - read_ip_file, std_err, exit_code = self.run_os_command( - os_command='cat', - path=ip_file - ) + os_command="cat", path=ip_file + ) server_info, std_err, exit_code = self.run_couchbase_command( - couchbase_command='get_server_list', - hostname='127.0.0.1', - username=username, - password=password) - - #status = helper_lib.get_value_of_key_from_json(server_info, 'status') + couchbase_command="get_server_list", + hostname="127.0.0.1", + username=username, + password=password, + ) - if self.dSource == False and self.parameters.node_list is not None and len(self.parameters.node_list) > 0: + if ( + not self.dSource + and self.parameters.node_list is not None + and len(self.parameters.node_list) > 0 + ): multinode = True else: multinode = False - for line in server_info.split("\n"): logger.debug("Checking line: {}".format(line)) if read_ip_file in line: @@ -367,12 +387,13 @@ def status(self, provision=False): if "healthy" in line: logger.debug("We have healthy active node") return Status.ACTIVE - + if multinode and "warmup" in line: - logger.debug("We have starting mode in multinode cluster") + logger.debug( + "We have starting mode in multinode cluster" + ) return Status.ACTIVE - return Status.INACTIVE except Exception as error: @@ -390,47 +411,35 @@ def make_directory(self, directory_path, force_env_user=False): :return: None """ - #TODO + # TODO # add error handling for OS errors logger.debug("Creating Directory {} ".format(directory_path)) - if force_env_user: - need_sudo = False - else: - need_sudo = 
self.need_sudo - command_output, std_err, exit_code = self.run_os_command( - os_command='make_directory', - directory_path=directory_path - ) - - # command = CommandFactory.make_directory(directory_path, need_sudo, self.uid) - # utilities.execute_bash(self.connection, command) - - logger.debug("Changing permission of directory path {}".format(directory_path)) + os_command="make_directory", directory_path=directory_path + ) - # command = CommandFactory.change_permission(directory_path, need_sudo, self.uid) - # utilities.execute_bash(self.connection, command) + logger.debug( + "Changing permission of directory path {}".format(directory_path) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='change_permission', - path=directory_path - ) + os_command="change_permission", path=directory_path + ) logger.debug("Changed the permission of directory") def create_config_dir(self): """create and return the hidden folder directory with name 'delphix'""" - #TODO + # TODO # clean up error handling logger.debug("Finding toolkit Path...") bin_directory, std_err, exit_code = self.run_os_command( - os_command='get_dlpx_bin' - ) - + os_command="get_dlpx_bin" + ) if bin_directory is None or bin_directory == "": raise Exception("Failed to find the toolkit directory") @@ -445,147 +454,134 @@ def create_config_dir(self): if not helper_lib.check_dir_present(self.connection, dir_name): self.make_directory(dir_name, force_env_user=True) return dir_name - def source_bucket_list(self): """ - return all buckets exist on source server. Also contains the information bucketType, ramQuota, ramUsed, + return all buckets exist on source server. Also contains the + information bucketType, ramQuota, ramUsed, numReplicas :return: """ # See the bucket list on source server - logger.debug("Collecting bucket list information present on source server ") - - # env = {ENV_VAR_KEY: {'password': self.staged_source.parameters.xdcr_admin_password}} - # command = CommandFactory.get_source_bucket_list(self.repository.cb_shell_path, - # self.source_config.couchbase_src_host, - # self.source_config.couchbase_src_port, - # self.staged_source.parameters.xdcr_admin) - # bucket_list, error, exit_code = utilities.execute_bash(self.connection, command_name=command, **env) + logger.debug( + "Collecting bucket list information present on source server " + ) bucket_list, error, exit_code = self.run_couchbase_command( - couchbase_command='get_source_bucket_list', - source_hostname=self.source_config.couchbase_src_host, - source_port=self.source_config.couchbase_src_port, - source_username=self.staged_source.parameters.xdcr_admin, - password=self.staged_source.parameters.xdcr_admin_password - ) - + couchbase_command="get_source_bucket_list", + source_hostname=self.source_config.couchbase_src_host, + source_port=self.source_config.couchbase_src_port, + source_username=self.staged_source.parameters.xdcr_admin, + password=self.staged_source.parameters.xdcr_admin_password, + ) if bucket_list == "[]" or bucket_list is None: return [] else: logger.debug("clean up json") - bucket_list = bucket_list.replace("u'","'") - bucket_list = bucket_list.replace("'", "\"") - bucket_list = bucket_list.replace("True", "\"True\"") - bucket_list = bucket_list.replace("False", "\"False\"") + bucket_list = bucket_list.replace("u'", "'") + bucket_list = bucket_list.replace("'", '"') + bucket_list = bucket_list.replace("True", '"True"') + bucket_list = bucket_list.replace("False", '"False"') logger.debug("parse json") bucket_list_dict = 
json.loads(bucket_list) - bucket_list_dict = list(map(helper_lib.remap_bucket_json, bucket_list_dict)) + bucket_list_dict = list( + map(helper_lib.remap_bucket_json, bucket_list_dict) + ) logger.debug("Source Bucket Information {}".format(bucket_list_dict)) return bucket_list_dict - def get_backup_date(self, x): - w = x.replace('{}/{}'.format(self.parameters.couchbase_bak_loc, self.parameters.couchbase_bak_repo),'') - g = re.match(r'/(.+?)/.*',w) + w = x.replace( + "{}/{}".format( + self.parameters.couchbase_bak_loc, + self.parameters.couchbase_bak_repo, + ), + "", + ) + g = re.match(r"/(.+?)/.*", w) if g: return g.group(1) else: - return '' + return "" def source_bucket_list_offline(self): """ - This function will be used in CB backup manager. It will return the same output as by - source_bucket_list method. To avoid source/production server dependency this function will be used. - In a file, put all the bucket related information of source server. This function will cat and return the - contents of that file. It is useful for cb backup manager ingestion mechanism + This function will be used in CB backup manager. It will return the + same output as by + source_bucket_list method. To avoid source/production server dependency + this function will be used. + In a file, put all the bucket related information of source server. + This function will cat and return the + contents of that file. It is useful for cb backup manager ingestion + mechanism FilePath : /couchbase_src_bucket_info In this file add output of below command: - /opt/couchbase/bin/couchbase-cli bucket-list --cluster :8091 --username $username --password $pass - From here all source bucket list information we can fetch and other related data of this bucket should be placed + /opt/couchbase/bin/couchbase-cli bucket-list --cluster + :8091 --username $username --password $pass + From here all source bucket list information we can fetch and other + related data of this bucket should be placed at backup location. - :param filename: filename(couchbase_src_bucket_info.cfg) where bucket information is kept. + :param filename: filename(couchbase_src_bucket_info.cfg) where bucket + information is kept. 
:return: bucket list information """ - - logger.debug(self.parameters.couchbase_bak_loc) logger.debug(self.parameters.couchbase_bak_repo) - - # command = CommandFactory.get_backup_bucket_list(os.path.join(self.parameters.couchbase_bak_loc, self.parameters.couchbase_bak_repo), self.need_sudo, self.uid) - # logger.debug("Bucket search command: {}".format(command)) - # bucket_list, error, exit_code = utilities.execute_bash(self.connection, command_name=command, callback_func=self.ignore_err) - - bucket_list, error, exit_code = self.run_os_command( - os_command='get_backup_bucket_list', - path=os.path.join(self.parameters.couchbase_bak_loc, self.parameters.couchbase_bak_repo) - ) - + os_command="get_backup_bucket_list", + path=os.path.join( + self.parameters.couchbase_bak_loc, + self.parameters.couchbase_bak_repo, + ), + ) - backup_list = bucket_list.split('\n') + backup_list = bucket_list.split("\n") logger.debug("Bucket search output: {}".format(backup_list)) date_list = list(map(self.get_backup_date, backup_list)) date_list.sort() logger.debug("date list: {}".format(date_list)) - files_to_process = [ x for x in backup_list if date_list[-1] in x ] + files_to_process = [x for x in backup_list if date_list[-1] in x] logger.debug(files_to_process) bucket_list_dict = [] for f in files_to_process: - # command = CommandFactory.cat(f, self.need_sudo, self.uid) - # logger.debug("cat command: {}".format(command)) - # bucket_file_content, error, exit_code = utilities.execute_bash(self.connection, command_name=command) bucket_file_content, error, exit_code = self.run_os_command( - os_command='cat', - path=f - ) - + os_command="cat", path=f + ) logger.debug(bucket_file_content) bucket_json = json.loads(bucket_file_content) bucket_list_dict.append(remap_bucket_json(bucket_json)) - - # command = CommandFactory.read_file(filename) - # bucket_list, error, exit_code = utilities.execute_bash(self.connection, command) - # if bucket_list == "" or bucket_list is None: - # return [] - # bucket_list = bucket_list.split("\n") logger.debug("Bucket search output: {}".format(bucket_list_dict)) return bucket_list_dict def node_init(self, nodeno=1): """ - This method initializes couchbase server node. Where user sets different required paths + This method initializes couchbase server node. Where user sets + different required paths :return: None """ logger.debug("Initializing the NODE") command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='node_init', - data_path="{}/data_{}".format(self.parameters.mount_path, nodeno) - ) - - # kwargs = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - # command = CommandFactory.node_init(self.repository.cb_shell_path, self.parameters.couchbase_port, - # self.parameters.couchbase_admin, ) - # logger.debug("Node init: {}".format(command)) - # command_output, std_err, exit_code = utilities.execute_bash(self.connection, command, **kwargs) + couchbase_command="node_init", + data_path="{}/data_{}".format(self.parameters.mount_path, nodeno), + ) logger.debug("Command Output {} ".format(command_output)) def get_config_directory(self): """ - Hidden directory path inside mount directory will be returned. which is created in method create_config_dir + Hidden directory path inside mount directory will be returned. 
which + is created in method create_config_dir :return: Return the config directory """ @@ -601,7 +597,6 @@ def get_config_file_path(self): logger.debug("Config filepath is: {}".format(config_file_path)) return config_file_path - # Defined for future updates def get_indexes_definition(self): # by default take from staging but later take from source @@ -620,116 +615,124 @@ def get_indexes_definition(self): else: hostname = self.connection.environment.host.name - # cmd = CommandFactory.get_indexes_name(hostname, port, user) - # logger.debug("command for indexes is : {}".format(cmd)) - # command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, **env) - command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='get_indexes_name', - hostname=hostname, - username=user, - port=port, - password=password - ) - + couchbase_command="get_indexes_name", + hostname=hostname, + username=user, + port=port, + password=password, + ) logger.debug("Indexes are {}".format(command_output)) indexes_raw = json.loads(command_output) indexes = [] - logger.debug("dSource type for indexes: {}".format(self.parameters.d_source_type)) + logger.debug( + "dSource type for indexes: {}".format( + self.parameters.d_source_type + ) + ) if self.parameters.d_source_type == constants.CBBKPMGR: logger.debug("Only build for backup ingestion") buckets = {} - for i in indexes_raw['indexes']: - bucket_name = i['bucket'] - index_name = i['indexName'] - scope_name = i['scope'] if 'scope' in i.keys() else '_default' - collection_name = i[ - 'collection'] if 'collection' in i.keys() else '_default' + for i in indexes_raw["indexes"]: + bucket_name = i["bucket"] + index_name = i["indexName"] + scope_name = i["scope"] if "scope" in i.keys() else "_default" + collection_name = ( + i["collection"] if "collection" in i.keys() else "_default" + ) if bucket_name not in buckets: buckets[bucket_name] = {} if scope_name not in buckets[bucket_name].keys(): buckets[bucket_name][scope_name] = {} - if collection_name not in buckets[bucket_name][ - scope_name].keys(): + if ( + collection_name + not in buckets[bucket_name][scope_name].keys() + ): buckets[bucket_name][scope_name][collection_name] = [] buckets[bucket_name][scope_name][collection_name].append( - index_name) + index_name + ) for bucket_name in buckets.keys(): for scope_name in buckets[bucket_name].keys(): - for collection_name in buckets[bucket_name][scope_name].keys(): + for collection_name in buckets[bucket_name][ + scope_name + ].keys(): ind = buckets[bucket_name][scope_name][collection_name] - if collection_name == "_default" and scope_name == "_default": - ind_def = f'build index on `{bucket_name}` (`{"`,`".join(ind)}`)' + if ( + collection_name == "_default" + and scope_name == "_default" + ): + ind_def = ( + f"build index on `{bucket_name}` " + f'(`{"`,`".join(ind)}`)' + ) else: - ind_def = f'build index on `{bucket_name}`.{scope_name}.{collection_name} (`{"`,`".join(ind)}`)' + ind_def = ( + f"build index on `{bucket_name}`." 
+ f"{scope_name}.{collection_name} " + f'(`{"`,`".join(ind)}`)' + ) indexes.append(ind_def) else: # full definition for replication - for i in indexes_raw['indexes']: - indexes.append(i['definition'].replace('defer_build":true','defer_build":false')) + for i in indexes_raw["indexes"]: + indexes.append( + i["definition"].replace( + 'defer_build":true', 'defer_build":false' + ) + ) return indexes # Defined for future updates def build_index(self, index_def): command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='build_index', - base_path=helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path), - index_def=index_def - ) - - - # env = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - # cmd = CommandFactory.build_index(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path),self.connection.environment.host.name, self.parameters.couchbase_port, self.parameters.couchbase_admin, index_def) - # logger.debug("building index cmd: {}".format(cmd)) - # command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, **env) + couchbase_command="build_index", + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + index_def=index_def, + ) logger.debug("command_output is {}".format(command_output)) return command_output - def check_index_build(self): - # env = {ENV_VAR_KEY: {'password': self.parameters.couchbase_admin_password}} - # cmd = CommandFactory.check_index_build(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path),self.connection.environment.host.name, self.parameters.couchbase_port, self.parameters.couchbase_admin) - # logger.debug("check_index_build cmd: {}".format(cmd)) - # set timeout to 12 hours - end_time = time.time() + 3660*12 + end_time = time.time() + 3660 * 12 tobuild = 1 - #break the loop either end_time is exceeding from 1 minute or server is successfully started + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started while time.time() < end_time and tobuild != 0: command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='check_index_build', - base_path=helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path) - ) - + couchbase_command="check_index_build", + base_path=helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ), + ) - #command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, **env) logger.debug("command_output is {}".format(command_output)) logger.debug("std_err is {}".format(std_err)) logger.debug("exit_code is {}".format(exit_code)) try: command_output_dict = json.loads(command_output) logger.debug("dict {}".format(command_output_dict)) - tobuild = command_output_dict['results'][0]['unbuilt'] + tobuild = command_output_dict["results"][0]["unbuilt"] logger.debug("to_build is {}".format(tobuild)) - helper_lib.sleepForSecond(30) # waiting for 1 second + helper_lib.sleepForSecond(30) # waiting for 1 second except Exception as e: logger.debug(str(e)) - - - def save_config(self, what, nodeno=1): # TODO @@ -738,198 +741,276 @@ def save_config(self, what, nodeno=1): logger.debug("start save_config") targetdir = self.get_config_directory() - target_config_filename = os.path.join(targetdir,"config.dat_{}".format(nodeno)) - target_local_filename = os.path.join(targetdir,"local.ini_{}".format(nodeno)) - target_encryption_filename = 
os.path.join(targetdir,"encrypted_data_keys_{}".format(nodeno)) + target_config_filename = os.path.join( + targetdir, "config.dat_{}".format(nodeno) + ) + target_local_filename = os.path.join( + targetdir, "local.ini_{}".format(nodeno) + ) + target_encryption_filename = os.path.join( + targetdir, "encrypted_data_keys_{}".format(nodeno) + ) if nodeno == 1 or int(self.repository.version.split(".")[0]) >= 7: - ip_file = "{}/../var/lib/couchbase/ip".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) - target_ip_filename = os.path.join(targetdir,"ip_{}".format(nodeno)) + ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + target_ip_filename = os.path.join( + targetdir, "ip_{}".format(nodeno) + ) output, err, exit_code = self.run_os_command( - os_command='check_file', - file_path=ip_file + os_command="check_file", file_path=ip_file ) if exit_code != 0 and "Found" not in output: ip_file = "{}/../var/lib/couchbase/ip_start".format( helper_lib.get_base_directory_of_given_path( - self.repository.cb_shell_path)) - target_ip_filename = os.path.join(targetdir, - "ip_start_{}".format(nodeno)) + self.repository.cb_shell_path + ) + ) + target_ip_filename = os.path.join( + targetdir, "ip_start_{}".format(nodeno) + ) else: - ip_file = "{}/../var/lib/couchbase/ip_start".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) - target_ip_filename = os.path.join(targetdir,"ip_start_{}".format(nodeno)) + ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + target_ip_filename = os.path.join( + targetdir, "ip_start_{}".format(nodeno) + ) - filename = "{}/../var/lib/couchbase/config/config.dat".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + filename = "{}/../var/lib/couchbase/config/config.dat".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=filename, - trgname=target_config_filename - ) + os_command="os_cp", + srcname=filename, + trgname=target_config_filename, + ) - logger.debug("save config.dat cp - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "save config.dat cp - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) if exit_code != 0: - raise UserError("Error saving configuration file: config.dat", "Check sudo or user privileges to read Couchbase config.dat file", std_err) - + raise UserError( + "Error saving configuration file: config.dat", + "Check sudo or user privileges to read Couchbase config.dat " + "file", + std_err, + ) # encryption data keys may not exist on Community edition - filename = "{}/../var/lib/couchbase/config/encrypted_data_keys".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + filename = "{}/../var/lib/couchbase/config/encrypted_data_keys".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) - check_encrypted_data_keys, check_ip_file_err, exit_code = self.run_os_command( - os_command='check_file', - file_path=filename - ) + ( + check_encrypted_data_keys, + check_ip_file_err, + exit_code, + ) = self.run_os_command(os_command="check_file", file_path=filename) - if exit_code == 0 and "Found" in check_encrypted_data_keys: - # 
cmd = CommandFactory.os_cp(filename, target_encryption_filename, self.need_sudo, self.uid) - # logger.debug("save encrypted_data_keys cp: {}".format(cmd)) - # command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd) + if exit_code == 0 and "Found" in check_encrypted_data_keys: command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=filename, - trgname=target_encryption_filename - ) + os_command="os_cp", + srcname=filename, + trgname=target_encryption_filename, + ) - logger.debug("save encrypted_data_keys.dat cp - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "save encrypted_data_keys.dat cp - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) if exit_code != 0: - raise UserError("Error saving configuration file: encrypted_data_keys", "Check sudo or user privileges to read Couchbase encrypted_data_keys file", std_err) - - - - + raise UserError( + "Error saving configuration file: encrypted_data_keys", + "Check sudo or user privileges to read Couchbase " + "encrypted_data_keys file", + std_err, + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=ip_file, - trgname=target_ip_filename - ) - + os_command="os_cp", srcname=ip_file, trgname=target_ip_filename + ) - logger.debug("save {} - exit_code: {} stdout: {} std_err: {}".format(ip_file, exit_code, command_output, std_err)) + logger.debug( + "save {} - exit_code: {} stdout: {} std_err: {}".format( + ip_file, exit_code, command_output, std_err + ) + ) if exit_code != 0: - raise UserError("Error saving configuration file: {}".format(ip_file), "Check sudo or user privileges to read Couchbase {} file".format(ip_file), std_err) - + raise UserError( + "Error saving configuration file: {}".format(ip_file), + "Check sudo or user privileges to read Couchbase " + "{} file".format(ip_file), + std_err, + ) - filename = "{}/../etc/couchdb/local.ini".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + filename = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=filename, - trgname=target_local_filename - ) + os_command="os_cp", srcname=filename, trgname=target_local_filename + ) - logger.debug("save local.ini cp - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "save local.ini cp - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) if exit_code != 0: - raise UserError("Error saving configuration file: local.ini", "Check sudo or user privileges to read Couchbase local.ini file", std_err) + raise UserError( + "Error saving configuration file: local.ini", + "Check sudo or user privileges to read Couchbase local.ini " + "file", + std_err, + ) if int(self.repository.version.split(".")[0]) >= 7: - chronicle_target_dir = os.path.join(targetdir, f"chronicle_{nodeno}") - chronicle_target_dir_command_output, _, chronicle_target_dir_exit_code = self.run_os_command( - os_command='check_directory', - dir_path=chronicle_target_dir + chronicle_target_dir = os.path.join( + targetdir, f"chronicle_{nodeno}" + ) + ( + chronicle_target_dir_command_output, + _, + chronicle_target_dir_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=chronicle_target_dir ) - if 
chronicle_target_dir_exit_code == 0 and "Found" in chronicle_target_dir_command_output: + if ( + chronicle_target_dir_exit_code == 0 + and "Found" in chronicle_target_dir_command_output + ): self.run_os_command( - os_command='delete_dir', - dirname=chronicle_target_dir + os_command="delete_dir", dirname=chronicle_target_dir ) self.run_os_command( os_command="os_cpr", - srcname="{}/../var/lib/couchbase/config/chronicle".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)), - trgname=chronicle_target_dir + srcname="{}/../var/lib/couchbase/config/chronicle".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ), + trgname=chronicle_target_dir, ) - def check_cluster_notconfigured(self): logger.debug("check_cluster") command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='get_server_list', - hostname=self.connection.environment.host.name) + couchbase_command="get_server_list", + hostname=self.connection.environment.host.name, + ) if "unknown pool" in command_output: return True else: return False - def check_cluster_configured(self): logger.debug("check_cluster configured") command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='get_server_list', - hostname=self.connection.environment.host.name) + couchbase_command="get_server_list", + hostname=self.connection.environment.host.name, + ) if "healthy active" in command_output: return True else: return False - - def check_config(self): - filename = os.path.join(self.get_config_directory(),"config.dat") + filename = os.path.join(self.get_config_directory(), "config.dat") cmd = CommandFactory.check_file(filename) logger.debug("check file cmd: {}".format(cmd)) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, callback_func=self.ignore_err) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd, callback_func=self.ignore_err + ) - if exit_code == 0 and "Found" in command_output: + if exit_code == 0 and "Found" in command_output: return True else: return False def delete_data_folder(self, nodeno=1): data_folder = "{}/data_{}".format(self.parameters.mount_path, nodeno) - command_output, command_stderr, command_exit_code = self.run_os_command( - os_command="check_directory", - dir_path=data_folder + ( + command_output, + command_stderr, + command_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=data_folder + ) + logger.debug( + f"check data directory >> command_output=={command_output}" + f" , command_stderr=={command_stderr} , " + f"command_exit_code=={command_exit_code}" ) - logger.debug(f"check data directory >> command_output=={command_output}" - f" , command_stderr=={command_stderr} , " - f"command_exit_code=={command_exit_code}") if command_output == "Found": - self.run_os_command( - os_command="delete_dir", - dirname=data_folder - ) + self.run_os_command(os_command="delete_dir", dirname=data_folder) def delete_config_folder(self): if int(self.repository.version.split(".")[0]) >= 6: config_directory_path = "{}/../var/lib/couchbase/config".format( helper_lib.get_base_directory_of_given_path( - self.repository.cb_shell_path)) - command_output, command_stderr, command_exit_code = self.run_os_command( - os_command="check_directory", - dir_path=config_directory_path - ) - logger.debug(f"check directory >> command_output=={command_output}" - f" , command_stderr=={command_stderr} , " - 
f"command_exit_code=={command_exit_code}") + self.repository.cb_shell_path + ) + ) + ( + command_output, + command_stderr, + command_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=config_directory_path + ) + logger.debug( + f"check directory >> command_output=={command_output}" + f" , command_stderr=={command_stderr} , " + f"command_exit_code=={command_exit_code}" + ) if command_output == "Found": target_folder = f"{config_directory_path}_bkp" - command_output, command_stderr, command_exit_code = self.run_os_command( - os_command="check_directory", - dir_path=target_folder + ( + command_output, + command_stderr, + command_exit_code, + ) = self.run_os_command( + os_command="check_directory", dir_path=target_folder ) if command_output == "Found": self.run_os_command( - os_command="delete_dir", - dirname=target_folder + os_command="delete_dir", dirname=target_folder ) self.run_os_command( - os_command='os_mv', + os_command="os_mv", srcname=config_directory_path, - trgname=target_folder + trgname=target_folder, + ) + logger.debug( + f"mv directory >> command_output=={command_output}" + f" , command_stderr=={command_stderr} , " + f"command_exit_code=={command_exit_code}" ) - logger.debug(f"mv directory >> command_output=={command_output}" - f" , command_stderr=={command_stderr} , " - f"command_exit_code=={command_exit_code}") def delete_xdcr_config(self): if self.parameters.d_source_type == "XDCR": @@ -947,132 +1028,218 @@ def restore_config(self, what, nodeno=1): sourcedir = self.get_config_directory() - source_config_file = os.path.join(sourcedir,"config.dat_{}".format(nodeno)) - source_local_filename = os.path.join(sourcedir,"local.ini_{}".format(nodeno)) - source_encryption_keys = os.path.join(sourcedir,"encrypted_data_keys_{}".format(nodeno)) - - - source_ip_file = os.path.join(sourcedir,"ip_{}".format(nodeno)) - target_ip_file = "{}/../var/lib/couchbase/ip".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) - delete_ip_file = "{}/../var/lib/couchbase/ip_start".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + source_config_file = os.path.join( + sourcedir, "config.dat_{}".format(nodeno) + ) + source_local_filename = os.path.join( + sourcedir, "local.ini_{}".format(nodeno) + ) + source_encryption_keys = os.path.join( + sourcedir, "encrypted_data_keys_{}".format(nodeno) + ) - # check_file_command = CommandFactory.check_file(source_ip_file, sudo=self.need_sudo, uid=self.uid) - # check_ip_file, check_ip_file_err, exit_code = utilities.execute_bash(self.connection, check_file_command, callback_func=self.ignore_err) + source_ip_file = os.path.join(sourcedir, "ip_{}".format(nodeno)) + target_ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + delete_ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) check_ip_file, check_ip_file_err, exit_code = self.run_os_command( - os_command='check_file', - file_path=source_ip_file - ) - + os_command="check_file", file_path=source_ip_file + ) - if not (exit_code == 0 and "Found" in check_ip_file): - source_ip_file = os.path.join(sourcedir,"ip_start_{}".format(nodeno)) - target_ip_file = "{}/../var/lib/couchbase/ip_start".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) - delete_ip_file = 
"{}/../var/lib/couchbase/ip".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + if not (exit_code == 0 and "Found" in check_ip_file): + source_ip_file = os.path.join( + sourcedir, "ip_start_{}".format(nodeno) + ) + target_ip_file = "{}/../var/lib/couchbase/ip_start".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + delete_ip_file = "{}/../var/lib/couchbase/ip".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) logger.debug("IP file is {}".format(source_ip_file)) - ip_filename = os.path.join(sourcedir,"ip_{}".format(source_ip_file)) - - targetfile = "{}/../var/lib/couchbase/config/config.dat".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + targetfile = "{}/../var/lib/couchbase/config/config.dat".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=source_config_file, - trgname=targetfile - ) + os_command="os_cp", srcname=source_config_file, trgname=targetfile + ) - logger.debug("config.dat restore - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "config.dat restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) - check_encrypted_data_keys, check_ip_file_err, exit_code = self.run_os_command( - os_command='check_file', - file_path=source_encryption_keys - ) + ( + check_encrypted_data_keys, + check_ip_file_err, + exit_code, + ) = self.run_os_command( + os_command="check_file", file_path=source_encryption_keys + ) - logger.debug("Check check_encrypted_data_keys - exit_code: {} stdout: {}".format(exit_code, check_encrypted_data_keys)) + logger.debug( + "Check check_encrypted_data_keys - exit_code: {} " + "stdout: {}".format(exit_code, check_encrypted_data_keys) + ) - if exit_code == 0 and "Found" in check_encrypted_data_keys: - targetfile = "{}/../var/lib/couchbase/config/encrypted_data_keys".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + if exit_code == 0 and "Found" in check_encrypted_data_keys: + targetfile = ( + "{}/../var/lib/couchbase/config/encrypted_data_keys".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=source_encryption_keys, - trgname=targetfile - ) + os_command="os_cp", + srcname=source_encryption_keys, + trgname=targetfile, + ) - logger.debug("encrypted_data_keys restore - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) - + logger.debug( + "encrypted_data_keys restore - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) - check_ip_delete_file, check_ip_delete_file, check_ip_exit_code = self.run_os_command( - os_command='check_file', - file_path=delete_ip_file - ) + ( + check_ip_delete_file, + check_ip_delete_file, + check_ip_exit_code, + ) = self.run_os_command( + os_command="check_file", file_path=delete_ip_file + ) - logger.debug("Check delete old ip_file - exit_code: {} stdout: {}".format(check_ip_exit_code, check_ip_delete_file)) + logger.debug( + "Check delete old ip_file - exit_code: {} stdout: {}".format( + check_ip_exit_code, check_ip_delete_file + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_mv', - 
srcname=delete_ip_file, - trgname="{}.bak".format(delete_ip_file) - ) - - logger.debug("ipfile delete - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + os_command="os_mv", + srcname=delete_ip_file, + trgname="{}.bak".format(delete_ip_file), + ) + logger.debug( + "ipfile delete - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=source_ip_file, - trgname=target_ip_file - ) + os_command="os_cp", srcname=source_ip_file, trgname=target_ip_file + ) - logger.debug("ipfile restore - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "ipfile restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) - targetfile = "{}/../etc/couchdb/local.ini".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + targetfile = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cp', - srcname=source_local_filename, - trgname=targetfile - ) + os_command="os_cp", + srcname=source_local_filename, + trgname=targetfile, + ) - logger.debug("local.ini restore - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "local.ini restore - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) if int(self.repository.version.split(".")[0]) >= 7: - source_chronicle_dirname = os.path.join(sourcedir, "chronicle_{}".format(nodeno)) - target_chronicle_dirname = "{}/../var/lib/couchbase/config/chronicle".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + source_chronicle_dirname = os.path.join( + sourcedir, "chronicle_{}".format(nodeno) + ) + target_chronicle_dirname = ( + "{}/../var/lib/couchbase/config/chronicle".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='check_directory', - dir_path=target_chronicle_dirname + os_command="check_directory", dir_path=target_chronicle_dirname ) if exit_code == 0 and "Found" in command_output: self.run_os_command( - os_command='delete_dir', - dirname=target_chronicle_dirname + os_command="delete_dir", dirname=target_chronicle_dirname ) command_output, std_err, exit_code = self.run_os_command( - os_command='os_cpr', + os_command="os_cpr", srcname=source_chronicle_dirname, - trgname=target_chronicle_dirname + trgname=target_chronicle_dirname, ) logger.debug( "chronicle restore - exit_code: {} stdout: {} std_err: {}".format( - exit_code, command_output, std_err)) + exit_code, command_output, std_err + ) + ) - if what == 'parent': - #local.ini needs to have a proper entry - filename = "{}/../etc/couchdb/local.ini".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + if what == "parent": + # local.ini needs to have a proper entry + filename = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) newpath = "{}/data_{}".format(self.parameters.mount_path, nodeno) - cmd = CommandFactory.sed(filename, 's|view_index_dir.*|view_index_dir={}|'.format(newpath), self.need_sudo, self.uid) + cmd = CommandFactory.sed( + filename, + 
"s|view_index_dir.*|view_index_dir={}|".format(newpath), + self.need_sudo, + self.uid, + ) logger.debug("sed config cmd: {}".format(cmd)) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd) - logger.debug("setting index paths - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd + ) + logger.debug( + "setting index paths - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) - cmd = CommandFactory.sed(filename, 's|database_dir.*|database_dir={}|'.format(newpath), self.need_sudo, self.uid) + cmd = CommandFactory.sed( + filename, + "s|database_dir.*|database_dir={}|".format(newpath), + self.need_sudo, + self.uid, + ) logger.debug("sed config cmd: {}".format(cmd)) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd) - logger.debug("setting data paths - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) - - + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd + ) + logger.debug( + "setting data paths - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) def delete_config(self): @@ -1081,80 +1248,97 @@ def delete_config(self): logger.debug("start delete_config") - filename = "{}/../var/lib/couchbase/config/config.dat".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + filename = "{}/../var/lib/couchbase/config/config.dat".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) cmd = CommandFactory.check_file(filename, self.need_sudo, self.uid) logger.debug("check file cmd: {}".format(cmd)) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd, callback_func=self.ignore_err) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd, callback_func=self.ignore_err + ) - if exit_code == 0 and "Found" in command_output: - cmd = CommandFactory.os_mv(filename, "{}.bak".format(filename), self.need_sudo, self.uid) + if exit_code == 0 and "Found" in command_output: + cmd = CommandFactory.os_mv( + filename, "{}.bak".format(filename), self.need_sudo, self.uid + ) logger.debug("rename config cmd: {}".format(cmd)) - command_output, std_err, exit_code = utilities.execute_bash(self.connection, command_name=cmd) - logger.debug("rename config.dat to bak - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + command_output, std_err, exit_code = utilities.execute_bash( + self.connection, command_name=cmd + ) + logger.debug( + "rename config.dat to bak - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) - filename = "{}/../etc/couchdb/local.ini".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + filename = "{}/../etc/couchdb/local.ini".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='sed', - filename=filename, - regex='s/view_index_dir.*//' - ) - - logger.debug("clean local.ini index - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + os_command="sed", filename=filename, regex="s/view_index_dir.*//" + ) + + logger.debug( + "clean local.ini index - 
exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='sed', - filename=filename, - regex='s/database_dir.*//' - ) + os_command="sed", filename=filename, regex="s/database_dir.*//" + ) - logger.debug("clean local.ini data - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "clean local.ini data - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) command_output, std_err, exit_code = self.run_os_command( - os_command='change_permission', - path=filename - ) + os_command="change_permission", path=filename + ) - logger.debug("fix local.ini permission - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "fix local.ini permission - exit_code: {} stdout: {} " + "std_err: {}".format(exit_code, command_output, std_err) + ) - chronicle_dir_name = "{}/../var/lib/couchbase/config/chronicle".format(helper_lib.get_base_directory_of_given_path(self.repository.cb_shell_path)) + chronicle_dir_name = "{}/../var/lib/couchbase/config/chronicle".format( + helper_lib.get_base_directory_of_given_path( + self.repository.cb_shell_path + ) + ) self.run_os_command( - os_command='delete_dir', - dirname=chronicle_dir_name + os_command="delete_dir", dirname=chronicle_dir_name ) def ignore_err(self, input): return True - def rename_cluster(self): """Rename cluster based on user entries""" logger.debug("start rename_cluster") - - # command_output, std_err, exit_code = self.run_couchbase_command( - # couchbase_command='rename_cluster', - # username=self.snapshot.couchbase_admin, - # password=self.snapshot.couchbase_admin_password, - # newuser=self.parameters.couchbase_admin, - # newpass=self.parameters.couchbase_admin_password, - # newname=self.parameters.tgt_cluster_name - # ) self.run_couchbase_command( - couchbase_command='rename_cluster', + couchbase_command="rename_cluster", username=self.snapshot.couchbase_admin, password=self.snapshot.couchbase_admin_password, - newname=self.parameters.tgt_cluster_name + newname=self.parameters.tgt_cluster_name, ) command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='change_cluster_password', + couchbase_command="change_cluster_password", username=self.snapshot.couchbase_admin, password=self.snapshot.couchbase_admin_password, newuser=self.parameters.couchbase_admin, - newpass=self.parameters.couchbase_admin_password + newpass=self.parameters.couchbase_admin_password, ) - - logger.debug("rename cluster - exit_code: {} stdout: {} std_err: {}".format(exit_code, command_output, std_err)) + logger.debug( + "rename cluster - exit_code: {} stdout: {} std_err: {}".format( + exit_code, command_output, std_err + ) + ) def start_node_bootstrap(self): logger.debug("start start_node_bootstrap") @@ -1162,53 +1346,51 @@ def start_node_bootstrap(self): end_time = time.time() + 3660 server_status = Status.INACTIVE - #break the loop either end_time is exceeding from 1 minute or server is successfully started + # break the loop either end_time is exceeding from 1 minute or server + # is successfully started while time.time() < end_time and server_status != Status.ACTIVE: - helper_lib.sleepForSecond(1) # waiting for 1 second - server_status = self.staging_bootstrap_status() # fetching status + helper_lib.sleepForSecond(1) # waiting for 1 second + server_status = self.staging_bootstrap_status() # fetching status 
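+            # staging_bootstrap_status() probes the local node via the
+            # couchbase_server_info command; keep polling once per second
+            # until it reports ACTIVE or end_time is reached.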
logger.debug("server status {}".format(server_status)) - - def addnode(self, nodeno, node_def): logger.debug("start addnode") - self.delete_config() self.start_node_bootstrap() self.node_init(nodeno) - helper_lib.sleepForSecond(10) - services = [ 'data', 'index', 'query' ] + services = ["data", "index", "query"] - if "fts_service" in node_def and node_def["fts_service"] == True: - services.append('fts') + if "fts_service" in node_def and node_def["fts_service"]: + services.append("fts") - if "eventing_service" in node_def and node_def["eventing_service"] == True: - services.append('eventing') + if "eventing_service" in node_def and node_def["eventing_service"]: + services.append("eventing") - if "analytics_service" in node_def and node_def["analytics_service"] == True: - services.append('analytics') + if "analytics_service" in node_def and node_def["analytics_service"]: + services.append("analytics") logger.debug("services to add: {}".format(services)) - - # hostip_command = CommandFactory.get_ip_of_hostname() - # logger.debug("host ip command: {}".format(hostip_command)) - # host_ip_output, std_err, exit_code = utilities.execute_bash(self.connection, hostip_command) - # logger.debug("host ip Output {} ".format(host_ip_output)) - - logger.debug("node host name / IP: {}".format(node_def["node_addr"])) - resolve_name_command = CommandFactory.resolve_name(hostname=node_def["node_addr"]) - logger.debug("resolve_name_command command: {}".format(resolve_name_command)) - resolve_name_output, std_err, exit_code = utilities.execute_bash(self.connection, resolve_name_command) - logger.debug("resolve_name_command Output {} ".format(resolve_name_output)) + resolve_name_command = CommandFactory.resolve_name( + hostname=node_def["node_addr"] + ) + logger.debug( + "resolve_name_command command: {}".format(resolve_name_command) + ) + resolve_name_output, std_err, exit_code = utilities.execute_bash( + self.connection, resolve_name_command + ) + logger.debug( + "resolve_name_command Output {} ".format(resolve_name_output) + ) if int(self.repository.version.split(".")[0]) >= 7: if "(CE)" in self.repository.version: @@ -1219,40 +1401,52 @@ def addnode(self, nodeno, node_def): new_port = "18091" command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='server_add', - hostname=self.connection.environment.host.name, - newhost=resolve_name_output, - services=','.join(services), - new_port=new_port - ) - + couchbase_command="server_add", + hostname=self.connection.environment.host.name, + newhost=resolve_name_output, + services=",".join(services), + new_port=new_port, + ) - logger.debug("Add node Output {} stderr: {} exit_code: {} ".format(command_output, std_err, exit_code)) + logger.debug( + "Add node Output {} stderr: {} exit_code: {} ".format( + command_output, std_err, exit_code + ) + ) if exit_code != 0: logger.debug("Adding node error") - raise UserError("Problem with adding node", "Check an output and fix problem before retrying to provision a VDB", "stdout: {} stderr:{}".format(command_output, std_err)) - - - + raise UserError( + "Problem with adding node", + "Check an output and fix problem before retrying to provision " + "a VDB", + "stdout: {} stderr:{}".format(command_output, std_err), + ) command_output, std_err, exit_code = self.run_couchbase_command( - couchbase_command='rebalance', - hostname=self.connection.environment.host.name - ) - + couchbase_command="rebalance", + hostname=self.connection.environment.host.name, + ) - - logger.debug("Rebalance Output {} stderr: {} 
exit_code: {} ".format(command_output, std_err, exit_code)) + logger.debug( + "Rebalance Output {} stderr: {} exit_code: {} ".format( + command_output, std_err, exit_code + ) + ) if exit_code != 0: logger.debug("Rebalancing error") - raise UserError("Problem with rebalancing cluster", "Check an output and fix problem before retrying to provision a VDB", "stdout: {} stderr:{}".format(command_output, std_err)) - - + raise UserError( + "Problem with rebalancing cluster", + "Check an output and fix problem before retrying to provision " + "a VDB", + "stdout: {} stderr:{}".format(command_output, std_err), + ) if __name__ == "__main__": # print "Checking Couchbase Class" - test_object = CouchbaseOperation(Resource.ObjectBuilder.set_dsource(True).build()) - print (test_object.get_config_file_path.__doc__) + test_object = CouchbaseOperation( + Resource.ObjectBuilder.set_dsource(True).build() + ) + print(test_object.get_config_file_path.__doc__) diff --git a/src/controller/db_exception_handler.py b/src/controller/db_exception_handler.py index ca77d20..a7ce39b 100644 --- a/src/controller/db_exception_handler.py +++ b/src/controller/db_exception_handler.py @@ -1,32 +1,34 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -import types -import re import logging -import traceback +import re import sys +import traceback +import types - -from db_commands.constants import CLUSTER_ALREADY_PRESENT, BUCKET_NAME_ALREADY_EXIST, MULTIPLE_VDB_ERROR, \ - SHUTDOWN_FAILED, ALREADY_CLUSTER_INIT, ALREADY_CLUSTER_FOR_BUCKET -from controller import helper_lib +from db_commands.constants import ALREADY_CLUSTER_FOR_BUCKET +from db_commands.constants import ALREADY_CLUSTER_INIT +from db_commands.constants import BUCKET_NAME_ALREADY_EXIST +from db_commands.constants import CLUSTER_ALREADY_PRESENT +from db_commands.constants import MULTIPLE_VDB_ERROR +from db_commands.constants import SHUTDOWN_FAILED +from dlpx.virtualization.platform.exceptions import UserError from internal_exceptions.base_exceptions import GenericUserError from internal_exceptions.plugin_exceptions import ERR_RESPONSE_DATA -from dlpx.virtualization.platform.exceptions import UserError logger = logging.getLogger(__name__) -# This is meta class which decorates the each functions of child class with below things: +# This is meta class which decorates the each functions of child +# class with below things: # Ignore common exceptions # Enable logging in more intuitive way class DatabaseExceptionHandlerMeta(type): - - def __new__(mcs, caller_name, caller_base_name, attributes_in_caller): + def __new__(mcs, caller_name, caller_base_name, attr_in_caller): """ :param caller_name: :type caller_name: Class type @@ -38,59 +40,78 @@ def __new__(mcs, caller_name, caller_base_name, attributes_in_caller): """ # iteration for each method of a caller class - for attribute_name, attribute_value in attributes_in_caller.iteritems(): + for attribute_name, attribute_value in attr_in_caller.iteritems(): if isinstance(attribute_value, types.FunctionType): - if attribute_name == "__init__" or attribute_name == "status" or attribute_name == "check_attribute_error": + if ( + attribute_name == "__init__" + or attribute_name == "status" + or attribute_name == "check_attribute_error" + ): continue - attributes_in_caller[attribute_name] = mcs.handle_exception_decorator(attribute_value) + a = mcs.handle_exception_decorator(attribute_value) + attr_in_caller[attribute_name] = a try: - return 
 class DatabaseExceptionHandlerMeta(type):
-
-    def __new__(mcs, caller_name, caller_base_name, attributes_in_caller):
+    def __new__(mcs, caller_name, caller_base_name, attr_in_caller):
         """
         :param caller_name:
         :type caller_name: Class type
@@ -38,59 +40,78 @@ def __new__(mcs, caller_name, caller_base_name, attributes_in_caller):
         """

         # iteration for each method of a caller class
-        for attribute_name, attribute_value in attributes_in_caller.iteritems():
+        for attribute_name, attribute_value in attr_in_caller.items():
             if isinstance(attribute_value, types.FunctionType):
-                if attribute_name == "__init__" or attribute_name == "status" or attribute_name == "check_attribute_error":
+                if (
+                    attribute_name == "__init__"
+                    or attribute_name == "status"
+                    or attribute_name == "check_attribute_error"
+                ):
                     continue
-                attributes_in_caller[attribute_name] = mcs.handle_exception_decorator(attribute_value)
+                a = mcs.handle_exception_decorator(attribute_value)
+                attr_in_caller[attribute_name] = a
         try:
-            return super(DatabaseExceptionHandlerMeta, mcs).__new__(mcs, caller_name, caller_base_name,
-                                                                    attributes_in_caller)
+            return super(DatabaseExceptionHandlerMeta, mcs).__new__(
+                mcs, caller_name, caller_base_name, attr_in_caller
+            )
         except Exception as err:
-            logger.debug("Exception occurred in metaclass: {}".format(str(err)))
+            logger.debug(
+                "Exception occurred in metaclass: {}".format(
+                    str(err),
+                )
+            )
             raise

     @classmethod
     def _exception_generator_factory(mcs, err_string):
         """
         :param err_string:
-        :raises: Exceptions based on the output. It matches the error string with predefined strings.
-        In some cases we need to kill the program and in some cases it is not. This is distinguished by the
+        :raises: Exceptions based on the output. It matches the error string
+        with predefined strings.
+        In some cases we need to kill the program and in some cases we
+        do not. This is distinguished by the
         error string.
         """
-        if (re.search(CLUSTER_ALREADY_PRESENT, err_string) or
-                re.search(BUCKET_NAME_ALREADY_EXIST, err_string) or
-                re.search(MULTIPLE_VDB_ERROR, err_string) or
-                re.search(SHUTDOWN_FAILED, err_string) or
-                re.search(ALREADY_CLUSTER_FOR_BUCKET, err_string) or
-                re.search(ALREADY_CLUSTER_INIT, err_string)):
+        if (
+            re.search(CLUSTER_ALREADY_PRESENT, err_string)
+            or re.search(BUCKET_NAME_ALREADY_EXIST, err_string)
+            or re.search(MULTIPLE_VDB_ERROR, err_string)
+            or re.search(SHUTDOWN_FAILED, err_string)
+            or re.search(ALREADY_CLUSTER_FOR_BUCKET, err_string)
+            or re.search(ALREADY_CLUSTER_INIT, err_string)
+        ):
             logger.debug("Gracefully accepted the last exception")
             return
         logger.debug("Searching predefined exception for this error")
         err_code = get_err_code(err_string)
-        raise GenericUserError(ERR_RESPONSE_DATA[err_code]['MESSAGE'], ERR_RESPONSE_DATA[err_code]['ACTION'], err_string)
+        raise GenericUserError(
+            ERR_RESPONSE_DATA[err_code]["MESSAGE"],
+            ERR_RESPONSE_DATA[err_code]["ACTION"],
+            err_string,
+        )

     @classmethod
     def handle_exception_decorator(mcs, function_name):
         """
-        Decorating function with exception handling. Also we can control the output of each couchbase
+        Decorates a function with exception handling. Also we can control the
+        output of each couchbase
         command at single place.
         :param function_name: Method of a class which is not static and class
         :type : function
         :return : None
-        """
+        """

         def wrapper_function(*args, **kwargs):
             try:
                 output_list = function_name(*args, **kwargs)
                 return output_list
-            except UserError as ue:
+            except UserError:
                 logger.debug("User Error found")
                 ttype, value, traceb = sys.exc_info()
                 logger.debug("type: {}, value: {}".format(ttype, value))
                 logger.debug("trackback")
-                logger.debug(traceback.format_exc())
+                logger.debug(traceback.format_exc())
                 raise

             except Exception as error:
@@ -99,7 +120,7 @@ def wrapper_function(*args, **kwargs):
                 ttype, value, traceb = sys.exc_info()
                 logger.debug("type: {}, value: {}".format(ttype, value))
                 logger.debug("trackback")
-                logger.debug(traceback.format_exc())
+                logger.debug(traceback.format_exc())
                 mcs._exception_generator_factory(str(error))

         return wrapper_function
@@ -113,4 +134,4 @@ def get_err_code(error_string):
         search_string = ERR_RESPONSE_DATA[each_err_code]["ERR_STRING"]
         if re.search(search_string, error_string):
             return each_err_code
-    return 'DEFAULT_ERR'
+    return "DEFAULT_ERR"
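+# Each entry's ERR_STRING in ERR_RESPONSE_DATA is tried in turn; an error
+# string that matches none of them falls back to the "DEFAULT_ERR" entry.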
diff --git a/src/controller/helper_lib.py b/src/controller/helper_lib.py
index 7a0b41b..fcfc520 100644
--- a/src/controller/helper_lib.py
+++ b/src/controller/helper_lib.py
@@ -1,16 +1,19 @@
 #
-# Copyright (c) 2020 by Delphix. All rights reserved.
+# Copyright (c) 2020-2023 by Delphix. All rights reserved.
 #
-#######################################################################################################################
+##############################################################################

 """
-    This module contains common functionality that is being used across plugin. Like bucket size calculation, read file,
-    write data into file and also operations required in discovery. Moreover it helps in colorful logging in debug log.
-    Recommending to view the logs using the tail command then easily segregate the running command/output/exception/debug
-    messages
+    This module contains common functionality that is used across the
+    plugin, like bucket size calculation, reading files, writing data into
+    files, and also operations required in discovery.
+    Moreover it helps with colorful logging in the debug log. We recommend
+    viewing the logs using the tail command to easily segregate the
+    running command/output/exception/debug
+    messages.
 """

-#######################################################################################################################
+##############################################################################

 import json
 import logging
@@ -24,12 +27,12 @@
 import db_commands
 from db_commands.commands import CommandFactory
 from db_commands.constants import DEFAULT_CB_BIN_PATH
-from dlpx.virtualization.platform.exceptions import UserError
-
 from dlpx.virtualization.platform import Status
-
-from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError, SourceConfigDiscoveryError, FileIOError, \
-    UnmountFileSystemError
+from dlpx.virtualization.platform.exceptions import UserError
+from internal_exceptions.plugin_exceptions import FileIOError
+from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError
+from internal_exceptions.plugin_exceptions import SourceConfigDiscoveryError
+from internal_exceptions.plugin_exceptions import UnmountFileSystemError
 from utils import utilities

 # Global logger object for this file
@@ -39,15 +42,25 @@

 def find_binary_path(source_connection):
     """
     :param source_connection: Connection for the source environment
-    :return: Bin path defined in environment variable '$COUCHBASE_PATH'. If it is not defined then "/opt/couchbase/bin"
+    :return: Bin path defined in environment variable '$COUCHBASE_PATH'.
+        If it is not defined then "/opt/couchbase/bin"
     """
     logger.debug("Finding Binary Path...")
-    binary_paths, std_err, exit_code = utilities.execute_bash(source_connection, CommandFactory.find_binary_path())
+    binary_paths, std_err, exit_code = utilities.execute_bash(
+        source_connection, CommandFactory.find_binary_path()
+    )
    if binary_paths == "":
-        logger.debug("Please verify COUCHBASE_PATH is defined. Checking at default location {}".format(DEFAULT_CB_BIN_PATH))
+        logger.debug(
+            "Please verify COUCHBASE_PATH is defined. Checking at default "
+            "location {}".format(DEFAULT_CB_BIN_PATH)
+        )
         binary_paths = DEFAULT_CB_BIN_PATH
     else:
-        logger.debug("List of couchbase path found are {}".format(binary_paths.split(';')))
+        logger.debug(
+            "List of couchbase paths found: {}".format(
+                binary_paths.split(";")
+            )
+        )
     logger.debug("Finding Binary: {}".format(binary_paths))
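+    # Example (assumed environment): with COUCHBASE_PATH=/opt/couchbase/bin
+    # this returns "/opt/couchbase/bin"; when the variable is unset, the
+    # DEFAULT_CB_BIN_PATH fallback above applies.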
Checking at default " + "location {}".format(DEFAULT_CB_BIN_PATH) + ) binary_paths = DEFAULT_CB_BIN_PATH else: - logger.debug("List of couchbase path found are {}".format(binary_paths.split(';'))) + logger.debug( + "List of couchbase path found are {}".format( + binary_paths.split(";") + ) + ) logger.debug("Finding Binary: {}".format(binary_paths)) return binary_paths @@ -59,8 +72,9 @@ def find_shell_path(source_connection, binary_path): :return:path of cluster management utility: {couchbase-cli} """ logger.debug("Finding Shell Path...") - shell_path, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.find_shell_path(binary_path)) + shell_path, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.find_shell_path(binary_path) + ) if shell_path == "": message = "Shell path {}/couchbase-cli not found".format(binary_path) raise RepositoryDiscoveryError(message) @@ -72,32 +86,40 @@ def find_install_path(source_connection, binary_path): :param source_connection:Connection for the source environment :param binary_path: Couchbase binary path - :return: path of couchbase-server, through which daemon processes can start in background + :return: path of couchbase-server, through which daemon processes can + start in background """ logger.debug("Finding install Path...") - install_path, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.find_install_path(binary_path)) + install_path, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.find_install_path(binary_path) + ) if install_path == "": - message = "Install path {}/couchbase-server not found".format(binary_path) + message = "Install path {}/couchbase-server not found".format( + binary_path + ) raise RepositoryDiscoveryError(message) else: - logger.debug("couchbase-server found in directory {}".format(install_path)) + logger.debug( + "couchbase-server found in directory {}".format(install_path) + ) return install_path def find_version(source_connection, install_path): - """ return the couchbase version installed on the host""" - cb_version, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.get_version(install_path)) + """return the couchbase version installed on the host""" + cb_version, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.get_version(install_path) + ) version = re.search(r"\d.*$", cb_version).group() logger.debug("Couchbase version installed {}".format(version)) return version def find_ids(source_connection, install_path): - """ return the couchbase uid and gid""" - std_out, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.get_ids(install_path)) + """return the couchbase uid and gid""" + std_out, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.get_ids(install_path) + ) logger.debug("find ids output: {}".format(std_out)) ids = re.search(r"[-rwx.]+\s\d\s([\d]+)\s([\d]+).*", std_out) if ids: @@ -109,10 +131,12 @@ def find_ids(source_connection, install_path): logger.debug("Couchbase user uid {} gid {}".format(uid, gid)) return (uid, gid) + def find_whoami(source_connection): - """ return the user env id""" - std_out, std_err, exit_code = utilities.execute_bash(source_connection, - CommandFactory.whoami()) + """return the user env id""" + std_out, std_err, exit_code = utilities.execute_bash( + source_connection, CommandFactory.whoami() + ) logger.debug("find whoami output: {}".format(std_out)) ids = 
re.search(r"uid=([\d]+).*gid=([\d]+)", std_out) if ids: @@ -134,8 +158,10 @@ def need_sudo(source_connection, couchbase_uid, couchbase_gid): def is_instance_present_of_gosecrets(source_connection): - """ check couchbase server is running or not""" - instance, stderr, exit_code = utilities.execute_bash(source_connection, CommandFactory.get_process()) + """check couchbase server is running or not""" + instance, stderr, exit_code = utilities.execute_bash( + source_connection, CommandFactory.get_process() + ) # return true if 'gosecrets' string is present in output of get_process return "gosecrets" in instance @@ -146,44 +172,49 @@ def get_data_directory(source_connection, repository): couchbase_base_dir = os.path.dirname(couchbase_binary_path) filename = "{}/etc/couchbase/static_config".format(couchbase_base_dir) static_config, stderr, exit_code = read_file(source_connection, filename) - if not re.search(r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config): + if not re.search( + r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config + ): message = "Cannot find data directory" logger.debug(message) raise SourceConfigDiscoveryError(message) - data_directory = re.search(r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config).group() + data_directory = re.search( + r"(?<=path_config_datadir, \").*(?=\"}\.)", static_config + ).group() logger.debug("data_directory is {} ".format(data_directory)) return data_directory def get_base_directory_of_given_path(binary_path): - """ Return the base directory of given path """ + """Return the base directory of given path""" path = os.path.split(binary_path)[0] return path def remap_bucket_json(bucket): output = {} - if 'bucketType' in bucket: - output['bucketType'] = bucket['bucketType'] - if 'name' in bucket: - output['name'] = bucket['name'] - if 'quota' in bucket and 'ram' in bucket['quota']: - output['ram'] = bucket['quota']['ram'] - elif 'ramQuota' in bucket: + if "bucketType" in bucket: + output["bucketType"] = bucket["bucketType"] + if "name" in bucket: + output["name"] = bucket["name"] + if "quota" in bucket and "ram" in bucket["quota"]: + output["ram"] = bucket["quota"]["ram"] + elif "ramQuota" in bucket: # this is in MB - output['ram'] = int(bucket['ramQuota']) * 1024 * 1024 + output["ram"] = int(bucket["ramQuota"]) * 1024 * 1024 else: - logger.debug('No memory in bucket - setting to default') - output['ram'] = 1024000 - if 'compressionMode' in bucket: - output['compressionMode'] = bucket['compressionMode'] + logger.debug("No memory in bucket - setting to default") + output["ram"] = 1024000 + if "compressionMode" in bucket: + output["compressionMode"] = bucket["compressionMode"] else: - output['compressionMode'] = None + output["compressionMode"] = None return output + def get_all_bucket_list_with_size(bucket_output): - """ - Return bucket name with ramUsed( adjust ramused value ) + """ + Return bucket name with ramUsed( adjust ramused value ) from bucket_output """ @@ -191,14 +222,14 @@ def get_all_bucket_list_with_size(bucket_output): min_size = 104857600 all_bucket_list = "" for line in bucket_output: - bucket_name = None - ram_size = 0 - if line.find(':') == -1: # find the bucket name + if line.find(":") == -1: # find the bucket name all_bucket_list = all_bucket_list + line + "," elif line.find("ramUsed") != -1: # find ramUsed row in output - ram_size = int(line.split(':')[1].strip()) + ram_size = int(line.split(":")[1].strip()) # Formula used used bucketsize/2 + 10% additional memory - ram_size = (ram_size) / 2 + ((ram_size / 2) * 
             if ram_size < min_size:
                 ram_size = min_size
             all_bucket_list = all_bucket_list + str(ram_size) + ":"
@@ -208,17 +239,17 @@

 def get_stg_all_bucket_list_with_ramquota_size(bucket_output):
-    """ Return bucket name with ramQuota from bucket_output. It will help in VDB creation as a reference value for
-    bucket
+    """Return bucket name with ramQuota from bucket_output. It will help in
+    VDB creation as a reference value for
+    the bucket.
     """
     logger.debug("bucket_output: {}".format(bucket_output))
     all_bucket_list = ""
     for line in bucket_output:
-        bucket_name = None
-        if line.find(':') == -1:  # find the bucket name
+        if line.find(":") == -1:  # find the bucket name
             all_bucket_list = all_bucket_list + line + ","
-        elif line.find("ramQuota") != -1:  # find ramQuota row in output
-            ram_quota = int(line.split(':')[1].strip())
+        elif line.find("ramQuota") != -1:  # find ramQuota row in output
+            ram_quota = int(line.split(":")[1].strip())
             all_bucket_list = all_bucket_list + str(ram_quota) + ":"
     all_bucket_list = all_bucket_list.strip(":")
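+    # e.g. two buckets yield "bucket1,200:bucket2,300" - each name is
+    # followed by "," and each ramQuota value by ":".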
     logger.debug("All bucket list is: {}".format(all_bucket_list))
@@ -226,14 +257,16 @@

 def filter_bucket_name_from_json(bucket_output):
-    """ Filter bucket name from bucket_output. Return list of bucket names present in bucket_output"""
-    output = [ x['name'] for x in bucket_output if x['ram'] > 0]
+    """Filter bucket name from bucket_output. Return list of
+    bucket names present in bucket_output"""
+    output = [x["name"] for x in bucket_output if x["ram"] > 0]
     logger.debug("Bucket list: {}".format(output))
     return output

+
 def filter_bucket_name_from_output(bucket_output):
-    """ 
-    Filter bucket name from bucket_output. 
+    """
+    Filter bucket name from bucket_output.
     Return list of bucket names present in bucket_output
     """
     output = []
@@ -244,12 +277,13 @@
     logger.debug("Bucket list: {}".format(output))
     return output

+
 def get_bucket_object(bucket_output, bucket):
-    """ 
+    """
     Return bucket dict
-    from bucket_output string for bucket(passed in argument) 
+    from bucket_output string for bucket(passed in argument)
     """
-    output = filter(lambda x: x['name'] == bucket, bucket_output)
+    output = [x for x in bucket_output if x["name"] == bucket]
     if len(output) != 1:
         ret = None
     else:
@@ -259,9 +293,9 @@


 def get_bucket_name_with_size(bucket_output, bucket):
-    """ 
-    Return `bucket_name:ramUsed` 
-    as output from bucket_output string for bucket(passed in argument) 
+    """
+    Return `bucket_name:ramUsed`
+    as output from bucket_output string for bucket(passed in argument)
     """

     logger.debug("HUHU")
@@ -277,15 +311,17 @@

 def get_bucketlist_to_namesize_list(bucket_output, bucket_list):
-    """ Return `bucket_name:ramUsed` as output from bucket_output string for each bucket(passed in bucket_list) """
+    """Return `bucket_name:ramUsed` as output from bucket_output
+    string for each bucket(passed in bucket_list)"""
     bucket_details = []
     for name in bucket_list:
         bucket_details.append(get_bucket_name_with_size(bucket_output, name))
-    logger.debug("Buckets: {} \n details : {}".format(bucket_list, bucket_details))
+    logger.debug(
+        "Buckets: {} \n details : {}".format(bucket_list, bucket_details)
+    )
     return bucket_details

-
 def sleepForSecond(sec):
     # Sleep/Pause the execution for given seconds
     logger.debug("sleeping for {}".format(sec))
@@ -294,10 +330,9 @@


 def current_time():
-    """ Return current time in format of %Y%m%d%H%M%S'"""
+    """Return current time in the format %Y%m%d%H%M%S"""
     curr_time = datetime.now()
-    return curr_time.strftime('%Y%m%d%H%M%S')
-
+    return curr_time.strftime("%Y%m%d%H%M%S")

 def get_value_of_key_from_json(json_obj, key):
@@ -308,31 +343,37 @@


 def write_file(connection, content, filename):
     """Add given data into passed filename"""
-    logger.debug("writing data {} in file {}".format(content,filename))
+    logger.debug("writing data {} in file {}".format(content, filename))
     try:
-        utilities.execute_bash(connection, CommandFactory.write_file(data=content, filename=filename))
-    except Exception as e:
+        utilities.execute_bash(
+            connection,
+            CommandFactory.write_file(data=content, filename=filename),
+        )
+    except Exception:
         logger.debug("Failed to Write into file")
         raise FileIOError("Failed to Write into file ")

-
 def check_file_present(connection, config_file_path):
-    """ return True if file is present else return False"""
+    """return True if file is present else return False"""
     try:
-        stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.check_file(config_file_path))
+        stdout, stderr, exit_code = utilities.execute_bash(
+            connection, CommandFactory.check_file(config_file_path)
+        )
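+        # CommandFactory.check_file runs "[ -f path ] && echo 'Found'",
+        # so stdout is "Found" exactly when the file exists.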
         if stdout == "Found":
             logger.debug("file path exist {}".format(config_file_path))
             return True
-    except Exception as e:
+    except Exception:
         logger.debug("File path not exist {}".format(config_file_path))
         return False


 def check_dir_present(connection, dir):
-    """ return True if directory is present else return False"""
+    """return True if directory is present else return False"""
     try:
-        stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.check_directory(dir))
+        stdout, stderr, exit_code = utilities.execute_bash(
+            connection, CommandFactory.check_directory(dir)
+        )
         if stdout == "Found":
             logger.debug("dir path found {} ".format(dir))
             return True
@@ -341,7 +382,6 @@
         return False


 def read_file(connection, filename):
     """read the file content and return the content"""
     logger.debug("Reading file {}".format(filename))
@@ -353,7 +393,9 @@
 # delete file
 def delete_file(connection, filename):
     logger.debug("Deleting file {}".format(filename))
-    stdout, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.delete_file(filename))
+    stdout, stderr, exit_code = utilities.execute_bash(
+        connection, CommandFactory.delete_file(filename)
+    )
     return [stdout, stderr, exit_code]


@@ -363,16 +405,19 @@


 def unmount_file_system(rx_connection, path):
-    """ unmount the file system which will use in cbbackup manager after post snapshot"""
+    """unmount the file system which will be used by cbbackup manager
+    after the post snapshot"""
     try:
-        utilities.execute_bash(rx_connection, CommandFactory.unmount_file_system(path))
+        utilities.execute_bash(
+            rx_connection, CommandFactory.unmount_file_system(path)
+        )
     except Exception as err:
         logger.debug("error here {}".format(str(err)))
         raise UnmountFileSystemError(str(err))


 def get_bucket_size_in_MB(bucket_size, bkt_name_size):
-    """ convert bkt size into MB if current bucket_size is zero"""
+    """convert bkt size into MB if current bucket_size is zero"""
     bkt_size_mb = 0

     if bucket_size > 0:
@@ -393,16 +438,20 @@


 def check_stale_mountpoint(connection, path):
-
-
-    output, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.df(path))
+    output, stderr, exit_code = utilities.execute_bash(
+        connection, CommandFactory.df(path)
+    )
     if exit_code != 0:
         if "No such file or directory" in stderr:
             # this is actually OK
             return False
         else:
             logger.error("df retured error - stale mount point or other error")
-            logger.error("stdout: {} stderr: {} exit_code: {}".format(output, stderr, exit_code))
+            logger.error(
+                "stdout: {} stderr: {} exit_code: {}".format(
+                    output, stderr, exit_code
+                )
+            )
             return True
     else:
         return False
@@ -412,19 +461,29 @@ def check_server_is_used(connection, path):

     ret = Status.INACTIVE

-    output, stderr, exit_code = utilities.execute_bash(connection, CommandFactory.mount())
+    output, stderr, exit_code = utilities.execute_bash(
+        connection, CommandFactory.mount()
+    )
     if exit_code != 0:
         logger.error("mount retured error")
-        logger.error("stdout: {} stderr: {} exit_code: {}".format(output, stderr, exit_code))
-        raise UserError("Problem with reading mounted file systems", "Ask OS admin to check mount", stderr)
+        logger.error(
+            "stdout: {} stderr: {} exit_code: {}".format(
+                output, stderr, exit_code
+            )
+        )
+        raise UserError(
+            "Problem with reading mounted file systems",
+            "Ask OS admin to check mount",
+            stderr,
+        )
     else:
         # parse a mount output to find another Delphix mount points
-        fs_re = re.compile(r'(\S*)\son\s(\S*)\stype\s(\S*)')
+        fs_re = re.compile(r"(\S*)\son\s(\S*)\stype\s(\S*)")
         for i in output.split("\n"):
             match = re.search(fs_re, i)
             if match is not None:
                 groups = match.groups()
-                if groups[2] and str(groups[2]).startswith('nfs'):
+                if groups[2] and str(groups[2]).startswith("nfs"):
                     if path == groups[1]:
                         # this is our mount point - skip it
                         ret = Status.ACTIVE
@@ -432,21 +491,28 @@ def check_server_is_used(connection, path):
                 if "domain0" in groups[0] and "timeflow" in groups[0]:
                     # this is a delphix mount point but it's not ours
                     # raise an exception
-                    raise UserError("Another database (VDB or staging) is using this server.", "Disable another one to provision or enable this one", "{} {}".format(groups[0], groups[1]))
-
+                    raise UserError(
+                        "Another database (VDB or staging) is using "
+                        "this server.",
+                        "Disable the other one to provision or "
+                        "enable this one",
+                        "{} {}".format(groups[0], groups[1]),
+                    )
     return ret

-
 def clean_stale_mountpoint(connection, path):
-
-
-    umount_std, umount_stderr, umount_exit_code = utilities.execute_bash(connection, CommandFactory.unmount_file_system(mount_path=path, options='-lf'))
+    umount_std, umount_stderr, umount_exit_code = utilities.execute_bash(
+        connection,
+        CommandFactory.unmount_file_system(mount_path=path, options="-lf"),
+    )
     if umount_exit_code != 0:
         logger.error("Problem with cleaning mount path")
         logger.error("stderr {}".format(umount_stderr))
-        raise UserError("Problem with cleaning mount path", "Ask OS admin to check mount points", umount_stderr)
-
-
+        raise UserError(
+            "Problem with cleaning mount path",
+            "Ask OS admin to check mount points",
+            umount_stderr,
+        )
diff --git a/src/controller/resource_builder.py b/src/controller/resource_builder.py
index 28dc30d..6525a9b 100644
--- a/src/controller/resource_builder.py
+++ b/src/controller/resource_builder.py
@@ -1,39 +1,54 @@
 #
-# Copyright (c) 2020 by Delphix. All rights reserved.
+# Copyright (c) 2020-2023 by Delphix. All rights reserved.
 #
-#######################################################################################################################
+##############################################################################
 """
 There are two purposes which this module is created for:
 Purpose1:
-    This class is being used by child classes to initialize their attributes. Child classes of this are :
-    _bucket.py,_cb_backup.py, _cluster.py, _replication.py, _xdcr.py. To add any new feature let say 'X', create a class
+    This class is used by child classes to initialize their attributes.
+    Its child classes are:
+    _bucket.py, _cb_backup.py, _cluster.py, _replication.py, _xdcr.py. To add
+    any new feature, say 'X', create a class
 for that 'X' feature in x module and make the Resource class as parent for X.
 Here we are using builder design pattern to initialize the properties.
 Reason of using this approach:
-    1: No need fixed combinations of objects
-    There could be multiple attributes combinations based on their availability. Possible combinations are like objects of
-    ('repository' + 'virtual_source' )or (' repository' +'staged_source'). Instead of creating multiple constructors,
-    followed this approach in which whatever the parameters available to object creator, pass only those.
+    1: No fixed combinations of objects are needed.
+    There can be multiple attribute combinations based on their availability.
+    Possible combinations are objects of
+    ('repository' + 'virtual_source') or ('repository' + 'staged_source').
+    Instead of creating multiple constructors,
+    we follow this approach: whatever parameters are available to the object
+    creator, only those are passed.
 Remaining class attributes will be set as 'None'.
-    To create object use below format, type of obj is Resource. 
`Example`:
-    obj=Resource.ObjectBuilder().set_snapshot_parameters("SnapshotParams").set_snapshot("Snapshot").set_dsource(False).build()
-    Also we must end the object creation with build(), after which only ObjectBuilder will get to know about no more
+    To create an object, use the below format; the type of obj is Resource.
+    `Example`:
+    obj=Resource.ObjectBuilder().set_snapshot_parameters("SnapshotParams").
+    set_snapshot("Snapshot").set_dsource(False).build()
+    Also we must end the object creation with build(); this is how
+    ObjectBuilder knows that there are no more
 attributes to set.
-    2: No need to remember which constructor should be called for any particular purpose
+    2: No need to remember which constructor should be called for any
+    particular purpose
     3: No need to remember the order of parameters
-    4: If you want to add other parameters in this class, refactoring will be easier in this approach
+    4: If you want to add other parameters in this class, refactoring will be
+    easier in this approach

Part2:
-    __metaclass__ of this class is DatabaseExceptionHandlerMeta. All child classes of Resource will automatically
-    inherit this property. Child classes will be decorated with small features for now, which we can scale.
+    __metaclass__ of this class is DatabaseExceptionHandlerMeta. All child
+    classes of Resource will automatically
+    inherit this property. Child classes will be decorated with small features
+    for now, which we can scale.
     Current usage: more readable logs and handling of ignorable exceptions.
-    Basically there is a decorator(inside metaclass) which is being applied on all methods defined inside the child class.
-    Through this design, no need to write decorators on top of each function manually.
+    Basically there is a decorator (inside the metaclass) which is applied on
+    all methods defined inside the child class.
+    Through this design, there is no need to write decorators on top of each
+    function manually.
 """
-#######################################################################################################################
+##############################################################################

 import logging
+
 from controller.db_exception_handler import DatabaseExceptionHandlerMeta

 logger = logging.getLogger(__name__)

@@ -44,14 +59,20 @@ class Resource(object):

     def __init__(self, builder):
         """
-        It requires the builder object to initialize the parameters of this class.
+        It requires the builder object to initialize the parameters of this
+        class.
         builder is object of inner class: ObjectBuilder
        :param builder:
         :return Object of Resource
         """
-        # Validating the type of builder. It must be of two type (type or Resource). Else it will raise an Exception for
+        # Validating the type of builder. It must be one of two types
+        # (type or Resource). Else it will raise an Exception for
         # other cases like string, int or object of any other class.
-        if isinstance(builder, type) or isinstance(builder, Resource) or builder.__class__.__name__ == 'Resource':
+        if (
+            isinstance(builder, type)
+            or isinstance(builder, Resource)
+            or builder.__class__.__name__ == "Resource"
+        ):
             self.connection = builder.connection
             self.repository = builder.repository
             self.source_config = builder.source_config
@@ -62,13 +83,20 @@ def __init__(self, builder):
             self.dSource = builder.dSource
             self.parameters = builder.parameters
         else:
-            logger.debug("Error, Expected builder object, Found: {} ".format(type(builder)))
+            logger.debug(
+                "Error, Expected builder object, Found: {} ".format(
+                    type(builder)
+                )
+            )
             raise Exception(
-                "Failed to initialize the Resource object. Expected: ObjectBuilder, Found: {} ".format(type(builder)))
+                "Failed to initialize the Resource object. Expected: "
+                "ObjectBuilder, Found: {} ".format(type(builder))
+            )

     class ObjectBuilder(object):
         # Below are the same parameters which is required in Resource class
-        # All setters must be decorated with classmethod, because there will not be any instance of ObjectBuilder
+        # All setters must be decorated with classmethod, because there will
+        # not be any instance of ObjectBuilder
         connection = None
         repository = None
         source_config = None
@@ -127,21 +155,32 @@ def set_dsource(cls, is_dSource=True):
             cls.dSource = is_dSource
             return cls

-        # it must be last step in order to provide the outer class object(Resource)
+        # it must be the last step in order to provide the outer class
+        # object (Resource)
         @classmethod
         def build(cls):
             if cls.dSource is None:
-                raise Exception("If this object is for dSource then set True else set it False")
+                raise Exception(
+                    "Set dSource to True if this object is for a dSource, "
+                    "else set it to False"
+                )
             return Resource(cls)
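+        # Illustrative chain (mirrors the module docstring): the classmethod
+        # setters return the class itself, so
+        # Resource.ObjectBuilder.set_snapshot("Snapshot").set_dsource(
+        #     False).build() yields a Resource without instantiating
+        # ObjectBuilder.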
     def __repr__(self):
         """
-        overriding the __repr__ method. To print contents of Resource object, use print(obj)
+        overriding the __repr__ method. To print contents of Resource object,
+        use print(obj)
         :return:None
         """
-        return "\nObjectBuilder(connection: {0.connection!r}, repository: {0.repository!r}, \n source_config: {0.source_config!r}, snapshot_parameters:{0.snapshot_parameters!r},\
-            staged_source: {0.staged_source!r}, virtual_source:{0.virtual_source!r}, snapshot: {0.snapshot!r}, parameters:{0.parameters!r},dSource: {0.dSource!r})".format(
-            self)
+        return (
+            "\nObjectBuilder(connection: {0.connection!r}, repository: "
+            "{0.repository!r}, \n source_config: {0.source_config!r}, "
+            "snapshot_parameters:{0.snapshot_parameters!r}, "
+            "staged_source: {0.staged_source!r}, "
+            "virtual_source:{0.virtual_source!r}, snapshot: "
+            "{0.snapshot!r}, parameters:{0.parameters!r},dSource: "
+            "{0.dSource!r})".format(self)
+        )

     def __str__(self):
-        return repr(self)
\ No newline at end of file
+        return repr(self)
diff --git a/src/db_commands/commands.py b/src/db_commands/commands.py
index bfaeb33..bfbd46f 100644
--- a/src/db_commands/commands.py
+++ b/src/db_commands/commands.py
@@ -1,25 +1,31 @@
 #
-# Copyright (c) 2020 by Delphix. All rights reserved.
+# Copyright (c) 2020-2023 by Delphix. All rights reserved.
 #
-#######################################################################################################################
+##############################################################################
 """
-CommandFactory class contains all commands required to perform couchbase and OS related operations
-These are a list of commands which are being used in this project. Have segregated both types of commands into two
-classes DatabaseCommand and OSCommand. CommandFactory is the actual class through which the command string will be
-returned. In the last section of this file, we have created small tests for all these commands with dummy values.
-Through which we can see the actual command is going to execute. All methods are decorated to @staticmethod,
-so no need to create an object of the class, we can use the direct class name to use any command method
+The CommandFactory class contains all commands required to perform couchbase
+and OS related operations.
+These commands are used throughout this project. We have
+segregated the two types of commands into two
+classes, DatabaseCommand and OSCommand. CommandFactory is the actual class
+through which the command string will be
+returned. In the last section of this file, we have created small tests for
+all these commands with dummy values.
+Through these we can see the actual command that is going to execute. All
+methods are decorated with @staticmethod,
+so there is no need to create an object of the class; we can use the class
+name directly to call any command method.
 """
-#######################################################################################################################
+##############################################################################
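+# Example (hypothetical path): CommandFactory.find_shell_path(
+# "/opt/couchbase/bin") returns the string
+# "find /opt/couchbase/bin -name couchbase-cli", which helper_lib runs
+# via utilities.execute_bash.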

 import logging
 import urllib.parse

-
 logger = logging.getLogger(__name__)

+
 class OSCommand(object):
     def __init__(self):
         pass
@@ -30,27 +36,37 @@ def find_binary_path(**kwargs):

     @staticmethod
     def find_install_path(binary_path, **kwargs):
-        return "find {binary_path} -name couchbase-server".format(binary_path=binary_path)
+        return "find {binary_path} -name couchbase-server".format(
+            binary_path=binary_path
+        )

     @staticmethod
     def find_shell_path(binary_path, **kwargs):
-        return "find {binary_path} -name couchbase-cli".format(binary_path=binary_path)
+        return "find {binary_path} -name couchbase-cli".format(
+            binary_path=binary_path
+        )

     @staticmethod
     def get_process():
         return "ps -ef"

     @staticmethod
-    def make_directory(directory_path,sudo=False, uid=None, **kwargs):
+    def make_directory(directory_path, sudo=False, uid=None, **kwargs):
         if sudo:
-            return "sudo -u \#{uid} mkdir -p {directory_path}".format(uid=uid, directory_path=directory_path)
+            return "sudo -u \#{uid} mkdir -p {directory_path}".format(
+                uid=uid, directory_path=directory_path
+            )
         else:
-            return "mkdir -p {directory_path}".format(directory_path=directory_path)
+            return "mkdir -p {directory_path}".format(
+                directory_path=directory_path
+            )

     @staticmethod
-    def change_permission(path,sudo=False, uid=None, **kwargs):
+    def change_permission(path, sudo=False, uid=None, **kwargs):
         if sudo:
-            return "sudo -u \#{uid} chmod -R 775 {path}".format(uid=uid, path=path)
+            return "sudo -u \#{uid} chmod -R 775 {path}".format(
+                uid=uid, path=path
+            )
         else:
             return "chmod -R 775 {path}".format(path=path)

@@ -65,9 +81,13 @@ def read_file(filename, **kwargs):

     @staticmethod
     def check_file(file_path, sudo=False, uid=None, **kwargs):
         if sudo:
-            return "sudo -u \#{uid} [ -f {file_path} ] && echo 'Found'".format(file_path=file_path, uid=uid)
+            return "sudo -u \#{uid} [ -f {file_path} ] && echo 'Found'".format(
+                file_path=file_path, uid=uid
+            )
         else:
-            return "[ -f {file_path} ] && echo 'Found'".format(file_path=file_path)
+            return "[ -f {file_path} ] && echo 'Found'".format(
+                file_path=file_path
+            )

     @staticmethod
     def write_file(filename, data, **kwargs):
@@ -80,9 +100,13 @@ def get_ip_of_hostname(**kwargs):

     @staticmethod
     def check_directory(dir_path, sudo=False, uid=None, **kwargs):
         if sudo:
-            
return "sudo -u \#{uid} [ -d {dir_path} ] && echo 'Found'".format(dir_path=dir_path, uid=uid) + return "sudo -u \#{uid} [ -d {dir_path} ] && echo 'Found'".format( + dir_path=dir_path, uid=uid + ) else: - return "[ -d {dir_path} ] && echo 'Found'".format(dir_path=dir_path) + return "[ -d {dir_path} ] && echo 'Found'".format( + dir_path=dir_path + ) @staticmethod def delete_file(filename, **kwargs): @@ -91,30 +115,44 @@ def delete_file(filename, **kwargs): @staticmethod def delete_dir(dirname, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{uid} rm -rf {dirname}".format(dirname=dirname, uid=uid) + return "sudo -u \#{uid} rm -rf {dirname}".format( + dirname=dirname, uid=uid + ) else: return "rm -rf {dirname}".format(dirname=dirname) @staticmethod def os_mv(srcname, trgname, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{uid} mv {srcname} {trgname}".format(srcname=srcname, trgname=trgname, uid=uid) + return "sudo -u \#{uid} mv {srcname} {trgname}".format( + srcname=srcname, trgname=trgname, uid=uid + ) else: - return "mv {srcname} {trgname}".format(srcname=srcname, trgname=trgname) + return "mv {srcname} {trgname}".format( + srcname=srcname, trgname=trgname + ) @staticmethod def os_cp(srcname, trgname, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{uid} cp {srcname} {trgname}".format(srcname=srcname, trgname=trgname, uid=uid) + return "sudo -u \#{uid} cp {srcname} {trgname}".format( + srcname=srcname, trgname=trgname, uid=uid + ) else: - return "cp {srcname} {trgname}".format(srcname=srcname, trgname=trgname, uid=uid) + return "cp {srcname} {trgname}".format( + srcname=srcname, trgname=trgname + ) @staticmethod def os_cpr(srcname, trgname, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{uid} cp -r {srcname} {trgname}".format(srcname=srcname, trgname=trgname, uid=uid) + return "sudo -u \#{uid} cp -r {srcname} {trgname}".format( + srcname=srcname, trgname=trgname, uid=uid + ) else: - return "cp -r {srcname} {trgname}".format(srcname=srcname, trgname=trgname) + return "cp -r {srcname} {trgname}".format( + srcname=srcname, trgname=trgname + ) @staticmethod def get_dlpx_bin(**kwargs): @@ -123,35 +161,32 @@ def get_dlpx_bin(**kwargs): @staticmethod def unmount_file_system(mount_path, **kwargs): if "options" in kwargs: - options = kwargs.pop('options') + options = kwargs.pop("options") else: options = "" - return "sudo /bin/umount {options} {mount_path}".format(mount_path=mount_path, options=options) + return "sudo /bin/umount {options} {mount_path}".format( + mount_path=mount_path, options=options + ) @staticmethod def whoami(**kwargs): - # uid=1003(delphix) gid=1003(delphix) groups=1003(delphix) context=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 return "id" - @staticmethod def sed(filename, regex, sudo=False, uid=None, **kwargs): if sudo: - return 'sudo -u \#{uid} sed -i -e "{regex}" {filename}'.format(regex=regex, filename=filename, uid=uid) + return 'sudo -u \#{uid} sed -i -e "{regex}" {filename}'.format( + regex=regex, filename=filename, uid=uid + ) else: return 'sed -i -e "{}" {}'.format(regex, filename) - @staticmethod def cat(path, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{uid} cat {path}".format( - path=path, uid=uid - ) + return "sudo -u \#{uid} cat {path}".format(path=path, uid=uid) else: - return "cat {path}".format( - path=path - ) + return "cat {path}".format(path=path) @staticmethod def df(mount_path, **kwargs): @@ -161,10 +196,13 @@ def df(mount_path, **kwargs): def mount(**kwargs): return 
"mount" - @staticmethod def resolve_name(hostname, **kwargs): - return "getent ahostsv4 {hostname} | grep STREAM | head -n 1 | cut -d ' ' -f 1".format(hostname=hostname) + return ( + "getent ahostsv4 {hostname} | grep STREAM | head -n 1 | " + "cut -d ' ' -f 1".format(hostname=hostname) + ) + class DatabaseCommand(object): def __init__(self): @@ -192,9 +230,15 @@ def get_parent_expect_block(): @staticmethod def start_couchbase(install_path, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{} {install_path} \-- -noinput -detached .".format(uid, install_path=install_path) + return ( + "sudo -u \#{} {install_path} \-- -noinput -detached .".format( + uid, install_path=install_path + ) + ) else: - return "{install_path} \-- -noinput -detached .".format(install_path=install_path) + return "{install_path} \-- -noinput -detached .".format( + install_path=install_path + ) @staticmethod def get_version(install_path): @@ -202,64 +246,77 @@ def get_version(install_path): @staticmethod def get_ids(install_path): - #-rwxr-xr-x. 1 996 993 514 Jan 30 2020 /opt/couchbase/bin/couchbase-cli return "ls -n {install_path}".format(install_path=install_path) - @staticmethod def get_data_directory(couchbase_base_dir): - return "cat {couchbase_base_dir}/etc/couchbase/static_config|grep path_config_datadir".format( - couchbase_base_dir=couchbase_base_dir) + return ( + "cat {couchbase_base_dir}/etc/couchbase/static_config|grep " + "path_config_datadir".format(couchbase_base_dir=couchbase_base_dir) + ) @staticmethod def stop_couchbase(install_path, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{} {install_path} -k".format(uid, install_path=install_path) + return "sudo -u \#{} {install_path} -k".format( + uid, install_path=install_path + ) else: return "{install_path} -k".format(install_path=install_path) @staticmethod - def cluster_init(shell_path, - hostname, - port, - username, - cluster_ramsize, - cluster_name, - cluster_index_ramsize, - cluster_fts_ramsize, - cluster_eventing_ramsize, - cluster_analytics_ramsize, - additional_services, - **kwargs - ): - return "{shell_path} cluster-init --cluster {hostname}:{port} --cluster-username {username} --cluster-password $password --cluster-ramsize {cluster_ramsize} --cluster-name {cluster_name} --cluster-index-ramsize {cluster_index_ramsize} --cluster-fts-ramsize {cluster_fts_ramsize} --cluster-eventing-ramsize {cluster_eventing_ramsize} --cluster-analytics-ramsize {cluster_analytics_ramsize} --services data,index,{additional_services}".format( - shell_path=shell_path, - hostname=hostname, - username=username, - port=port, - cluster_ramsize=cluster_ramsize, - cluster_name=cluster_name, - cluster_index_ramsize=cluster_index_ramsize, - cluster_fts_ramsize=cluster_fts_ramsize, - cluster_eventing_ramsize=cluster_eventing_ramsize, - cluster_analytics_ramsize=cluster_analytics_ramsize, - additional_services=additional_services - ) - - @staticmethod - def cluster_init_rest_expect(shell_path, - hostname, - port, - username, - cluster_ramsize, - cluster_name, - cluster_index_ramsize, - cluster_fts_ramsize, - cluster_eventing_ramsize, - cluster_analytics_ramsize, - additional_services, - **kwargs - ): + def cluster_init( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + additional_services, + **kwargs, + ): + return ( + "{shell_path} cluster-init --cluster {hostname}:{port} " + "--cluster-username {username} 
--cluster-password $password " + "--cluster-ramsize {cluster_ramsize} --cluster-name " + "{cluster_name} --cluster-index-ramsize {cluster_index_ramsize}" + " --cluster-fts-ramsize {cluster_fts_ramsize} " + "--cluster-eventing-ramsize {cluster_eventing_ramsize} " + "--cluster-analytics-ramsize {cluster_analytics_ramsize} " + "--services data,index,{additional_services}".format( + shell_path=shell_path, + hostname=hostname, + username=username, + port=port, + cluster_ramsize=cluster_ramsize, + cluster_name=cluster_name, + cluster_index_ramsize=cluster_index_ramsize, + cluster_fts_ramsize=cluster_fts_ramsize, + cluster_eventing_ramsize=cluster_eventing_ramsize, + cluster_analytics_ramsize=cluster_analytics_ramsize, + additional_services=additional_services, + ) + ) + + @staticmethod + def cluster_init_rest_expect( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + additional_services, + **kwargs, + ): payload_data = { "hostname": "127.0.0.1", "username": username, @@ -273,16 +330,23 @@ def cluster_init_rest_expect(shell_path, "indexerStorageMode": kwargs.get("indexerStorageMode"), "afamily": "ipv4", "afamilyOnly": "false", - "nodeEncryption": "off" + "nodeEncryption": "off", } if cluster_eventing_ramsize is not None: payload_data["eventingMemoryQuota"] = cluster_eventing_ramsize if cluster_analytics_ramsize is not None: payload_data["cbasMemoryQuota"] = cluster_analytics_ramsize - payload_data["services"] = payload_data["services"].replace("data", "kv").replace("query", "n1ql") + payload_data["services"] = ( + payload_data["services"] + .replace("data", "kv") + .replace("query", "n1ql") + ) payload_string = urllib.parse.urlencode(payload_data) - command = f'echo \"$PAYLOAD_SECRET\" | curl -d @- -X POST http://127.0.0.1:{port}/clusterInit -u {username}' + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"http://127.0.0.1:{port}/clusterInit -u {username}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -302,158 +366,255 @@ def cluster_init_rest_expect(shell_path, "CB_PWD": kwargs.get("password"), "CB_CMD": "/tmp/run_shell.sh", "SHELL_DATA": command, - "PAYLOAD_SECRET": payload_string + "PAYLOAD_SECRET": payload_string, } return expect_block, env_vars - - - @staticmethod - def cluster_setting(shell_path, hostname, port, username, cluster_ramsize, cluster_name, cluster_index_ramsize, - cluster_fts_ramsize, cluster_eventing_ramsize, cluster_analytics_ramsize, **kwargs): - return "{shell_path} setting-cluster -c {hostname}:{port} -u {username} -p $password --cluster-ramsize {cluster_ramsize} --cluster-name {cluster_name} --cluster-index-ramsize {cluster_index_ramsize} --cluster-fts-ramsize {cluster_fts_ramsize} --cluster-eventing-ramsize {cluster_eventing_ramsize} --cluster-analytics-ramsize {cluster_analytics_ramsize}".format( - shell_path=shell_path, - hostname=hostname, - port=port, - username=username, - cluster_ramsize=cluster_ramsize, - cluster_name=cluster_name, - cluster_index_ramsize=cluster_index_ramsize, - cluster_fts_ramsize=cluster_fts_ramsize, - cluster_eventing_ramsize=cluster_eventing_ramsize, - cluster_analytics_ramsize=cluster_analytics_ramsize + def cluster_setting( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + 
cluster_analytics_ramsize, + **kwargs, + ): + return ( + "{shell_path} setting-cluster -c {hostname}:{port} -u " + "{username} -p $password --cluster-ramsize {cluster_ramsize} " + "--cluster-name {cluster_name} " + "--cluster-index-ramsize {cluster_index_ramsize} " + "--cluster-fts-ramsize {cluster_fts_ramsize} " + "--cluster-eventing-ramsize {cluster_eventing_ramsize} " + "--cluster-analytics-ramsize " + "{cluster_analytics_ramsize}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + cluster_ramsize=cluster_ramsize, + cluster_name=cluster_name, + cluster_index_ramsize=cluster_index_ramsize, + cluster_fts_ramsize=cluster_fts_ramsize, + cluster_eventing_ramsize=cluster_eventing_ramsize, + cluster_analytics_ramsize=cluster_analytics_ramsize, + ) ) @staticmethod - def cluster_setting_expect(shell_path, hostname, port, username, cluster_ramsize, - cluster_name, cluster_index_ramsize, - cluster_fts_ramsize, cluster_eventing_ramsize, - cluster_analytics_ramsize, **kwargs): - command = f"{shell_path} setting-cluster -c {hostname}:{port} -u {username} --password --cluster-ramsize {cluster_ramsize} --cluster-name {cluster_name} --cluster-index-ramsize {cluster_index_ramsize} --cluster-fts-ramsize {cluster_fts_ramsize} --cluster-eventing-ramsize {cluster_eventing_ramsize} --cluster-analytics-ramsize {cluster_analytics_ramsize}" + def cluster_setting_expect( + shell_path, + hostname, + port, + username, + cluster_ramsize, + cluster_name, + cluster_index_ramsize, + cluster_fts_ramsize, + cluster_eventing_ramsize, + cluster_analytics_ramsize, + **kwargs, + ): + command = ( + f"{shell_path} setting-cluster -c {hostname}:{port} -u " + f"{username} --password --cluster-ramsize " + f"{cluster_ramsize} --cluster-name {cluster_name} " + f"--cluster-index-ramsize {cluster_index_ramsize} " + f"--cluster-fts-ramsize {cluster_fts_ramsize} " + f"--cluster-eventing-ramsize {cluster_eventing_ramsize} " + f"--cluster-analytics-ramsize {cluster_analytics_ramsize}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def xdcr_setup(shell_path, source_hostname, source_port, source_username, hostname, port, username, cluster_name, **kwargs): - return "{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --create --xdcr-hostname {hostname}:{port} --xdcr-username {username} --xdcr-password $password --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - hostname=hostname, - port=port, - username=username, - cluster_name=cluster_name + def xdcr_setup( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-setup --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + 
"$source_password --create --xdcr-hostname {hostname}:{port} " + "--xdcr-username {username} --xdcr-password $password " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + hostname=hostname, + port=port, + username=username, + cluster_name=cluster_name, + ) ) @staticmethod - def xdcr_setup_expect(shell_path, source_hostname, source_port, source_username, - hostname, port, username, cluster_name, **kwargs): + def xdcr_setup_expect( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): payload_data = { "username": username, "password": kwargs.get("password"), "hostname": f"{hostname}:{port}", "name": cluster_name, - "demandEncryption": 0 + "demandEncryption": 0, } payload_string = urllib.parse.urlencode(payload_data) - command = f"echo \"$PAYLOAD_SECRET\" | curl -d @- -X POST http://{source_hostname}:{source_port}/pools/default/remoteClusters -u {source_username}" + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"http://{source_hostname}:{source_port}/pools/default/" + f"remoteClusters -u {source_username}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter host password for user.*" { - send "${env(CB_PWD)}\n" - set timeout -1 - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") env_vars = { "CB_PWD": kwargs.get("source_password"), "CB_CMD": "/tmp/run_shell.sh", "SHELL_DATA": command, - "PAYLOAD_SECRET": payload_string + "PAYLOAD_SECRET": payload_string, } return expect_block, env_vars @staticmethod - def xdcr_replicate(shell_path, source_hostname, source_port, source_username, source_bucket_name, target_bucket_name, cluster_name, hostname, port, username, **kwargs): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --create --xdcr-from-bucket {source_bucket_name} --xdcr-to-bucket {target_bucket_name} --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - source_bucket_name=source_bucket_name, - target_bucket_name=target_bucket_name, - cluster_name=cluster_name + def xdcr_replicate( + shell_path, + source_hostname, + source_port, + source_username, + source_bucket_name, + target_bucket_name, + cluster_name, + hostname, + port, + username, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --create --xdcr-from-bucket " + "{source_bucket_name} --xdcr-to-bucket {target_bucket_name} " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + source_bucket_name=source_bucket_name, + target_bucket_name=target_bucket_name, + cluster_name=cluster_name, + ) ) @staticmethod - def xdcr_replicate_expect(shell_path, source_hostname, source_port, - source_username, source_bucket_name, - target_bucket_name, cluster_name, hostname, port, 
- username, **kwargs): - command = f"{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password --create --xdcr-from-bucket {source_bucket_name} --xdcr-to-bucket {target_bucket_name} --xdcr-cluster-name {cluster_name}" + def xdcr_replicate_expect( + shell_path, + source_hostname, + source_port, + source_username, + source_bucket_name, + target_bucket_name, + cluster_name, + hostname, + port, + username, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--create --xdcr-from-bucket {source_bucket_name} " + f"--xdcr-to-bucket {target_bucket_name} " + f"--xdcr-cluster-name {cluster_name}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def get_replication_uuid(shell_path, source_hostname, source_port, source_username, **kwargs): - return "{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --list".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, + def get_replication_uuid( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + return ( + "{shell_path} xdcr-setup --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --list".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + ) ) - - - - @staticmethod - def get_replication_uuid_expect(shell_path, source_hostname, source_port, - source_username, **kwargs): - command = f"{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password --list" + def get_replication_uuid_expect( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + command = ( + f"{shell_path} xdcr-setup --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--list" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -468,26 +629,45 @@ def get_replication_uuid_expect(shell_path, source_hostname, source_port, }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def get_stream_id(shell_path, source_hostname, source_port, source_username, cluster_name, **kwargs): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --xdcr-cluster-name {cluster_name} --list".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - 
source_username=source_username, - cluster_name=cluster_name + def get_stream_id( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --xdcr-cluster-name {cluster_name} " + "--list".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + cluster_name=cluster_name, + ) ) @staticmethod - def get_stream_id_expect(shell_path, source_hostname, source_port, - source_username, cluster_name, **kwargs): - command = f"{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password --xdcr-cluster-name {cluster_name} --list" + def get_stream_id_expect( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} " + f"--password --xdcr-cluster-name {cluster_name} --list" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -502,167 +682,268 @@ def get_stream_id_expect(shell_path, source_hostname, source_port, }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def pause_replication(shell_path, source_hostname, source_port, source_username, cluster_name, id, **kwargs): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --xdcr-cluster-name {cluster_name} --pause --xdcr-replicator={id}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - cluster_name=cluster_name, - id=id + def pause_replication( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --xdcr-cluster-name {cluster_name} " + "--pause --xdcr-replicator={id}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + cluster_name=cluster_name, + id=id, + ) ) @staticmethod - def pause_replication_expect(shell_path, source_hostname, source_port, - source_username, cluster_name, id, **kwargs): - command = f"{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password --xdcr-cluster-name {cluster_name} --pause --xdcr-replicator={id}" + def pause_replication_expect( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} " + f"--password --xdcr-cluster-name {cluster_name} " + f"--pause --xdcr-replicator={id}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - timeout { - puts "EXPECT SCRIPT 
TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def resume_replication(shell_path, source_hostname, source_port, source_username, cluster_name, id, **kwargs): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --xdcr-cluster-name {cluster_name} --resume --xdcr-replicator={id}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - cluster_name=cluster_name, - id=id + def resume_replication( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --xdcr-cluster-name {cluster_name} " + "--resume --xdcr-replicator={id}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + cluster_name=cluster_name, + id=id, + ) ) @staticmethod - def resume_replication_expect(shell_path, source_hostname, source_port, - source_username, cluster_name, id, **kwargs): - command = f"{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password --xdcr-cluster-name {cluster_name} --resume --xdcr-replicator={id}" + def resume_replication_expect( + shell_path, + source_hostname, + source_port, + source_username, + cluster_name, + id, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--xdcr-cluster-name {cluster_name} --resume " + f"--xdcr-replicator={id}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def delete_replication(shell_path, source_hostname, source_port, source_username, id, cluster_name, **kwargs): - return "{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --delete --xdcr-replicator {id} --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - id=id, - cluster_name=cluster_name + def delete_replication( + shell_path, + source_hostname, + source_port, + source_username, + id, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-replicate --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --delete 
--xdcr-replicator {id} " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + id=id, + cluster_name=cluster_name, + ) ) @staticmethod - def delete_replication_expect(shell_path, source_hostname, source_port, - source_username, id, cluster_name, **kwargs): - command = f"{shell_path} xdcr-replicate --cluster {source_hostname}:{source_port} --username {source_username} --password --delete --xdcr-replicator {id} --xdcr-cluster-name {cluster_name}" + def delete_replication_expect( + shell_path, + source_hostname, + source_port, + source_username, + id, + cluster_name, + **kwargs, + ): + command = ( + f"{shell_path} xdcr-replicate --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"--delete --xdcr-replicator {id} --xdcr-cluster-name " + f"{cluster_name}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def xdcr_delete(shell_path, source_hostname, source_port, source_username, hostname, port, username, cluster_name, **kwargs): - return "{shell_path} xdcr-setup --cluster {source_hostname}:{source_port} --username {source_username} --password $source_password --delete --xdcr-hostname {hostname}:{port} --xdcr-username {username} --xdcr-password $password --xdcr-cluster-name {cluster_name}".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, - hostname=hostname, - port=port, - username=username, - cluster_name=cluster_name + def xdcr_delete( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + return ( + "{shell_path} xdcr-setup --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$source_password --delete --xdcr-hostname {hostname}:{port} " + "--xdcr-username {username} --xdcr-password $password " + "--xdcr-cluster-name {cluster_name}".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + hostname=hostname, + port=port, + username=username, + cluster_name=cluster_name, + ) ) @staticmethod - def xdcr_delete_expect(shell_path, source_hostname, source_port, source_username, - hostname, port, username, cluster_name, **kwargs): - command = f"curl -X DELETE http://{source_hostname}:{source_port}/pools/default/remoteClusters/{cluster_name} -u {source_username}" + def xdcr_delete_expect( + shell_path, + source_hostname, + source_port, + source_username, + hostname, + port, + username, + cluster_name, + **kwargs, + ): + command = ( + f"curl -X DELETE http://{source_hostname}:{source_port}/" + f"pools/default/remoteClusters/{cluster_name} -u " + f"{source_username}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn 
${env(CB_CMD)} - expect { - -re "Enter host password for user.*" { - send "${env(CB_PWD)}\n" - set timeout -1 - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("source_password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("source_password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def get_source_bucket_list(shell_path, source_hostname, source_port, source_username, **kwargs): - return "{shell_path} bucket-list --cluster {source_hostname}:{source_port} --username {source_username} --password $password -o json".format( - shell_path=shell_path, - source_hostname=source_hostname, - source_port=source_port, - source_username=source_username, + def get_source_bucket_list( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + return ( + "{shell_path} bucket-list --cluster {source_hostname}:" + "{source_port} --username {source_username} --password " + "$password -o json".format( + shell_path=shell_path, + source_hostname=source_hostname, + source_port=source_port, + source_username=source_username, + ) ) @staticmethod - def get_source_bucket_list_expect(shell_path, source_hostname, source_port, source_username, **kwargs): - command = f"{shell_path} bucket-list --cluster {source_hostname}:{source_port} --username {source_username} --password -o json" + def get_source_bucket_list_expect( + shell_path, source_hostname, source_port, source_username, **kwargs + ): + command = ( + f"{shell_path} bucket-list --cluster {source_hostname}:" + f"{source_port} --username {source_username} --password " + f"-o json" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -677,21 +958,27 @@ def get_source_bucket_list_expect(shell_path, source_hostname, source_port, sour }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod def get_server_list(shell_path, hostname, port, username, **kwargs): - return "{shell_path} server-list --cluster {hostname}:{port} --username {username} --password $password".format( - shell_path=shell_path, hostname=hostname, port=port, username=username + return ( + "{shell_path} server-list --cluster {hostname}:{port} " + "--username {username} --password $password".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) ) @staticmethod def get_server_list_expect(shell_path, hostname, port, username, **kwargs): - command = f"{shell_path} server-list --cluster {hostname}:{port} --username {username} --password" + command = ( + f"{shell_path} server-list --cluster {hostname}:{port} " + f"--username {username} --password" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -706,21 +993,33 @@ def get_server_list_expect(shell_path, hostname, port, username, **kwargs): }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars 
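For orientation, every `*_expect` factory in this file follows the same contract: it returns a rendered expect script together with the `env_vars` that the script reads (`CB_CMD` names the command to spawn, `CB_PWD` carries the password), so secrets are delivered through the process environment rather than through argv. A minimal, self-contained sketch of how such a pair could be executed is below; the `run_expect` helper is hypothetical, assumes an `expect` binary on the PATH, and merely stands in for the plugin's own remote-execution utilities: import os import subprocess import tempfile def run_expect(expect_block, env_vars): # Write the rendered expect script to disk, then run it with expect. # The password travels only via the CB_PWD environment variable, so it # never appears in `ps` output or shell history. with tempfile.NamedTemporaryFile("w", suffix=".exp", delete=False) as f: f.write(expect_block) try: env = dict(os.environ) env.update({k: v for k, v in env_vars.items() if v is not None}) result = subprocess.run( ["expect", f.name], env=env, capture_output=True, text=True ) return result.returncode, result.stdout finally: os.unlink(f.name) Under those assumptions, something like run_expect(*get_server_list_expect(shell_path, hostname, port, username, password=password)) would list the cluster nodes without the password ever appearing on the command line.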
@staticmethod def node_init(shell_path, port, username, data_path, **kwargs): - return "{shell_path} node-init --cluster 127.0.0.1:{port} --username {username} --password $password --node-init-data-path {data_path} --node-init-index-path {data_path} --node-init-analytics-path {data_path} --node-init-hostname 127.0.0.1".format( - shell_path=shell_path, port=port, username=username, data_path=data_path + return ( + "{shell_path} node-init --cluster 127.0.0.1:{port} " + "--username {username} --password $password " + "--node-init-data-path {data_path} --node-init-index-path " + "{data_path} --node-init-analytics-path {data_path} " + "--node-init-hostname 127.0.0.1".format( + shell_path=shell_path, + port=port, + username=username, + data_path=data_path, + ) ) @staticmethod def node_init_expect(shell_path, port, username, data_path, **kwargs): - command = f"{shell_path} node-init --cluster 127.0.0.1:{port} --username {username} --password --node-init-data-path {data_path} --node-init-index-path {data_path} --node-init-analytics-path {data_path} --node-init-hostname 127.0.0.1" + command = ( + f"{shell_path} node-init --cluster 127.0.0.1:{port} " + f"--username {username} --password --node-init-data-path " + f"{data_path} --node-init-index-path {data_path} " + f"--node-init-analytics-path {data_path} " + f"--node-init-hostname 127.0.0.1" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -735,23 +1034,47 @@ def node_init_expect(shell_path, port, username, data_path, **kwargs): }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def bucket_edit(shell_path, hostname, port, username, bucket_name, flush_value, **kwargs): - return "{shell_path} bucket-edit --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name} --enable-flush {flush_value}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name, - flush_value=flush_value + def bucket_edit( + shell_path, + hostname, + port, + username, + bucket_name, + flush_value, + **kwargs, + ): + return ( + "{shell_path} bucket-edit --cluster {hostname}:{port} " + "--username {username} --password $password " + "--bucket={bucket_name} --enable-flush {flush_value}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + flush_value=flush_value, + ) ) @staticmethod - def bucket_edit_expect(shell_path, hostname, port, username, bucket_name, - flush_value, **kwargs): - command = f"{shell_path} bucket-edit --cluster {hostname}:{port} --username {username} --password --bucket={bucket_name} --enable-flush {flush_value}" + def bucket_edit_expect( + shell_path, + hostname, + port, + username, + bucket_name, + flush_value, + **kwargs, + ): + command = ( + f"{shell_path} bucket-edit --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name} " + f"--enable-flush {flush_value}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -766,23 +1089,35 @@ def bucket_edit_expect(shell_path, hostname, port, username, bucket_name, }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = 
{"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def bucket_edit_ramquota(shell_path, hostname, port, username, bucket_name, ramsize, **kwargs): - return "{shell_path} bucket-edit --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name} --bucket-ramsize {ramsize}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name, - ramsize=ramsize + def bucket_edit_ramquota( + shell_path, hostname, port, username, bucket_name, ramsize, **kwargs + ): + return ( + "{shell_path} bucket-edit --cluster {hostname}:{port} " + "--username {username} --password $password " + "--bucket={bucket_name} --bucket-ramsize {ramsize}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + ramsize=ramsize, + ) ) @staticmethod - def bucket_edit_ramquota_expect(shell_path, hostname, port, username, bucket_name, - ramsize, **kwargs): - command = f"{shell_path} bucket-edit --cluster {hostname}:{port} --username {username} --password --bucket={bucket_name} --bucket-ramsize {ramsize}" + def bucket_edit_ramquota_expect( + shell_path, hostname, port, username, bucket_name, ramsize, **kwargs + ): + command = ( + f"{shell_path} bucket-edit --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name} " + f"--bucket-ramsize {ramsize}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -797,22 +1132,33 @@ def bucket_edit_ramquota_expect(shell_path, hostname, port, username, bucket_nam }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def bucket_delete(shell_path, hostname, port, username, bucket_name, **kwargs): - return "{shell_path} bucket-delete --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name + def bucket_delete( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + return ( + "{shell_path} bucket-delete --cluster {hostname}:{port} " + "--username {username} --password $password " + "--bucket={bucket_name}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + ) ) @staticmethod - def bucket_delete_expect(shell_path, hostname, port, username, bucket_name, - **kwargs): - command = f"{shell_path} bucket-delete --cluster {hostname}:{port} --username {username} --password --bucket={bucket_name}" + def bucket_delete_expect( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + command = ( + f"{shell_path} bucket-delete --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -827,22 +1173,33 @@ def bucket_delete_expect(shell_path, hostname, port, username, bucket_name, }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def bucket_flush(shell_path, hostname, 
port, username, bucket_name, **kwargs): - return "echo 'Yes' | {shell_path} bucket-flush --cluster {hostname}:{port} --username {username} --password $password --bucket={bucket_name}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, bucket_name=bucket_name + def bucket_flush( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + return ( + "echo 'Yes' | {shell_path} bucket-flush --cluster " + "{hostname}:{port} --username {username} --password $password " + "--bucket={bucket_name}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + bucket_name=bucket_name, + ) ) @staticmethod - def bucket_flush_expect(shell_path, hostname, port, username, bucket_name, - **kwargs): - command = f"{shell_path} bucket-flush --cluster {hostname}:{port} --username {username} --password --bucket={bucket_name}" + def bucket_flush_expect( + shell_path, hostname, port, username, bucket_name, **kwargs + ): + command = ( + f"{shell_path} bucket-flush --cluster {hostname}:{port} " + f"--username {username} --password --bucket={bucket_name}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -859,57 +1216,99 @@ def bucket_flush_expect(shell_path, hostname, port, username, bucket_name, puts "EXPECT SCRIPT TIMEOUT" exit 2 } - }""" + }""" # noqa ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def bucket_create(shell_path, hostname, port, username, bucket_name, ramsize, evictionpolicy, bucket_type, bucket_compression, **kwargs): - return "{shell_path} bucket-create --cluster 127.0.0.1:{port} --username {username} --password $password --bucket {bucket_name} --bucket-type {bucket_type} --bucket-ramsize {ramsize} --bucket-replica 0 --bucket-eviction-policy {evictionpolicy} {bucket_compression} --conflict-resolution sequence --wait".format( - shell_path=shell_path, port=port, username=username, - bucket_name=bucket_name, ramsize=ramsize, evictionpolicy=evictionpolicy, - bucket_type=bucket_type, bucket_compression=bucket_compression + def bucket_create( + shell_path, + hostname, + port, + username, + bucket_name, + ramsize, + evictionpolicy, + bucket_type, + bucket_compression, + **kwargs, + ): + return ( + "{shell_path} bucket-create --cluster 127.0.0.1:{port} " + "--username {username} --password $password --bucket " + "{bucket_name} --bucket-type {bucket_type} --bucket-ramsize " + "{ramsize} --bucket-replica 0 --bucket-eviction-policy " + "{evictionpolicy} {bucket_compression} --conflict-resolution " + "sequence --wait".format( + shell_path=shell_path, + port=port, + username=username, + bucket_name=bucket_name, + ramsize=ramsize, + evictionpolicy=evictionpolicy, + bucket_type=bucket_type, + bucket_compression=bucket_compression, + ) ) @staticmethod - def bucket_create_expect(shell_path, hostname, port, username, bucket_name, - ramsize, evictionpolicy, bucket_type, bucket_compression, - **kwargs): - command = f"{shell_path} bucket-create --cluster 127.0.0.1:{port} --username {username} --password --bucket {bucket_name} --bucket-type {bucket_type} --bucket-ramsize {ramsize} --bucket-replica 0 --bucket-eviction-policy {evictionpolicy} {bucket_compression} --conflict-resolution sequence --wait" + def bucket_create_expect( + shell_path, + hostname, + port, + username, + bucket_name, + 
ramsize, + evictionpolicy, + bucket_type, + bucket_compression, + **kwargs, + ): + command = ( + f"{shell_path} bucket-create --cluster 127.0.0.1:{port} " + f"--username {username} --password --bucket {bucket_name} " + f"--bucket-type {bucket_type} --bucket-ramsize {ramsize} " + f"--bucket-replica 0 --bucket-eviction-policy " + f"{evictionpolicy} {bucket_compression} " + f"--conflict-resolution sequence --wait" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod def bucket_list(shell_path, hostname, port, username, **kwargs): - return "{shell_path} bucket-list --cluster {hostname}:{port} --username {username} --password $password -o json".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, + return ( + "{shell_path} bucket-list --cluster {hostname}:{port} " + "--username {username} --password $password -o json".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) ) @staticmethod def bucket_list_expect(shell_path, hostname, port, username, **kwargs): - command = f"{shell_path} bucket-list --cluster {hostname}:{port} --username {username} --password -o json" + command = ( + f"{shell_path} bucket-list --cluster {hostname}:{port}" + f" --username {username} --password -o json" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -924,16 +1323,15 @@ def bucket_list_expect(shell_path, hostname, port, username, **kwargs): }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod def get_indexes_name(hostname, port, username, **kwargs): - return "curl {username}:$password@{hostname}:{port}/indexStatus".format( - hostname=hostname, port=port, username=username + return ( + "curl {username}:$password@{hostname}:{port}/indexStatus".format( + hostname=hostname, port=port, username=username + ) ) @staticmethod @@ -953,15 +1351,15 @@ def get_indexes_name_expect(hostname, port, username, **kwargs): }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod def get_scope_list_expect(hostname, port, username, **kwargs): - command = f"curl -u {username} {hostname}:{port}/pools/default/buckets/{kwargs.get('bucket_name')}/scopes" + command = ( + f"curl -u {username} {hostname}:{port}/pools/default/" + f"buckets/{kwargs.get('bucket_name')}/scopes" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -976,103 +1374,118 @@ def get_scope_list_expect(hostname, port, username, **kwargs): }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": 
kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod def create_scope_expect(base_path, hostname, port, username, **kwargs): command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" - cb_query = f"CREATE SCOPE `{kwargs.get('bucket_name')}`.{kwargs.get('scope_name')};" + cb_query = ( + f"CREATE SCOPE `{kwargs.get('bucket_name')}`." + f"{kwargs.get('scope_name')};" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter Password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - -re ".*ERROR 100 :.*" { - puts "Error occured" - send "\x04" - } - -re "(.|\n)*cbq>(.|\n)*" { - send "${env(CB_QUERY)};\n" - expect -re "\n(.|\n)*" - send "\x04" - expect eof - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") logger.debug(f"cb_query: {cb_query}") env_vars = { "CB_PWD": kwargs.get("password"), "CB_CMD": command, - "CB_QUERY": cb_query + "CB_QUERY": cb_query, } return expect_block, env_vars @staticmethod - def create_collection_expect(base_path, hostname, port, username, **kwargs): + def create_collection_expect( + base_path, hostname, port, username, **kwargs + ): command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" - cb_query = f"CREATE COLLECTION `{kwargs.get('bucket_name')}`.{kwargs.get('scope_name')}.{kwargs.get('collection_name')};" + cb_query = ( + f"CREATE COLLECTION `{kwargs.get('bucket_name')}`." + f"{kwargs.get('scope_name')}." 
+ f"{kwargs.get('collection_name')};" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter Password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - -re ".*ERROR 100 :.*" { - puts "Error occured" - send "\x04" - } - -re "(.|\n)*cbq>(.|\n)*" { - send "${env(CB_QUERY)};\n" - expect -re "\n(.|\n)*" - send "\x04" - expect eof - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") logger.debug(f"cb_query: {cb_query}") env_vars = { "CB_PWD": kwargs.get("password"), "CB_CMD": command, - "CB_QUERY": cb_query + "CB_QUERY": cb_query, } return expect_block, env_vars @staticmethod def get_backup_bucket_list(path, sudo=False, uid=None, **kwargs): if sudo: - return "sudo -u \#{uid} find {path} -name bucket-config.json".format( - path=path, uid=uid + return ( + "sudo -u \#{uid} find {path} -name bucket-config.json".format( + path=path, uid=uid + ) ) else: - return "find {path} -name bucket-config.json".format( - path=path - ) + return "find {path} -name bucket-config.json".format(path=path) @staticmethod def build_index(base_path, hostname, port, username, index_def, **kwargs): - return "{base_path}/cbq -e {hostname}:{port} -u {username} -p $password -q=true -s='{index_def}'".format( - base_path=base_path, hostname=hostname, port=port, username=username, index_def=index_def + return ( + "{base_path}/cbq -e {hostname}:{port} -u {username} " + "-p $password -q=true -s='{index_def}'".format( + base_path=base_path, + hostname=hostname, + port=port, + username=username, + index_def=index_def, + ) ) @staticmethod - def build_index_expect(base_path, hostname, port, username, index_def, **kwargs): + def build_index_expect( + base_path, hostname, port, username, index_def, **kwargs + ): command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} @@ -1102,89 +1515,146 @@ def build_index_expect(base_path, hostname, port, username, index_def, **kwargs) env_vars = { "CB_PWD": kwargs.get("password"), "CB_CMD": command, - "CB_QUERY": index_def + "CB_QUERY": index_def, } return expect_block, env_vars @staticmethod def check_index_build(base_path, hostname, port, username, **kwargs): - return "{base_path}/cbq -e {hostname}:{port} -u {username} -p $password -q=true -s=\"SELECT COUNT(*) as unbuilt FROM system:indexes WHERE state <> 'online'\"".format( - base_path=base_path, hostname=hostname, port=port, username=username + return ( + "{base_path}/cbq -e {hostname}:{port} -u {username} " + '-p $password -q=true -s="SELECT COUNT(*) as unbuilt ' + "FROM system:indexes WHERE state <> 'online'\"".format( + base_path=base_path, + hostname=hostname, + port=port, + username=username, + ) ) @staticmethod - def check_index_build_expect(base_path, hostname, port, username, **kwargs): + def check_index_build_expect( + base_path, hostname, port, username, **kwargs + ): command = f"{base_path}/cbq -e {hostname}:{port} -u {username} -q=true" - cb_query = "SELECT COUNT(*) as unbuilt FROM system:indexes WHERE state <> 'online'" + cb_query 
= ( + "SELECT COUNT(*) as unbuilt FROM system:indexes WHERE " + "state <> 'online'" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter Password:.*" { - send "${env(CB_PWD)}\n" - exp_continue - } - -re ".*ERROR 100 :.*" { - puts "Error occured" - send "\x04" - } - -re "(.|\n)*cbq>(.|\n)*" { - send "${env(CB_QUERY)};\n" - expect -re "\n(.|\n)*" - send "\x04" - expect eof - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter Password:.*" { + send "${env(CB_PWD)}\n" + exp_continue + } + -re ".*ERROR 100 :.*" { + puts "Error occured" + send "\x04" + } + -re "(.|\n)*cbq>(.|\n)*" { + send "${env(CB_QUERY)};\n" + expect -re "\n(.|\n)*" + send "\x04" + expect eof + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") logger.debug(f"cb_query: {cb_query}") env_vars = { "CB_PWD": kwargs.get("password"), "CB_CMD": command, - "CB_QUERY": cb_query + "CB_QUERY": cb_query, } return expect_block, env_vars @staticmethod - def cb_backup_full(base_path, backup_location, backup_repo, hostname, port, username, csv_bucket_list, sudo, uid, skip, **kwargs): + def cb_backup_full( + base_path, + backup_location, + backup_repo, + hostname, + port, + username, + csv_bucket_list, + sudo, + uid, + skip, + **kwargs, + ): if sudo: - return "sudo -u \#{uid} {base_path}/cbbackupmgr restore --archive {backup_location} --repo {backup_repo} --cluster couchbase://{hostname}:{port} --username {username} --password $password \ - --force-updates {skip} --no-progress-bar --include-buckets {csv_bucket_list}".format( - base_path=base_path, - backup_location=backup_location, - backup_repo=backup_repo, - hostname=hostname, - port=port, - username=username, - csv_bucket_list=csv_bucket_list, - uid=uid, - skip=skip + return ( + "sudo -u \#{uid} {base_path}/cbbackupmgr restore " + "--archive {backup_location} --repo {backup_repo} " + "--cluster couchbase://{hostname}:{port} --username " + "{username} --password $password --force-updates {skip} " + "--no-progress-bar --include-buckets " + "{csv_bucket_list}".format( + base_path=base_path, + backup_location=backup_location, + backup_repo=backup_repo, + hostname=hostname, + port=port, + username=username, + csv_bucket_list=csv_bucket_list, + uid=uid, + skip=skip, + ) ) else: - return "{base_path}/cbbackupmgr restore --archive {backup_location} --repo {backup_repo} --cluster couchbase://{hostname}:{port} --username {username} --password $password \ - --force-updates {skip} --no-progress-bar --include-buckets {csv_bucket_list}".format( - base_path=base_path, - backup_location=backup_location, - backup_repo=backup_repo, - hostname=hostname, - port=port, - username=username, - csv_bucket_list=csv_bucket_list, - skip=skip + return ( + "{base_path}/cbbackupmgr restore --archive " + "{backup_location} --repo {backup_repo} --cluster " + "couchbase://{hostname}:{port} --username {username} " + "--password $password --force-updates {skip} " + "--no-progress-bar --include-buckets " + "{csv_bucket_list}".format( + base_path=base_path, + backup_location=backup_location, + backup_repo=backup_repo, + hostname=hostname, + port=port, + username=username, + csv_bucket_list=csv_bucket_list, + skip=skip, + ) ) @staticmethod - def cb_backup_full_expect(base_path, backup_location, backup_repo, hostname, port, - username, csv_bucket_list, sudo, uid, skip, **kwargs): + def cb_backup_full_expect( + base_path, + backup_location, + 
backup_repo, + hostname, + port, + username, + csv_bucket_list, + sudo, + uid, + skip, + **kwargs, + ): if sudo: - command = f"sudo -u \#{uid} {base_path}/cbbackupmgr restore --archive {backup_location} --repo {backup_repo} --cluster couchbase://{hostname}:{port} --username {username} --password \ - --force-updates {skip} --no-progress-bar --include-buckets {csv_bucket_list}" + command = ( + f"sudo -u \#{uid} {base_path}/cbbackupmgr restore " + f"--archive {backup_location} --repo {backup_repo} " + f"--cluster couchbase://{hostname}:{port} --username " + f"{username} --password --force-updates {skip} " + f"--no-progress-bar --include-buckets {csv_bucket_list}" + ) else: - command = f"{base_path}/cbbackupmgr restore --archive {backup_location} --repo {backup_repo} --cluster couchbase://{hostname}:{port} --username {username} --password \ - --force-updates {skip} --no-progress-bar --include-buckets {csv_bucket_list}" + command = ( + f"{base_path}/cbbackupmgr restore --archive " + f"{backup_location} --repo {backup_repo} --cluster " + f"couchbase://{hostname}:{port} --username {username} " + f"--password --force-updates {skip} --no-progress-bar " + f"--include-buckets {csv_bucket_list}" + ) if int(kwargs.get("repo_version").split(".")[0]) >= 7: command = f"{command} --purge" if kwargs.get("map_data") != "": @@ -1209,26 +1679,46 @@ def cb_backup_full_expect(base_path, backup_location, backup_repo, hostname, por }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def monitor_replication(source_username, source_hostname, source_port, bucket_name, uuid, **kwargs): - return "curl --silent -u {source_username}:$password http://{source_hostname}:{source_port}/pools/default/buckets/{bucket_name}/stats/replications%2F{uuid}%2F{bucket_name}%2F{bucket_name}%2Fchanges_left".format( - source_username=source_username, - source_hostname=source_hostname, - source_port=source_port, - bucket_name=bucket_name, - uuid=uuid, + def monitor_replication( + source_username, + source_hostname, + source_port, + bucket_name, + uuid, + **kwargs, + ): + return ( + "curl --silent -u {source_username}:$password " + "http://{source_hostname}:{source_port}/pools/default/buckets" + "/{bucket_name}/stats/replications%2F{uuid}%2F{bucket_name}" + "%2F{bucket_name}%2Fchanges_left".format( + source_username=source_username, + source_hostname=source_hostname, + source_port=source_port, + bucket_name=bucket_name, + uuid=uuid, + ) ) @staticmethod - def monitor_replication_expect(source_username, source_hostname, source_port, - bucket_name, uuid, **kwargs): - command = f"curl --silent -u {source_username} http://{source_hostname}:{source_port}/pools/default/buckets/{bucket_name}/stats/replications%2F{uuid}%2F{bucket_name}%2F{bucket_name}%2Fchanges_left" + def monitor_replication_expect( + source_username, + source_hostname, + source_port, + bucket_name, + uuid, + **kwargs, + ): + command = ( + f"curl --silent -u {source_username} " + f"http://{source_hostname}:{source_port}/pools/default/" + f"buckets/{bucket_name}/stats/replications%2F{uuid}%2F" + f"{bucket_name}%2F{bucket_name}%2Fchanges_left" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -1243,22 +1733,30 @@ def monitor_replication_expect(source_username, source_hostname, source_port, }""" ) logger.debug(f"command: 
{command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod def couchbase_server_info(shell_path, hostname, username, port, **kwargs): - return "{shell_path} server-info --cluster {hostname}:{port} --username {username} --password $password".format( - shell_path=shell_path, hostname=hostname, port=port, username=username + return ( + "{shell_path} server-info --cluster {hostname}:{port} " + "--username {username} --password $password".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) ) - #return("{shell_path}".format(shell_path=shell_path)) + # return("{shell_path}".format(shell_path=shell_path)) @staticmethod - def couchbase_server_info_expect(shell_path, hostname, username, port, **kwargs): - command = f"{shell_path} server-info --cluster {hostname}:{port} --username {username} --password" + def couchbase_server_info_expect( + shell_path, hostname, username, port, **kwargs + ): + command = ( + f"{shell_path} server-info --cluster {hostname}:{port} " + f"--username {username} --password" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -1273,22 +1771,35 @@ def couchbase_server_info_expect(shell_path, hostname, username, port, **kwargs) }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def rename_cluster(shell_path, hostname, port, username, newuser, newname, **kwargs): - return "{shell_path} setting-cluster --cluster {hostname}:{port} --username {username} --password $password --cluster-username {newuser} --cluster-password $newpass --cluster-name {newname}".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, - newuser=newuser, newname=newname + def rename_cluster( + shell_path, hostname, port, username, newuser, newname, **kwargs + ): + return ( + "{shell_path} setting-cluster --cluster {hostname}:{port} " + "--username {username} --password $password " + "--cluster-username {newuser} --cluster-password $newpass " + "--cluster-name {newname}".format( + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + newuser=newuser, + newname=newname, + ) ) @staticmethod - def rename_cluster_expect(shell_path, hostname, port, username, newname, **kwargs): - command = f"curl -X POST http://{hostname}:{port}/pools/default -d clusterName={newname} -u {username}" + def rename_cluster_expect( + shell_path, hostname, port, username, newname, **kwargs + ): + command = ( + f"curl -X POST http://{hostname}:{port}/pools/default " + f"-d clusterName={newname} -u {username}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -1303,98 +1814,129 @@ def rename_cluster_expect(shell_path, hostname, port, username, newname, **kwarg }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @staticmethod - def change_cluster_password_expect(shell_path, hostname, port, username, newuser, **kwargs): - payload_string = f"password={kwargs.get('newpass')}&username={newuser}&port=SAME" - 
command = f'echo \"$PAYLOAD_SECRET\" | curl -d @- -X POST http://{hostname}:{port}/settings/web -u {username}' + def change_cluster_password_expect( + shell_path, hostname, port, username, newuser, **kwargs + ): + payload_string = ( + f"password={kwargs.get('newpass')}&username={newuser}&port=SAME" + ) + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"http://{hostname}:{port}/settings/web -u {username}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter host password for user.*" { - send "${env(CB_PWD)}\n" - set timeout -1 - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter host password for user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") env_vars = { "CB_PWD": kwargs.get("password"), "CB_CMD": "/tmp/run_shell.sh", "SHELL_DATA": command, - "PAYLOAD_SECRET": payload_string + "PAYLOAD_SECRET": payload_string, } return expect_block, env_vars @staticmethod - def server_add(shell_path, hostname, port, username, newhost, services, **kwargs): - return "{shell_path} server-add --cluster {hostname}:{port} --username {username} --password $password \ - --server-add https://{newhost}:18091 --server-add-username {username} --server-add-password $password \ + def server_add( + shell_path, hostname, port, username, newhost, services, **kwargs + ): + return ( + "{shell_path} server-add --cluster {hostname}:{port} " + "--username {username} --password $password \ + --server-add https://{newhost}:18091 --server-add-username " + "{username} --server-add-password $password \ --services {services} --no-ssl-verify".format( - shell_path=shell_path, hostname=hostname, port=port, username=username, services=services, newhost=newhost + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + services=services, + newhost=newhost, + ) ) @staticmethod - def server_add_expect(shell_path, hostname, port, username, newhost, services, - **kwargs): - if kwargs.get('new_port') == "8091": + def server_add_expect( + shell_path, hostname, port, username, newhost, services, **kwargs + ): + if kwargs.get("new_port") == "8091": hostname_prefix = "http" else: hostname_prefix = "https" payload_data = { - "hostname": f"{hostname_prefix}://{newhost}:{kwargs.get('new_port')}", + "hostname": f"{hostname_prefix}://{newhost}:" + f"{kwargs.get('new_port')}", "user": username, - "password": kwargs.get('password'), - "services": services + "password": kwargs.get("password"), + "services": services, } - payload_data["services"] = payload_data["services"].replace("data", "kv").replace("query", "n1ql") + payload_data["services"] = ( + payload_data["services"] + .replace("data", "kv") + .replace("query", "n1ql") + ) payload_string = urllib.parse.urlencode(payload_data) - command = f"echo \"$PAYLOAD_SECRET\" | curl -d @- -X POST {hostname}:8091/controller/addNode -u {username}" + command = ( + f'echo "$PAYLOAD_SECRET" | curl -d @- -X POST ' + f"{hostname}:8091/controller/addNode -u {username}" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} - expect { - -re "Enter host password for user.*" { - send "${env(CB_PWD)}\n" - set timeout -1 - exp_continue - } - timeout { - puts "EXPECT SCRIPT TIMEOUT" - exit 2 - } - }""" + expect { + -re "Enter host password for 
user.*" { + send "${env(CB_PWD)}\n" + set timeout -1 + exp_continue + } + timeout { + puts "EXPECT SCRIPT TIMEOUT" + exit 2 + } + }""" ) logger.debug(f"command: {command}") env_vars = { "CB_PWD": kwargs.get("password"), "CB_CMD": "/tmp/run_shell.sh", "SHELL_DATA": command, - "PAYLOAD_SECRET": payload_string + "PAYLOAD_SECRET": payload_string, } return expect_block, env_vars - @staticmethod def rebalance(shell_path, hostname, port, username, **kwargs): - return "{shell_path} rebalance --cluster {hostname}:{port} --username {username} --password $password \ + return ( + "{shell_path} rebalance --cluster {hostname}:{port} " + "--username {username} --password $password \ --no-progress-bar".format( - shell_path=shell_path, hostname=hostname, port=port, username=username + shell_path=shell_path, + hostname=hostname, + port=port, + username=username, + ) ) @staticmethod def rebalance_expect(shell_path, hostname, port, username, **kwargs): - command = f"{shell_path} rebalance --cluster {hostname}:{port} --username {username} --password \ - --no-progress-bar" + command = ( + f"{shell_path} rebalance --cluster {hostname}:{port} " + f"--username {username} --password --no-progress-bar" + ) expect_block = DatabaseCommand.get_parent_expect_block().format( command_specific_operations="""eval spawn ${env(CB_CMD)} expect { @@ -1410,10 +1952,7 @@ def rebalance_expect(shell_path, hostname, port, username, **kwargs): }""" ) logger.debug(f"command: {command}") - env_vars = { - "CB_PWD": kwargs.get("password"), - "CB_CMD": command - } + env_vars = {"CB_PWD": kwargs.get("password"), "CB_CMD": command} return expect_block, env_vars @@ -1461,4 +2000,4 @@ def __init__(self): data = "data" hostname = "192.168.1.14" dir_path = "/var/tmp" - DLPX_BIN_JQ = "/var/tmp" \ No newline at end of file + DLPX_BIN_JQ = "/var/tmp" diff --git a/src/db_commands/constants.py b/src/db_commands/constants.py index a227242..1343b06 100644 --- a/src/db_commands/constants.py +++ b/src/db_commands/constants.py @@ -1,18 +1,20 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# This module is created to define constants values which are being used in this plugin -####################################################################################################################### +############################################################################## +# This module is created to define constants values which are being used in +# this plugin. +############################################################################## # Constants LOCK_SYNC_OPERATION = "DO_NOT_DELETE_DELPHIX_sync.lck" LOCK_SNAPSYNC_OPERATION = "DO_NOT_DELETE_DELPHIX_snapsync.lck" SRC_BUCKET_INFO_FILENAME = "couchbase_src_bucket_info.cfg" -ENV_VAR_KEY = 'environment_vars' +ENV_VAR_KEY = "environment_vars" StatusIsActive = "healthy" # it shows the status of server is good -DELPHIX_HIDDEN_FOLDER = ".delphix" # Folder inside which config file will create +# Folder inside which config file will create +DELPHIX_HIDDEN_FOLDER = ".delphix" CONFIG_FILE_NAME = "config.txt" EVICTION_POLICY = "valueOnly" DEFAULT_CB_BIN_PATH = "/opt/couchbase/bin" @@ -20,15 +22,31 @@ XDCR = "XDCR" -# String literals to match and throw particular type of exceptions. 
used by db_exception_handler.py -ALREADY_CLUSTER_INIT = "Cluster is already initialized, use setting-cluster to change settings" +# String literals to match and throw particular types of exceptions. +# used by db_exception_handler.py +ALREADY_CLUSTER_INIT = ( + "Cluster is already initialized, use setting-cluster to change settings" +) SHUTDOWN_FAILED = "shutdown failed" BUCKET_NAME_ALREADY_EXIST = "Bucket with given name already exists" -MULTIPLE_VDB_ERROR = "Changing data of nodes that are part of provisioned cluster is not supported" -CLUSTER_ALREADY_PRESENT = "Cluster reference to the same cluster already exists under the name" -ALREADY_CLUSTER_FOR_BUCKET= "Replication to the same remote cluster and bucket already exists" +MULTIPLE_VDB_ERROR = ( + "Changing data of nodes that are part of provisioned " + "cluster is not supported" +) +CLUSTER_ALREADY_PRESENT = ( + "Cluster reference to the same cluster already exists under the name" +) +ALREADY_CLUSTER_FOR_BUCKET = ( + "Replication to the same remote cluster and bucket already exists" +) # used by linked.py -ALREADY_SYNC_FILE_PRESENT_ON_HOST = "Not cleaning lock files as not created by this job. Also check, is there any XDCR set up on this host. If yes " \ - "then sync file should not be deleted " -RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS = "dSource Creation / Snapsync for dSource {} is in progress. Same staging server {} cannot be used for other operations" +ALREADY_SYNC_FILE_PRESENT_ON_HOST = ( + "Not cleaning lock files as not created by this job. " + "Also check, is there any XDCR set up on this host. If yes " + "then sync file should not be deleted " +) +RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS = ( + "dSource Creation / Snapsync for dSource {} is in progress. " + "Same staging server {} cannot be used for other operations" +) diff --git a/src/internal_exceptions/base_exceptions.py b/src/internal_exceptions/base_exceptions.py index c5a2d85..eb51667 100644 --- a/src/internal_exceptions/base_exceptions.py +++ b/src/internal_exceptions/base_exceptions.py @@ -1,15 +1,18 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -We are defining two base classes for two types of exceptions: one is related to database & the other one is for -run-time errors in the plugin. Both classes are child class of Exception which is defined inside python -The purpose of segregation of these two kinds of exceptions is to get a more accurate message at runtime error. -All the exceptions created for the database will inherit the DatabaseException and these are defined in the current package +We are defining two base classes for two types of exceptions: one is related +to the database & the other one is for run-time errors in the plugin. Both +classes are child classes of Exception, which is defined inside Python. +The purpose of segregation of these two kinds of exceptions is to get a more +accurate message when a runtime error occurs. +All the exceptions created for the database will inherit the DatabaseException +and these are defined in the current package. 
""" -####################################################################################################################### +############################################################################## from dlpx.virtualization.platform.exceptions import UserError @@ -28,8 +31,10 @@ def __init__(self, message, action, error_string): super(DatabaseException, self).__init__(message, action, error_string) -# Exceptions related to plugin operation like discovery, linking, virtualization are being handled using this. -# plugin_exceptions.py is responsible to catch and throw specific error message for each kind of delphix operation. +# Exceptions related to plugin operation like discovery, linking, +# virtualization are being handled using this. +# plugin_exceptions.py is responsible to catch and throw specific error +# message for each kind of delphix operation. class PluginException(UserConvertibleException): def __init__(self, message, action, error_string): super(PluginException, self).__init__(message, action, error_string) diff --git a/src/internal_exceptions/database_exceptions.py b/src/internal_exceptions/database_exceptions.py index b7d9c38..c339c1d 100644 --- a/src/internal_exceptions/database_exceptions.py +++ b/src/internal_exceptions/database_exceptions.py @@ -1,21 +1,23 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # from internal_exceptions.base_exceptions import DatabaseException - -# Some of below defined exceptions are not being used currently but designed for future updates. +# Some of below defined exceptions are not being used currently but designed +# for future updates class DuplicateClusterError(DatabaseException): def __init__(self, message=""): message = "Duplicate cluster name found, " + message - super(DuplicateClusterError, self).__init__(message, - "Delete existing staging cluster configuration on source or use different staging cluster name ", - "Duplicate cluster names are not allowed" - ) + super(DuplicateClusterError, self).__init__( + message, + "Delete existing staging cluster configuration on source or use " + "different staging cluster name ", + "Duplicate cluster names are not allowed", + ) # When bucket list in snapshot is empty @@ -24,7 +26,8 @@ def __init__(self, message=""): message = "Please check configurations and try again, " + message super(FailedToReadBucketDataFromSnapshot, self).__init__( message, - "Bucket list is empty. Please verify if the bucket exist at source", + "Bucket list is empty. 
Please verify if the bucket exist at " + "source", "bucket list empty", ) @@ -32,15 +35,22 @@ def __init__(self, message=""): # Failed To start or stop the server class CouchbaseServicesError(DatabaseException): def __init__(self, message=""): - message = "Any of start/stop operation for couchbase service fails: " + message - super(CouchbaseServicesError, self).__init__(message, - "Please check the user permission and try again", - "Not able to stop the couchbase server") + message = ( + "Any of start/stop operation for couchbase service fails: " + + message + ) + super(CouchbaseServicesError, self).__init__( + message, + "Please check the user permission and try again", + "Not able to stop the couchbase server", + ) class BucketOperationError(DatabaseException): def __init__(self, message=""): message = "Bucket operation failed: " + message - super(BucketOperationError, self).__init__(message, - "Bucket related issue is observed ", - "Please see logs for more details") + super(BucketOperationError, self).__init__( + message, + "Bucket related issue is observed ", + "Please see logs for more details", + ) diff --git a/src/internal_exceptions/plugin_exceptions.py b/src/internal_exceptions/plugin_exceptions.py index cf4cf71..9059ed8 100644 --- a/src/internal_exceptions/plugin_exceptions.py +++ b/src/internal_exceptions/plugin_exceptions.py @@ -1,170 +1,205 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -Adding exceptions related to plugin. +Adding exceptions related to plugin """ -####################################################################################################################### +############################################################################## -from internal_exceptions.base_exceptions import PluginException import logging +from internal_exceptions.base_exceptions import PluginException + logger = logging.getLogger(__name__) + class RepositoryDiscoveryError(PluginException): def __init__(self, message=""): message = "Not able to search repository information, " + message - super(RepositoryDiscoveryError, self).__init__(message, - "Check the COUCHBASE_PATH & couchbase installation", - "Failed to search repository information") + super(RepositoryDiscoveryError, self).__init__( + message, + "Check the COUCHBASE_PATH & couchbase installation", + "Failed to search repository information", + ) # This exception will be raised when failed to find source config class SourceConfigDiscoveryError(PluginException): def __init__(self, message=""): message = "Failed to find source config, " + message - super(SourceConfigDiscoveryError, self).__init__(message, - "Stop the couchbase service if it is running", - "Not able to find source") + super(SourceConfigDiscoveryError, self).__init__( + message, + "Stop the couchbase service if it is running", + "Not able to find source", + ) class MultipleSyncError(PluginException): def __init__(self, message=""): - message = "Resynchronization is in progress for other dSource, " + message - super(MultipleSyncError, self).__init__(message, - "Please wait while the other resync operation completes and try again ", - "Staging host already in use. 
Only Serial operations supported for couchbase") + message = ( + "Resynchronization is in progress for other dSource, " + message + ) + super(MultipleSyncError, self).__init__( + message, + "Please wait while the other resync operation completes and try " + "again ", + "Staging host already in use. Only Serial operations supported " + "for couchbase", + ) class MultipleXDCRSyncError(PluginException): def __init__(self, message=""): message = "XDCR setup found on staging host " + message - super(MultipleXDCRSyncError, self).__init__(message, - "Please use different staging host", - "Multiple XDCR is not supported on single staging host") + super(MultipleXDCRSyncError, self).__init__( + message, + "Please use different staging host", + "Multiple XDCR is not supported on single staging host", + ) class MultipleSnapSyncError(PluginException): def __init__(self, message="", filename=""): - logger.debug("Exception MultipleSnapSyncError file: {}".format(filename)) + logger.debug( + "Exception MultipleSnapSyncError file: {}".format(filename) + ) message = "SnapSync is running for any other dSource " + message - super(MultipleSnapSyncError, self).__init__(message, - "Please wait while the other operation completes and try again or delete a lock file {}".format(filename), - "Staging host already in use for SNAP-SYNC. Only Serial operations supported for couchbase") + super(MultipleSnapSyncError, self).__init__( + message, + "Please wait while the other operation completes and try again " + "or delete a lock file {}".format(filename), + "Staging host already in use for SNAP-SYNC. Only Serial " + "operations supported for couchbase", + ) class FileIOError(PluginException): def __init__(self, message=""): message = "Failed to read/write operation from a file " + message - super(FileIOError, self).__init__(message, - "Verify the permission", - "Please check the logs for more details") + super(FileIOError, self).__init__( + message, + "Verify the permission", + "Please check the logs for more details", + ) class MountPathError(PluginException): def __init__(self, message=""): - message = "Failed to create mount path because another file system is already mounted " + message - super(MountPathError, self).__init__(message, - "Please re-try after the previous operation is completed", - "Please check the logs for more details") + message = ( + "Failed to create mount path because another file system is " + "already mounted " + message + ) + super(MountPathError, self).__init__( + message, + "Please re-try after the previous operation is completed", + "Please check the logs for more details", + ) class UnmountFileSystemError(PluginException): def __init__(self, message=""): - message = "Failed to unmount the file system from host in resync operation " + message - super(UnmountFileSystemError, self).__init__(message, - "Please try again", - "Please check the logs for more details") + message = ( + "Failed to unmount the file system from host in resync operation " + + message + ) + super(UnmountFileSystemError, self).__init__( + message, + "Please try again", + "Please check the logs for more details", + ) ERR_RESPONSE_DATA = { - 'ERR_INSUFFICIENT_RAMQUOTA': { - 'MESSAGE': "Provided bucket size is not suffice to proceed", - 'ACTION': "Please change the bucket size and try again", - 'ERR_STRING': "RAM quota cannot be less than 100 MB" - }, - 'ERR_CBBKP_MGR1': { - 'MESSAGE': "Internal server error", - 'ACTION': "Please try again to run the previous operation", - 'ERR_STRING': "Internal server error while 
executing" - }, - - 'ERR_RESTORE_CLUSTER': { - 'MESSAGE': "Internal server error", - 'ACTION': "Please try again to run the previous operation", - 'ERR_STRING': "Error restoring cluster" - }, - 'ERR_BUCKET_LIST_EMPTY': { - 'MESSAGE': "Please check configurations and try again", - 'ACTION': "Bucket list is empty. Please verify if the bucket exist at source", - 'ERR_STRING': "bucket list empty", - }, - 'ERR_UNABLE_TO_CONNECT': { - 'MESSAGE': "Internal server error, unable to connect", - 'ACTION': "Please verify the defined configurations and try again", - 'ERR_STRING': "Unable to connect to host", - }, - 'ERR_UNRECOGNIZED_ARGS': { - 'MESSAGE': "Argument(s) mismatch. Please check logs for more details", - 'ACTION': "Please provide correct configuration details and try again", - 'ERR_STRING': "unrecognized arguments", - }, - 'ERR_INCORRECT_CREDENTIAL': { - 'MESSAGE': "Invalid credentials", - 'ACTION': "Try again with correct credentials", - 'ERR_STRING': "please check your username", - }, - 'ERR_REPLICATION_ALREADY_PRESENT': { - 'MESSAGE': "Duplicate cluster name found", - 'ACTION': "Delete existing staging cluster configuration on source or use different staging cluster name", - 'ERR_STRING': "Replication to the same remote cluster and bucket already exists", - }, - 'ERR_DUPLICATE_CLUSTER_NAME': { - 'MESSAGE': "Duplicate cluster name found", - 'ACTION': "Delete existing staging cluster configuration on source or use different staging cluster name ", - 'ERR_STRING': "Duplicate cluster names are not allowed", - }, - 'ERR_INTERNAL_SERVER_ERROR': { - 'MESSAGE': "Internal server error, unable to connect", - 'ACTION': "Please verify the defined configurations and try again", - 'ERR_STRING': "Internal server error, please retry your request", - }, - 'ERR_INTERNAL_SERVER_ERROR1': { - 'MESSAGE': "Internal server error, unable to connect", - 'ACTION': "Please verify the defined configurations and try again", - 'ERR_STRING': "Unable to connect to host", - }, - 'ERR_XDCR_OPERATION_ERROR': { - 'MESSAGE': "Unable to set up XDCR", - 'ACTION': "Please correct parameters and try again", - 'ERR_STRING': "Replication Error", - }, - - 'ERR_CB_BACKUP_MANGER_FAILED': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Please verify the provided archive path and try again", - 'ERR_STRING': "Error restoring cluster: Bucket Backup", - }, - 'ERR_SERVICE_UNAVAILABLE_ERROR': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Please try again ", - 'ERR_STRING': "is not available on target", - }, - 'ERR_UNEXPECTED_ERROR1': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Please try again ", - 'ERR_STRING': "Running this command will totally PURGE database data from disk. Do you really want to do", - }, - 'ERR_INVALID_BACKUP_DIR': { - 'MESSAGE': "Unable to restore backup", - 'ACTION': "Try again with correct archive location. 
", - 'ERR_STRING': "Archive directory .* doesn't exist", - }, - 'DEFAULT_ERR': { - 'MESSAGE': "Internal error occurred, retry again", - 'ACTION': "Please check logs for more details", - 'ERR_STRING': "Default error string", + "ERR_INSUFFICIENT_RAMQUOTA": { + "MESSAGE": "Provided bucket size is not suffice to proceed", + "ACTION": "Please change the bucket size and try again", + "ERR_STRING": "RAM quota cannot be less than 100 MB", + }, + "ERR_CBBKP_MGR1": { + "MESSAGE": "Internal server error", + "ACTION": "Please try again to run the previous operation", + "ERR_STRING": "Internal server error while executing", + }, + "ERR_RESTORE_CLUSTER": { + "MESSAGE": "Internal server error", + "ACTION": "Please try again to run the previous operation", + "ERR_STRING": "Error restoring cluster", + }, + "ERR_BUCKET_LIST_EMPTY": { + "MESSAGE": "Please check configurations and try again", + "ACTION": "Bucket list is empty. Please verify if the bucket exist " + "at source", + "ERR_STRING": "bucket list empty", + }, + "ERR_UNABLE_TO_CONNECT": { + "MESSAGE": "Internal server error, unable to connect", + "ACTION": "Please verify the defined configurations and try again", + "ERR_STRING": "Unable to connect to host", + }, + "ERR_UNRECOGNIZED_ARGS": { + "MESSAGE": "Argument(s) mismatch. Please check logs for more details", + "ACTION": "Please provide correct configuration details and try again", + "ERR_STRING": "unrecognized arguments", + }, + "ERR_INCORRECT_CREDENTIAL": { + "MESSAGE": "Invalid credentials", + "ACTION": "Try again with correct credentials", + "ERR_STRING": "please check your username", + }, + "ERR_REPLICATION_ALREADY_PRESENT": { + "MESSAGE": "Duplicate cluster name found", + "ACTION": "Delete existing staging cluster configuration on source " + "or use different staging cluster name", + "ERR_STRING": "Replication to the same remote cluster and bucket " + "already exists", + }, + "ERR_DUPLICATE_CLUSTER_NAME": { + "MESSAGE": "Duplicate cluster name found", + "ACTION": "Delete existing staging cluster configuration on source " + "or use different staging cluster name ", + "ERR_STRING": "Duplicate cluster names are not allowed", + }, + "ERR_INTERNAL_SERVER_ERROR": { + "MESSAGE": "Internal server error, unable to connect", + "ACTION": "Please verify the defined configurations and try again", + "ERR_STRING": "Internal server error, please retry your request", + }, + "ERR_INTERNAL_SERVER_ERROR1": { + "MESSAGE": "Internal server error, unable to connect", + "ACTION": "Please verify the defined configurations and try again", + "ERR_STRING": "Unable to connect to host", + }, + "ERR_XDCR_OPERATION_ERROR": { + "MESSAGE": "Unable to set up XDCR", + "ACTION": "Please correct parameters and try again", + "ERR_STRING": "Replication Error", + }, + "ERR_CB_BACKUP_MANGER_FAILED": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Please verify the provided archive path and try again", + "ERR_STRING": "Error restoring cluster: Bucket Backup", + }, + "ERR_SERVICE_UNAVAILABLE_ERROR": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Please try again ", + "ERR_STRING": "is not available on target", + }, + "ERR_UNEXPECTED_ERROR1": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Please try again ", + "ERR_STRING": "Running this command will totally PURGE database " + "data from disk. Do you really want to do", + }, + "ERR_INVALID_BACKUP_DIR": { + "MESSAGE": "Unable to restore backup", + "ACTION": "Try again with correct archive location. 
", + "ERR_STRING": "Archive directory .* doesn't exist", + }, + "DEFAULT_ERR": { + "MESSAGE": "Internal error occurred, retry again", + "ACTION": "Please check logs for more details", + "ERR_STRING": "Default error string", }, } diff --git a/src/operations/config.py b/src/operations/config.py index 11910d6..700a506 100644 --- a/src/operations/config.py +++ b/src/operations/config.py @@ -1,14 +1,16 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This file contains global variables. There are some cases when we need to pass the parameters from one module to another -without using the function, then use global variables. We should try to avoid this approach. Although in some cases this -approach saves a good number of code lines. We should use this file only for that purpose. +This file contains global variables. There are some cases when we need to pass +the parameters from one module to another without using the function, then use +global variables. We should try to avoid this approach. +Although in some cases this approach saves a good number of code lines. +We should use this file only for that purpose """ -####################################################################################################################### +############################################################################## SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = True diff --git a/src/operations/discovery.py b/src/operations/discovery.py index 2b7e5d8..f905b75 100644 --- a/src/operations/discovery.py +++ b/src/operations/discovery.py @@ -1,19 +1,19 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -This module contains the methods responsible for discovery operations. 
+This module contains the methods responsible for discovery operations """ -####################################################################################################################### +############################################################################## import logging import sys from controller import helper_lib from generated.definitions import RepositoryDefinition -from internal_exceptions.base_exceptions import GenericUserError -from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError, SourceConfigDiscoveryError +from internal_exceptions.plugin_exceptions import RepositoryDiscoveryError +from internal_exceptions.plugin_exceptions import SourceConfigDiscoveryError logger = logging.getLogger(__name__) @@ -21,22 +21,40 @@ def find_repos(source_connection): """ Args: - source_connection (RemoteConnection): The connection associated with the remote environment to run repository discovery + source_connection (RemoteConnection): The connection associated with + the remote environment to run repository discovery Returns: Object of RepositoryDefinition class """ try: binary_paths = helper_lib.find_binary_path(source_connection) repositories = [] - for binary_path in binary_paths.split(';'): + for binary_path in binary_paths.split(";"): if helper_lib.check_dir_present(source_connection, binary_path): - install_path = helper_lib.find_install_path(source_connection, binary_path) - shell_path = helper_lib.find_shell_path(source_connection, binary_path) - version = helper_lib.find_version(source_connection, install_path) - (uid, gid) = helper_lib.find_ids(source_connection, install_path) + install_path = helper_lib.find_install_path( + source_connection, binary_path + ) + shell_path = helper_lib.find_shell_path( + source_connection=source_connection, + binary_path=binary_path, + ) + version = helper_lib.find_version( + source_connection=source_connection, + install_path=install_path, + ) + (uid, gid) = helper_lib.find_ids( + source_connection=source_connection, + install_path=install_path, + ) pretty_name = "Couchbase ({})".format(version) - repository_definition = RepositoryDefinition(cb_install_path=install_path, cb_shell_path=shell_path, - version=version, pretty_name=pretty_name, uid=uid, gid=gid) + repository_definition = RepositoryDefinition( + cb_install_path=install_path, + cb_shell_path=shell_path, + version=version, + pretty_name=pretty_name, + uid=uid, + gid=gid, + ) repositories.append(repository_definition) return repositories @@ -50,7 +68,8 @@ def find_repos(source_connection): def find_source(source_connection, repository): """ Args: - source_connection (RemoteConnection): The connection associated with the remote environment to run repository discovery + source_connection (RemoteConnection): The connection associated with + the remote environment to run repository discovery repository: Object of RepositoryDefinition Returns: @@ -58,26 +77,30 @@ def find_source(source_connection, repository): """ logger.debug("Finding source config...") try: - instance = helper_lib.is_instance_present_of_gosecrets(source_connection) + instance = helper_lib.is_instance_present_of_gosecrets( + source_connection, + ) if not instance: logger.debug("No Couchbase instance found on host") - logger.debug("Hostname: {}".format(source_connection.environment.host.name)) + logger.debug( + "Hostname: {}".format( + source_connection.environment.host.name, + ) + ) return [] else: logger.debug("Couchbase instance found on host") - logger.debug("Hostname: 
{}".format(source_connection.environment.host.name)) + logger.debug( + "Hostname: " + "{}".format( + source_connection.environment.host.name, + ) + ) return [] - # # We don't want to run code beyond this point to avoid showing existing couchbase instance. - # # Couchbase supports only 1 instance on server so that instance on host should be managed by delphix - # source_configs = [] - # PORT = 8091 - # pretty_name = "Couchbase:{}".format(PORT) - # hostname = source_connection.environment.host.name - # data_path = helper_lib.get_data_directory(source_connection,repository) - # data_path = os.path.join(data_path, "data") - # source_config = SourceConfigDefinition(couchbase_src_port=PORT, couchbase_src_host=hostname, pretty_name=pretty_name, db_path=data_path) - # source_configs.append(source_config) - # return source_configs + # # We don't want to run code beyond this point to avoid showing + # existing couchbase instance. + # # Couchbase supports only 1 instance on server so that instance + # on host should be managed by delphix except SourceConfigDiscoveryError as err: raise err.to_user_error()(None).with_traceback(sys.exc_info()[2]) except Exception as err: diff --git a/src/operations/link_cbbkpmgr.py b/src/operations/link_cbbkpmgr.py index 071066f..0d746b2 100644 --- a/src/operations/link_cbbkpmgr.py +++ b/src/operations/link_cbbkpmgr.py @@ -1,51 +1,53 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, functions defined for couchbase backup manager ingestion mechanism. -####################################################################################################################### +############################################################################## +# In this module, functions defined for couchbase backup manager ingestion +# mechanism +############################################################################## -import logging -import os import json +import logging -from dlpx.virtualization.platform import Status - -import db_commands from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -from controller.helper_lib import get_bucket_size_in_MB, get_sync_lock_file_name from controller.resource_builder import Resource from generated.definitions import SnapshotDefinition -from internal_exceptions.plugin_exceptions import MultipleSyncError, MultipleSnapSyncError from operations import config from operations import linking logger = logging.getLogger(__name__) -def resync_cbbkpmgr(staged_source, repository, source_config, input_parameters): +def resync_cbbkpmgr( + staged_source, repository, source_config, input_parameters +): dsource_type = input_parameters.d_source_type dsource_name = source_config.pretty_name couchbase_host = input_parameters.couchbase_host - bucket_size = staged_source.parameters.bucket_size - rx_connection = staged_source.staged_connection resync_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) - linking.check_for_concurrent(resync_process, dsource_type, dsource_name, couchbase_host) + linking.check_for_concurrent( + resync_process, dsource_type, 
dsource_name, couchbase_host + ) # validate if this works as well for backup linking.configure_cluster(resync_process) - logger.debug("Finding source and staging bucket list") bucket_details_source = resync_process.source_bucket_list_offline() - bucket_details_staged = helper_lib.filter_bucket_name_from_output(resync_process.bucket_list()) + bucket_details_staged = helper_lib.filter_bucket_name_from_output( + resync_process.bucket_list() + ) - buckets_toprocess = linking.buckets_precreation(resync_process, bucket_details_source, bucket_details_staged) + buckets_toprocess = linking.buckets_precreation( + resync_process, bucket_details_source, bucket_details_staged + ) csv_bucket_list = ",".join(buckets_toprocess) logger.debug("Started CB backup manager") @@ -56,31 +58,36 @@ def resync_cbbkpmgr(staged_source, repository, source_config, input_parameters): linking.build_indexes(resync_process) logger.info("Stopping Couchbase") resync_process.stop_couchbase() - resync_process.save_config('parent') + resync_process.save_config("parent") -def pre_snapshot_cbbkpmgr(staged_source, repository, source_config, input_parameters): +def pre_snapshot_cbbkpmgr( + staged_source, repository, source_config, input_parameters +): # this is for normal snapshot - #logger.info("Do nothing version Couchbase") + # logger.info("Do nothing version Couchbase") pre_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - bucket_size = input_parameters.bucket_size - rx_connection = staged_source.staged_connection + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) dsource_type = input_parameters.d_source_type dsource_name = source_config.pretty_name couchbase_host = input_parameters.couchbase_host - linking.check_for_concurrent(pre_snapshot_process, dsource_type, dsource_name, couchbase_host) + linking.check_for_concurrent( + pre_snapshot_process, dsource_type, dsource_name, couchbase_host + ) logger.debug("Finding source and staging bucket list") - bucket_details_source = pre_snapshot_process.source_bucket_list_offline() - bucket_details_staged = helper_lib.filter_bucket_name_from_output(pre_snapshot_process.bucket_list()) bucket_details_staged = pre_snapshot_process.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) + filter_bucket_list = helper_lib.filter_bucket_name_from_output( + bucket_details_staged + ) csv_bucket_list = ",".join(filter_bucket_list) pre_snapshot_process.cb_backup_full(csv_bucket_list) logger.info("Re-ingesting from latest backup complete.") @@ -88,28 +95,24 @@ def pre_snapshot_cbbkpmgr(staged_source, repository, source_config, input_parame linking.build_indexes(pre_snapshot_process) logger.info("Stopping Couchbase") pre_snapshot_process.stop_couchbase() - pre_snapshot_process.save_config('parent') + pre_snapshot_process.save_config("parent") -def post_snapshot_cbbkpmgr(staged_source, repository, source_config, dsource_type): +def post_snapshot_cbbkpmgr( + staged_source, repository, source_config, dsource_type +): logger.info("In Post snapshot...") post_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() 
+ ) rx_connection = staged_source.staged_connection post_snapshot_process.start_couchbase() snapshot = SnapshotDefinition(validate=False) - bucket_list = [] bucket_details = post_snapshot_process.bucket_list() - # if len(staged_source.parameters.config_settings_prov) != 0: - # bucket_list = [] - # for config_setting in staged_source.parameters.config_settings_prov: - # bucket_list.append(helper_lib.get_bucket_name_with_size(bucket_details, config_setting["bucketName"])) - # else: - # bucket_list = helper_lib.get_stg_all_bucket_list_with_ramquota_size(bucket_details) - - # extract index ind = post_snapshot_process.get_indexes_definition() @@ -123,59 +126,73 @@ def post_snapshot_cbbkpmgr(staged_source, repository, source_config, dsource_typ snapshot.time_stamp = helper_lib.current_time() snapshot.snapshot_id = str(helper_lib.get_snapshot_id()) snapshot.couchbase_admin = post_snapshot_process.parameters.couchbase_admin - snapshot.couchbase_admin_password = post_snapshot_process.parameters.couchbase_admin_password - #logger.debug("snapshot schema: {}".format(snapshot)) + snapshot.couchbase_admin_password = ( + post_snapshot_process.parameters.couchbase_admin_password + ) + # logger.debug("snapshot schema: {}".format(snapshot)) logger.debug("Deleting the lock files") helper_lib.delete_file(rx_connection, config.SNAP_SYNC_FILE_NAME) helper_lib.delete_file(rx_connection, config.SYNC_FILE_NAME) - # for Prox investigation - #post_snapshot_process.stop_couchbase() - #helper_lib.unmount_file_system(rx_connection, staged_source.parameters.mount_path) - #logger.debug("Un mounting completed") return snapshot def start_staging_cbbkpmgr(staged_source, repository, source_config): start_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) start_staging.delete_config() # TODO error handling - start_staging.restore_config(what='current') + start_staging.restore_config(what="current") start_staging.start_couchbase() def stop_staging_cbbkpmgr(staged_source, repository, source_config): stop_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) stop_staging.stop_couchbase() - stop_staging.save_config(what='current') + stop_staging.save_config(what="current") stop_staging.delete_config() def d_source_status_cbbkpmgr(staged_source, repository, source_config): - # if helper_lib.check_dir_present(staged_source.staged_connection, staged_source.parameters.couchbase_bak_loc): - # return Status.ACTIVE - # return Status.INACTIVE status_obj = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - logger.debug("Checking status for D_SOURCE: {}".format(source_config.pretty_name)) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + logger.debug( + "Checking status for D_SOURCE: {}".format(source_config.pretty_name) + ) return status_obj.status() - -def unmount_file_system_in_error_case(staged_source, repository, source_config): +def 
unmount_file_system_in_error_case( + staged_source, repository, source_config +): try: - logger.debug("Un-mounting file system as last operation was not successful") + logger.debug( + "Un-mounting file system as last operation was not successful" + ) obj = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) obj.stop_couchbase() - helper_lib.unmount_file_system(staged_source.staged_connection, staged_source.parameters.mount_path) + helper_lib.unmount_file_system( + staged_source.staged_connection, + staged_source.parameters.mount_path, + ) logger.debug("Un mounting completed") except Exception as err: - logger.debug("Un-mounting failed, reason: "+str(err)) - + logger.debug("Un-mounting failed, reason: " + str(err)) diff --git a/src/operations/link_xdcr.py b/src/operations/link_xdcr.py index ec2c039..abf671a 100644 --- a/src/operations/link_xdcr.py +++ b/src/operations/link_xdcr.py @@ -1,26 +1,22 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, functions defined for XDCR ingestion mechanism -####################################################################################################################### +############################################################################## +# In this module, functions defined for XDCR ingestion mechanism. +############################################################################## -import logging -import os import json -import re +import logging -from generated.definitions import SnapshotDefinition import db_commands from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation from controller.resource_builder import Resource +from dlpx.virtualization.platform.exceptions import UserError from generated.definitions import SnapshotDefinition -from internal_exceptions.database_exceptions import DuplicateClusterError -from internal_exceptions.plugin_exceptions import MultipleSyncError, MultipleXDCRSyncError +from internal_exceptions.plugin_exceptions import MultipleSyncError from operations import config from operations import linking -from dlpx.virtualization.platform.exceptions import UserError logger = logging.getLogger(__name__) @@ -31,92 +27,98 @@ def resync_xdcr(staged_source, repository, source_config, input_parameters): raise UserError("Source password is mandatory in XDCR dsource type!") dsource_type = input_parameters.d_source_type dsource_name = source_config.pretty_name - bucket_size = staged_source.parameters.bucket_size - rx_connection = staged_source.staged_connection resync_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) couchbase_host = input_parameters.couchbase_host - - linking.check_for_concurrent(resync_process, dsource_type, dsource_name, couchbase_host) + linking.check_for_concurrent( + resync_process, dsource_type, dsource_name, couchbase_host + ) linking.configure_cluster(resync_process) - - - # 
common steps for both XDCR & CB back up
     bucket_details_source = resync_process.source_bucket_list()
     bucket_details_staged = resync_process.bucket_list()
-    buckets_toprocess = linking.buckets_precreation(resync_process, bucket_details_source, bucket_details_staged)
+    buckets_toprocess = linking.buckets_precreation(
+        resync_process, bucket_details_source, bucket_details_staged
+    )

-    # run this for all buckets
-    resync_process.setup_replication()
+    # run this for all buckets
+    resync_process.setup_replication()

     logger.debug("Finding staging_uuid & cluster_name on staging")
     staging_uuid = resync_process.get_replication_uuid()
-
     if staging_uuid is None:
         logger.debug("Can't find a replication UUID after setting it up")
         raise UserError("Can't find a replication UUID after setting it up")

-    # bucket_details_staged = resync_process.bucket_list()
-    # logger.debug("Filtering bucket name from output")
-    # filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged)
     for bkt in buckets_toprocess:
         resync_process.monitor_bucket(bkt, staging_uuid)
-
     linking.build_indexes(resync_process)
     logger.info("Stopping Couchbase")
     resync_process.stop_couchbase()
-    resync_process.save_config('parent')
+    resync_process.save_config("parent")


-def pre_snapshot_xdcr(staged_source, repository, source_config, input_parameters):
+def pre_snapshot_xdcr(
+    staged_source, repository, source_config, input_parameters
+):
     logger.info("In Pre snapshot...")
     pre_snapshot_process = CouchbaseOperation(
-        Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config(
-            source_config).build())
-    config.SNAP_SYNC_FILE_NAME = pre_snapshot_process.create_config_dir() + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION
-    # Don't care of sync.lck file as it will never de deleted even in post snapshot.
-    if helper_lib.check_file_present(staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME):
-        config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False
+        Resource.ObjectBuilder.set_staged_source(staged_source)
+        .set_repository(repository)
+        .set_source_config(source_config)
+        .build()
+    )
+    config.SNAP_SYNC_FILE_NAME = (
+        pre_snapshot_process.create_config_dir()
+        + "/"
+        + db_commands.constants.LOCK_SNAPSYNC_OPERATION
+    )
+    # Don't care about the sync.lck file as it will never be deleted, even
+    # in post snapshot.
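The check-then-create sequence that follows is the plugin's lock-file convention for serializing SnapSync jobs. As a hedged, self-contained sketch of that pattern (acquire_snapsync_lock is a hypothetical name; the helper calls and the config flag are the ones used in this diff):

from controller import helper_lib
from internal_exceptions.plugin_exceptions import MultipleSyncError
from operations import config


def acquire_snapsync_lock(connection, lock_path, message):
    # If the lock file exists, another job owns it: record that this job
    # must not clean it up, then fail fast.
    if helper_lib.check_file_present(connection, lock_path):
        config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = (
            False
        )
        raise MultipleSyncError()
    # Otherwise this job creates the lock and may delete it in post-snapshot.
    helper_lib.write_file(connection, message, lock_path)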
+ if helper_lib.check_file_present( + staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME + ): + config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = ( + False + ) raise MultipleSyncError() else: logger.debug("Creating lock file...") - msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(source_config.pretty_name, - input_parameters.couchbase_host) - helper_lib.write_file(staged_source.staged_connection, msg, config.SNAP_SYNC_FILE_NAME) + msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format( # noqa E501 + source_config.pretty_name, input_parameters.couchbase_host + ) + helper_lib.write_file( + staged_source.staged_connection, msg, config.SNAP_SYNC_FILE_NAME + ) logger.info("Stopping Couchbase") pre_snapshot_process.stop_couchbase() - pre_snapshot_process.save_config('parent') + pre_snapshot_process.save_config("parent") def post_snapshot_xdcr(staged_source, repository, source_config, dsource_type): logger.info("In Post snapshot...") post_snapshot_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) # post_snapshot_process.save_config() post_snapshot_process.start_couchbase() snapshot = SnapshotDefinition(validate=False) - bucket_details = post_snapshot_process.bucket_list() - - # if len(staged_source.parameters.config_settings_prov) != 0: - # bucket_list = [] - # for config_setting in staged_source.parameters.config_settings_prov: - # bucket_list.append(helper_lib.get_bucket_name_with_size(bucket_details, config_setting["bucketName"])) - # else: - # bucket_list = helper_lib.get_stg_all_bucket_list_with_ramquota_size(bucket_details) ind = post_snapshot_process.get_indexes_definition() logger.debug("indexes definition : {}".format(ind)) @@ -132,57 +134,64 @@ def post_snapshot_xdcr(staged_source, repository, source_config, dsource_type): snapshot.time_stamp = helper_lib.current_time() snapshot.snapshot_id = str(helper_lib.get_snapshot_id()) snapshot.couchbase_admin = post_snapshot_process.parameters.couchbase_admin - snapshot.couchbase_admin_password = post_snapshot_process.parameters.couchbase_admin_password - #logger.debug("snapshot schema: {}".format(snapshot)) - logger.debug("Deleting the snap sync lock file {}".format(config.SNAP_SYNC_FILE_NAME)) - helper_lib.delete_file(staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME) + snapshot.couchbase_admin_password = ( + post_snapshot_process.parameters.couchbase_admin_password + ) + # logger.debug("snapshot schema: {}".format(snapshot)) + logger.debug( + "Deleting the snap sync lock file {}".format( + config.SNAP_SYNC_FILE_NAME + ) + ) + helper_lib.delete_file( + staged_source.staged_connection, config.SNAP_SYNC_FILE_NAME + ) return snapshot def start_staging_xdcr(staged_source, repository, source_config): start_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name)) dsource_type = staged_source.parameters.d_source_type rx_connection = staged_source.staged_connection - 
start_staging.stop_couchbase() start_staging.delete_config() # TODO error handling - start_staging.restore_config(what='current') + start_staging.restore_config(what="current") start_staging.start_couchbase() - # already_set_up_done, name_conflict = start_staging.check_duplicate_replication( - # start_staging.parameters.stg_cluster_name) - # if already_set_up_done: - # logger.info("No need to XDCR setup again") - # elif name_conflict: - # raise DuplicateClusterError("Already cluster is present") - # else: - # logger.info("First time XDCR set up") - # start_staging.xdcr_setup() - - start_staging.setup_replication() - - - config_dir = start_staging.create_config_dir() - msg = "dSource Creation / Snapsync for dSource {} is in progress".format(source_config.pretty_name) - helper_lib.write_file(rx_connection, msg, - config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, - source_config.pretty_name)) + msg = "dSource Creation / Snapsync for dSource {} is in progress".format( + source_config.pretty_name + ) + helper_lib.write_file( + rx_connection, + msg, + config_dir + + "/" + + helper_lib.get_sync_lock_file_name( + dsource_type, source_config.pretty_name + ), + ) logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name)) def stop_staging_xdcr(staged_source, repository, source_config): stop_staging = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Disabling the D_SOURCE:{}".format(source_config.pretty_name)) dsource_type = staged_source.parameters.d_source_type @@ -193,16 +202,15 @@ def stop_staging_xdcr(staged_source, repository, source_config): logger.info("Deleting XDCR") stop_staging.xdcr_delete(cluster_name) config_dir = stop_staging.create_config_dir() - helper_lib.delete_file(rx_connection, - config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, - source_config.pretty_name)) + helper_lib.delete_file( + rx_connection, + config_dir + + "/" + + helper_lib.get_sync_lock_file_name( + dsource_type, source_config.pretty_name + ), + ) stop_staging.stop_couchbase() - stop_staging.save_config(what='current') + stop_staging.save_config(what="current") stop_staging.delete_config() logger.debug("D_SOURCE:{} disabled".format(source_config.pretty_name)) - - - - - - diff --git a/src/operations/linked.py b/src/operations/linked.py index 1a80ee8..cea4ff7 100644 --- a/src/operations/linked.py +++ b/src/operations/linked.py @@ -1,25 +1,28 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, all dSource related operations are implemented. 
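start_staging_xdcr and stop_staging_xdcr above are deliberately symmetric around the node configuration saved on Delphix storage. A minimal sketch of that symmetry, with ops standing in for the CouchbaseOperation object both functions build (the method calls are the ones used above, the wrapper functions are illustrative only):

def disable(ops):
    # Disable: quiesce the server, keep a copy of its current config on
    # Delphix storage, then remove it from the staging host.
    ops.stop_couchbase()
    ops.save_config(what="current")
    ops.delete_config()


def enable(ops):
    # Enable: drop any stale host-side config, restore the saved copy,
    # then bring the server back up.
    ops.stop_couchbase()
    ops.delete_config()
    ops.restore_config(what="current")
    ops.start_couchbase()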
-####################################################################################################################### +############################################################################# +# In this module, all dSource related operations are implemented +############################################################################# import logging import sys -from operations import config import db_commands -from operations import linking from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -from controller.resource_builder import Resource from controller.helper_lib import delete_file +from controller.resource_builder import Resource from db_commands import constants -from internal_exceptions.base_exceptions import PluginException, DatabaseException, GenericUserError -from internal_exceptions.plugin_exceptions import MountPathError, MultipleSnapSyncError -from operations import link_cbbkpmgr, link_xdcr from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.base_exceptions import DatabaseException +from internal_exceptions.base_exceptions import GenericUserError +from internal_exceptions.base_exceptions import PluginException +from internal_exceptions.plugin_exceptions import MultipleSnapSyncError +from operations import config +from operations import link_cbbkpmgr +from operations import link_xdcr +from operations import linking logger = logging.getLogger(__name__) @@ -28,9 +31,13 @@ def resync(staged_source, repository, source_config, input_parameters): logger.debug("Started ReSync...") try: if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.resync_cbbkpmgr(staged_source, repository, source_config, input_parameters) + link_cbbkpmgr.resync_cbbkpmgr( + staged_source, repository, source_config, input_parameters + ) elif input_parameters.d_source_type == constants.XDCR: - link_xdcr.resync_xdcr(staged_source, repository, source_config, input_parameters) + link_xdcr.resync_xdcr( + staged_source, repository, source_config, input_parameters + ) logger.debug("Completed resynchronization") except UserError: @@ -38,23 +45,37 @@ def resync(staged_source, repository, source_config, input_parameters): except Exception as ex_obj: logger.debug(str(ex_obj)) - _cleanup_in_exception_case(staged_source.staged_connection, True, False) + _cleanup_in_exception_case( + staged_source.staged_connection, + True, + False, + ) if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.unmount_file_system_in_error_case(staged_source, repository, source_config) - if isinstance(ex_obj, PluginException) or isinstance(ex_obj, DatabaseException) or isinstance(ex_obj, GenericUserError): + link_cbbkpmgr.unmount_file_system_in_error_case( + staged_source, repository, source_config + ) + if ( + isinstance(ex_obj, PluginException) + or isinstance(ex_obj, DatabaseException) + or isinstance(ex_obj, GenericUserError) + ): raise ex_obj.to_user_error()(None).with_traceback( - sys.exc_info()[2]) + sys.exc_info()[2], + ) raise - def pre_snapshot(staged_source, repository, source_config, input_parameters): logger.info("In Pre snapshot...") try: if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.pre_snapshot_cbbkpmgr(staged_source, repository, source_config, input_parameters) + link_cbbkpmgr.pre_snapshot_cbbkpmgr( + staged_source, repository, source_config, input_parameters + ) elif input_parameters.d_source_type == constants.XDCR: - link_xdcr.pre_snapshot_xdcr(staged_source, repository, 
source_config, input_parameters) + link_xdcr.pre_snapshot_xdcr( + staged_source, repository, source_config, input_parameters + ) logger.debug("Completed Pre-snapshot") except UserError: raise @@ -62,21 +83,31 @@ def pre_snapshot(staged_source, repository, source_config, input_parameters): logger.debug("Caught exception: {}".format(str(ex_obj))) _cleanup_in_exception_case(staged_source.staged_connection, True, True) if input_parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.unmount_file_system_in_error_case(staged_source, repository, source_config) - if isinstance(ex_obj, PluginException) or isinstance(ex_obj, DatabaseException) or isinstance(ex_obj, GenericUserError): + link_cbbkpmgr.unmount_file_system_in_error_case( + staged_source, repository, source_config + ) + if ( + isinstance(ex_obj, PluginException) + or isinstance(ex_obj, DatabaseException) + or isinstance(ex_obj, GenericUserError) + ): raise ex_obj.to_user_error()(None).with_traceback( - sys.exc_info()[2]) + sys.exc_info()[2], + ) raise - def post_snapshot(staged_source, repository, source_config, dsource_type): logger.info("In Post snapshot...") try: if dsource_type == constants.CBBKPMGR: - return link_cbbkpmgr.post_snapshot_cbbkpmgr(staged_source, repository, source_config, dsource_type) + return link_cbbkpmgr.post_snapshot_cbbkpmgr( + staged_source, repository, source_config, dsource_type + ) elif dsource_type == constants.XDCR: - return link_xdcr.post_snapshot_xdcr(staged_source, repository, source_config, dsource_type) + return link_xdcr.post_snapshot_xdcr( + staged_source, repository, source_config, dsource_type + ) logger.debug("Completed Post-snapshot") except UserError: raise @@ -84,18 +115,25 @@ def post_snapshot(staged_source, repository, source_config, dsource_type): logger.debug("Caught exception in post snapshot: {}".format(str(err))) _cleanup_in_exception_case(staged_source.staged_connection, True, True) if dsource_type == constants.CBBKPMGR: - link_cbbkpmgr.unmount_file_system_in_error_case(staged_source, repository, source_config) + link_cbbkpmgr.unmount_file_system_in_error_case( + staged_source, repository, source_config + ) raise - def start_staging(staged_source, repository, source_config): logger.debug("Enabling the D_SOURCE:{}".format(source_config.pretty_name)) try: if staged_source.parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.start_staging_cbbkpmgr(staged_source, repository, source_config) + link_cbbkpmgr.start_staging_cbbkpmgr( + staged_source, repository, source_config + ) elif staged_source.parameters.d_source_type == constants.XDCR: - link_xdcr.start_staging_xdcr(staged_source, repository, source_config) + link_xdcr.start_staging_xdcr( + staged_source, + repository, + source_config, + ) logger.debug("D_SOURCE:{} enabled".format(source_config.pretty_name)) except UserError: raise @@ -108,50 +146,84 @@ def stop_staging(staged_source, repository, source_config): logger.debug("Disabling the D_SOURCE:{}".format(source_config.pretty_name)) try: if staged_source.parameters.d_source_type == constants.CBBKPMGR: - link_cbbkpmgr.stop_staging_cbbkpmgr(staged_source, repository, source_config) + link_cbbkpmgr.stop_staging_cbbkpmgr( + staged_source, repository, source_config + ) elif staged_source.parameters.d_source_type == constants.XDCR: - link_xdcr.stop_staging_xdcr(staged_source, repository, source_config) + link_xdcr.stop_staging_xdcr( + staged_source, + repository, + source_config, + ) logger.debug("D_SOURCE:{} disabled".format(source_config.pretty_name)) except UserError: 
raise except Exception as err: logger.debug("Disable operation is failed!" + str(err)) raise - def d_source_status(staged_source, repository, source_config): return linking.d_source_status(staged_source, repository, source_config) - -#This function verifies that LOCK_SNAPSYNC_OPERATION or LOCK_SYNC_OPERATION is present in hidden folder or not -#If any file is present then it will raise exception -#This function does not cover the case for XDCR sync file presence. +# This function verifies that LOCK_SNAPSYNC_OPERATION or LOCK_SYNC_OPERATION +# is present in hidden folder or not +# If any file is present then it will raise exception +# This function does not cover the case for XDCR sync file presence. def check_mount_path(staged_source, repository): mount_path_check = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).build()) - snapsync_filename = mount_path_check.create_config_dir() + "/" + db_commands.constants.LOCK_SNAPSYNC_OPERATION - sync_filename = mount_path_check.create_config_dir() + "/" + db_commands.constants.LOCK_SYNC_OPERATION - if helper_lib.check_file_present(staged_source.staged_connection, snapsync_filename) : - raise MultipleSnapSyncError("Another Snap-Sync process is in progress ", snapsync_filename).to_user_error() - if helper_lib.check_file_present(staged_source.staged_connection, sync_filename): - raise MultipleSnapSyncError("Another Sync process is in progress ", sync_filename).to_user_error() + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .build() + ) + snapsync_filename = ( + mount_path_check.create_config_dir() + + "/" + + db_commands.constants.LOCK_SNAPSYNC_OPERATION + ) + sync_filename = ( + mount_path_check.create_config_dir() + + "/" + + db_commands.constants.LOCK_SYNC_OPERATION + ) + if helper_lib.check_file_present( + staged_source.staged_connection, snapsync_filename + ): + raise MultipleSnapSyncError( + "Another Snap-Sync process is in progress ", snapsync_filename + ).to_user_error() + file_present = helper_lib.check_file_present( + staged_source.staged_connection, sync_filename + ) + if file_present: + raise MultipleSnapSyncError( + "Another Sync process is in progress ", sync_filename + ).to_user_error() return True # Below are specific functions for this module only + def _cleanup_in_exception_case(rx_connection, is_sync, is_snap_sync): logger.debug("In clean up") try: - if is_snap_sync and config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED: + if ( + is_snap_sync + and config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED # noqa E501 + ): delete_file(rx_connection, config.SNAP_SYNC_FILE_NAME) - if is_sync and config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED: + if ( + is_sync + and config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED # noqa E501 + ): delete_file(rx_connection, config.SYNC_FILE_NAME) - if not config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED or \ - not config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED: + if ( + not config.SNAP_SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED # noqa E501 + or not config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED + ): logger.debug(constants.ALREADY_SYNC_FILE_PRESENT_ON_HOST) - except Exception as err : + except Exception as err: logger.debug("Failed to clean up the lock files {}".format(str(err))) raise diff --git a/src/operations/linking.py b/src/operations/linking.py index 15a9331..df4c165 100644 --- a/src/operations/linking.py 
+++ b/src/operations/linking.py @@ -1,51 +1,69 @@ # -# Copyright (c) 2020-2021 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### -# In this module, functions defined common ingestion modes - backup and xdrc -####################################################################################################################### +############################################################################## +# In this module, functions defined common ingestion modes - backup and xdcr +############################################################################## import logging import os -import json import time -from dlpx.virtualization.platform import Status - import db_commands from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -from controller.helper_lib import get_bucket_size_in_MB, get_sync_lock_file_name from controller.resource_builder import Resource -from generated.definitions import SnapshotDefinition -from internal_exceptions.database_exceptions import DuplicateClusterError -from internal_exceptions.plugin_exceptions import MultipleSyncError, MultipleXDCRSyncError -from operations import config +from dlpx.virtualization.platform import Status from dlpx.virtualization.platform.exceptions import UserError +from internal_exceptions.plugin_exceptions import MultipleXDCRSyncError +from operations import config logger = logging.getLogger(__name__) + # potentially to remove - as checks are done on the mount points -def check_for_concurrent(couchbase_obj, dsource_type, dsource_name, couchbase_host): +def check_for_concurrent( + couchbase_obj, + dsource_type, + dsource_name, + couchbase_host, +): config_dir = couchbase_obj.create_config_dir() - config.SYNC_FILE_NAME = config_dir + "/" + helper_lib.get_sync_lock_file_name(dsource_type, dsource_name) - + config.SYNC_FILE_NAME = ( + config_dir + + "/" + + helper_lib.get_sync_lock_file_name(dsource_type, dsource_name) + ) delphix_config_dir = couchbase_obj.get_config_directory() logger.debug("Check if we have config dir in Delphix storage") - if not helper_lib.check_dir_present(couchbase_obj.connection, delphix_config_dir): - logger.debug("make a Delphix storage dir {}".format(delphix_config_dir)) + if not helper_lib.check_dir_present( + couchbase_obj.connection, + delphix_config_dir, + ): + logger.debug("make Delphix storage dir:{}".format(delphix_config_dir)) couchbase_obj.make_directory(delphix_config_dir) - if not verify_sync_lock_file_for_this_job(couchbase_obj.connection, config.SYNC_FILE_NAME): + if not verify_sync_lock_file_for_this_job( + couchbase_obj.connection, config.SYNC_FILE_NAME + ): config.SYNC_FLAG_TO_USE_CLEANUP_ONLY_IF_CURRENT_JOB_CREATED = False logger.debug("Sync file is already created by other dSource") - raise MultipleXDCRSyncError("Sync file is already created by other dSource") + raise MultipleXDCRSyncError( + "Sync file is already created by other dSource", + ) else: # creating sync file - msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format(dsource_name, couchbase_host) - helper_lib.write_file(couchbase_obj.connection, msg, config.SYNC_FILE_NAME) + msg = db_commands.constants.RESYNCE_OR_SNAPSYNC_FOR_OTHER_OBJECT_IN_PROGRESS.format( # noqa E501 + dsource_name, couchbase_host + ) + helper_lib.write_file( + couchbase_obj.connection, + msg, + config.SYNC_FILE_NAME, + ) + def 
verify_sync_lock_file_for_this_job(rx_connection, sync_filename): if helper_lib.check_file_present(rx_connection, sync_filename): @@ -79,17 +97,19 @@ def configure_cluster(couchbase_obj): couchbase_obj.delete_data_folder() couchbase_obj.delete_config_folder() - # we can't use normal monitor as server is not configured yet + # we can't use normal monitor as server is not configured yet couchbase_obj.start_couchbase(no_wait=True) end_time = time.time() + 3660 server_status = Status.INACTIVE - #break the loop either end_time is exceeding from 1 hour or server is successfully started + # break the loop either end_time is exceeding from 1 hour or + # server is successfully started while time.time() < end_time and server_status != Status.ACTIVE: - helper_lib.sleepForSecond(1) # waiting for 1 second - server_status = couchbase_obj.staging_bootstrap_status() # fetching status + helper_lib.sleepForSecond(1) # waiting for 1 second + # fetching status + server_status = couchbase_obj.staging_bootstrap_status() logger.debug("server status {}".format(server_status)) # check if cluster not configured and raise an issue @@ -99,79 +119,120 @@ def configure_cluster(couchbase_obj): couchbase_obj.cluster_init() logger.debug("Cluster configured") else: - logger.debug("Node configured but no configuration in Delphix - ???????") + logger.debug("Node configured but no configuration in Delphix") if couchbase_obj.check_cluster_configured(): - logger.debug("Configured with staging user/password and alive so not a problem - continue") + logger.debug( + "Configured with staging user/password and alive " + "so not a problem - continue" + ) else: - logger.debug("Cluster configured but not with user/password given in Delphix potentially another cluster") - raise UserError("Cluster configured but not with user/password given in Delphix potentially another cluster") - - -def buckets_precreation(couchbase_obj, bucket_details_source, bucket_details_staged): + logger.debug( + "Cluster configured but not with user/password given " + "in Delphix potentially another cluster" + ) + raise UserError( + "Cluster configured but not with user/password given " + "in Delphix potentially another cluster" + ) + + +def buckets_precreation( + couchbase_obj, + bucket_details_source, + bucket_details_staged, +): # common steps for both XDCR & CB back up # return a list of precreated buckets to process logger.debug("buckets_precreation") bucket_list = [] config_setting = couchbase_obj.parameters.config_settings_prov - logger.debug("Bucket names passed for configuration: {}".format(config_setting)) + log_msg = "Bucket names passed for configuration: {}".format( + config_setting, + ) + logger.debug(log_msg) bucket_configured_staged = [] if len(config_setting) > 0: - # process for list of buckets + # process for list of buckets logger.debug("Getting bucket information from config") - buckets_dict = { b["name"]:b for b in bucket_details_source } + buckets_dict = {b["name"]: b for b in bucket_details_source} for config_bucket in config_setting: bucket_configured_staged.append(config_bucket["bucketName"]) - logger.debug("Filtering bucket name with size only from above output") + log_msg = "Filtering bucket name with size only from above output" + logger.debug(log_msg) bucket = buckets_dict[config_bucket["bucketName"]] logger.debug("Running bucket operations for {}".format(bucket)) - bkt_name = bucket['name'] - bkt_size = bucket['ram'] - bkt_type = bucket['bucketType'] - bkt_compression = bucket['compressionMode'] + bkt_size = bucket["ram"] + 
bkt_type = bucket["bucketType"] + bkt_compression = bucket["compressionMode"] - bkt_size_mb = helper_lib.get_bucket_size_in_MB(couchbase_obj.parameters.bucket_size, bkt_size) + bkt_size_mb = helper_lib.get_bucket_size_in_MB( + couchbase_obj.parameters.bucket_size, bkt_size + ) if config_bucket["bucketName"] not in bucket_details_staged: - couchbase_obj.bucket_create(config_bucket["bucketName"], bkt_size_mb, bkt_type, bkt_compression) + couchbase_obj.bucket_create( + config_bucket["bucketName"], + bkt_size_mb, + bkt_type, + bkt_compression, + ) else: - logger.debug("Bucket {} already present in staged environment. Recreating bucket ".format( - config_bucket["bucketName"])) + logger.debug( + "Bucket {} already present in staged environment. " + "Recreating bucket ".format(config_bucket["bucketName"]) + ) couchbase_obj.bucket_remove(config_bucket["bucketName"]) - couchbase_obj.bucket_create(config_bucket["bucketName"], bkt_size_mb, bkt_type, bkt_compression) - - bucket_list.append(config_bucket["bucketName"]) + couchbase_obj.bucket_create( + config_bucket["bucketName"], + bkt_size_mb, + bkt_type, + bkt_compression, + ) + bucket_list.append(config_bucket["bucketName"]) logger.debug("Finding buckets present at staged server") bucket_details_staged = couchbase_obj.bucket_list() - filter_bucket_list = helper_lib.filter_bucket_name_from_output(bucket_details_staged) - extra_bucket = list(set(filter_bucket_list) - set(bucket_configured_staged)) + filter_bucket_list = helper_lib.filter_bucket_name_from_output( + bucket_details_staged + ) + extra_bucket = set(filter_bucket_list) - set(bucket_configured_staged) + extra_bucket = list(extra_bucket) logger.debug("Extra bucket found to delete:{} ".format(extra_bucket)) for bucket in extra_bucket: couchbase_obj.bucket_remove(bucket) else: - # process for all buckets - filter_source_bucket = helper_lib.filter_bucket_name_from_json(bucket_details_source) + # process for all buckets + # filter_source_bucket = helper_lib.filter_bucket_name_from_json( + # bucket_details_source + # ) for items in bucket_details_source: if items: logger.debug("Running bucket operations for {}".format(items)) - bkt_name = items['name'] - bkt_size = items['ram'] - bkt_type = items['bucketType'] - bkt_compression = items['compressionMode'] - - bkt_size_mb = helper_lib.get_bucket_size_in_MB(couchbase_obj.parameters.bucket_size, bkt_size) + bkt_name = items["name"] + bkt_size = items["ram"] + bkt_type = items["bucketType"] + bkt_compression = items["compressionMode"] + + bkt_size_mb = helper_lib.get_bucket_size_in_MB( + couchbase_obj.parameters.bucket_size, bkt_size + ) if bkt_name not in bucket_details_staged: - couchbase_obj.bucket_create(bkt_name, bkt_size_mb, bkt_type, bkt_compression) + couchbase_obj.bucket_create( + bkt_name, bkt_size_mb, bkt_type, bkt_compression + ) else: logger.debug( - "Bucket {} already present in staged environment. Recreating bucket ".format(bkt_name)) + "Bucket {} already present in staged environment. 
" + "Recreating bucket ".format(bkt_name) + ) couchbase_obj.bucket_remove(bkt_name) - couchbase_obj.bucket_create(bkt_name, bkt_size_mb, bkt_type, bkt_compression) + couchbase_obj.bucket_create( + bkt_name, bkt_size_mb, bkt_type, bkt_compression + ) bucket_list.append(bkt_name) - return bucket_list @@ -187,11 +248,16 @@ def build_indexes(couchbase_obj): couchbase_obj.check_index_build() - def d_source_status(staged_source, repository, source_config): status_obj = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).set_source_config( - source_config).build()) - logger.debug("Checking status for D_SOURCE: {}".format(source_config.pretty_name)) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) + logger.debug( + "Checking status for D_SOURCE: {}".format( + source_config.pretty_name, + ) + ) return status_obj.status() - diff --git a/src/operations/virtual.py b/src/operations/virtual.py index 3b373c7..7f43958 100644 --- a/src/operations/virtual.py +++ b/src/operations/virtual.py @@ -1,33 +1,31 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # -####################################################################################################################### +############################################################################## """ -# In this module, VDB related operations are implemented. +# In this module, VDB related operations are implemented """ -####################################################################################################################### +############################################################################## -import re import json -import time - -# Auto generated libs +import logging import sys +import time -from generated.definitions import SnapshotDefinition -from generated.definitions import SourceConfigDefinition - -from internal_exceptions.database_exceptions import FailedToReadBucketDataFromSnapshot, CouchbaseServicesError from controller import helper_lib from controller.couchbase_operation import CouchbaseOperation -import logging from controller.resource_builder import Resource +from dlpx.virtualization.common import RemoteConnection from dlpx.virtualization.common import RemoteEnvironment -from dlpx.virtualization.common import RemoteHost from dlpx.virtualization.common import RemoteUser -from dlpx.virtualization.common import RemoteConnection from dlpx.virtualization.platform import Status +from generated.definitions import SnapshotDefinition +from generated.definitions import SourceConfigDefinition +from internal_exceptions.database_exceptions import CouchbaseServicesError +from internal_exceptions.database_exceptions import ( + FailedToReadBucketDataFromSnapshot, +) # Global logger for this File logger = logging.getLogger(__name__) @@ -35,14 +33,19 @@ def vdb_status(virtual_source, repository, source_config): provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) cb_status = provision_process.status() logger.debug("VDB Status is {}".format(cb_status)) if cb_status == Status.ACTIVE: logger.debug("Checking mount point") - if 
helper_lib.check_stale_mountpoint(provision_process.connection, virtual_source.parameters.mount_path): + if helper_lib.check_stale_mountpoint( + provision_process.connection, virtual_source.parameters.mount_path + ): logger.debug("error with mount point - report inactive") return Status.INACTIVE else: @@ -54,21 +57,34 @@ def vdb_status(virtual_source, repository, source_config): def vdb_unconfigure(virtual_source, repository, source_config): # delete all buckets provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) vdb_stop(virtual_source, repository, source_config) provision_process.delete_config() - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): for node in provision_process.parameters.node_list: logger.debug("+++++++++++++++++++++++++++") logger.debug(node) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build(), - make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) addnode.delete_config() addnode.stop_couchbase() @@ -79,24 +95,32 @@ def vdb_reconfigure(virtual_source, repository, source_config, snapshot): logger.debug("In vdb_reconfigure...") provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) - + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) provision_process.stop_couchbase() - - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): multinode = True server_count = len(provision_process.parameters.node_list) + 1 else: multinode = False + server_count = 1 nodeno = 1 - provision_process.restore_config(what='current', nodeno=nodeno) + provision_process.restore_config(what="current", nodeno=nodeno) provision_process.start_couchbase(no_wait=multinode) - - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): for node in provision_process.parameters.node_list: nodeno = nodeno + 1 logger.debug("+++++++++++++++++++++++++++") @@ -104,34 +128,47 @@ def vdb_reconfigure(virtual_source, repository, source_config, snapshot): logger.debug(nodeno) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build(), - 
make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) addnode.stop_couchbase() - addnode.restore_config(what='current', nodeno=nodeno) + addnode.restore_config(what="current", nodeno=nodeno) addnode.start_couchbase(no_wait=multinode) - logger.debug("reconfigure for multinode: {}".format(multinode)) - - if multinode == True: - - + if multinode: active_servers = {} logger.debug("wait for nodes") - logger.debug("server count: {} active servers: {}".format(server_count, sum(active_servers.values()))) + logger.debug( + "server count: {} active servers: {}".format( + server_count, sum(active_servers.values()) + ) + ) end_time = time.time() + 3660 - - - #break the loop either end_time is exceeding from 1 minute or server is successfully started - while time.time() < end_time and sum(active_servers.values()) != server_count: - logger.debug("server count 2: {} active servers: {}".format(server_count, sum(active_servers.values()))) + # break the loop when either 1 hour has elapsed or the server + # has successfully started + while ( + time.time() < end_time + and sum(active_servers.values()) != server_count + ): + logger.debug( + "server count 2: {} active servers: {}".format( + server_count, sum(active_servers.values()) + ) + ) nodeno = 1 - helper_lib.sleepForSecond(1) # waiting for 1 second - server_status = provision_process.status() # fetching status + helper_lib.sleepForSecond(1) # waiting for 1 second + server_status = provision_process.status() # fetching status logger.debug("server status {}".format(server_status)) if server_status == Status.ACTIVE: active_servers[nodeno] = 1 @@ -143,17 +180,21 @@ logger.debug(nodeno) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build(), - make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) - server_status = addnode.status() # fetching status + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + server_status = addnode.status() # fetching status logger.debug("server status {}".format(server_status)) if server_status == Status.ACTIVE: active_servers[nodeno] = 1 - - - return _source_config(virtual_source, repository, source_config, snapshot) @@ -163,25 +204,23 @@ def vdb_configure(virtual_source, snapshot, repository): logger.debug("VDB CONFIG START") provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_snapshot( - snapshot).build()) - - - - + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_snapshot(snapshot) + .build() + ) # TODO: # fail if already has cluster ? 
- # to make sure there is no config + # to make sure there is no config provision_process.delete_config() # provision_process.delete_config_folder() + provision_process.restore_config(what="parent") - provision_process.restore_config(what='parent') - - # if bucket doesn't existing in target cluster - # couchbase will delete directory while starting + # if bucket doesn't exist in target cluster + # couchbase will delete directory while starting # so we have to rename it before start bucket_list_and_size = json.loads(snapshot.bucket_list) @@ -189,43 +228,25 @@ if not bucket_list_and_size: raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.") else: - logger.debug("snapshot bucket data is: {}".format(bucket_list_and_size)) - - - - # for item in helper_lib.filter_bucket_name_from_output(bucket_list_and_size): - # logger.debug("Checking bucket: {}".format(item)) - # bucket_name = item.split(',')[0] - # # rename folder - # provision_process.move_bucket(bucket_name, 'save') + logger.debug( + "snapshot bucket data is: {}".format(bucket_list_and_size) + ) provision_process.restart_couchbase(provision=True) provision_process.rename_cluster() - # provision_process.node_init() - # provision_process.cluster_init() - #provision_process.node_init() - #provision_process.cluster_init() - - - #_do_provision(provision_process, snapshot) - #_cleanup(provision_process, snapshot) - - #_build_indexes(provision_process, snapshot) - - # if self.__node_local: - # logger.debug("it will start on main envioronment") - # connection = self.config.connection - # else: - # logger.debug("it will start on an additional environment {}".format(str(self.__node_environment))) - # connection=make_nonprimary_connection(self.config.connection, self.__node_environment, self.__node_envuser) - nodeno = 1 + logger.debug( + "MAIN CONNECTION HOST: {}".format( + provision_process.connection.environment.host.name + ) + ) - logger.debug("MAIN CONNECTION HOST: {}".format(provision_process.connection.environment.host.name)) - - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): for node in provision_process.parameters.node_list: nodeno = nodeno + 1 logger.debug("+++++++++++++++++++++++++++") @@ -233,32 +254,40 @@ logger.debug(node) logger.debug(nodeno) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_snapshot( - snapshot).build(), - make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) - logger.debug("ADDITIONAL CONNECTION HOST: {}".format(provision_process.connection.environment.host.name)) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_snapshot(snapshot) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + logger.debug( + "ADDITIONAL CONNECTION HOST: {}".format( + provision_process.connection.environment.host.name + ) + ) addnode.addnode(nodeno, node) # TODO # FINISH HERE # addnode.delete_config() # addnode.stop_couchbase() - src_cfg_obj = _source_config(virtual_source, repository, None, snapshot) return src_cfg_obj - # except FailedToReadBucketDataFromSnapshot as 
err: - # raise FailedToReadBucketDataFromSnapshot("Provision is failed. " + err.message).to_user_error(), None, \ - # sys.exc_info()[2] - # except Exception as err: - # logger.debug("Provision is failed {}".format(err.message)) - # raise -def make_nonprimary_connection(primary_connection, secondary_env_ref, secondary_user_ref): +def make_nonprimary_connection( + primary_connection, secondary_env_ref, secondary_user_ref +): dummy_host = primary_connection.environment.host user = RemoteUser(name="unused", reference=secondary_user_ref) - environment = RemoteEnvironment(name="unused", reference=secondary_env_ref, host=dummy_host) + environment = RemoteEnvironment( + name="unused", reference=secondary_env_ref, host=dummy_host + ) return RemoteConnection(environment=environment, user=user) @@ -268,7 +297,9 @@ def _do_provision(provision_process, snapshot): if not bucket_list_and_size: raise FailedToReadBucketDataFromSnapshot("Snapshot Data is empty.") else: - logger.debug("snapshot bucket data is: {}".format(bucket_list_and_size)) + logger.debug( + "snapshot bucket data is: {}".format(bucket_list_and_size) + ) bucket_list_and_size = json.loads(bucket_list_and_size) @@ -279,35 +310,38 @@ except Exception as err: logger.debug("Failed to get bucket list. Error is " + str(err)) - - renamed_folders = [] - for item in bucket_list_and_size: logger.debug("Checking bucket: {}".format(item)) # try: - bucket_name = item['name'] - bkt_size = item['ram'] - bkt_type = item['bucketType'] - bkt_compression = item['compressionMode'] + bucket_name = item["name"] + bkt_size = item["ram"] + bkt_type = item["bucketType"] + bkt_compression = item["compressionMode"] bkt_size_mb = helper_lib.get_bucket_size_in_MB(0, bkt_size) if bucket_name not in bucket_list: # a new bucket needs to be created logger.debug("Creating bucket: {}".format(bucket_name)) - provision_process.bucket_create(bucket_name, bkt_size_mb, bkt_type, bkt_compression) + provision_process.bucket_create( + bucket_name, bkt_size_mb, bkt_type, bkt_compression + ) helper_lib.sleepForSecond(2) else: - logger.debug("Bucket {} exist - no need to rename directory".format(bucket_name)) + logger.debug( + "Bucket {} exists - no need to rename directory".format( + bucket_name + ) + ) - provision_process.stop_couchbase() - for item in helper_lib.filter_bucket_name_from_output(bucket_list_and_size): + for item in helper_lib.filter_bucket_name_from_output( + bucket_list_and_size + ): logger.debug("Checking bucket: {}".format(item)) - bucket_name = item.split(',')[0] + bucket_name = item.split(",")[0] logger.debug("restoring folders") - provision_process.move_bucket(bucket_name, 'restore') - - + provision_process.move_bucket(bucket_name, "restore") + provision_process.start_couchbase() # getting config directory path @@ -315,23 +349,20 @@ # making directory and changing permission to 755. 
provision_process.make_directory(directory) - # This file path is being used to store the bucket information coming in snapshot - config_file_path = provision_process.get_config_file_path() - - #content = "BUCKET_LIST=" + _find_bucket_name_from_snapshot(snapshot) - - # Adding bucket list in config file path .config file, inside .delphix folder - #helper_lib.write_file(provision_process.connection, content, config_file_path) + # This file path is being used to store the bucket information + # coming in snapshot def _cleanup(provision_process, snapshot): logger.debug("Deleting extra buckets from target host") bucket_list = [] - # Get details of already exist buckets on the target server. We need to delete if some of these are not needed + # Get details of already exist buckets on the target server. + # We need to delete if some of these are not needed try: bucket_list = provision_process.bucket_list() logger.debug(bucket_list) - # Removing extra information captured like ramsize, ramused. Only need to get bucket name from output + # Removing extra information captured like ramsize, ramused. + # Only need to get bucket name from output bucket_list = helper_lib.filter_bucket_name_from_output(bucket_list) except Exception as err: logger.debug("Failed to get bucket list. Error is " + str(err)) @@ -339,9 +370,11 @@ def _cleanup(provision_process, snapshot): snapshot_bucket_list_and_size = snapshot.bucket_list snapshot_bucket = _find_bucket_name_from_snapshot(snapshot) - if (snapshot_bucket): - logger.debug("BUCKET_LIST to be provisioned: {}".format(snapshot_bucket)) - snapshot_bucket_list = snapshot_bucket.split(':') + if snapshot_bucket: + logger.debug( + "BUCKET_LIST to be provisioned: {}".format(snapshot_bucket) + ) + snapshot_bucket_list = snapshot_bucket.split(":") bucket_to_delete = [] bucket_to_update = [] for bkt in bucket_list: @@ -353,7 +386,9 @@ def _cleanup(provision_process, snapshot): logger.debug("Bucket list to delete: {} ".format(bucket_to_delete)) _bucket_common_task(provision_process, bucket_to_delete) logger.debug("Bucket list to update: {} ".format(bucket_to_update)) - _bucket_modify_task(provision_process, bucket_to_update, snapshot_bucket_list_and_size) + _bucket_modify_task( + provision_process, bucket_to_update, snapshot_bucket_list_and_size + ) else: logger.debug("This block is not expected to run") @@ -369,17 +404,25 @@ def _bucket_common_task(provision_process, bucket_list): helper_lib.sleepForSecond(2) -def _bucket_modify_task(provision_process, bucket_list, snapshot_bucket_list_and_size): +def _bucket_modify_task( + provision_process, bucket_list, snapshot_bucket_list_and_size +): for bkt in bucket_list: bkt = bkt.strip() logger.debug("Modification of bucket {} started".format(bkt)) - ramquotasize = _find_bucket_size_byname(bkt, snapshot_bucket_list_and_size) - logger.debug("Update bucket {} with ramsize {}MB".format(bkt, ramquotasize)) + ramquotasize = _find_bucket_size_byname( + bkt, snapshot_bucket_list_and_size + ) + logger.debug( + "Update bucket {} with ramsize {}MB".format(bkt, ramquotasize) + ) provision_process.bucket_edit_ramquota(bkt, _ramsize=ramquotasize) helper_lib.sleepForSecond(2) -def _source_config(virtual_source, repository=None, source_config=None, snapshot=None): +def _source_config( + virtual_source, repository=None, source_config=None, snapshot=None +): port = virtual_source.parameters.couchbase_port mount_path = virtual_source.parameters.mount_path host = virtual_source.connection.environment.host.name @@ -388,61 +431,95 @@ def 
_source_config(virtual_source, repository=None, source_config=None, snapshot couchbase_src_host=host, couchbase_src_port=port, pretty_name=pretty_name, - db_path=mount_path + db_path=mount_path, ) def vdb_start(virtual_source, repository, source_config): provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Starting couchbase server") try: provision_process.start_couchbase() - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): for node in provision_process.parameters.node_list: logger.debug("+++++++++++++++++++++++++++") logger.debug(node) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build(), - make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) addnode.start_couchbase() except Exception: - raise CouchbaseServicesError(" Start").to_user_error()(None).with_traceback(sys.exc_info()[2]) + raise CouchbaseServicesError(" Start").to_user_error()( + None + ).with_traceback(sys.exc_info()[2]) def vdb_stop(virtual_source, repository, source_config): provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) logger.debug("Stopping couchbase server") provision_process.stop_couchbase() - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): for node in provision_process.parameters.node_list: logger.debug("+++++++++++++++++++++++++++") logger.debug(node) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build(), - make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) addnode.stop_couchbase() + def vdb_pre_snapshot(virtual_source, repository, source_config): logger.debug("In Pre snapshot...") provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) - + Resource.ObjectBuilder.set_virtual_source(virtual_source) + 
.set_repository(repository) + .set_source_config(source_config) + .build() + ) nodeno = 1 - provision_process.save_config(what='current', nodeno=nodeno) + provision_process.save_config(what="current", nodeno=nodeno) - if provision_process.parameters.node_list is not None and len(provision_process.parameters.node_list) > 0: + if ( + provision_process.parameters.node_list is not None + and len(provision_process.parameters.node_list) > 0 + ): for node in provision_process.parameters.node_list: nodeno = nodeno + 1 logger.debug("+++++++++++++++++++++++++++") @@ -450,30 +527,33 @@ def vdb_pre_snapshot(virtual_source, repository, source_config): logger.debug(nodeno) logger.debug("+++++++++++++++++++++++++++") addnode = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build(), - make_nonprimary_connection(provision_process.connection, node['environment'], node['environmentUser'])) - addnode.save_config(what='current', nodeno=nodeno) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build(), + make_nonprimary_connection( + provision_process.connection, + node["environment"], + node["environmentUser"], + ), + ) + addnode.save_config(what="current", nodeno=nodeno) def post_snapshot(virtual_source, repository, source_config): try: logger.debug("Taking Post Snapshot...") provision_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).set_source_config( - source_config).build()) - # config_file = provision_process.get_config_file_path() - - # stdout, stderr, exit_code = helper_lib.read_file(virtual_source.connection, config_file) - # bucket_list = re.sub('BUCKET_LIST=', '', stdout) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .set_source_config(source_config) + .build() + ) ind = [] - #ind = provision_process.get_indexes_definition() - #logger.debug("indexes definition : {}".format(ind)) - - - + # ind = provision_process.get_indexes_definition() + # logger.debug("indexes definition : {}".format(ind)) bucket_details = json.dumps(provision_process.bucket_list()) logger.debug("BUCKET_LIST={}".format(bucket_details)) @@ -482,12 +562,21 @@ def post_snapshot(virtual_source, repository, source_config): couchbase_port = virtual_source.parameters.couchbase_port couchbase_host = virtual_source.connection.environment.host.name snapshot_id = str(helper_lib.get_snapshot_id()) - snapshot = SnapshotDefinition(db_path=db_path, couchbase_port=couchbase_port, couchbase_host=couchbase_host, - bucket_list=bucket_details, time_stamp=time_stamp, snapshot_id=snapshot_id, indexes = ind) + snapshot = SnapshotDefinition( + db_path=db_path, + couchbase_port=couchbase_port, + couchbase_host=couchbase_host, + bucket_list=bucket_details, + time_stamp=time_stamp, + snapshot_id=snapshot_id, + indexes=ind, + ) snapshot.couchbase_admin = provision_process.parameters.couchbase_admin - snapshot.couchbase_admin_password = provision_process.parameters.couchbase_admin_password - + snapshot.couchbase_admin_password = ( + provision_process.parameters.couchbase_admin_password + ) + return snapshot except Exception as err: logger.debug("Snap shot is failed with error {}".format(str(err))) @@ -498,28 +587,25 @@ def post_snapshot(virtual_source, repository, source_config): def _find_bucket_name_from_snapshot(snapshot): bucket_list_and_size = 
json.loads(snapshot.bucket_list) logger.debug("SnapShot bucket data is: {}".format(bucket_list_and_size)) - # # bucket_list_and_size contains the ramsize e.g. "Bucket1,122:Bucket2,3432" - # # Filtering the size from above information. - # bucket_list_and_size += ':' - # # Parsing logic because there could be bucket name having some digit - # # bucket details in snapshot : Bucket_name1,RamSize1:Bucket_name2,RamSize2: - # bucket_name = re.sub(',[0-9]*:', ':', bucket_list_and_size) - # bucket_name = bucket_name.strip(':') - bucket_name = helper_lib.filter_bucket_name_from_output(bucket_list_and_size) + bucket_name = helper_lib.filter_bucket_name_from_output( + bucket_list_and_size + ) return bucket_name def _find_bucket_size_byname(bucket_name, bucket_metadata): data_found = 0 - for bkt in bucket_metadata.split(':'): - if bkt.split(',')[0] == bucket_name: + for bkt in bucket_metadata.split(":"): + if bkt.split(",")[0] == bucket_name: logger.debug("Bucket {} found in list".format(bucket_name)) data_found = 1 - bkt_size_mb = int(bkt.split(',')[1].strip()) // 1024 // 1024 + bkt_size_mb = int(bkt.split(",")[1].strip()) // 1024 // 1024 return bkt_size_mb if data_found == 0: # raise exception. Ideally this condition should never occur - raise Exception("Failed to find the bucket_name from bucket_metadata list") + raise Exception( + "Failed to find the bucket_name from bucket_metadata list" + ) def _build_indexes(provision_process, snapshot): @@ -527,4 +613,4 @@ def _build_indexes(provision_process, snapshot): for i in snapshot.indexes: logger.debug(i) - provision_process.build_index(i) \ No newline at end of file + provision_process.build_index(i) diff --git a/src/plugin_runner.py b/src/plugin_runner.py index bd73bad..c2637f7 100644 --- a/src/plugin_runner.py +++ b/src/plugin_runner.py @@ -1,31 +1,34 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. 
# # -from dlpx.virtualization.platform import Mount, MountSpecification, Plugin, Status -from dlpx.virtualization.platform import OwnershipSpecification -from operations import discovery, linked, virtual -from utils import setup_logger -from db_commands.constants import EVICTION_POLICY import logging -from dlpx.virtualization.common import RemoteEnvironment -from dlpx.virtualization.common import RemoteHost -from dlpx.virtualization.common import RemoteUser -from dlpx.virtualization.common import RemoteConnection + +from controller.couchbase_operation import CouchbaseOperation +from controller.helper_lib import check_server_is_used from controller.helper_lib import check_stale_mountpoint from controller.helper_lib import clean_stale_mountpoint -from controller.couchbase_operation import CouchbaseOperation from controller.resource_builder import Resource -from controller.helper_lib import check_server_is_used - - +from dlpx.virtualization.common import RemoteConnection +from dlpx.virtualization.common import RemoteEnvironment +from dlpx.virtualization.common import RemoteHost +from dlpx.virtualization.common import RemoteUser +from dlpx.virtualization.platform import Mount +from dlpx.virtualization.platform import MountSpecification +from dlpx.virtualization.platform import OwnershipSpecification +from dlpx.virtualization.platform import Plugin +from operations import discovery +from operations import linked +from operations import virtual +from utils import setup_logger plugin = Plugin() setup_logger._setup_logger() logger = logging.getLogger(__name__) + # # Below is an example of the repository discovery operation. # @@ -53,8 +56,15 @@ def source_config_discovery(source_connection, repository): @plugin.linked.post_snapshot() -def linked_post_snapshot(staged_source, repository, source_config, optional_snapshot_parameters): - return linked.post_snapshot(staged_source, repository, source_config,staged_source.parameters.d_source_type) +def linked_post_snapshot( + staged_source, repository, source_config, optional_snapshot_parameters +): + return linked.post_snapshot( + staged_source, + repository, + source_config, + staged_source.parameters.d_source_type, + ) @plugin.linked.mount_specification() @@ -63,7 +73,10 @@ def linked_mount_specification(staged_source, repository): if check_stale_mountpoint(staged_source.staged_connection, mount_path): cleanup_process = CouchbaseOperation( - Resource.ObjectBuilder.set_staged_source(staged_source).set_repository(repository).build()) + Resource.ObjectBuilder.set_staged_source(staged_source) + .set_repository(repository) + .build() + ) cleanup_process.stop_couchbase() clean_stale_mountpoint(staged_source.staged_connection, mount_path) @@ -73,23 +86,35 @@ def linked_mount_specification(staged_source, repository): linked.check_mount_path(staged_source, repository) logger.debug("Mounting path {}".format(mount_path)) mounts = [Mount(environment, mount_path)] - logger.debug("Setting ownership to uid {} and gid {}".format(repository.uid, repository.gid)) + logger.debug( + "Setting ownership to uid {} and gid {}".format( + repository.uid, + repository.gid, + ) + ) ownership_spec = OwnershipSpecification(repository.uid, repository.gid) return MountSpecification(mounts, ownership_spec) @plugin.linked.pre_snapshot() -def linked_pre_snapshot(staged_source, repository, source_config, optional_snapshot_parameters): - if optional_snapshot_parameters and int(optional_snapshot_parameters.resync) == 1: - linked.resync(staged_source, repository, source_config, 
staged_source.parameters) +def linked_pre_snapshot( + staged_source, repository, source_config, optional_snapshot_parameters +): + if optional_snapshot_parameters and optional_snapshot_parameters.resync: + linked.resync( + staged_source, repository, source_config, staged_source.parameters + ) else: - linked.pre_snapshot(staged_source, repository, source_config, staged_source.parameters) + linked.pre_snapshot( + staged_source, repository, source_config, staged_source.parameters + ) @plugin.linked.status() def linked_status(staged_source, repository, source_config): return linked.d_source_status(staged_source, repository, source_config) + @plugin.linked.stop_staging() def stop_staging(staged_source, repository, source_config): linked.stop_staging(staged_source, repository, source_config) @@ -100,7 +125,6 @@ def start_staging(staged_source, repository, source_config): linked.start_staging(staged_source, repository, source_config) - @plugin.virtual.configure() def configure(virtual_source, snapshot, repository): return virtual.vdb_configure(virtual_source, snapshot, repository) @@ -108,7 +132,12 @@ def configure(virtual_source, snapshot, repository): @plugin.virtual.reconfigure() def reconfigure(virtual_source, repository, source_config, snapshot): - return virtual.vdb_reconfigure(virtual_source, repository, source_config, snapshot) + return virtual.vdb_reconfigure( + virtual_source, + repository, + source_config, + snapshot, + ) @plugin.virtual.pre_snapshot() @@ -137,7 +166,10 @@ def virtual_mount_specification(virtual_source, repository): if check_stale_mountpoint(virtual_source.connection, mount_path): cleanup_process = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).build()) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .build() + ) cleanup_process.stop_couchbase() clean_stale_mountpoint(virtual_source.connection, mount_path) @@ -145,42 +177,57 @@ def virtual_mount_specification(virtual_source, repository): mounts = [Mount(virtual_source.connection.environment, mount_path)] logger.debug("Mounting path {}".format(mount_path)) - logger.debug("Setting ownership to uid {} and gid {}".format(repository.uid, repository.gid)) + logger.debug( + "Setting ownership to uid {} and gid {}".format( + repository.uid, + repository.gid, + ) + ) ownership_spec = OwnershipSpecification(repository.uid, repository.gid) - logger.debug("in mounting: {}".format(str(virtual_source.parameters.node_list))) - + logger.debug( + "in mounting: {}".format( + str(virtual_source.parameters.node_list), + ) + ) - - if virtual_source.parameters.node_list is not None and len(virtual_source.parameters.node_list) > 0: + if ( + virtual_source.parameters.node_list is not None + and len(virtual_source.parameters.node_list) > 0 + ): # more nodes for m in virtual_source.parameters.node_list: logger.debug("in loop: {}".format(str(m))) - node_host = RemoteHost(name='foo', - reference=m["environment"].replace('_ENVIRONMENT', ''), - binary_path="", - scratch_path="" - ) - e = RemoteEnvironment("foo", m["environment"], node_host ) + node_host = RemoteHost( + name="foo", + reference=m["environment"].replace("_ENVIRONMENT", ""), + binary_path="", + scratch_path="", + ) + e = RemoteEnvironment("foo", m["environment"], node_host) mount = Mount(e, mount_path) mounts.append(mount) - - user = RemoteUser(name="unused", reference=m['environmentUser']) - environment = RemoteEnvironment(name="unused", reference=m['environment'], host=node_host) - 
clean_node_conn = RemoteConnection(environment=environment, user=user) - - + user = RemoteUser(name="unused", reference=m["environmentUser"]) + environment = RemoteEnvironment( + name="unused", reference=m["environment"], host=node_host + ) + clean_node_conn = RemoteConnection( + environment=environment, + user=user, + ) if check_stale_mountpoint(clean_node_conn, mount_path): clean_node = CouchbaseOperation( - Resource.ObjectBuilder.set_virtual_source(virtual_source).set_repository(repository).build(), - clean_node_conn ) + Resource.ObjectBuilder.set_virtual_source(virtual_source) + .set_repository(repository) + .build(), + clean_node_conn, + ) clean_node.stop_couchbase() clean_stale_mountpoint(clean_node_conn, mount_path) - - check_server_is_used(clean_node_conn, mount_path) + check_server_is_used(clean_node_conn, mount_path) return MountSpecification(mounts, ownership_spec) @@ -199,18 +246,18 @@ def unconfigure(virtual_source, repository, source_config): @plugin.upgrade.virtual_source("2021.07.19") def add_node_to_virtual(old_virtual_source): - new_virt = dict(old_virtual_source) - new_virt["node_list"] = [] - return new_virt + new_virt = dict(old_virtual_source) + new_virt["node_list"] = [] + return new_virt @plugin.upgrade.virtual_source("2021.10.06") -def add_node_to_virtual(old_virtual_source): - logger.debug("Doing upgrade to node_addr") - new_virt = dict(old_virtual_source) - logger.debug(new_virt) - for i in new_virt["node_list"]: - i["node_addr"] = "" - logger.debug("After changes") - logger.debug(new_virt) - return new_virt +def add_node_to_virtual1(old_virtual_source): + logger.debug("Doing upgrade to node_addr") + new_virt = dict(old_virtual_source) + logger.debug(new_virt) + for i in new_virt["node_list"]: + i["node_addr"] = "" + logger.debug("After changes") + logger.debug(new_virt) + return new_virt diff --git a/src/utils/setup_logger.py b/src/utils/setup_logger.py index 6a223ac..90720f2 100644 --- a/src/utils/setup_logger.py +++ b/src/utils/setup_logger.py @@ -1,27 +1,35 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # import logging + from dlpx.virtualization import libs class Logger: - """ + """ """ - """ _logger = None def __get_mode(self, mode): return eval("logging." + mode) - def __init__(self,name, mode="DEBUG", formatter='[%(asctime)s] [%(levelname)-10s] [%(filename)-15s:%(lineno)2d] %(message)s'): + def __init__( + self, + name, + mode="DEBUG", + formatter="[%(asctime)s] [%(levelname)-10s] " + "[%(filename)-15s:%(lineno)2d] %(message)s", + ): if Logger._logger is None: vsdkHandler = libs.PlatformHandler() vsdkHandler.setLevel(self.__get_mode(mode)) - vsdkFormatter = logging.Formatter(formatter, - datefmt="%Y-%m-%d %H:%M:%S") + vsdkFormatter = logging.Formatter( + formatter, + datefmt="%Y-%m-%d %H:%M:%S", + ) vsdkHandler.setFormatter(vsdkFormatter) logger = logging.getLogger(name) logger.addHandler(vsdkHandler) @@ -31,12 +39,18 @@ def __init__(self,name, mode="DEBUG", formatter='[%(asctime)s] [%(levelname)-10s def get_logger(self): return Logger._logger + def _setup_logger(): - log_message_format = '[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s' - log_message_date_format = '%Y-%m-%d %H:%M:%S' + log_message_format = ( + "[%(asctime)s] [%(levelname)s] [%(filename)s:%(lineno)d] %(message)s" + ) + log_message_date_format = "%Y-%m-%d %H:%M:%S" # Create a custom formatter. This will help in diagnose the problem. 
- formatter = logging.Formatter(log_message_format, datefmt=log_message_date_format) + formatter = logging.Formatter( + log_message_format, + datefmt=log_message_date_format, + ) platform_handler = libs.PlatformHandler() platform_handler.setFormatter(formatter) @@ -45,4 +59,4 @@ logger.addHandler(platform_handler) # By default the root logger's level is logging.WARNING. - logger.setLevel(logging.DEBUG) \ No newline at end of file + logger.setLevel(logging.DEBUG) diff --git a/src/utils/utilities.py b/src/utils/utilities.py index 2e93849..eb353fa 100644 --- a/src/utils/utilities.py +++ b/src/utils/utilities.py @@ -1,48 +1,62 @@ # -# Copyright (c) 2020 by Delphix. All rights reserved. +# Copyright (c) 2020-2023 by Delphix. All rights reserved. # import logging +import random from dlpx.virtualization import libs from dlpx.virtualization.libs import exceptions -import random # logger object logger = logging.getLogger(__name__) -def execute_bash(source_connection, command_name, callback_func=None, environment_vars=None): +def execute_bash( + source_connection, command_name, callback_func=None, environment_vars=None +): """ :param callback_func: :param source_connection: Connection object for the source environment :param command_name: Command to be search from dictionary of bash command - :param environment_vars: Expecting environment variables which are required to execute the command + :param environment_vars: Expecting environment variables which are required + to execute the command :return: list of output of command, error string, exit code """ if source_connection is None: raise exceptions.PluginScriptError("Connection object cannot be empty") - result = libs.run_bash(source_connection, command=command_name, variables=environment_vars, use_login_shell=True) + result = libs.run_bash( + source_connection, + command=command_name, + variables=environment_vars, + use_login_shell=True, + ) - # strip the each part of result to remove spaces from beginning and last of output + # strip each part of the result to remove spaces from the + # beginning and end of the output output = result.stdout.strip() error = result.stderr.strip() exit_code = result.exit_code - # Verify the exit code of each executed command. 0 means command ran successfully and for other code it is failed. - # For failed cases we need to find the scenario in which programs will die and otherwise execution will continue. - #_handle_exit_code(exit_code, error, output, callback_func) + # Verify the exit code of each executed command. 0 means the command + # ran successfully; any other exit code means it failed. + # For failed cases we need to find the scenarios in which the program + # should die; otherwise execution will continue.
+ # _handle_exit_code(exit_code, error, output, callback_func) return [output, error, exit_code] -def execute_expect(source_connection, command_name, callback_func=None, environment_vars=None): +def execute_expect( + source_connection, command_name, callback_func=None, environment_vars=None +): """ :param callback_func: :param source_connection: Connection object for the source environment :param command_name: Command to be search from dictionary of bash command - :param environment_vars: Expecting environment variables which are required to execute the command + :param environment_vars: Expecting environment variables which are + required to execute the command :return: list of output of command, error string, exit code """ @@ -52,12 +66,14 @@ def execute_expect(source_connection, command_name, callback_func=None, environm file_random_id = random.randint(1000000000, 9999999999) if "SHELL_DATA" in environment_vars: - environment_vars["CB_CMD"] = environment_vars["CB_CMD"].replace(".sh", f"_{file_random_id}.sh") + environment_vars["CB_CMD"] = environment_vars["CB_CMD"].replace( + ".sh", f"_{file_random_id}.sh" + ) result = libs.run_bash( source_connection, - command=f'echo -e "$SHELL_DATA" > $CB_CMD', + command='echo -e "$SHELL_DATA" > $CB_CMD', use_login_shell=True, - variables=environment_vars + variables=environment_vars, ) output = result.stdout.strip() error = result.stderr.strip() @@ -68,9 +84,9 @@ def execute_expect(source_connection, command_name, callback_func=None, environm logger.debug(f"dump_exit_code==={exit_code}") result = libs.run_bash( source_connection, - command=f"chmod +x $CB_CMD", + command="chmod +x $CB_CMD", use_login_shell=True, - variables=environment_vars + variables=environment_vars, ) output = result.stdout.strip() error = result.stderr.strip() @@ -85,7 +101,7 @@ def execute_expect(source_connection, command_name, callback_func=None, environm result = libs.run_bash( source_connection, command=f"echo -e '{command_name}' > {file_path}", - use_login_shell=True + use_login_shell=True, ) output = result.stdout.strip() error = result.stderr.strip() @@ -99,10 +115,11 @@ def execute_expect(source_connection, command_name, callback_func=None, environm source_connection, command=f"/usr/bin/expect -f {file_path}", variables=environment_vars, - use_login_shell=True + use_login_shell=True, ) - # strip the each part of result to remove spaces from beginning and last of output + # strip the each part of result to remove spaces from beginning and + # last of output output = result.stdout.strip() error = result.stderr.strip() exit_code = result.exit_code @@ -112,21 +129,23 @@ def execute_expect(source_connection, command_name, callback_func=None, environm logger.debug(f"expect_exit_code==={exit_code}") libs.run_bash( - source_connection, - command=f"rm -rf {file_path}", - use_login_shell=True + source_connection, command=f"rm -rf {file_path}", use_login_shell=True ) if "SHELL_DATA" in environment_vars: libs.run_bash( - source_connection, - command=f"rm -rf $CB_CMD", - use_login_shell=True + source_connection, command="rm -rf $CB_CMD", use_login_shell=True ) if "DLPX_EXPECT_EXIT_CODE" in output: - exit_code = int(output.split("DLPX_EXPECT_EXIT_CODE:")[1].split("\n")[0]) + exit_code = int( + output.split("DLPX_EXPECT_EXIT_CODE:")[1].split("\n")[0] + ) if "\n" in output: - msg = output.split("DLPX_EXPECT_EXIT_CODE:")[1].split("\n", 1)[1].strip() + msg = ( + output.split("DLPX_EXPECT_EXIT_CODE:")[1] + .split("\n", 1)[1] + .strip() + ) else: msg = "" if exit_code != 0: @@ -135,35 
+154,46 @@ def execute_expect(source_connection, command_name, callback_func=None, environm output = msg if "cbq>" in output and output.rsplit("\n", 1)[1].strip() == "cbq>": - output = output.rsplit("\n", 1)[0] + output = output.rsplit("\n", 1)[0] logger.debug(f"final_output==={output}") logger.debug(f"final_error==={error}") logger.debug(f"final_exit_code==={exit_code}") - # Verify the exit code of each executed command. 0 means command ran successfully and for other code it is failed. - # For failed cases we need to find the scenario in which programs will die and otherwise execution will continue. - #_handle_exit_code(exit_code, error, output, callback_func) + # Verify the exit code of each executed command. 0 means command ran + # successfully and for other code it is failed. + # For failed cases we need to find the scenario in which programs + # will die and otherwise execution will continue. + # _handle_exit_code(exit_code, error, output, callback_func) return [output, error, exit_code] -def _handle_exit_code(exit_code, std_err=None, std_output=None, callback_func=None): - if exit_code == 0: - return - - else: - # Call back function which contains logic to skip the error and continue to throw +def _handle_exit_code( + exit_code, std_err=None, std_output=None, callback_func=None +): + if exit_code != 0: + # Call back function which contains logic to skip the error and + # continue to throw if callback_func: - logger.debug("Executing call back. Seems some exception is observed. Validating last error...") + logger.debug( + "Executing call back. Seems some exception is observed. " + "Validating last error..." + ) try: result_of_match = callback_func(std_output) - logger.debug("Call back result is : {}".format(result_of_match)) + logger.debug( + "Call back result is : {}".format(result_of_match) + ) if result_of_match: return True except Exception as err: - logger.debug("Failed to execute call back function with error: {}".format(str(err))) - - error_details = std_output - if error_details is None or error_details == "": - error_details = std_err - raise Exception(error_details) - + logger.debug( + "Failed to execute call back function with " + "error: {}".format(str(err)) + ) + + error_details = std_output + if error_details is None or error_details == "": + error_details = std_err + raise Exception(error_details) + else: + return False
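configure_cluster (linking.py) and vdb_reconfigure (virtual.py) above share a poll-with-deadline shape: compute end_time = time.time() + 3660 once, then sleep one second and re-read the status until the server reports ACTIVE or the deadline passes. A minimal self-contained sketch of that pattern follows; wait_for_active, get_status, and this Status enum are illustrative stand-ins for the plugin's staging_bootstrap_status()/status() helpers, not its actual API.

import time
from enum import Enum


class Status(Enum):
    ACTIVE = "ACTIVE"
    INACTIVE = "INACTIVE"


def wait_for_active(get_status, timeout_seconds=3660, poll_interval=1):
    """Poll get_status() until it returns Status.ACTIVE or time runs out."""
    # compute one absolute deadline so a slow status probe cannot
    # stretch the overall wait, then sleep and re-check in a loop
    end_time = time.time() + timeout_seconds
    status = Status.INACTIVE
    while time.time() < end_time and status != Status.ACTIVE:
        time.sleep(poll_interval)  # waiting between probes
        status = get_status()  # fetching status
    return status == Status.ACTIVE

Computing the deadline once, rather than counting iterations, keeps the total wait bounded even when individual status calls are slow.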
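The comments removed from _find_bucket_name_from_snapshot describe the snapshot bucket metadata layout that _find_bucket_size_byname still parses: colon-separated name,size pairs such as "Bucket1,122:Bucket2,3432", with the size floored to whole MB by integer division. A standalone sketch of that parsing follows; find_bucket_size_mb and the sample metadata string are invented for illustration.

def find_bucket_size_mb(bucket_name, bucket_metadata):
    """Return the RAM quota in MB for bucket_name.

    bucket_metadata is colon-separated "name,size" pairs; the code
    above treats the stored size as bytes and floors it to MB.
    """
    for entry in bucket_metadata.split(":"):
        name, _, size = entry.partition(",")
        if name == bucket_name:
            return int(size.strip()) // 1024 // 1024
    # ideally this condition should never occur
    raise Exception(
        "Failed to find the bucket_name from bucket_metadata list"
    )


# hypothetical usage with a made-up metadata string
meta = "beer-sample,104857600:travel-sample,209715200"
assert find_bucket_size_mb("beer-sample", meta) == 100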
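execute_expect above recovers the real exit status of the generated expect script by scanning stdout for the DLPX_EXPECT_EXIT_CODE: sentinel, since the expect wrapper itself can exit 0 even when the wrapped command failed. A reduced sketch of just that parsing step follows; parse_expect_output and the sample transcript are assumptions for illustration, not the plugin's function.

def parse_expect_output(output):
    """Split an expect transcript into (exit_code, message)."""
    exit_code = 0
    msg = output
    if "DLPX_EXPECT_EXIT_CODE" in output:
        tail = output.split("DLPX_EXPECT_EXIT_CODE:")[1]
        # the first line after the sentinel carries the exit code;
        # the remainder, if any, is the command's real output
        exit_code = int(tail.split("\n")[0])
        msg = tail.split("\n", 1)[1].strip() if "\n" in tail else ""
    return exit_code, msg


# illustrative transcript, not real plugin output
code, msg = parse_expect_output("DLPX_EXPECT_EXIT_CODE:1\nauth failure")
assert (code, msg) == (1, "auth failure")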
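_handle_exit_code treats callback_func as an escape hatch: the callback receives the failed command's stdout, and a truthy return marks the failure as expected, so the helper returns instead of raising. A sketch of one conforming callback follows; the "already exists" pattern is an invented example of an ignorable error, not something the plugin ships.

def ignore_already_exists(std_output):
    """Example callback_func for _handle_exit_code.

    Returning True tells _handle_exit_code to swallow the error,
    e.g. so re-running an idempotent create command does not abort
    the job. The pattern matched here is purely illustrative.
    """
    return bool(std_output) and "already exists" in std_output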