From f7ba9da92ae49f8c191877c1c17318c24c74600c Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Mon, 2 Jan 2023 01:57:11 +0000 Subject: [PATCH 01/85] updating DIRECTORY.md --- DIRECTORY.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 3437df12cbf5..5ce9dca74c06 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -123,6 +123,7 @@ * [Huffman](compression/huffman.py) * [Lempel Ziv](compression/lempel_ziv.py) * [Lempel Ziv Decompress](compression/lempel_ziv_decompress.py) + * [Lz77](compression/lz77.py) * [Peak Signal To Noise Ratio](compression/peak_signal_to_noise_ratio.py) * [Run Length Encoding](compression/run_length_encoding.py) @@ -1162,7 +1163,7 @@ * [Get Amazon Product Data](web_programming/get_amazon_product_data.py) * [Get Imdb Top 250 Movies Csv](web_programming/get_imdb_top_250_movies_csv.py) * [Get Imdbtop](web_programming/get_imdbtop.py) - * [Get Top Billioners](web_programming/get_top_billioners.py) + * [Get Top Billionaires](web_programming/get_top_billionaires.py) * [Get Top Hn Posts](web_programming/get_top_hn_posts.py) * [Get User Tweets](web_programming/get_user_tweets.py) * [Giphy](web_programming/giphy.py) From 9d1971b11f736898b1ff2112aa0de470977224c0 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Thu, 12 Jan 2023 15:25:40 +0000 Subject: [PATCH 02/85] updating DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 5ce9dca74c06..31e86ea59b79 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -557,6 +557,7 @@ * [Gamma Recursive](maths/gamma_recursive.py) * [Gaussian](maths/gaussian.py) * [Gaussian Error Linear Unit](maths/gaussian_error_linear_unit.py) + * [Gcd Of N Numbers](maths/gcd_of_n_numbers.py) * [Greatest Common Divisor](maths/greatest_common_divisor.py) * [Greedy Coin Change](maths/greedy_coin_change.py) * [Hamming 
Numbers](maths/hamming_numbers.py) From 5f404b482ffe5f6ac26e87ea18ed74be042be7c0 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Thu, 26 Jan 2023 07:26:59 +0000 Subject: [PATCH 03/85] updating DIRECTORY.md --- DIRECTORY.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 31e86ea59b79..a8786cc2591f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -543,8 +543,7 @@ * [Euler Modified](maths/euler_modified.py) * [Eulers Totient](maths/eulers_totient.py) * [Extended Euclidean Algorithm](maths/extended_euclidean_algorithm.py) - * [Factorial Iterative](maths/factorial_iterative.py) - * [Factorial Recursive](maths/factorial_recursive.py) + * [Factorial](maths/factorial.py) * [Factors](maths/factors.py) * [Fermat Little Theorem](maths/fermat_little_theorem.py) * [Fibonacci](maths/fibonacci.py) From f70ee939711c1dc87ad0920711f9ef2a3cf9ece2 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Sat, 1 Apr 2023 05:35:12 +0000 Subject: [PATCH 04/85] updating DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1d3177801a2c..4225889bc05d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -317,6 +317,7 @@ * [Longest Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) + * [Max Product Subarray](dynamic_programming/max_product_subarray.py) * [Max Sub Array](dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) From 7a762ea28e3ea806352bdc0856463f27fc7f5b16 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Sat, 1 Apr 2023 16:49:14 +0000 Subject: [PATCH 05/85] updating 
DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index c781b17bf05f..588d0b1e542e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -715,6 +715,7 @@ * [Archimedes Principle](physics/archimedes_principle.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) + * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) From af214d77b99d968f6eb00c0b925615baaa7f620b Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Mon, 3 Apr 2023 15:23:10 +0000 Subject: [PATCH 06/85] updating DIRECTORY.md --- DIRECTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 8dd3fb5d9af1..3764c471ce70 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -922,6 +922,8 @@ * [Sol1](project_euler/problem_077/sol1.py) * Problem 078 * [Sol1](project_euler/problem_078/sol1.py) + * Problem 079 + * [Sol1](project_euler/problem_079/sol1.py) * Problem 080 * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 From 0b2b7dbeb49fda2480a7954ec71d0f855dda4dca Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Wed, 10 May 2023 19:53:48 +0000 Subject: [PATCH 07/85] updating DIRECTORY.md --- DIRECTORY.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index a70ad6861d6f..df239f9c0003 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -632,6 +632,7 @@ * [Radians](maths/radians.py) * [Radix2 Fft](maths/radix2_fft.py) * [Relu](maths/relu.py) + * [Remove Digit](maths/remove_digit.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series @@ -694,6 +695,8 @@ ## Neural Network * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) + * Activation 
Functions + * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) @@ -1080,6 +1083,7 @@ ## Sorts * [Bead Sort](sorts/bead_sort.py) + * [Binary Insertion Sort](sorts/binary_insertion_sort.py) * [Bitonic Sort](sorts/bitonic_sort.py) * [Bogo Sort](sorts/bogo_sort.py) * [Bubble Sort](sorts/bubble_sort.py) @@ -1170,6 +1174,7 @@ * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) + * [String Switch Case](strings/string_switch_case.py) * [Text Justification](strings/text_justification.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) From 7065489a70dba54801219e0e058ff2475661fa88 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 16 May 2023 00:40:59 -0700 Subject: [PATCH 08/85] Fix strassen_matrix_multiplication.py type error --- ...ltiplication.py.BROKEN => strassen_matrix_multiplication.py} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename divide_and_conquer/{strassen_matrix_multiplication.py.BROKEN => strassen_matrix_multiplication.py} (99%) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN b/divide_and_conquer/strassen_matrix_multiplication.py similarity index 99% rename from divide_and_conquer/strassen_matrix_multiplication.py.BROKEN rename to divide_and_conquer/strassen_matrix_multiplication.py index 2ca91c63bf4c..cfbbe7746cb4 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -122,7 +122,7 @@ def strassen(matrix1: list, matrix2: list) -> list: if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(dimension1, 
dimension2) + maximum = max(*dimension1, *dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 From d09855bd01dac33a944d57c53aeca95ae1ab4359 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Tue, 16 May 2023 07:47:32 +0000 Subject: [PATCH 09/85] updating DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index fc6cbaf7ff41..df239f9c0003 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,6 +294,7 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) + * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) From b3ba57ebc712a8d6a92bb492d01216d48d2f0762 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Wed, 17 May 2023 03:49:28 +0000 Subject: [PATCH 10/85] updating DIRECTORY.md --- DIRECTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 27057988b5c7..9bd75d669729 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -606,6 +606,7 @@ * [Newton Raphson](maths/newton_raphson.py) * [Number Of Digits](maths/number_of_digits.py) * [Numerical Integration](maths/numerical_integration.py) + * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) @@ -713,6 +714,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [Guess The Number Search](other/guess_the_number_search.py) * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) From 257c4a9e2a22dd4a4bd22999812688976cf1f610 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 16 May 
2023 21:03:29 -0700 Subject: [PATCH 11/85] Fix ruff error in guess_the_number_search.py --- other/guess_the_number_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py index 0439223f2ec9..677c4095bc01 100644 --- a/other/guess_the_number_search.py +++ b/other/guess_the_number_search.py @@ -148,7 +148,7 @@ def answer(number: int) -> str: break print(f"guess the number : {last_numbers[-1]}") - print(f"details : {str(last_numbers)}") + print(f"details : {last_numbers}") def main() -> None: From 78dafd82e9541cd2265f7af649aff86456bebf4b Mon Sep 17 00:00:00 2001 From: Genesis <128913081+KaixLina@users.noreply.github.com> Date: Sun, 26 Mar 2023 20:49:18 +0530 Subject: [PATCH 12/85] New gitter link added or replaced (#8551) * New gitter link added * ruff==0.0.258 * noqa: S310 * noqa: S310 * Update ruff.yml * Add Ruff rule S311 * Ruff v0.0.259 * return ("{:08x}" * 5).format(*self.h) * pickle.load(f) # noqa: S301 --------- Co-authored-by: Christian Clauss --- .github/stale.yml | 4 ++-- .pre-commit-config.yaml | 2 +- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- hashes/sha1.py | 2 +- machine_learning/sequential_minimum_optimization.py | 2 +- neural_network/convolution_neural_network.py | 2 +- project_euler/README.md | 2 +- pyproject.toml | 1 + web_programming/download_images_from_google_query.py | 2 +- 10 files changed, 13 insertions(+), 12 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 36ca56266b26..813f688348d8 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ pulls: closeComment: > Please reopen this pull request once you commit the changes requested or make improvements on the code. 
If this is not the case and you need - some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms) + some help, feel free to seek help from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the reviewers. Thank you for your contributions! issues: @@ -59,5 +59,5 @@ issues: closeComment: > Please reopen this issue once you add more information and updates here. If this is not the case and you need some help, feel free to seek help - from our [Gitter](https://gitter.im/TheAlgorithms) or ping one of the + from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the reviewers. Thank you for your contributions! diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 58cec4ff6ee6..72a878387e15 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.257 + rev: v0.0.259 hooks: - id: ruff diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6b6e4d21bfc7..75e4fb893723 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. 
If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). ## Contributing @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. diff --git a/README.md b/README.md index 68a6e5e6fbce..3d2f1a110780 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Discord chat - + Gitter chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im)! Community channels are a great way for you to ask questions and get help. Please join us! 
## List of Algorithms diff --git a/hashes/sha1.py b/hashes/sha1.py index 9f0437f208fa..b325ce3e43bb 100644 --- a/hashes/sha1.py +++ b/hashes/sha1.py @@ -124,7 +124,7 @@ def final_hash(self): self.h[3] + d & 0xFFFFFFFF, self.h[4] + e & 0xFFFFFFFF, ) - return "%08x%08x%08x%08x%08x" % tuple(self.h) + return ("{:08x}" * 5).format(*self.h) def test_sha1_hash(): diff --git a/machine_learning/sequential_minimum_optimization.py b/machine_learning/sequential_minimum_optimization.py index b68bd52f4de9..b24f5669e2e8 100644 --- a/machine_learning/sequential_minimum_optimization.py +++ b/machine_learning/sequential_minimum_optimization.py @@ -458,7 +458,7 @@ def test_cancel_data(): CANCER_DATASET_URL, headers={"User-Agent": "Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)"}, ) - response = urllib.request.urlopen(request) + response = urllib.request.urlopen(request) # noqa: S310 content = response.read().decode("utf-8") with open(r"cancel_data.csv", "w") as f: f.write(content) diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index bd0550212157..f5ec156f3593 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -77,7 +77,7 @@ def save_model(self, save_path): def read_model(cls, model_path): # read saved model with open(model_path, "rb") as f: - model_dic = pickle.load(f) + model_dic = pickle.load(f) # noqa: S301 conv_get = model_dic.get("conv1") conv_get.append(model_dic.get("step_conv1")) diff --git a/project_euler/README.md b/project_euler/README.md index e3dc035eee5e..4832d0078ebf 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -10,7 +10,7 @@ The solutions will be checked by our [automated testing on GitHub Actions](https ## Solution Guidelines -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! 
Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. 
### Coding Style diff --git a/pyproject.toml b/pyproject.toml index 169c3a71ba6c..23fe45e97d20 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,6 +34,7 @@ ignore = [ # `ruff rule S101` for a description of that rule "S101", # S101: Use of `assert` detected -- DO NOT FIX "S105", # S105: Possible hardcoded password: 'password' "S113", # S113: Probable use of requests call without timeout + "S311", # S311: Standard pseudo-random generators are not suitable for cryptographic purposes "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] select = [ # https://beta.ruff.rs/docs/rules diff --git a/web_programming/download_images_from_google_query.py b/web_programming/download_images_from_google_query.py index 9c0c21dc804e..441347459f8e 100644 --- a/web_programming/download_images_from_google_query.py +++ b/web_programming/download_images_from_google_query.py @@ -86,7 +86,7 @@ def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) path_name = f"query_{query.replace(' ', '_')}" if not os.path.exists(path_name): os.makedirs(path_name) - urllib.request.urlretrieve( + urllib.request.urlretrieve( # noqa: S310 original_size_img, f"{path_name}/original_size_img_{index}.jpg" ) return index From 9cbe86a57c47a0565afc448bc6beda303eac3874 Mon Sep 17 00:00:00 2001 From: Christian Veenhuis Date: Sun, 26 Mar 2023 18:20:47 +0200 Subject: [PATCH 13/85] Fix broken links to Gitter Community (Fixes: #8197) (#8546) Co-authored-by: Christian Clauss --- .github/stale.yml | 4 ++-- CONTRIBUTING.md | 4 ++-- README.md | 4 ++-- project_euler/README.md | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/stale.yml b/.github/stale.yml index 813f688348d8..0939e1f223ff 100644 --- a/.github/stale.yml +++ b/.github/stale.yml @@ -45,7 +45,7 @@ pulls: closeComment: > Please reopen this pull request once you commit the changes requested or make improvements on the code. 
If this is not the case and you need - some help, feel free to seek help from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) + some help, feel free to seek help from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the reviewers. Thank you for your contributions! issues: @@ -59,5 +59,5 @@ issues: closeComment: > Please reopen this issue once you add more information and updates here. If this is not the case and you need some help, feel free to seek help - from our [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) or ping one of the + from our [Gitter](https://gitter.im/TheAlgorithms/community) or ping one of the reviewers. Thank you for your contributions! diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 75e4fb893723..2bb0c2e39eee 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -2,7 +2,7 @@ ## Before contributing -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before sending your pull requests, make sure that you __read the whole guidelines__. If you have any doubt on the contributing guide, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). 
## Contributing @@ -176,7 +176,7 @@ We want your work to be readable by others; therefore, we encourage you to note - Most importantly, - __Be consistent in the use of these guidelines when submitting.__ - - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im) __now!__ + - __Join__ us on [Discord](https://discord.com/invite/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community) __now!__ - Happy coding! Writer [@poyea](https://github.com/poyea), Jun 2019. diff --git a/README.md b/README.md index 3d2f1a110780..bf6e0ed3cf75 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Discord chat - + Gitter chat @@ -42,7 +42,7 @@ Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribut ## Community Channels -We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im)! Community channels are a great way for you to ask questions and get help. Please join us! +We are on [Discord](https://discord.gg/c7MnfGFGa6) and [Gitter](https://gitter.im/TheAlgorithms/community)! Community channels are a great way for you to ask questions and get help. Please join us! ## List of Algorithms diff --git a/project_euler/README.md b/project_euler/README.md index 4832d0078ebf..16865edf2a67 100644 --- a/project_euler/README.md +++ b/project_euler/README.md @@ -10,7 +10,7 @@ The solutions will be checked by our [automated testing on GitHub Actions](https ## Solution Guidelines -Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. 
If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://app.gitter.im/#/room/#TheAlgorithms_community:gitter.im). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. +Welcome to [TheAlgorithms/Python](https://github.com/TheAlgorithms/Python)! Before reading the solution guidelines, make sure you read the whole [Contributing Guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) as it won't be repeated in here. If you have any doubt on the guidelines, please feel free to [state it clearly in an issue](https://github.com/TheAlgorithms/Python/issues/new) or ask the community in [Gitter](https://gitter.im/TheAlgorithms/community). You can use the [template](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#solution-template) we have provided below as your starting point but be sure to read the [Coding Style](https://github.com/TheAlgorithms/Python/blob/master/project_euler/README.md#coding-style) part first. 
### Coding Style From 40fed655e6d73e620c2706b2995482fbd0c5b4a9 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Wed, 29 Mar 2023 00:41:54 +0300 Subject: [PATCH 14/85] Reduce the complexity of graphs/bi_directional_dijkstra.py (#8165) * Reduce the complexity of graphs/bi_directional_dijkstra.py * Try to lower the --max-complexity threshold in the file .flake8 * Lower the --max-complexity threshold in the file .flake8 * updating DIRECTORY.md * updating DIRECTORY.md * Try to lower max-complexity * Try to lower max-complexity * Try to lower max-complexity --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- graphs/bi_directional_dijkstra.py | 95 +++++++++++++++++-------------- pyproject.toml | 2 +- 2 files changed, 53 insertions(+), 44 deletions(-) diff --git a/graphs/bi_directional_dijkstra.py b/graphs/bi_directional_dijkstra.py index fc53e2f0d8f3..a4489026be80 100644 --- a/graphs/bi_directional_dijkstra.py +++ b/graphs/bi_directional_dijkstra.py @@ -17,6 +17,32 @@ import numpy as np +def pass_and_relaxation( + graph: dict, + v: str, + visited_forward: set, + visited_backward: set, + cst_fwd: dict, + cst_bwd: dict, + queue: PriorityQueue, + parent: dict, + shortest_distance: float | int, +) -> float | int: + for nxt, d in graph[v]: + if nxt in visited_forward: + continue + old_cost_f = cst_fwd.get(nxt, np.inf) + new_cost_f = cst_fwd[v] + d + if new_cost_f < old_cost_f: + queue.put((new_cost_f, nxt)) + cst_fwd[nxt] = new_cost_f + parent[nxt] = v + if nxt in visited_backward: + if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance: + shortest_distance = cst_fwd[v] + d + cst_bwd[nxt] + return shortest_distance + + def bidirectional_dij( source: str, destination: str, graph_forward: dict, graph_backward: dict ) -> int: @@ -51,53 +77,36 @@ def bidirectional_dij( if source == destination: return 0 - while queue_forward and queue_backward: - while not queue_forward.empty(): - _, v_fwd = queue_forward.get() - - if v_fwd not in 
visited_forward: - break - else: - break + while not queue_forward.empty() and not queue_backward.empty(): + _, v_fwd = queue_forward.get() visited_forward.add(v_fwd) - while not queue_backward.empty(): - _, v_bwd = queue_backward.get() - - if v_bwd not in visited_backward: - break - else: - break + _, v_bwd = queue_backward.get() visited_backward.add(v_bwd) - # forward pass and relaxation - for nxt_fwd, d_forward in graph_forward[v_fwd]: - if nxt_fwd in visited_forward: - continue - old_cost_f = cst_fwd.get(nxt_fwd, np.inf) - new_cost_f = cst_fwd[v_fwd] + d_forward - if new_cost_f < old_cost_f: - queue_forward.put((new_cost_f, nxt_fwd)) - cst_fwd[nxt_fwd] = new_cost_f - parent_forward[nxt_fwd] = v_fwd - if nxt_fwd in visited_backward: - if cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] < shortest_distance: - shortest_distance = cst_fwd[v_fwd] + d_forward + cst_bwd[nxt_fwd] - - # backward pass and relaxation - for nxt_bwd, d_backward in graph_backward[v_bwd]: - if nxt_bwd in visited_backward: - continue - old_cost_b = cst_bwd.get(nxt_bwd, np.inf) - new_cost_b = cst_bwd[v_bwd] + d_backward - if new_cost_b < old_cost_b: - queue_backward.put((new_cost_b, nxt_bwd)) - cst_bwd[nxt_bwd] = new_cost_b - parent_backward[nxt_bwd] = v_bwd - - if nxt_bwd in visited_forward: - if cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] < shortest_distance: - shortest_distance = cst_bwd[v_bwd] + d_backward + cst_fwd[nxt_bwd] + shortest_distance = pass_and_relaxation( + graph_forward, + v_fwd, + visited_forward, + visited_backward, + cst_fwd, + cst_bwd, + queue_forward, + parent_forward, + shortest_distance, + ) + + shortest_distance = pass_and_relaxation( + graph_backward, + v_bwd, + visited_backward, + visited_forward, + cst_bwd, + cst_fwd, + queue_backward, + parent_backward, + shortest_distance, + ) if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance: break diff --git a/pyproject.toml b/pyproject.toml index 23fe45e97d20..48c3fbd4009d 100644 --- a/pyproject.toml +++ b/pyproject.toml 
@@ -61,7 +61,7 @@ show-source = true target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE -max-complexity = 20 # default: 10 +max-complexity = 17 # default: 10 [tool.ruff.pylint] # DO NOT INCREASE THESE VALUES max-args = 10 # default: 5 From e61c6e2c42b3b897966733cf3ddf63ac9ae20502 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Thu, 30 Mar 2023 10:39:21 +0530 Subject: [PATCH 15/85] Update cnn_classification.py (#8570) --- computer_vision/cnn_classification.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/computer_vision/cnn_classification.py b/computer_vision/cnn_classification.py index 1c193fcbb50b..9b5f8c95eebf 100644 --- a/computer_vision/cnn_classification.py +++ b/computer_vision/cnn_classification.py @@ -93,7 +93,7 @@ test_image = tf.keras.preprocessing.image.img_to_array(test_image) test_image = np.expand_dims(test_image, axis=0) result = classifier.predict(test_image) - training_set.class_indices + # training_set.class_indices if result[0][0] == 0: prediction = "Normal" if result[0][0] == 1: From 58cb9a13c1871f3f3f7a59fbffa427a95ec78cff Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Fri, 31 Mar 2023 16:47:13 +0530 Subject: [PATCH 16/85] added a problem on kadane's algo and its solution. (#8569) * added kadane's algorithm directory with one problem's solution. 
* added type hints * Rename kaadne_algorithm/max_product_subarray.py to dynamic_programming/max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py * Update max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update max_product_subarray.py * Update max_product_subarray.py * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update dynamic_programming/max_product_subarray.py Co-authored-by: Christian Clauss * Update max_product_subarray.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- dynamic_programming/max_product_subarray.py | 53 +++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 dynamic_programming/max_product_subarray.py diff --git a/dynamic_programming/max_product_subarray.py b/dynamic_programming/max_product_subarray.py new file mode 100644 index 000000000000..425859bc03e3 --- /dev/null +++ b/dynamic_programming/max_product_subarray.py @@ -0,0 +1,53 @@ +def max_product_subarray(numbers: list[int]) -> int: + """ + Returns the maximum product that can be obtained by multiplying a + contiguous subarray of the given integer list `nums`. 
+ + Example: + >>> max_product_subarray([2, 3, -2, 4]) + 6 + >>> max_product_subarray((-2, 0, -1)) + 0 + >>> max_product_subarray([2, 3, -2, 4, -1]) + 48 + >>> max_product_subarray([-1]) + -1 + >>> max_product_subarray([0]) + 0 + >>> max_product_subarray([]) + 0 + >>> max_product_subarray("") + 0 + >>> max_product_subarray(None) + 0 + >>> max_product_subarray([2, 3, -2, 4.5, -1]) + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + >>> max_product_subarray("ABC") + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + """ + if not numbers: + return 0 + + if not isinstance(numbers, (list, tuple)) or not all( + isinstance(number, int) for number in numbers + ): + raise ValueError("numbers must be an iterable of integers") + + max_till_now = min_till_now = max_prod = numbers[0] + + for i in range(1, len(numbers)): + # update the maximum and minimum subarray products + number = numbers[i] + if number < 0: + max_till_now, min_till_now = min_till_now, max_till_now + max_till_now = max(number, max_till_now * number) + min_till_now = min(number, min_till_now * number) + + # update the maximum product found till now + max_prod = max(max_prod, max_till_now) + + return max_prod From 8dfc81d70f5379fbe131413c69a3b6ba8c22e952 Mon Sep 17 00:00:00 2001 From: NIKITA PANDEY <113332472+nikitapandeyy@users.noreply.github.com> Date: Fri, 31 Mar 2023 19:38:13 +0530 Subject: [PATCH 17/85] Update receive_file.py (#8541) * Update receive_file.py Here are the changes I made: Added the main() function and called it from if __name__ == "__main__" block. This makes it easier to test the code and import it into other programs. Added socket.AF_INET as the first argument to socket.socket(). This specifies the address family to be used, which is necessary when using connect(). Changed print(f"{data = }") to print("Received:", len(data), "bytes"). 
This makes it clearer what's happening and how much data is being received. Changed the final print statement to "Successfully received the file". This makes it more accurate and descriptive. Moved the import statement to the top of the file. This is a common convention in Python. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- file_transfer/receive_file.py | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/file_transfer/receive_file.py b/file_transfer/receive_file.py index 37a503036dc2..f50ad9fe1107 100644 --- a/file_transfer/receive_file.py +++ b/file_transfer/receive_file.py @@ -1,8 +1,9 @@ -if __name__ == "__main__": - import socket # Import socket module +import socket + - sock = socket.socket() # Create a socket object - host = socket.gethostname() # Get local machine name +def main(): + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + host = socket.gethostname() port = 12312 sock.connect((host, port)) @@ -13,11 +14,14 @@ print("Receiving data...") while True: data = sock.recv(1024) - print(f"{data = }") if not data: break - out_file.write(data) # Write data to a file + out_file.write(data) - print("Successfully got the file") + print("Successfully received the file") sock.close() print("Connection closed") + + +if __name__ == "__main__": + main() From 3c2ee83437e1396f2ec0b85b607a2bcbd9be1eb7 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 01:11:24 -0400 Subject: [PATCH 18/85] Fix `mypy` errors in `lu_decomposition.py` (attempt 2) (#8100) * updating DIRECTORY.md * Fix mypy errors in lu_decomposition.py * Replace for-loops with comprehensions * Add explanation of LU decomposition and extra doctests Add an explanation of LU decomposition with conditions for when an LU decomposition exists Add extra doctests to handle each of the possible 
conditions for when a decomposition exists/doesn't exist * updating DIRECTORY.md * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- arithmetic_analysis/lu_decomposition.py | 91 ++++++++++++++++++------- 1 file changed, 65 insertions(+), 26 deletions(-) diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 217719cf4da1..941c1dadf556 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -1,62 +1,101 @@ -"""Lower-Upper (LU) Decomposition. +""" +Lower–upper (LU) decomposition factors a matrix as a product of a lower +triangular matrix and an upper triangular matrix. A square matrix has an LU +decomposition under the following conditions: + - If the matrix is invertible, then it has an LU decomposition if and only + if all of its leading principal minors are non-zero (see + https://en.wikipedia.org/wiki/Minor_(linear_algebra) for an explanation of + leading principal minors of a matrix). + - If the matrix is singular (i.e., not invertible) and it has a rank of k + (i.e., it has k linearly independent columns), then it has an LU + decomposition if its first k leading principal minors are non-zero. + +This algorithm will simply attempt to perform LU decomposition on any square +matrix and raise an error if no such decomposition exists. 
-Reference: -- https://en.wikipedia.org/wiki/LU_decomposition +Reference: https://en.wikipedia.org/wiki/LU_decomposition """ from __future__ import annotations import numpy as np -from numpy import float64 -from numpy.typing import ArrayLike - -def lower_upper_decomposition( - table: ArrayLike[float64], -) -> tuple[ArrayLike[float64], ArrayLike[float64]]: - """Lower-Upper (LU) Decomposition - - Example: +def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """ + Perform LU decomposition on a given matrix and raises an error if the matrix + isn't square or if no such decomposition exists >>> matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]]) - >>> outcome = lower_upper_decomposition(matrix) - >>> outcome[0] + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat array([[1. , 0. , 0. ], [0. , 1. , 0. ], [2.5, 8. , 1. ]]) - >>> outcome[1] + >>> upper_mat array([[ 2. , -2. , 1. ], [ 0. , 1. , 2. ], [ 0. , 0. , -17.5]]) + >>> matrix = np.array([[4, 3], [6, 3]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat + array([[1. , 0. ], + [1.5, 1. ]]) + >>> upper_mat + array([[ 4. , 3. ], + [ 0. , -1.5]]) + + # Matrix is not square >>> matrix = np.array([[2, -2, 1], [0, 1, 2]]) - >>> lower_upper_decomposition(matrix) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) Traceback (most recent call last): ... ValueError: 'table' has to be of square shaped array but got a 2x3 array: [[ 2 -2 1] [ 0 1 2]] + + # Matrix is invertible, but its first leading principal minor is 0 + >>> matrix = np.array([[0, 1], [1, 0]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... 
+ ArithmeticError: No LU decomposition exists + + # Matrix is singular, but its first leading principal minor is 1 + >>> matrix = np.array([[1, 0], [1, 0]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + >>> lower_mat + array([[1., 0.], + [1., 1.]]) + >>> upper_mat + array([[1., 0.], + [0., 0.]]) + + # Matrix is singular, but its first leading principal minor is 0 + >>> matrix = np.array([[0, 1], [0, 1]]) + >>> lower_mat, upper_mat = lower_upper_decomposition(matrix) + Traceback (most recent call last): + ... + ArithmeticError: No LU decomposition exists """ - # Table that contains our data - # Table has to be a square array so we need to check first + # Ensure that table is a square array rows, columns = np.shape(table) if rows != columns: raise ValueError( - f"'table' has to be of square shaped array but got a {rows}x{columns} " - + f"array:\n{table}" + f"'table' has to be of square shaped array but got a " + f"{rows}x{columns} array:\n{table}" ) + lower = np.zeros((rows, columns)) upper = np.zeros((rows, columns)) for i in range(columns): for j in range(i): - total = 0 - for k in range(j): - total += lower[i][k] * upper[k][j] + total = sum(lower[i][k] * upper[k][j] for k in range(j)) + if upper[j][j] == 0: + raise ArithmeticError("No LU decomposition exists") lower[i][j] = (table[i][j] - total) / upper[j][j] lower[i][i] = 1 for j in range(i, columns): - total = 0 - for k in range(i): - total += lower[i][k] * upper[k][j] + total = sum(lower[i][k] * upper[k][j] for k in range(j)) upper[i][j] = table[i][j] - total return lower, upper From 74d105d31d154c635cd4ad7ee1fee8a5e28fdebe Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Sat, 1 Apr 2023 05:35:12 +0000 Subject: [PATCH 19/85] updating DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 1d3177801a2c..4225889bc05d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -317,6 +317,7 @@ * [Longest 
Sub Array](dynamic_programming/longest_sub_array.py) * [Matrix Chain Order](dynamic_programming/matrix_chain_order.py) * [Max Non Adjacent Sum](dynamic_programming/max_non_adjacent_sum.py) + * [Max Product Subarray](dynamic_programming/max_product_subarray.py) * [Max Sub Array](dynamic_programming/max_sub_array.py) * [Max Sum Contiguous Subsequence](dynamic_programming/max_sum_contiguous_subsequence.py) * [Min Distance Up Bottom](dynamic_programming/min_distance_up_bottom.py) From e1de3173ce9fc739365fa79a4b2f737cf7923c57 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 08:47:24 +0300 Subject: [PATCH 20/85] Add Project Euler problem 187 solution 1 (#8182) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_187/__init__.py | 0 project_euler/problem_187/sol1.py | 58 +++++++++++++++++++++++++++ 3 files changed, 60 insertions(+) create mode 100644 project_euler/problem_187/__init__.py create mode 100644 project_euler/problem_187/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 4225889bc05d..33c816fc4add 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -991,6 +991,8 @@ * [Sol1](project_euler/problem_174/sol1.py) * Problem 180 * [Sol1](project_euler/problem_180/sol1.py) + * Problem 187 + * [Sol1](project_euler/problem_187/sol1.py) * Problem 188 * [Sol1](project_euler/problem_188/sol1.py) * Problem 191 diff --git a/project_euler/problem_187/__init__.py b/project_euler/problem_187/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_187/sol1.py b/project_euler/problem_187/sol1.py new file mode 100644 index 000000000000..12f03e2a7023 --- /dev/null +++ b/project_euler/problem_187/sol1.py @@ -0,0 +1,58 @@ +""" +Project Euler Problem 187: https://projecteuler.net/problem=187 + +A composite is a number containing at least two prime factors. 
+For example, 15 = 3 x 5; 9 = 3 x 3; 12 = 2 x 2 x 3. + +There are ten composites below thirty containing precisely two, +not necessarily distinct, prime factors: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26. + +How many composite integers, n < 10^8, have precisely two, +not necessarily distinct, prime factors? +""" + +from math import isqrt + + +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + """ + + is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): + if is_prime[i]: + for j in range(i**2, max_number, i): + is_prime[j] = False + + return [i for i in range(2, max_number) if is_prime[i]] + + +def solution(max_number: int = 10**8) -> int: + """ + Returns the number of composite integers below max_number have precisely two, + not necessarily distinct, prime factors + + >>> solution(30) + 10 + """ + + prime_numbers = calculate_prime_numbers(max_number // 2) + + semiprimes_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left <= right: + while prime_numbers[left] * prime_numbers[right] >= max_number: + right -= 1 + semiprimes_count += right - left + 1 + left += 1 + + return semiprimes_count + + +if __name__ == "__main__": + print(f"{solution() = }") From 3c50847209165c0f64c6bfffb0cb2873258dfc2d Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 1 Apr 2023 09:26:43 +0300 Subject: [PATCH 21/85] change space complexity of linked list's __len__ from O(n) to O(1) (#8183) --- data_structures/linked_list/circular_linked_list.py | 2 +- data_structures/linked_list/doubly_linked_list.py | 2 +- data_structures/linked_list/merge_two_lists.py | 2 +- data_structures/linked_list/singly_linked_list.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 
67a63cd55e19..9092fb29e3ff 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -24,7 +24,7 @@ def __iter__(self) -> Iterator[Any]: break def __len__(self) -> int: - return len(tuple(iter(self))) + return sum(1 for _ in self) def __repr__(self): return "->".join(str(item) for item in iter(self)) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 6c81493fff85..41d07d63e005 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -51,7 +51,7 @@ def __len__(self): >>> len(linked_list) == 5 True """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def insert_at_head(self, data): self.insert_at_nth(0, data) diff --git a/data_structures/linked_list/merge_two_lists.py b/data_structures/linked_list/merge_two_lists.py index 61e2412aa7fd..ca0d3bb48540 100644 --- a/data_structures/linked_list/merge_two_lists.py +++ b/data_structures/linked_list/merge_two_lists.py @@ -44,7 +44,7 @@ def __len__(self) -> int: >>> len(SortedLinkedList(test_data_odd)) 8 """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def __str__(self) -> str: """ diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index bdeb5922ac67..a8f9e8ebb977 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -72,7 +72,7 @@ def __len__(self) -> int: >>> len(linked_list) 0 """ - return len(tuple(iter(self))) + return sum(1 for _ in self) def __repr__(self) -> str: """ From 70e96f3c9c1736a1423e47cf2a97f29ab07717e4 Mon Sep 17 00:00:00 2001 From: Dhruv Manilawala Date: Sat, 1 Apr 2023 11:59:26 +0530 Subject: [PATCH 22/85] chore: additional Project Euler solution hash (#8593) --- scripts/project_euler_answers.json | 109 ++++++++++++++++++++++++++++- 1 file changed, 
108 insertions(+), 1 deletion(-) diff --git a/scripts/project_euler_answers.json b/scripts/project_euler_answers.json index 6d354363ee5f..f2b876934766 100644 --- a/scripts/project_euler_answers.json +++ b/scripts/project_euler_answers.json @@ -723,5 +723,112 @@ "722": "9687101dfe209fd65f57a10603baa38ba83c9152e43a8b802b96f1e07f568e0e", "723": "74832787e7d4e0cb7991256c8f6d02775dffec0684de234786f25f898003f2de", "724": "fa05e2b497e7eafa64574017a4c45aadef6b163d907b03d63ba3f4021096d329", - "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418" + "725": "005c873563f51bbebfdb1f8dbc383259e9a98e506bc87ae8d8c9044b81fc6418", + "726": "93e41c533136bf4b436e493090fd4e7b277234db2a69c62a871f775ff26681bf", + "727": "c366f7426ca9351dcdde2e3bea01181897cda4d9b44977678ea3828419b84851", + "728": "8de62a644511d27c7c23c7722f56112b3c1ab9b05a078a98a0891f09f92464c6", + "729": "0ae82177174eef99fc80a2ec921295f61a6ac4dfed86a1bf333a50c26d01955c", + "730": "78cd876a176c8fbf7c2155b80dccbdededdbc43c28ef17b5a6e554d649325d38", + "731": "54afb9f829be51d29f90eecbfe40e5ba91f3a3bf538de62f3e34674af15eb542", + "732": "c4dc4610dcafc806b30e5d3f5560b57f462218a04397809843a7110838f0ebac", + "733": "bdde7d98d057d6a6ae360fd2f872d8bccb7e7f2971df37a3c5f20712ea3c618f", + "734": "9a514875bd9af26fcc565337771f852d311cd77033186e4d957e7b6c7b8ce018", + "735": "8bbc5a27c0031d8c44f3f73c99622a202cd6ea9a080049d615a7ae80ce6024f9", + "736": "e0d4c78b9b3dae51940877aff28275d036eccfc641111c8e34227ff6015a0fab", + "737": "a600884bcaa01797310c83b198bad58c98530289305af29b0bf75f679af38d3a", + "738": "c85f15fdaafe7d5525acff960afef7e4b8ffded5a7ee0d1dc2b0e8d0c26b9b46", + "739": "8716e9302f0fb90153e2f522bd88a710361a897480e4ccc0542473c704793518", + "740": "6ff41ee34b263b742cda109aee3be9ad6c95eec2ce31d6a9fc5353bba1b41afd", + "741": "99ac0eb9589b895e5755895206bbad5febd6bc29b2912df1c7544c547e26bca3", + "742": "7d2761a240aa577348df4813ea248088d0d6d8d421142c712ed576cdc90d4df9", + "743": 
"d93c42a129c0961b4e36738efae3b7e8ffae3a4daeced20e85bb740d3d72522d", + "744": "211f76700a010461486dde6c723720be85e68c192cd8a8ed0a88860b8ae9b0f0", + "745": "2d32dc1fea2f1b8600c0ada927b057b566870ceb5362cce71ac3693dcb7136ae", + "746": "2df1c2a0181f0c25e8d13d2a1eadba55a6b06267a2b22075fcf6867fb2e10c02", + "747": "a8d8f93142e320c6f0dd386c7a3bfb011bbdc15b85291a9be8f0266b3608175e", + "748": "7de937e04c10386b240afb8bb2ff590009946df8b7850a0329ccdb59fca8955f", + "749": "1a55f5484ccf964aeb186faedefa01db05d87180891dc2280b6eb85b6efb4779", + "750": "fa4318c213179e6af1c949be7cf47210f4383e0a44d191e2bad44228d3192f14", + "751": "12fe650fcb3afc214b3d647c655070e8142cfd397441fc7636ad7e6ffcaefde2", + "752": "e416c0123bc6b82df8726b328494db31aa4781d938a0a6e2107b1e44c73c0434", + "753": "0ee3299bc89e1e4c2fc79285fb1cd84c887456358a825e56be92244b7115f5af", + "754": "1370574b16207c41d3dafb62aa898379ec101ac36843634b1633b7b509d4c35a", + "755": "78bb4b18b13f5254cfafe872c0e93791ab5206b2851960dc6aebea8f62b9580c", + "756": "6becaabbda2e9ea22373e62e989b6b70467efa24fbe2f0d124d7a99a53e93f74", + "757": "fbfee0a5c4fa57a1dd6cf0c9bb2423cf7e7bcb130e67114aa360e42234987314", + "758": "8e4dfc259cec9dfd89d4b4ac8c33c75af6e0f5f7926526ee22ad4d45f93d3c18", + "759": "40bac0ed2e4f7861a6d9a2d87191a9034e177c319aa40a43638cc1b69572e5f2", + "760": "7ab50386a211f0815593389ab05b57a1a5eb5cbf5b9a85fe4afc517dcab74e06", + "761": "1cdb0318ac16e11c8d2ae7b1d7ca7138f7b1a461e9d75bd69be0f9cdd3add0c5", + "762": "84c4662267d5809380a540dfc2881665b3019047d74d5ef0a01f86e45f4b5b59", + "763": "f0def5903139447fabe7d106db5fff660d94b45af7b8b48d789596cf65ab2514", + "764": "7b4131f4d1e13d091ca7dd4d32317a14a2a24e6e1abd214df1c14c215287b330", + "765": "7558b775727426bccd945f5aa6b3e131e6034a7b1ff8576332329ef65d6a1663", + "766": "23c309430fa9546adb617457dbfd30fb7432904595c8c000e9b67ea23f32a53b", + "767": "70aef22ac2db8a5bdfcc42ff8dafbd2901e85e268f5f3c45085aa40c590b1d42", + "768": "b69a808dfc654b037e2f47ace16f48fe3bb553b3c8eed3e2b6421942fbf521d0", + 
"769": "78537a30577e806c6d8d94725e54d2d52e56f7f39f89c133cd5d0a2aad7e46e4", + "770": "c9d80c19c4895d1498bf809fcc37c447fa961fb325e5667eb35d6aa992966b41", + "771": "9803ace30c0d90d422e703fdf25a10a9342d0178a277ebc20c7bd6feac4c7a15", + "772": "f5a1e391af815ea6453db58a1bd71790f433c44ed63e5e93d8f5c045dfd5a464", + "773": "e1b93fc323c4d9c383100603339548e1e56ce9c38bcdcc425024c12b862ea8cb", + "774": "3646cd098b213014fb7bbc9597871585e62ee0cf2770e141f1df771237cc09ab", + "775": "d9d7d515ce7350c9e5696d85f68bbb42daa74b9e171a601dd04c823b18bb7757", + "776": "83286074d3bc86a5b449facb5fe5eafc91eb4c8031e2fb5e716443402cd8ed0f", + "777": "e62616a387d05b619d47cee3d49d5d2db19393736bf54b6cdd20933c0531cb7e", + "778": "d4de958ba44d25353de5b380e04d06c7968794ad50dbf6231ad0049ff53e106b", + "779": "c08ce54a59afc4af62f28b80a9c9a5190822d124eed8d73fd6db3e19c81e2157", + "780": "fc7ba646c16482f0f4f5ce2b06d21183dba2bdeaf9469b36b55bc7bc2d87baf3", + "781": "8fa5733f06838fb61b55b3e9d59c5061d922147e59947fe52e566dd975b2199f", + "782": "9f757d92df401ee049bc066bb2625c6287e5e4bcd38c958396a77a578f036a24", + "783": "270ff37f60c267a673bd4b223e44941f01ae9cfbf6bbdf99ca57af89b1e9a66f", + "784": "388b17c4c7b829cef767f83b4686c903faeec1241edfe5f58ee91d2b0c7f8dfc", + "785": "77cf600204c5265e1d5d3d26bf28ba1e92e6f24def040c16977450bec8b1cb99", + "786": "fb14022b7edbc6c7bfde27f35b49f6acaa4f0fc383af27614cb9d4a1980e626b", + "787": "7516ba0ac1951665723dcc4adcc52764d9497e7b6ed30bdb9937ac9df82b7c4f", + "788": "adede1d30258bb0f353af11f559b67f8b823304c71e967f52db52d002760c24f", + "789": "0c82e744a1f9bc57fd8ae8b2f479998455bc45126de971c59b68541c254e303a", + "790": "319847122251afd20d4d650047c55981a509fa2be78abd7c9c3caa0555e60a05", + "791": "2e0bbdcd0a8460e1e33c55668d0dc9752379a78b9f3561d7a17b922a5541a3fb", + "792": "5f77834c5a509023dd95dd98411eae1dd4bafd125deca590632f409f92fd257b", + "793": "dbfd900a3b31eeec2f14b916f5151611541cb716d80b7b9a1229de12293a02ea", + "794": "d019fe415aba832c4c761140d60c466c9aaad52b504df3167c17f2d3f0b277a7", 
+ "795": "617b259349da44c2af2664acde113673ab3bb03a85d31f1be8f01027d0ebd4d3", + "796": "cba6b30a818d073398e5802211987f0897523e4752987bb445b2bca079670e22", + "797": "61e42cac3d7858b8850111a8c64c56432a18dd058dfb6afd773f07d703703b1a", + "798": "ae8b155d6b77522af79f7e4017fefe92aaa5d45eff132c83dc4d4bcfc9686020", + "799": "a41cb14ddf8f1948a01f590fbe53d9ca4e2faf48375ce1c306f91acf7c94e005", + "800": "c6a47bc6f02cf06be16728fb308c83f2f2ae350325ef7016867f5bdaea849d71", + "801": "d14b358c76b55106613f9c0a2112393338dfd01513b0fd231b79fc8db20e41f0", + "802": "22ae33e67fb48accfaa3b36e70c5a19066b974194c3130680de0c7cdce2d0f2e", + "803": "d95b3f9bbb7054042c1fba4db02f7223a2dad94977a36f08c8aaf92f373f9e78", + "804": "b0b1cf7253593eb2334c75e66dbe22b4b4540347485f1ea24e80226b4b18171c", + "805": "41b1ff5db0e70984ad20c50d1a9ac2b5a53ccd5f42796c8e948ae8880005fbb9", + "806": "b9c813beb39671adb8e1530555cadca44c21ddc7127932274918df2091dbd9ca", + "807": "745fd9ba97970d85a29877942839e41fc192794420e86f3bde39fd26db7a8bff", + "808": "6c73b947eb603602a7e8afadc83eaaa381a46db8b82a6fb89c9c1d93cb023fce", + "809": "eebac7753da4c1230dfce0f15fc124ffff01b0e432f0b74623b60cff71bbc9a9", + "810": "42be7899672a1a0046823603ce60dbeda7250a56fcb8d0913093850c85394307", + "811": "8698cd28ae4d93db36631870c33e4a8a527d970050d994666115f54260b64138", + "812": "dc2495924f37353db8b846323b8085fae9db502e890c513ed2e64ed7281f567f", + "813": "92179dde05aa6557baca65699fda50ca024d33a77078d8e128caa3c5db84064b", + "814": "344ed8cb7684307c00b7f03d751729a7f9d2a5f4a4cb4574594113d69593c0c1", + "815": "f642cf15345af3feab60e26a02aee038f759914906a5b2b469b46fdeee50ff59", + "816": "058178444e85f2aedb2f75d824a469747381f0bd3235d8c72df4385fec86eb07", + "817": "582fdc2233298192b09ceaf1463d6be06a09894075532630aa9d9efcfcb31da4", + "818": "67f6964d6ff114a43371b8375c44db2f1362df4f110b4a7ce8d79cf1b76621a0", + "819": "c7a82513ad48dfc87f2c1e0f2915b71464b7f5a16501c71df4ae4a8741dceef3", + "820": 
"9b23ae0181f320aadda2637ac2179c8b41b00715630c3acb643c7aee3b81cf90", + "821": "0941e396ff15b98fd7827de8e33ef94996d48ba719a88ba8e2da7f2605df3e5c", + "822": "ed8ef7f568939b9df1b77ae58344940b91c7e154a4367fe2b179bc7b9484d4e6", + "823": "05139328571a86096032b57e3a6a02a61acad4fb0d8f8e1b5d0ffb0d063ba697", + "826": "7f40f14ca65e5c06dd9ec9bbb212adb4d97a503199cb3c30ed921a04373bbe1c", + "827": "80461f02c63654c642382a6ffb7a44d0a3554434dfcfcea00ba91537724c7106", + "828": "520c196175625a0230afb76579ea26033372de3ef4c78aceb146b84322bfa871", + "829": "ed0089e61cf5540dd4a8fef1c468b96cf57f1d2bb79968755ba856d547ddafdf", + "831": "8ec445084427419ca6da405e0ded9814a4b4e11a2be84d88a8dea421f8e49992", + "832": "cfcb9ebef9308823f64798b5e12a59bf77ff6f92b0eae3790a61c0a26f577010", + "833": "e6ff3a5b257eb53366a32bfc8ea410a00a78bafa63650c76ac2bceddfbb42ff5", + "834": "b0d2a7e7d629ef14db9e7352a9a06d6ca66f750429170bb169ca52c172b8cc96", + "835": "bdfa1b1eecbad79f5de48bc6daee4d2b07689d7fb172aa306dd6094172b396f0" } From 02e593d9e76441c789349f11f8a6f28196d38808 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 15:18:13 +0300 Subject: [PATCH 23/85] Add Project Euler problem 800 solution 1 (#8567) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + project_euler/problem_800/__init__.py | 0 project_euler/problem_800/sol1.py | 65 +++++++++++++++++++++++++++ 3 files changed, 67 insertions(+) create mode 100644 project_euler/problem_800/__init__.py create mode 100644 project_euler/problem_800/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 33c816fc4add..18c573909773 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1017,6 +1017,8 @@ * [Sol1](project_euler/problem_587/sol1.py) * Problem 686 * [Sol1](project_euler/problem_686/sol1.py) + * Problem 800 + * [Sol1](project_euler/problem_800/sol1.py) ## Quantum * [Bb84](quantum/bb84.py) diff --git 
a/project_euler/problem_800/__init__.py b/project_euler/problem_800/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_800/sol1.py b/project_euler/problem_800/sol1.py new file mode 100644 index 000000000000..f887787bcbc6 --- /dev/null +++ b/project_euler/problem_800/sol1.py @@ -0,0 +1,65 @@ +""" +Project Euler Problem 800: https://projecteuler.net/problem=800 + +An integer of the form p^q q^p with prime numbers p != q is called a hybrid-integer. +For example, 800 = 2^5 5^2 is a hybrid-integer. + +We define C(n) to be the number of hybrid-integers less than or equal to n. +You are given C(800) = 2 and C(800^800) = 10790 + +Find C(800800^800800) +""" + +from math import isqrt, log2 + + +def calculate_prime_numbers(max_number: int) -> list[int]: + """ + Returns prime numbers below max_number + + >>> calculate_prime_numbers(10) + [2, 3, 5, 7] + """ + + is_prime = [True] * max_number + for i in range(2, isqrt(max_number - 1) + 1): + if is_prime[i]: + for j in range(i**2, max_number, i): + is_prime[j] = False + + return [i for i in range(2, max_number) if is_prime[i]] + + +def solution(base: int = 800800, degree: int = 800800) -> int: + """ + Returns the number of hybrid-integers less than or equal to base^degree + + >>> solution(800, 1) + 2 + + >>> solution(800, 800) + 10790 + """ + + upper_bound = degree * log2(base) + max_prime = int(upper_bound) + prime_numbers = calculate_prime_numbers(max_prime) + + hybrid_integers_count = 0 + left = 0 + right = len(prime_numbers) - 1 + while left < right: + while ( + prime_numbers[right] * log2(prime_numbers[left]) + + prime_numbers[left] * log2(prime_numbers[right]) + > upper_bound + ): + right -= 1 + hybrid_integers_count += right - left + left += 1 + + return hybrid_integers_count + + +if __name__ == "__main__": + print(f"{solution() = }") From 63deae69299db55b0c2851e865f752770ef88500 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 15:20:08 +0300 Subject: 
[PATCH 24/85] Add Project Euler problem 94 solution 1 (#8599) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 ++ project_euler/problem_094/__init__.py | 0 project_euler/problem_094/sol1.py | 44 +++++++++++++++++++++++++++ 3 files changed, 46 insertions(+) create mode 100644 project_euler/problem_094/__init__.py create mode 100644 project_euler/problem_094/sol1.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 18c573909773..c781b17bf05f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -937,6 +937,8 @@ * [Sol1](project_euler/problem_091/sol1.py) * Problem 092 * [Sol1](project_euler/problem_092/sol1.py) + * Problem 094 + * [Sol1](project_euler/problem_094/sol1.py) * Problem 097 * [Sol1](project_euler/problem_097/sol1.py) * Problem 099 diff --git a/project_euler/problem_094/__init__.py b/project_euler/problem_094/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_094/sol1.py b/project_euler/problem_094/sol1.py new file mode 100644 index 000000000000..a41292fe26fd --- /dev/null +++ b/project_euler/problem_094/sol1.py @@ -0,0 +1,44 @@ +""" +Project Euler Problem 94: https://projecteuler.net/problem=94 + +It is easily proved that no equilateral triangle exists with integral length sides and +integral area. However, the almost equilateral triangle 5-5-6 has an area of 12 square +units. + +We shall define an almost equilateral triangle to be a triangle for which two sides are +equal and the third differs by no more than one unit. + +Find the sum of the perimeters of all almost equilateral triangles with integral side +lengths and area and whose perimeters do not exceed one billion (1,000,000,000). 
+""" + + +def solution(max_perimeter: int = 10**9) -> int: + """ + Returns the sum of the perimeters of all almost equilateral triangles with integral + side lengths and area and whose perimeters do not exceed max_perimeter + + >>> solution(20) + 16 + """ + + prev_value = 1 + value = 2 + + perimeters_sum = 0 + i = 0 + perimeter = 0 + while perimeter <= max_perimeter: + perimeters_sum += perimeter + + prev_value += 2 * value + value += prev_value + + perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 + i += 1 + + return perimeters_sum + + +if __name__ == "__main__": + print(f"{solution() = }") From d9bc5c3cbafdacfe162507b646284f517c922343 Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 1 Apr 2023 15:23:21 +0300 Subject: [PATCH 25/85] Remove extra `len` calls in doubly-linked-list's methods (#8600) --- data_structures/linked_list/doubly_linked_list.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 41d07d63e005..69763d12da15 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -81,7 +81,9 @@ def insert_at_nth(self, index: int, data): .... IndexError: list index out of range """ - if not 0 <= index <= len(self): + length = len(self) + + if not 0 <= index <= length: raise IndexError("list index out of range") new_node = Node(data) if self.head is None: @@ -90,7 +92,7 @@ def insert_at_nth(self, index: int, data): self.head.previous = new_node new_node.next = self.head self.head = new_node - elif index == len(self): + elif index == length: self.tail.next = new_node new_node.previous = self.tail self.tail = new_node @@ -131,15 +133,17 @@ def delete_at_nth(self, index: int): .... 
IndexError: list index out of range """ - if not 0 <= index <= len(self) - 1: + length = len(self) + + if not 0 <= index <= length - 1: raise IndexError("list index out of range") delete_node = self.head # default first node - if len(self) == 1: + if length == 1: self.head = self.tail = None elif index == 0: self.head = self.head.next self.head.previous = None - elif index == len(self) - 1: + elif index == length - 1: delete_node = self.tail self.tail = self.tail.previous self.tail.next = None From 4991ef38407b497892646e39a7165349801ee159 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 19:22:33 +0300 Subject: [PATCH 26/85] Reduce the complexity of digital_image_processing/edge detection/canny.py (#8167) * Reduce the complexity of digital_image_processing/edge_detection/canny.py * Fix * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Fix review issues * Rename dst to destination --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .../edge_detection/canny.py | 129 ++++++++++-------- 1 file changed, 75 insertions(+), 54 deletions(-) diff --git a/digital_image_processing/edge_detection/canny.py b/digital_image_processing/edge_detection/canny.py index a830355267c4..f8cbeedb3874 100644 --- a/digital_image_processing/edge_detection/canny.py +++ b/digital_image_processing/edge_detection/canny.py @@ -18,105 +18,126 @@ def gen_gaussian_kernel(k_size, sigma): return g -def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): - image_row, image_col = image.shape[0], image.shape[1] - # gaussian_filter - gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) - # get the gradient and degree by sobel_filter - sobel_grad, sobel_theta = sobel_filter(gaussian_out) - gradient_direction = np.rad2deg(sobel_theta) - gradient_direction += PI - - dst = np.zeros((image_row, image_col)) - +def suppress_non_maximum(image_shape, gradient_direction, sobel_grad): """ Non-maximum 
suppression. If the edge strength of the current pixel is the largest compared to the other pixels in the mask with the same direction, the value will be preserved. Otherwise, the value will be suppressed. """ - for row in range(1, image_row - 1): - for col in range(1, image_col - 1): + destination = np.zeros(image_shape) + + for row in range(1, image_shape[0] - 1): + for col in range(1, image_shape[1] - 1): direction = gradient_direction[row, col] if ( - 0 <= direction < 22.5 + 0 <= direction < PI / 8 or 15 * PI / 8 <= direction <= 2 * PI or 7 * PI / 8 <= direction <= 9 * PI / 8 ): w = sobel_grad[row, col - 1] e = sobel_grad[row, col + 1] if sobel_grad[row, col] >= w and sobel_grad[row, col] >= e: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (PI / 8 <= direction < 3 * PI / 8) or ( - 9 * PI / 8 <= direction < 11 * PI / 8 + elif ( + PI / 8 <= direction < 3 * PI / 8 + or 9 * PI / 8 <= direction < 11 * PI / 8 ): sw = sobel_grad[row + 1, col - 1] ne = sobel_grad[row - 1, col + 1] if sobel_grad[row, col] >= sw and sobel_grad[row, col] >= ne: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (3 * PI / 8 <= direction < 5 * PI / 8) or ( - 11 * PI / 8 <= direction < 13 * PI / 8 + elif ( + 3 * PI / 8 <= direction < 5 * PI / 8 + or 11 * PI / 8 <= direction < 13 * PI / 8 ): n = sobel_grad[row - 1, col] s = sobel_grad[row + 1, col] if sobel_grad[row, col] >= n and sobel_grad[row, col] >= s: - dst[row, col] = sobel_grad[row, col] + destination[row, col] = sobel_grad[row, col] - elif (5 * PI / 8 <= direction < 7 * PI / 8) or ( - 13 * PI / 8 <= direction < 15 * PI / 8 + elif ( + 5 * PI / 8 <= direction < 7 * PI / 8 + or 13 * PI / 8 <= direction < 15 * PI / 8 ): nw = sobel_grad[row - 1, col - 1] se = sobel_grad[row + 1, col + 1] if sobel_grad[row, col] >= nw and sobel_grad[row, col] >= se: - dst[row, col] = sobel_grad[row, col] - - """ - High-Low threshold detection. 
If an edge pixel’s gradient value is higher - than the high threshold value, it is marked as a strong edge pixel. If an - edge pixel’s gradient value is smaller than the high threshold value and - larger than the low threshold value, it is marked as a weak edge pixel. If - an edge pixel's value is smaller than the low threshold value, it will be - suppressed. - """ - if dst[row, col] >= threshold_high: - dst[row, col] = strong - elif dst[row, col] <= threshold_low: - dst[row, col] = 0 + destination[row, col] = sobel_grad[row, col] + + return destination + + +def detect_high_low_threshold( + image_shape, destination, threshold_low, threshold_high, weak, strong +): + """ + High-Low threshold detection. If an edge pixel’s gradient value is higher + than the high threshold value, it is marked as a strong edge pixel. If an + edge pixel’s gradient value is smaller than the high threshold value and + larger than the low threshold value, it is marked as a weak edge pixel. If + an edge pixel's value is smaller than the low threshold value, it will be + suppressed. + """ + for row in range(1, image_shape[0] - 1): + for col in range(1, image_shape[1] - 1): + if destination[row, col] >= threshold_high: + destination[row, col] = strong + elif destination[row, col] <= threshold_low: + destination[row, col] = 0 else: - dst[row, col] = weak + destination[row, col] = weak + +def track_edge(image_shape, destination, weak, strong): """ Edge tracking. Usually a weak edge pixel caused from true edges will be connected to a strong edge pixel while noise responses are unconnected. As long as there is one strong edge pixel that is involved in its 8-connected neighborhood, that weak edge point can be identified as one that should be preserved. 
""" - for row in range(1, image_row): - for col in range(1, image_col): - if dst[row, col] == weak: + for row in range(1, image_shape[0]): + for col in range(1, image_shape[1]): + if destination[row, col] == weak: if 255 in ( - dst[row, col + 1], - dst[row, col - 1], - dst[row - 1, col], - dst[row + 1, col], - dst[row - 1, col - 1], - dst[row + 1, col - 1], - dst[row - 1, col + 1], - dst[row + 1, col + 1], + destination[row, col + 1], + destination[row, col - 1], + destination[row - 1, col], + destination[row + 1, col], + destination[row - 1, col - 1], + destination[row + 1, col - 1], + destination[row - 1, col + 1], + destination[row + 1, col + 1], ): - dst[row, col] = strong + destination[row, col] = strong else: - dst[row, col] = 0 + destination[row, col] = 0 + + +def canny(image, threshold_low=15, threshold_high=30, weak=128, strong=255): + # gaussian_filter + gaussian_out = img_convolve(image, gen_gaussian_kernel(9, sigma=1.4)) + # get the gradient and degree by sobel_filter + sobel_grad, sobel_theta = sobel_filter(gaussian_out) + gradient_direction = PI + np.rad2deg(sobel_theta) + + destination = suppress_non_maximum(image.shape, gradient_direction, sobel_grad) + + detect_high_low_threshold( + image.shape, destination, threshold_low, threshold_high, weak, strong + ) + + track_edge(image.shape, destination, weak, strong) - return dst + return destination if __name__ == "__main__": # read original image in gray mode lena = cv2.imread(r"../image_data/lena.jpg", 0) # canny edge detection - canny_dst = canny(lena) - cv2.imshow("canny", canny_dst) + canny_destination = canny(lena) + cv2.imshow("canny", canny_destination) cv2.waitKey(0) From 6a500cdcef4dca74729bf5aee933ea0af387d39e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 12:39:22 -0400 Subject: [PATCH 27/85] Fix `mypy` errors in `dilation_operation.py` (#8595) * updating DIRECTORY.md * Fix mypy errors in dilation_operation.py * Rename functions to use snake case * updating DIRECTORY.md * 
updating DIRECTORY.md * Replace raw file string with pathlib Path * Update digital_image_processing/morphological_operations/dilation_operation.py Co-authored-by: Christian Clauss --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../dilation_operation.py | 35 ++++++++++--------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/digital_image_processing/morphological_operations/dilation_operation.py b/digital_image_processing/morphological_operations/dilation_operation.py index c8380737d219..e49b955c1480 100644 --- a/digital_image_processing/morphological_operations/dilation_operation.py +++ b/digital_image_processing/morphological_operations/dilation_operation.py @@ -1,33 +1,35 @@ +from pathlib import Path + import numpy as np from PIL import Image -def rgb2gray(rgb: np.array) -> np.array: +def rgb_to_gray(rgb: np.ndarray) -> np.ndarray: """ Return gray image from rgb image - >>> rgb2gray(np.array([[[127, 255, 0]]])) + >>> rgb_to_gray(np.array([[[127, 255, 0]]])) array([[187.6453]]) - >>> rgb2gray(np.array([[[0, 0, 0]]])) + >>> rgb_to_gray(np.array([[[0, 0, 0]]])) array([[0.]]) - >>> rgb2gray(np.array([[[2, 4, 1]]])) + >>> rgb_to_gray(np.array([[[2, 4, 1]]])) array([[3.0598]]) - >>> rgb2gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) + >>> rgb_to_gray(np.array([[[26, 255, 14], [5, 147, 20], [1, 200, 0]]])) array([[159.0524, 90.0635, 117.6989]]) """ r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b -def gray2binary(gray: np.array) -> np.array: +def gray_to_binary(gray: np.ndarray) -> np.ndarray: """ Return binary image from gray image - >>> gray2binary(np.array([[127, 255, 0]])) + >>> gray_to_binary(np.array([[127, 255, 0]])) array([[False, True, False]]) - >>> gray2binary(np.array([[0]])) + >>> gray_to_binary(np.array([[0]])) array([[False]]) - >>> gray2binary(np.array([[26.2409, 4.9315, 1.4729]])) + >>> 
gray_to_binary(np.array([[26.2409, 4.9315, 1.4729]])) array([[False, False, False]]) - >>> gray2binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) + >>> gray_to_binary(np.array([[26, 255, 14], [5, 147, 20], [1, 200, 0]])) array([[False, True, False], [False, True, False], [False, True, False]]) @@ -35,7 +37,7 @@ def gray2binary(gray: np.array) -> np.array: return (gray > 127) & (gray <= 255) -def dilation(image: np.array, kernel: np.array) -> np.array: +def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray: """ Return dilated image >>> dilation(np.array([[True, False, True]]), np.array([[0, 1, 0]])) @@ -61,14 +63,13 @@ def dilation(image: np.array, kernel: np.array) -> np.array: return output -# kernel to be applied -structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) - - if __name__ == "__main__": # read original image - image = np.array(Image.open(r"..\image_data\lena.jpg")) - output = dilation(gray2binary(rgb2gray(image)), structuring_element) + lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg" + lena = np.array(Image.open(lena_path)) + # kernel to be applied + structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) + output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image pil_img = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png") From a4e31a028aa6aeaea3f59adfaddc138e6e4c5a0d Mon Sep 17 00:00:00 2001 From: Blake Reimer Date: Sat, 1 Apr 2023 10:43:07 -0600 Subject: [PATCH 28/85] Graham's Law (#8162) * grahams law * doctest and type hints * doctest formatting * peer review updates --- physics/grahams_law.py | 208 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 208 insertions(+) create mode 100644 physics/grahams_law.py diff --git a/physics/grahams_law.py b/physics/grahams_law.py new file mode 100644 index 000000000000..6e5d75127e83 --- /dev/null +++ b/physics/grahams_law.py @@ -0,0 +1,208 @@ +""" +Title: Graham's Law of 
Effusion + +Description: Graham's law of effusion states that the rate of effusion of a gas is +inversely proportional to the square root of the molar mass of its particles: + +r1/r2 = sqrt(m2/m1) + +r1 = Rate of effusion for the first gas. +r2 = Rate of effusion for the second gas. +m1 = Molar mass of the first gas. +m2 = Molar mass of the second gas. + +(Description adapted from https://en.wikipedia.org/wiki/Graham%27s_law) +""" + +from math import pow, sqrt + + +def validate(*values: float) -> bool: + """ + Input Parameters: + ----------------- + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> validate(2.016, 4.002) + True + >>> validate(-2.016, 4.002) + False + >>> validate() + False + """ + result = len(values) > 0 and all(value > 0.0 for value in values) + return result + + +def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> effusion_ratio(2.016, 4.002) + 1.408943 + >>> effusion_ratio(-2.016, 4.002) + ValueError('Input Error: Molar mass values must greater than 0.') + >>> effusion_ratio(2.016) + Traceback (most recent call last): + ... 
+ TypeError: effusion_ratio() missing 1 required positional argument: 'molar_mass_2' + """ + return ( + round(sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(molar_mass_1, molar_mass_2) + else ValueError("Input Error: Molar mass values must greater than 0.") + ) + + +def first_effusion_rate( + effusion_rate: float, molar_mass_1: float, molar_mass_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + effusion_rate: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) + + Returns: + -------- + >>> first_effusion_rate(1, 2.016, 4.002) + 1.408943 + >>> first_effusion_rate(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> first_effusion_rate(1) + Traceback (most recent call last): + ... + TypeError: first_effusion_rate() missing 2 required positional arguments: \ +'molar_mass_1' and 'molar_mass_2' + >>> first_effusion_rate(1, 2.016) + Traceback (most recent call last): + ... + TypeError: first_effusion_rate() missing 1 required positional argument: \ +'molar_mass_2' + """ + return ( + round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(effusion_rate, molar_mass_1, molar_mass_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." + ) + ) + + +def second_effusion_rate( + effusion_rate: float, molar_mass_1: float, molar_mass_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + effusion_rate: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + molar_mass_1: Molar mass of the first gas (g/mol, kg/kmol, etc.) + molar_mass_2: Molar mass of the second gas (g/mol, kg/kmol, etc.) 
+ + Returns: + -------- + >>> second_effusion_rate(1, 2.016, 4.002) + 0.709752 + >>> second_effusion_rate(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> second_effusion_rate(1) + Traceback (most recent call last): + ... + TypeError: second_effusion_rate() missing 2 required positional arguments: \ +'molar_mass_1' and 'molar_mass_2' + >>> second_effusion_rate(1, 2.016) + Traceback (most recent call last): + ... + TypeError: second_effusion_rate() missing 1 required positional argument: \ +'molar_mass_2' + """ + return ( + round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6) + if validate(effusion_rate, molar_mass_1, molar_mass_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." + ) + ) + + +def first_molar_mass( + molar_mass: float, effusion_rate_1: float, effusion_rate_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass: Molar mass of the first gas (g/mol, kg/kmol, etc.) + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + + Returns: + -------- + >>> first_molar_mass(2, 1.408943, 0.709752) + 0.507524 + >>> first_molar_mass(-1, 2.016, 4.002) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> first_molar_mass(1) + Traceback (most recent call last): + ... + TypeError: first_molar_mass() missing 2 required positional arguments: \ +'effusion_rate_1' and 'effusion_rate_2' + >>> first_molar_mass(1, 2.016) + Traceback (most recent call last): + ... + TypeError: first_molar_mass() missing 1 required positional argument: \ +'effusion_rate_2' + """ + return ( + round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6) + if validate(molar_mass, effusion_rate_1, effusion_rate_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." 
+ ) + ) + + +def second_molar_mass( + molar_mass: float, effusion_rate_1: float, effusion_rate_2: float +) -> float | ValueError: + """ + Input Parameters: + ----------------- + molar_mass: Molar mass of the first gas (g/mol, kg/kmol, etc.) + effusion_rate_1: Effustion rate of first gas (m^2/s, mm^2/s, etc.) + effusion_rate_2: Effustion rate of second gas (m^2/s, mm^2/s, etc.) + + Returns: + -------- + >>> second_molar_mass(2, 1.408943, 0.709752) + 1.970351 + >>> second_molar_mass(-2, 1.408943, 0.709752) + ValueError('Input Error: Molar mass and effusion rate values must greater than 0.') + >>> second_molar_mass(1) + Traceback (most recent call last): + ... + TypeError: second_molar_mass() missing 2 required positional arguments: \ +'effusion_rate_1' and 'effusion_rate_2' + >>> second_molar_mass(1, 2.016) + Traceback (most recent call last): + ... + TypeError: second_molar_mass() missing 1 required positional argument: \ +'effusion_rate_2' + """ + return ( + round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6) + if validate(molar_mass, effusion_rate_1, effusion_rate_2) + else ValueError( + "Input Error: Molar mass and effusion rate values must greater than 0." 
+ ) + ) From 9713a98bb2309fadac218b23e6c668989c169b1a Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Sat, 1 Apr 2023 16:49:14 +0000 Subject: [PATCH 29/85] updating DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index c781b17bf05f..588d0b1e542e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -715,6 +715,7 @@ * [Archimedes Principle](physics/archimedes_principle.py) * [Casimir Effect](physics/casimir_effect.py) * [Centripetal Force](physics/centripetal_force.py) + * [Grahams Law](physics/grahams_law.py) * [Horizontal Projectile Motion](physics/horizontal_projectile_motion.py) * [Hubble Parameter](physics/hubble_parameter.py) * [Ideal Gas Law](physics/ideal_gas_law.py) From 2c957e74d4894f4a79585a3d0c91177fe9899aac Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Sat, 1 Apr 2023 20:43:11 +0300 Subject: [PATCH 30/85] Reenable files when TensorFlow supports the current Python (#8602) * Remove python_version < "3.11" for tensorflow * Reenable neural_network/input_data.py_tf * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff * Try to fix ruff * Try to fix ruff * Try to fix pre-commit * Try to fix * Fix * Fix * Reenable dynamic_programming/k_means_clustering_tensorflow.py_tf * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Try to fix ruff --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 2 + ...py_tf => k_means_clustering_tensorflow.py} | 9 +- .../{input_data.py_tf => input_data.py} | 98 +++++++++---------- requirements.txt | 2 +- 4 
files changed, 55 insertions(+), 56 deletions(-) rename dynamic_programming/{k_means_clustering_tensorflow.py_tf => k_means_clustering_tensorflow.py} (98%) rename neural_network/{input_data.py_tf => input_data.py} (83%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 588d0b1e542e..b1adc23f6e61 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -309,6 +309,7 @@ * [Floyd Warshall](dynamic_programming/floyd_warshall.py) * [Integer Partition](dynamic_programming/integer_partition.py) * [Iterating Through Submasks](dynamic_programming/iterating_through_submasks.py) + * [K Means Clustering Tensorflow](dynamic_programming/k_means_clustering_tensorflow.py) * [Knapsack](dynamic_programming/knapsack.py) * [Longest Common Subsequence](dynamic_programming/longest_common_subsequence.py) * [Longest Common Substring](dynamic_programming/longest_common_substring.py) @@ -685,6 +686,7 @@ * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) + * [Input Data](neural_network/input_data.py) * [Perceptron](neural_network/perceptron.py) * [Simple Neural Network](neural_network/simple_neural_network.py) diff --git a/dynamic_programming/k_means_clustering_tensorflow.py_tf b/dynamic_programming/k_means_clustering_tensorflow.py similarity index 98% rename from dynamic_programming/k_means_clustering_tensorflow.py_tf rename to dynamic_programming/k_means_clustering_tensorflow.py index 4fbcedeaa0dc..8d3f6f0dfbcb 100644 --- a/dynamic_programming/k_means_clustering_tensorflow.py_tf +++ b/dynamic_programming/k_means_clustering_tensorflow.py @@ -1,9 +1,10 @@ -import tensorflow as tf from random import shuffle + +import tensorflow as tf from numpy import array -def TFKMeansCluster(vectors, noofclusters): +def tf_k_means_cluster(vectors, noofclusters): """ K-Means Clustering using TensorFlow. 
'vectors' should be a n*k 2-D NumPy array, where n is the number @@ -30,7 +31,6 @@ def TFKMeansCluster(vectors, noofclusters): graph = tf.Graph() with graph.as_default(): - # SESSION OF COMPUTATION sess = tf.Session() @@ -95,8 +95,7 @@ def TFKMeansCluster(vectors, noofclusters): # iterations. To keep things simple, we will only do a set number of # iterations, instead of using a Stopping Criterion. noofiterations = 100 - for iteration_n in range(noofiterations): - + for _ in range(noofiterations): ##EXPECTATION STEP ##Based on the centroid locations till last iteration, compute ##the _expected_ centroid assignments. diff --git a/neural_network/input_data.py_tf b/neural_network/input_data.py similarity index 83% rename from neural_network/input_data.py_tf rename to neural_network/input_data.py index 0e22ac0bcda5..2a32f0b82c37 100644 --- a/neural_network/input_data.py_tf +++ b/neural_network/input_data.py @@ -21,13 +21,10 @@ import collections import gzip import os +import urllib import numpy -from six.moves import urllib -from six.moves import xrange # pylint: disable=redefined-builtin - -from tensorflow.python.framework import dtypes -from tensorflow.python.framework import random_seed +from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated @@ -46,16 +43,16 @@ def _read32(bytestream): def _extract_images(f): """Extract the images into a 4D uint8 numpy array [index, y, x, depth]. - Args: - f: A file object that can be passed into a gzip reader. + Args: + f: A file object that can be passed into a gzip reader. - Returns: - data: A 4D uint8 numpy array [index, y, x, depth]. + Returns: + data: A 4D uint8 numpy array [index, y, x, depth]. - Raises: - ValueError: If the bytestream does not start with 2051. + Raises: + ValueError: If the bytestream does not start with 2051. 
- """ + """ print("Extracting", f.name) with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) @@ -86,17 +83,17 @@ def _dense_to_one_hot(labels_dense, num_classes): def _extract_labels(f, one_hot=False, num_classes=10): """Extract the labels into a 1D uint8 numpy array [index]. - Args: - f: A file object that can be passed into a gzip reader. - one_hot: Does one hot encoding for the result. - num_classes: Number of classes for the one hot encoding. + Args: + f: A file object that can be passed into a gzip reader. + one_hot: Does one hot encoding for the result. + num_classes: Number of classes for the one hot encoding. - Returns: - labels: a 1D uint8 numpy array. + Returns: + labels: a 1D uint8 numpy array. - Raises: - ValueError: If the bystream doesn't start with 2049. - """ + Raises: + ValueError: If the bystream doesn't start with 2049. + """ print("Extracting", f.name) with gzip.GzipFile(fileobj=f) as bytestream: magic = _read32(bytestream) @@ -115,8 +112,8 @@ def _extract_labels(f, one_hot=False, num_classes=10): class _DataSet: """Container class for a _DataSet (deprecated). - THIS CLASS IS DEPRECATED. - """ + THIS CLASS IS DEPRECATED. + """ @deprecated( None, @@ -135,21 +132,21 @@ def __init__( ): """Construct a _DataSet. - one_hot arg is used only if fake_data is true. `dtype` can be either - `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into - `[0, 1]`. Seed arg provides for convenient deterministic testing. - - Args: - images: The images - labels: The labels - fake_data: Ignore inages and labels, use fake data. - one_hot: Bool, return the labels as one hot vectors (if True) or ints (if - False). - dtype: Output image dtype. One of [uint8, float32]. `uint8` output has - range [0,255]. float32 output has range [0,1]. - reshape: Bool. If True returned images are returned flattened to vectors. - seed: The random seed to use. - """ + one_hot arg is used only if fake_data is true. 
`dtype` can be either + `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into + `[0, 1]`. Seed arg provides for convenient deterministic testing. + + Args: + images: The images + labels: The labels + fake_data: Ignore inages and labels, use fake data. + one_hot: Bool, return the labels as one hot vectors (if True) or ints (if + False). + dtype: Output image dtype. One of [uint8, float32]. `uint8` output has + range [0,255]. float32 output has range [0,1]. + reshape: Bool. If True returned images are returned flattened to vectors. + seed: The random seed to use. + """ seed1, seed2 = random_seed.get_seed(seed) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seed1 if seed is None else seed2) @@ -206,8 +203,8 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): else: fake_label = 0 return ( - [fake_image for _ in xrange(batch_size)], - [fake_label for _ in xrange(batch_size)], + [fake_image for _ in range(batch_size)], + [fake_label for _ in range(batch_size)], ) start = self._index_in_epoch # Shuffle for the first epoch @@ -250,19 +247,19 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): def _maybe_download(filename, work_directory, source_url): """Download the data from source url, unless it's already here. - Args: - filename: string, name of the file in the directory. - work_directory: string, path to working directory. - source_url: url to download from if file doesn't exist. + Args: + filename: string, name of the file in the directory. + work_directory: string, path to working directory. + source_url: url to download from if file doesn't exist. - Returns: - Path to resulting file. - """ + Returns: + Path to resulting file. 
+ """ if not gfile.Exists(work_directory): gfile.MakeDirs(work_directory) filepath = os.path.join(work_directory, filename) if not gfile.Exists(filepath): - urllib.request.urlretrieve(source_url, filepath) + urllib.request.urlretrieve(source_url, filepath) # noqa: S310 with gfile.GFile(filepath) as f: size = f.size() print("Successfully downloaded", filename, size, "bytes.") @@ -328,7 +325,8 @@ def fake(): if not 0 <= validation_size <= len(train_images): raise ValueError( - f"Validation size should be between 0 and {len(train_images)}. Received: {validation_size}." + f"Validation size should be between 0 and {len(train_images)}. " + f"Received: {validation_size}." ) validation_images = train_images[:validation_size] @@ -336,7 +334,7 @@ def fake(): train_images = train_images[validation_size:] train_labels = train_labels[validation_size:] - options = dict(dtype=dtype, reshape=reshape, seed=seed) + options = {"dtype": dtype, "reshape": reshape, "seed": seed} train = _DataSet(train_images, train_labels, **options) validation = _DataSet(validation_images, validation_labels, **options) diff --git a/requirements.txt b/requirements.txt index a1d607df07e1..acfbc823e77f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -15,7 +15,7 @@ scikit-fuzzy scikit-learn statsmodels sympy -tensorflow; python_version < "3.11" +tensorflow texttable tweepy xgboost From 803c0418da8621aad2dbb856fbb45bc57b275f4e Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sat, 1 Apr 2023 16:05:01 -0400 Subject: [PATCH 31/85] Revamp `md5.py` (#8065) * Add type hints to md5.py * Rename some vars to snake case * Specify functions imported from math * Rename vars and functions to be more descriptive * Make tests from test function into doctests * Clarify more var names * Refactor some MD5 code into preprocess function * Simplify loop indices in get_block_words * Add more detailed comments, docs, and doctests * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating 
DIRECTORY.md * updating DIRECTORY.md * Add type hints to md5.py * Rename some vars to snake case * Specify functions imported from math * Rename vars and functions to be more descriptive * Make tests from test function into doctests * Clarify more var names * Refactor some MD5 code into preprocess function * Simplify loop indices in get_block_words * Add more detailed comments, docs, and doctests * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * updating DIRECTORY.md * Convert str types to bytes * Add tests comparing md5_me to hashlib's md5 * Replace line-break backslashes with parentheses --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- hashes/md5.py | 372 +++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 289 insertions(+), 83 deletions(-) diff --git a/hashes/md5.py b/hashes/md5.py index 2020bf2e53bf..2187006ec8a9 100644 --- a/hashes/md5.py +++ b/hashes/md5.py @@ -1,91 +1,223 @@ -import math +""" +The MD5 algorithm is a hash function that's commonly used as a checksum to +detect data corruption. The algorithm works by processing a given message in +blocks of 512 bits, padding the message as needed. It uses the blocks to operate +a 128-bit state and performs a total of 64 such operations. Note that all values +are little-endian, so inputs are converted as needed. +Although MD5 was used as a cryptographic hash function in the past, it's since +been cracked, so it shouldn't be used for security purposes. -def rearrange(bit_string_32): - """[summary] - Regroups the given binary string. +For more info, see https://en.wikipedia.org/wiki/MD5 +""" + +from collections.abc import Generator +from math import sin + + +def to_little_endian(string_32: bytes) -> bytes: + """ + Converts the given string to little-endian in groups of 8 chars. 
Arguments: - bitString32 {[string]} -- [32 bit binary] + string_32 {[string]} -- [32-char string] Raises: - ValueError -- [if the given string not are 32 bit binary string] + ValueError -- [input is not 32 char] Returns: - [string] -- [32 bit binary string] - >>> rearrange('1234567890abcdfghijklmnopqrstuvw') - 'pqrstuvwhijklmno90abcdfg12345678' + 32-char little-endian string + >>> to_little_endian(b'1234567890abcdfghijklmnopqrstuvw') + b'pqrstuvwhijklmno90abcdfg12345678' + >>> to_little_endian(b'1234567890') + Traceback (most recent call last): + ... + ValueError: Input must be of length 32 """ + if len(string_32) != 32: + raise ValueError("Input must be of length 32") - if len(bit_string_32) != 32: - raise ValueError("Need length 32") - new_string = "" + little_endian = b"" for i in [3, 2, 1, 0]: - new_string += bit_string_32[8 * i : 8 * i + 8] - return new_string + little_endian += string_32[8 * i : 8 * i + 8] + return little_endian + + +def reformat_hex(i: int) -> bytes: + """ + Converts the given non-negative integer to hex string. + Example: Suppose the input is the following: + i = 1234 -def reformat_hex(i): - """[summary] - Converts the given integer into 8-digit hex number. + The input is 0x000004d2 in hex, so the little-endian hex string is + "d2040000". Arguments: - i {[int]} -- [integer] + i {[int]} -- [integer] + + Raises: + ValueError -- [input is negative] + + Returns: + 8-char little-endian hex string + + >>> reformat_hex(1234) + b'd2040000' >>> reformat_hex(666) - '9a020000' + b'9a020000' + >>> reformat_hex(0) + b'00000000' + >>> reformat_hex(1234567890) + b'd2029649' + >>> reformat_hex(1234567890987654321) + b'b11c6cb1' + >>> reformat_hex(-1) + Traceback (most recent call last): + ... 
+ ValueError: Input must be non-negative """ + if i < 0: + raise ValueError("Input must be non-negative") - hexrep = format(i, "08x") - thing = "" + hex_rep = format(i, "08x")[-8:] + little_endian_hex = b"" for i in [3, 2, 1, 0]: - thing += hexrep[2 * i : 2 * i + 2] - return thing + little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8") + return little_endian_hex -def pad(bit_string): - """[summary] - Fills up the binary string to a 512 bit binary string +def preprocess(message: bytes) -> bytes: + """ + Preprocesses the message string: + - Convert message to bit string + - Pad bit string to a multiple of 512 chars: + - Append a 1 + - Append 0's until length = 448 (mod 512) + - Append length of original message (64 chars) + + Example: Suppose the input is the following: + message = "a" + + The message bit string is "01100001", which is 8 bits long. Thus, the + bit string needs 439 bits of padding so that + (bit_string + "1" + padding) = 448 (mod 512). + The message length is "000010000...0" in 64-bit little-endian binary. + The combined bit string is then 512 bits long. Arguments: - bitString {[string]} -- [binary string] + message {[string]} -- [message string] Returns: - [string] -- [binary string] + processed bit string padded to a multiple of 512 chars + + >>> preprocess(b"a") == (b"01100001" + b"1" + + ... 
(b"0" * 439) + b"00001000" + (b"0" * 56)) + True + >>> preprocess(b"") == b"1" + (b"0" * 447) + (b"0" * 64) + True """ - start_length = len(bit_string) - bit_string += "1" + bit_string = b"" + for char in message: + bit_string += format(char, "08b").encode("utf-8") + start_len = format(len(bit_string), "064b").encode("utf-8") + + # Pad bit_string to a multiple of 512 chars + bit_string += b"1" while len(bit_string) % 512 != 448: - bit_string += "0" - last_part = format(start_length, "064b") - bit_string += rearrange(last_part[32:]) + rearrange(last_part[:32]) + bit_string += b"0" + bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32]) + return bit_string -def get_block(bit_string): - """[summary] - Iterator: - Returns by each call a list of length 16 with the 32 bit - integer blocks. +def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]: + """ + Splits bit string into blocks of 512 chars and yields each block as a list + of 32-bit words + + Example: Suppose the input is the following: + bit_string = + "000000000...0" + # 0x00 (32 bits, padded to the right) + "000000010...0" + # 0x01 (32 bits, padded to the right) + "000000100...0" + # 0x02 (32 bits, padded to the right) + "000000110...0" + # 0x03 (32 bits, padded to the right) + ... + "000011110...0" # 0x0a (32 bits, padded to the right) + + Then len(bit_string) == 512, so there'll be 1 block. The block is split + into 32-bit words, and each word is converted to little endian. The + first word is interpreted as 0 in decimal, the second word is + interpreted as 1 in decimal, etc. + + Thus, block_words == [[0, 1, 2, 3, ..., 15]]. Arguments: - bit_string {[string]} -- [binary string >= 512] + bit_string {[string]} -- [bit string with multiple of 512 as length] + + Raises: + ValueError -- [length of bit string isn't multiple of 512] + + Yields: + a list of 16 32-bit words + + >>> test_string = ("".join(format(n << 24, "032b") for n in range(16)) + ... 
.encode("utf-8")) + >>> list(get_block_words(test_string)) + [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]] + >>> list(get_block_words(test_string * 4)) == [list(range(16))] * 4 + True + >>> list(get_block_words(b"1" * 512)) == [[4294967295] * 16] + True + >>> list(get_block_words(b"")) + [] + >>> list(get_block_words(b"1111")) + Traceback (most recent call last): + ... + ValueError: Input must have length that's a multiple of 512 """ + if len(bit_string) % 512 != 0: + raise ValueError("Input must have length that's a multiple of 512") - curr_pos = 0 - while curr_pos < len(bit_string): - curr_part = bit_string[curr_pos : curr_pos + 512] - my_splits = [] - for i in range(16): - my_splits.append(int(rearrange(curr_part[32 * i : 32 * i + 32]), 2)) - yield my_splits - curr_pos += 512 + for pos in range(0, len(bit_string), 512): + block = bit_string[pos : pos + 512] + block_words = [] + for i in range(0, 512, 32): + block_words.append(int(to_little_endian(block[i : i + 32]), 2)) + yield block_words -def not32(i): +def not_32(i: int) -> int: """ - >>> not32(34) + Perform bitwise NOT on given int. + + Arguments: + i {[int]} -- [given int] + + Raises: + ValueError -- [input is negative] + + Returns: + Result of bitwise NOT on i + + >>> not_32(34) 4294967261 + >>> not_32(1234) + 4294966061 + >>> not_32(4294966061) + 1234 + >>> not_32(0) + 4294967295 + >>> not_32(1) + 4294967294 + >>> not_32(-1) + Traceback (most recent call last): + ... + ValueError: Input must be non-negative """ + if i < 0: + raise ValueError("Input must be non-negative") + i_str = format(i, "032b") new_str = "" for c in i_str: @@ -93,35 +225,114 @@ def not32(i): return int(new_str, 2) -def sum32(a, b): +def sum_32(a: int, b: int) -> int: + """ + Add two numbers as 32-bit ints. 
+ + Arguments: + a {[int]} -- [first given int] + b {[int]} -- [second given int] + + Returns: + (a + b) as an unsigned 32-bit int + + >>> sum_32(1, 1) + 2 + >>> sum_32(2, 3) + 5 + >>> sum_32(0, 0) + 0 + >>> sum_32(-1, -1) + 4294967294 + >>> sum_32(4294967295, 1) + 0 + """ return (a + b) % 2**32 -def leftrot32(i, s): - return (i << s) ^ (i >> (32 - s)) +def left_rotate_32(i: int, shift: int) -> int: + """ + Rotate the bits of a given int left by a given amount. + + Arguments: + i {[int]} -- [given int] + shift {[int]} -- [shift amount] + + Raises: + ValueError -- [either given int or shift is negative] + Returns: + `i` rotated to the left by `shift` bits + + >>> left_rotate_32(1234, 1) + 2468 + >>> left_rotate_32(1111, 4) + 17776 + >>> left_rotate_32(2147483648, 1) + 1 + >>> left_rotate_32(2147483648, 3) + 4 + >>> left_rotate_32(4294967295, 4) + 4294967295 + >>> left_rotate_32(1234, 0) + 1234 + >>> left_rotate_32(0, 0) + 0 + >>> left_rotate_32(-1, 0) + Traceback (most recent call last): + ... + ValueError: Input must be non-negative + >>> left_rotate_32(0, -1) + Traceback (most recent call last): + ... + ValueError: Shift must be non-negative + """ + if i < 0: + raise ValueError("Input must be non-negative") + if shift < 0: + raise ValueError("Shift must be non-negative") + return ((i << shift) ^ (i >> (32 - shift))) % 2**32 + + +def md5_me(message: bytes) -> bytes: + """ + Returns the 32-char MD5 hash of a given message. 
-def md5me(test_string): - """[summary] - Returns a 32-bit hash code of the string 'testString' + Reference: https://en.wikipedia.org/wiki/MD5#Algorithm Arguments: - testString {[string]} -- [message] + message {[string]} -- [message] + + Returns: + 32-char MD5 hash string + + >>> md5_me(b"") + b'd41d8cd98f00b204e9800998ecf8427e' + >>> md5_me(b"The quick brown fox jumps over the lazy dog") + b'9e107d9d372bb6826bd81d3542a419d6' + >>> md5_me(b"The quick brown fox jumps over the lazy dog.") + b'e4d909c290d0fb1ca068ffaddf22cbd0' + + >>> import hashlib + >>> from string import ascii_letters + >>> msgs = [b"", ascii_letters.encode("utf-8"), "Üñîçø∂é".encode("utf-8"), + ... b"The quick brown fox jumps over the lazy dog."] + >>> all(md5_me(msg) == hashlib.md5(msg).hexdigest().encode("utf-8") for msg in msgs) + True """ - bs = "" - for i in test_string: - bs += format(ord(i), "08b") - bs = pad(bs) + # Convert to bit string, add padding and append message length + bit_string = preprocess(message) - tvals = [int(2**32 * abs(math.sin(i + 1))) for i in range(64)] + added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)] + # Starting states a0 = 0x67452301 b0 = 0xEFCDAB89 c0 = 0x98BADCFE d0 = 0x10325476 - s = [ + shift_amounts = [ 7, 12, 17, @@ -188,51 +399,46 @@ def md5me(test_string): 21, ] - for m in get_block(bs): + # Process bit string in chunks, each with 16 32-char words + for block_words in get_block_words(bit_string): a = a0 b = b0 c = c0 d = d0 + + # Hash current chunk for i in range(64): if i <= 15: - # f = (B & C) | (not32(B) & D) + # f = (b & c) | (not_32(b) & d) # Alternate definition for f f = d ^ (b & (c ^ d)) g = i elif i <= 31: - # f = (D & B) | (not32(D) & C) + # f = (d & b) | (not_32(d) & c) # Alternate definition for f f = c ^ (d & (b ^ c)) g = (5 * i + 1) % 16 elif i <= 47: f = b ^ c ^ d g = (3 * i + 5) % 16 else: - f = c ^ (b | not32(d)) + f = c ^ (b | not_32(d)) g = (7 * i) % 16 - dtemp = d + f = (f + a + added_consts[i] + block_words[g]) % 2**32 
+ a = d d = c c = b - b = sum32(b, leftrot32((a + f + tvals[i] + m[g]) % 2**32, s[i])) - a = dtemp - a0 = sum32(a0, a) - b0 = sum32(b0, b) - c0 = sum32(c0, c) - d0 = sum32(d0, d) + b = sum_32(b, left_rotate_32(f, shift_amounts[i])) + + # Add hashed chunk to running total + a0 = sum_32(a0, a) + b0 = sum_32(b0, b) + c0 = sum_32(c0, c) + d0 = sum_32(d0, d) digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0) return digest -def test(): - assert md5me("") == "d41d8cd98f00b204e9800998ecf8427e" - assert ( - md5me("The quick brown fox jumps over the lazy dog") - == "9e107d9d372bb6826bd81d3542a419d6" - ) - print("Success.") - - if __name__ == "__main__": - test() import doctest doctest.testmod() From 03c0e28740a489764f69921cce8f4f9531b87562 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sun, 2 Apr 2023 06:48:19 +0200 Subject: [PATCH 32/85] Rename quantum_random.py.DISABLED.txt to quantum_random.py (#8601) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + quantum/{quantum_random.py.DISABLED.txt => quantum_random.py} | 0 2 files changed, 1 insertion(+) rename quantum/{quantum_random.py.DISABLED.txt => quantum_random.py} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index b1adc23f6e61..8dd3fb5d9af1 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1033,6 +1033,7 @@ * [Q Fourier Transform](quantum/q_fourier_transform.py) * [Q Full Adder](quantum/q_full_adder.py) * [Quantum Entanglement](quantum/quantum_entanglement.py) + * [Quantum Random](quantum/quantum_random.py) * [Quantum Teleportation](quantum/quantum_teleportation.py) * [Ripple Adder Classic](quantum/ripple_adder_classic.py) * [Single Qubit Measure](quantum/single_qubit_measure.py) diff --git a/quantum/quantum_random.py.DISABLED.txt b/quantum/quantum_random.py similarity index 100% rename from quantum/quantum_random.py.DISABLED.txt rename to quantum/quantum_random.py From af1863ecbe45a11eaa78f38851e4c9fe123b23c5 Mon Sep 17 00:00:00 2001 
From: Ishab Date: Sun, 2 Apr 2023 13:04:11 +0100 Subject: [PATCH 33/85] Add Project Euler problem 79 solution 1 (#8607) Co-authored-by: Dhruv Manilawala --- project_euler/problem_079/__init__.py | 0 project_euler/problem_079/keylog.txt | 50 ++++++++++++++++ project_euler/problem_079/keylog_test.txt | 16 ++++++ project_euler/problem_079/sol1.py | 69 +++++++++++++++++++++++ 4 files changed, 135 insertions(+) create mode 100644 project_euler/problem_079/__init__.py create mode 100644 project_euler/problem_079/keylog.txt create mode 100644 project_euler/problem_079/keylog_test.txt create mode 100644 project_euler/problem_079/sol1.py diff --git a/project_euler/problem_079/__init__.py b/project_euler/problem_079/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/project_euler/problem_079/keylog.txt b/project_euler/problem_079/keylog.txt new file mode 100644 index 000000000000..41f15673248d --- /dev/null +++ b/project_euler/problem_079/keylog.txt @@ -0,0 +1,50 @@ +319 +680 +180 +690 +129 +620 +762 +689 +762 +318 +368 +710 +720 +710 +629 +168 +160 +689 +716 +731 +736 +729 +316 +729 +729 +710 +769 +290 +719 +680 +318 +389 +162 +289 +162 +718 +729 +319 +790 +680 +890 +362 +319 +760 +316 +729 +380 +319 +728 +716 diff --git a/project_euler/problem_079/keylog_test.txt b/project_euler/problem_079/keylog_test.txt new file mode 100644 index 000000000000..2c7024bde948 --- /dev/null +++ b/project_euler/problem_079/keylog_test.txt @@ -0,0 +1,16 @@ +319 +680 +180 +690 +129 +620 +698 +318 +328 +310 +320 +610 +629 +198 +190 +631 diff --git a/project_euler/problem_079/sol1.py b/project_euler/problem_079/sol1.py new file mode 100644 index 000000000000..d34adcd243b0 --- /dev/null +++ b/project_euler/problem_079/sol1.py @@ -0,0 +1,69 @@ +""" +Project Euler Problem 79: https://projecteuler.net/problem=79 + +Passcode derivation + +A common security method used for online banking is to ask the user for three +random characters from a passcode. 
For example, if the passcode was 531278, +they may ask for the 2nd, 3rd, and 5th characters; the expected reply would +be: 317. + +The text file, keylog.txt, contains fifty successful login attempts. + +Given that the three characters are always asked for in order, analyse the file +so as to determine the shortest possible secret passcode of unknown length. +""" +import itertools +from pathlib import Path + + +def find_secret_passcode(logins: list[str]) -> int: + """ + Returns the shortest possible secret passcode of unknown length. + + >>> find_secret_passcode(["135", "259", "235", "189", "690", "168", "120", + ... "136", "289", "589", "160", "165", "580", "369", "250", "280"]) + 12365890 + + >>> find_secret_passcode(["426", "281", "061", "819" "268", "406", "420", + ... "428", "209", "689", "019", "421", "469", "261", "681", "201"]) + 4206819 + """ + + # Split each login by character e.g. '319' -> ('3', '1', '9') + split_logins = [tuple(login) for login in logins] + + unique_chars = {char for login in split_logins for char in login} + + for permutation in itertools.permutations(unique_chars): + satisfied = True + for login in logins: + if not ( + permutation.index(login[0]) + < permutation.index(login[1]) + < permutation.index(login[2]) + ): + satisfied = False + break + + if satisfied: + return int("".join(permutation)) + + raise Exception("Unable to find the secret passcode") + + +def solution(input_file: str = "keylog.txt") -> int: + """ + Returns the shortest possible secret passcode of unknown length + for successful login attempts given by `input_file` text file. 
+ + >>> solution("keylog_test.txt") + 6312980 + """ + logins = Path(__file__).parent.joinpath(input_file).read_text().splitlines() + + return find_secret_passcode(logins) + + +if __name__ == "__main__": + print(f"{solution() = }") From 4a8bafda5e4fb3246846f6c7c37594d55d0122f6 Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Mon, 3 Apr 2023 15:23:10 +0000 Subject: [PATCH 34/85] updating DIRECTORY.md --- DIRECTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 8dd3fb5d9af1..3764c471ce70 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -922,6 +922,8 @@ * [Sol1](project_euler/problem_077/sol1.py) * Problem 078 * [Sol1](project_euler/problem_078/sol1.py) + * Problem 079 + * [Sol1](project_euler/problem_079/sol1.py) * Problem 080 * [Sol1](project_euler/problem_080/sol1.py) * Problem 081 From 6aadf4ad458902cec0f5848e7a20a03dfdd9d495 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 4 Apr 2023 07:00:31 +0530 Subject: [PATCH 35/85] [pre-commit.ci] pre-commit autoupdate (#8611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.259 → v0.0.260](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.259...v0.0.260) - [github.com/psf/black: 23.1.0 → 23.3.0](https://github.com/psf/black/compare/23.1.0...23.3.0) - [github.com/abravalheri/validate-pyproject: v0.12.1 → v0.12.2](https://github.com/abravalheri/validate-pyproject/compare/v0.12.1...v0.12.2) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml 
b/.pre-commit-config.yaml index 72a878387e15..d54ce5adddce 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,12 +16,12 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.259 + rev: v0.0.260 hooks: - id: ruff - repo: https://github.com/psf/black - rev: 23.1.0 + rev: 23.3.0 hooks: - id: black @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.1 + rev: v0.12.2 hooks: - id: validate-pyproject From 540a7758dc1a4b1c59c94b19aeb100449e121632 Mon Sep 17 00:00:00 2001 From: Ishan Dutta Date: Fri, 7 Apr 2023 21:21:25 +0530 Subject: [PATCH 36/85] Add LeNet Implementation in PyTorch (#7070) * add torch to requirements * add lenet architecture in pytorch * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add type hints * remove file * add type hints * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * update variable name * add fail test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * add newline * reformatting --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- computer_vision/lenet_pytorch.py | 82 ++++++++++++++++++++++++++++++++ requirements.txt | 1 + 2 files changed, 83 insertions(+) create mode 100644 computer_vision/lenet_pytorch.py diff --git a/computer_vision/lenet_pytorch.py b/computer_vision/lenet_pytorch.py new file mode 100644 index 000000000000..177a5ebfcdb4 --- /dev/null +++ b/computer_vision/lenet_pytorch.py @@ -0,0 +1,82 @@ +""" +LeNet Network + +Paper: http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf +""" + +import numpy +import torch +import torch.nn as nn + + +class LeNet(nn.Module): + def __init__(self) -> None: + super().__init__() + + self.tanh = nn.Tanh() + self.avgpool = 
nn.AvgPool2d(kernel_size=2, stride=2) + + self.conv1 = nn.Conv2d( + in_channels=1, + out_channels=6, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + self.conv2 = nn.Conv2d( + in_channels=6, + out_channels=16, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + self.conv3 = nn.Conv2d( + in_channels=16, + out_channels=120, + kernel_size=(5, 5), + stride=(1, 1), + padding=(0, 0), + ) + + self.linear1 = nn.Linear(120, 84) + self.linear2 = nn.Linear(84, 10) + + def forward(self, image_array: numpy.ndarray) -> numpy.ndarray: + image_array = self.tanh(self.conv1(image_array)) + image_array = self.avgpool(image_array) + image_array = self.tanh(self.conv2(image_array)) + image_array = self.avgpool(image_array) + image_array = self.tanh(self.conv3(image_array)) + + image_array = image_array.reshape(image_array.shape[0], -1) + image_array = self.tanh(self.linear1(image_array)) + image_array = self.linear2(image_array) + return image_array + + +def test_model(image_tensor: torch.tensor) -> bool: + """ + Test the model on an input batch of 64 images + + Args: + image_tensor (torch.tensor): Batch of Images for the model + + >>> test_model(torch.randn(64, 1, 32, 32)) + True + + """ + try: + model = LeNet() + output = model(image_tensor) + except RuntimeError: + return False + + return output.shape == torch.zeros([64, 10]).shape + + +if __name__ == "__main__": + random_image_1 = torch.randn(64, 1, 32, 32) + random_image_2 = torch.randn(1, 32, 32) + + print(f"random_image_1 Model Passed: {test_model(random_image_1)}") + print(f"\nrandom_image_2 Model Passed: {test_model(random_image_2)}") diff --git a/requirements.txt b/requirements.txt index acfbc823e77f..e159fe010dc4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,6 +17,7 @@ statsmodels sympy tensorflow texttable +torch tweepy xgboost yulewalker From b916450ba126349d23a90eee659d1994c66e4242 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 8 Apr 2023 02:52:26 +0200 Subject: [PATCH 
37/85] Revert "Add LeNet Implementation in PyTorch (#7070)" (#8621) This reverts commit b2b8585e63664a0c7aa18b95528e345c2738c4ae. --- computer_vision/lenet_pytorch.py | 82 -------------------------------- requirements.txt | 1 - 2 files changed, 83 deletions(-) delete mode 100644 computer_vision/lenet_pytorch.py diff --git a/computer_vision/lenet_pytorch.py b/computer_vision/lenet_pytorch.py deleted file mode 100644 index 177a5ebfcdb4..000000000000 --- a/computer_vision/lenet_pytorch.py +++ /dev/null @@ -1,82 +0,0 @@ -""" -LeNet Network - -Paper: http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf -""" - -import numpy -import torch -import torch.nn as nn - - -class LeNet(nn.Module): - def __init__(self) -> None: - super().__init__() - - self.tanh = nn.Tanh() - self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2) - - self.conv1 = nn.Conv2d( - in_channels=1, - out_channels=6, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - self.conv2 = nn.Conv2d( - in_channels=6, - out_channels=16, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - self.conv3 = nn.Conv2d( - in_channels=16, - out_channels=120, - kernel_size=(5, 5), - stride=(1, 1), - padding=(0, 0), - ) - - self.linear1 = nn.Linear(120, 84) - self.linear2 = nn.Linear(84, 10) - - def forward(self, image_array: numpy.ndarray) -> numpy.ndarray: - image_array = self.tanh(self.conv1(image_array)) - image_array = self.avgpool(image_array) - image_array = self.tanh(self.conv2(image_array)) - image_array = self.avgpool(image_array) - image_array = self.tanh(self.conv3(image_array)) - - image_array = image_array.reshape(image_array.shape[0], -1) - image_array = self.tanh(self.linear1(image_array)) - image_array = self.linear2(image_array) - return image_array - - -def test_model(image_tensor: torch.tensor) -> bool: - """ - Test the model on an input batch of 64 images - - Args: - image_tensor (torch.tensor): Batch of Images for the model - - >>> test_model(torch.randn(64, 1, 32, 32)) - True - 
- """ - try: - model = LeNet() - output = model(image_tensor) - except RuntimeError: - return False - - return output.shape == torch.zeros([64, 10]).shape - - -if __name__ == "__main__": - random_image_1 = torch.randn(64, 1, 32, 32) - random_image_2 = torch.randn(1, 32, 32) - - print(f"random_image_1 Model Passed: {test_model(random_image_1)}") - print(f"\nrandom_image_2 Model Passed: {test_model(random_image_2)}") diff --git a/requirements.txt b/requirements.txt index e159fe010dc4..acfbc823e77f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -17,7 +17,6 @@ statsmodels sympy tensorflow texttable -torch tweepy xgboost yulewalker From 9609b48d590d8de70284d872f8e78cf25a5ad6d0 Mon Sep 17 00:00:00 2001 From: amirsoroush <114881632+amirsoroush@users.noreply.github.com> Date: Sat, 8 Apr 2023 14:41:08 +0300 Subject: [PATCH 38/85] Queue implementation using two Stacks (#8617) * Queue implementation using two Stacks * fix typo in queue/queue_on_two_stacks.py * add 'iterable' to queue_on_two_stacks initializer * make queue_on_two_stacks.py generic class * fix ruff-UP007 in queue_on_two_stacks.py * enhance readability in queue_on_two_stacks.py * Create queue_by_two_stacks.py --------- Co-authored-by: Christian Clauss --- data_structures/queue/queue_by_two_stacks.py | 115 ++++++++++++++++ data_structures/queue/queue_on_two_stacks.py | 137 +++++++++++++++++++ 2 files changed, 252 insertions(+) create mode 100644 data_structures/queue/queue_by_two_stacks.py create mode 100644 data_structures/queue/queue_on_two_stacks.py diff --git a/data_structures/queue/queue_by_two_stacks.py b/data_structures/queue/queue_by_two_stacks.py new file mode 100644 index 000000000000..cd62f155a63b --- /dev/null +++ b/data_structures/queue/queue_by_two_stacks.py @@ -0,0 +1,115 @@ +"""Queue implementation using two stacks""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByTwoStacks(Generic[_T]): + def __init__(self, iterable: 
Iterable[_T] | None = None) -> None: + """ + >>> QueueByTwoStacks() + Queue(()) + >>> QueueByTwoStacks([10, 20, 30]) + Queue((10, 20, 30)) + >>> QueueByTwoStacks((i**2 for i in range(1, 4))) + Queue((1, 4, 9)) + """ + self._stack1: list[_T] = list(iterable or []) + self._stack2: list[_T] = [] + + def __len__(self) -> int: + """ + >>> len(QueueByTwoStacks()) + 0 + >>> from string import ascii_lowercase + >>> len(QueueByTwoStacks(ascii_lowercase)) + 26 + >>> queue = QueueByTwoStacks() + >>> for i in range(1, 11): + ... queue.put(i) + ... + >>> len(queue) + 10 + >>> for i in range(2): + ... queue.get() + 1 + 2 + >>> len(queue) + 8 + """ + + return len(self._stack1) + len(self._stack2) + + def __repr__(self) -> str: + """ + >>> queue = QueueByTwoStacks() + >>> queue + Queue(()) + >>> str(queue) + 'Queue(())' + >>> queue.put(10) + >>> queue + Queue((10,)) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue((10, 20, 30)) + """ + return f"Queue({tuple(self._stack2[::-1] + self._stack1)})" + + def put(self, item: _T) -> None: + """ + Put `item` into the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) + 2 + >>> queue + Queue((10, 20)) + """ + + self._stack1.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByTwoStacks((10, 20, 30)) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) + 1 + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... + IndexError: Queue is empty + """ + + # To reduce number of attribute look-ups in `while` loop. 
+ stack1_pop = self._stack1.pop + stack2_append = self._stack2.append + + if not self._stack2: + while self._stack1: + stack2_append(stack1_pop()) + + if not self._stack2: + raise IndexError("Queue is empty") + return self._stack2.pop() + + +if __name__ == "__main__": + from doctest import testmod + + testmod() diff --git a/data_structures/queue/queue_on_two_stacks.py b/data_structures/queue/queue_on_two_stacks.py new file mode 100644 index 000000000000..61db2b512136 --- /dev/null +++ b/data_structures/queue/queue_on_two_stacks.py @@ -0,0 +1,137 @@ +"""Queue implementation using two stacks""" + +from collections.abc import Iterable +from typing import Generic, TypeVar + +_T = TypeVar("_T") + + +class QueueByTwoStacks(Generic[_T]): + def __init__(self, iterable: Iterable[_T] | None = None) -> None: + """ + >>> queue1 = QueueByTwoStacks() + >>> str(queue1) + 'Queue([])' + >>> queue2 = QueueByTwoStacks([10, 20, 30]) + >>> str(queue2) + 'Queue([10, 20, 30])' + >>> queue3 = QueueByTwoStacks((i**2 for i in range(1, 4))) + >>> str(queue3) + 'Queue([1, 4, 9])' + """ + + self._stack1: list[_T] = [] if iterable is None else list(iterable) + self._stack2: list[_T] = [] + + def __len__(self) -> int: + """ + >>> queue = QueueByTwoStacks() + >>> for i in range(1, 11): + ... queue.put(i) + ... + >>> len(queue) == 10 + True + >>> for i in range(2): + ... 
queue.get() + 1 + 2 + >>> len(queue) == 8 + True + """ + + return len(self._stack1) + len(self._stack2) + + def __repr__(self) -> str: + """ + >>> queue = QueueByTwoStacks() + >>> queue + Queue([]) + >>> str(queue) + 'Queue([])' + >>> queue.put(10) + >>> queue + Queue([10]) + >>> queue.put(20) + >>> queue.put(30) + >>> queue + Queue([10, 20, 30]) + """ + + items = self._stack2[::-1] + self._stack1 + return f"Queue({items})" + + def put(self, item: _T) -> None: + """ + Put `item` into the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.put(10) + >>> queue.put(20) + >>> len(queue) == 2 + True + >>> str(queue) + 'Queue([10, 20])' + """ + + self._stack1.append(item) + + def get(self) -> _T: + """ + Get `item` from the Queue + + >>> queue = QueueByTwoStacks() + >>> for i in (10, 20, 30): + ... queue.put(i) + >>> queue.get() + 10 + >>> queue.put(40) + >>> queue.get() + 20 + >>> queue.get() + 30 + >>> len(queue) == 1 + True + >>> queue.get() + 40 + >>> queue.get() + Traceback (most recent call last): + ... + IndexError: Queue is empty + """ + + # To reduce number of attribute look-ups in `while` loop. 
+ stack1_pop = self._stack1.pop + stack2_append = self._stack2.append + + if not self._stack2: + while self._stack1: + stack2_append(stack1_pop()) + + if not self._stack2: + raise IndexError("Queue is empty") + return self._stack2.pop() + + def size(self) -> int: + """ + Returns the length of the Queue + + >>> queue = QueueByTwoStacks() + >>> queue.size() + 0 + >>> queue.put(10) + >>> queue.put(20) + >>> queue.size() + 2 + >>> queue.get() + 10 + >>> queue.size() == 1 + True + """ + + return len(self) + + +if __name__ == "__main__": + from doctest import testmod + + testmod() From 724ab7b19279cd6b8c8d601f75cee0c05935e972 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 8 Apr 2023 14:16:19 +0200 Subject: [PATCH 39/85] Delete queue_on_two_stacks.py which duplicates queue_by_two_stacks.py (#8624) * Delete queue_on_two_stacks.py which duplicates queue_by_two_stacks.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/queue/queue_on_two_stacks.py | 137 ------------------- 2 files changed, 1 insertion(+), 137 deletions(-) delete mode 100644 data_structures/queue/queue_on_two_stacks.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 3764c471ce70..e3e0748ecf75 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -232,6 +232,7 @@ * [Double Ended Queue](data_structures/queue/double_ended_queue.py) * [Linked Queue](data_structures/queue/linked_queue.py) * [Priority Queue Using List](data_structures/queue/priority_queue_using_list.py) + * [Queue By Two Stacks](data_structures/queue/queue_by_two_stacks.py) * [Queue On List](data_structures/queue/queue_on_list.py) * [Queue On Pseudo Stack](data_structures/queue/queue_on_pseudo_stack.py) * Stacks diff --git a/data_structures/queue/queue_on_two_stacks.py b/data_structures/queue/queue_on_two_stacks.py deleted file mode 100644 index 61db2b512136..000000000000 --- a/data_structures/queue/queue_on_two_stacks.py +++ /dev/null @@ -1,137 
+0,0 @@ -"""Queue implementation using two stacks""" - -from collections.abc import Iterable -from typing import Generic, TypeVar - -_T = TypeVar("_T") - - -class QueueByTwoStacks(Generic[_T]): - def __init__(self, iterable: Iterable[_T] | None = None) -> None: - """ - >>> queue1 = QueueByTwoStacks() - >>> str(queue1) - 'Queue([])' - >>> queue2 = QueueByTwoStacks([10, 20, 30]) - >>> str(queue2) - 'Queue([10, 20, 30])' - >>> queue3 = QueueByTwoStacks((i**2 for i in range(1, 4))) - >>> str(queue3) - 'Queue([1, 4, 9])' - """ - - self._stack1: list[_T] = [] if iterable is None else list(iterable) - self._stack2: list[_T] = [] - - def __len__(self) -> int: - """ - >>> queue = QueueByTwoStacks() - >>> for i in range(1, 11): - ... queue.put(i) - ... - >>> len(queue) == 10 - True - >>> for i in range(2): - ... queue.get() - 1 - 2 - >>> len(queue) == 8 - True - """ - - return len(self._stack1) + len(self._stack2) - - def __repr__(self) -> str: - """ - >>> queue = QueueByTwoStacks() - >>> queue - Queue([]) - >>> str(queue) - 'Queue([])' - >>> queue.put(10) - >>> queue - Queue([10]) - >>> queue.put(20) - >>> queue.put(30) - >>> queue - Queue([10, 20, 30]) - """ - - items = self._stack2[::-1] + self._stack1 - return f"Queue({items})" - - def put(self, item: _T) -> None: - """ - Put `item` into the Queue - - >>> queue = QueueByTwoStacks() - >>> queue.put(10) - >>> queue.put(20) - >>> len(queue) == 2 - True - >>> str(queue) - 'Queue([10, 20])' - """ - - self._stack1.append(item) - - def get(self) -> _T: - """ - Get `item` from the Queue - - >>> queue = QueueByTwoStacks() - >>> for i in (10, 20, 30): - ... queue.put(i) - >>> queue.get() - 10 - >>> queue.put(40) - >>> queue.get() - 20 - >>> queue.get() - 30 - >>> len(queue) == 1 - True - >>> queue.get() - 40 - >>> queue.get() - Traceback (most recent call last): - ... - IndexError: Queue is empty - """ - - # To reduce number of attribute look-ups in `while` loop. 
- stack1_pop = self._stack1.pop - stack2_append = self._stack2.append - - if not self._stack2: - while self._stack1: - stack2_append(stack1_pop()) - - if not self._stack2: - raise IndexError("Queue is empty") - return self._stack2.pop() - - def size(self) -> int: - """ - Returns the length of the Queue - - >>> queue = QueueByTwoStacks() - >>> queue.size() - 0 - >>> queue.put(10) - >>> queue.put(20) - >>> queue.size() - 2 - >>> queue.get() - 10 - >>> queue.size() == 1 - True - """ - - return len(self) - - -if __name__ == "__main__": - from doctest import testmod - - testmod() From ceddbd1e8aeddd58ce7b07f75bb3445272f15698 Mon Sep 17 00:00:00 2001 From: isidroas Date: Sat, 8 Apr 2023 19:39:24 +0200 Subject: [PATCH 40/85] Bloom Filter (#8615) * Bloom filter with tests * has functions constant * fix type * isort * passing ruff * type hints * type hints * from fail to erro * captital leter * type hints requested by boot * descriptive name for m * more descriptibe arguments II * moved movies_test to doctest * commented doctest * removed test_probability * estimated error * added types * again hash_ * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * from b to bloom * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * Update data_structures/hashing/bloom_filter.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * syntax error in dict comprehension * from goodfather to godfather * removed Interestellar * forgot the last Godfather * Revert "removed Interestellar" This reverts commit 35fa5f5c4bf101d073aad43c37b0a423d8975071. 
* pretty dict * Apply suggestions from code review * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update bloom_filter.py --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/hashing/bloom_filter.py | 105 ++++++++++++++++++++++++ 1 file changed, 105 insertions(+) create mode 100644 data_structures/hashing/bloom_filter.py diff --git a/data_structures/hashing/bloom_filter.py b/data_structures/hashing/bloom_filter.py new file mode 100644 index 000000000000..7fd0985bdc33 --- /dev/null +++ b/data_structures/hashing/bloom_filter.py @@ -0,0 +1,105 @@ +""" +See https://en.wikipedia.org/wiki/Bloom_filter + +The use of this data structure is to test membership in a set. +Compared to Python's built-in set() it is more space-efficient. +In the following example, only 8 bits of memory will be used: +>>> bloom = Bloom(size=8) + +Initially, the filter contains all zeros: +>>> bloom.bitstring +'00000000' + +When an element is added, two bits are set to 1 +since there are 2 hash functions in this implementation: +>>> "Titanic" in bloom +False +>>> bloom.add("Titanic") +>>> bloom.bitstring +'01100000' +>>> "Titanic" in bloom +True + +However, sometimes only one bit is added +because both hash functions return the same value +>>> bloom.add("Avatar") +>>> "Avatar" in bloom +True +>>> bloom.format_hash("Avatar") +'00000100' +>>> bloom.bitstring +'01100100' + +Not added elements should return False ... +>>> not_present_films = ("The Godfather", "Interstellar", "Parasite", "Pulp Fiction") +>>> { +... film: bloom.format_hash(film) for film in not_present_films +... 
} # doctest: +NORMALIZE_WHITESPACE +{'The Godfather': '00000101', + 'Interstellar': '00000011', + 'Parasite': '00010010', + 'Pulp Fiction': '10000100'} +>>> any(film in bloom for film in not_present_films) +False + +but sometimes there are false positives: +>>> "Ratatouille" in bloom +True +>>> bloom.format_hash("Ratatouille") +'01100000' + +The probability increases with the number of elements added. +The probability decreases with the number of bits in the bitarray. +>>> bloom.estimated_error_rate +0.140625 +>>> bloom.add("The Godfather") +>>> bloom.estimated_error_rate +0.25 +>>> bloom.bitstring +'01100101' +""" +from hashlib import md5, sha256 + +HASH_FUNCTIONS = (sha256, md5) + + +class Bloom: + def __init__(self, size: int = 8) -> None: + self.bitarray = 0b0 + self.size = size + + def add(self, value: str) -> None: + h = self.hash_(value) + self.bitarray |= h + + def exists(self, value: str) -> bool: + h = self.hash_(value) + return (h & self.bitarray) == h + + def __contains__(self, other: str) -> bool: + return self.exists(other) + + def format_bin(self, bitarray: int) -> str: + res = bin(bitarray)[2:] + return res.zfill(self.size) + + @property + def bitstring(self) -> str: + return self.format_bin(self.bitarray) + + def hash_(self, value: str) -> int: + res = 0b0 + for func in HASH_FUNCTIONS: + position = ( + int.from_bytes(func(value.encode()).digest(), "little") % self.size + ) + res |= 2**position + return res + + def format_hash(self, value: str) -> str: + return self.format_bin(self.hash_(value)) + + @property + def estimated_error_rate(self) -> float: + n_ones = bin(self.bitarray).count("1") + return (n_ones / self.size) ** len(HASH_FUNCTIONS) From 7ac4a83d2fc86bb9de9d35937561904558923f73 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 10 Apr 2023 23:43:17 +0200 Subject: [PATCH 41/85] [pre-commit.ci] pre-commit autoupdate (#8634) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.260 → v0.0.261](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.260...v0.0.261) - [github.com/pre-commit/mirrors-mypy: v1.1.1 → v1.2.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.1.1...v1.2.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d54ce5adddce..55345a574ce9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.260 + rev: v0.0.261 hooks: - id: ruff @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.1.1 + rev: v1.2.0 hooks: - id: mypy args: diff --git a/DIRECTORY.md b/DIRECTORY.md index e3e0748ecf75..36f5a752c48b 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -195,6 +195,7 @@ * [Alternate Disjoint Set](data_structures/disjoint_set/alternate_disjoint_set.py) * [Disjoint Set](data_structures/disjoint_set/disjoint_set.py) * Hashing + * [Bloom Filter](data_structures/hashing/bloom_filter.py) * [Double Hash](data_structures/hashing/double_hash.py) * [Hash Map](data_structures/hashing/hash_map.py) * [Hash Table](data_structures/hashing/hash_table.py) From 37a9db136af60c9a60f443a203f8fbef184ab348 Mon Sep 17 00:00:00 2001 From: Diego Gasco <62801631+Diegomangasco@users.noreply.github.com> Date: Mon, 17 Apr 2023 00:34:22 +0200 Subject: [PATCH 42/85] Dimensionality reduction (#8590) --- machine_learning/dimensionality_reduction.py | 198 +++++++++++++++++++ 1 file 
changed, 198 insertions(+) create mode 100644 machine_learning/dimensionality_reduction.py diff --git a/machine_learning/dimensionality_reduction.py b/machine_learning/dimensionality_reduction.py new file mode 100644 index 000000000000..d2046f81af04 --- /dev/null +++ b/machine_learning/dimensionality_reduction.py @@ -0,0 +1,198 @@ +# Copyright (c) 2023 Diego Gasco (diego.gasco99@gmail.com), Diegomangasco on GitHub + +""" +Requirements: + - numpy version 1.21 + - scipy version 1.3.3 +Notes: + - Each column of the features matrix corresponds to a class item +""" + +import logging + +import numpy as np +import pytest +from scipy.linalg import eigh + +logging.basicConfig(level=logging.INFO, format="%(message)s") + + +def column_reshape(input_array: np.ndarray) -> np.ndarray: + """Function to reshape a row Numpy array into a column Numpy array + >>> input_array = np.array([1, 2, 3]) + >>> column_reshape(input_array) + array([[1], + [2], + [3]]) + """ + + return input_array.reshape((input_array.size, 1)) + + +def covariance_within_classes( + features: np.ndarray, labels: np.ndarray, classes: int +) -> np.ndarray: + """Function to compute the covariance matrix inside each class. + >>> features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + >>> labels = np.array([0, 1, 0]) + >>> covariance_within_classes(features, labels, 2) + array([[0.66666667, 0.66666667, 0.66666667], + [0.66666667, 0.66666667, 0.66666667], + [0.66666667, 0.66666667, 0.66666667]]) + """ + + covariance_sum = np.nan + for i in range(classes): + data = features[:, labels == i] + data_mean = data.mean(1) + # Centralize the data of class i + centered_data = data - column_reshape(data_mean) + if i > 0: + # If covariance_sum is not None + covariance_sum += np.dot(centered_data, centered_data.T) + else: + # If covariance_sum is np.nan (i.e. 
first loop) + covariance_sum = np.dot(centered_data, centered_data.T) + + return covariance_sum / features.shape[1] + + +def covariance_between_classes( + features: np.ndarray, labels: np.ndarray, classes: int +) -> np.ndarray: + """Function to compute the covariance matrix between multiple classes + >>> features = np.array([[9, 2, 3], [4, 3, 6], [1, 8, 9]]) + >>> labels = np.array([0, 1, 0]) + >>> covariance_between_classes(features, labels, 2) + array([[ 3.55555556, 1.77777778, -2.66666667], + [ 1.77777778, 0.88888889, -1.33333333], + [-2.66666667, -1.33333333, 2. ]]) + """ + + general_data_mean = features.mean(1) + covariance_sum = np.nan + for i in range(classes): + data = features[:, labels == i] + device_data = data.shape[1] + data_mean = data.mean(1) + if i > 0: + # If covariance_sum is not None + covariance_sum += device_data * np.dot( + column_reshape(data_mean) - column_reshape(general_data_mean), + (column_reshape(data_mean) - column_reshape(general_data_mean)).T, + ) + else: + # If covariance_sum is np.nan (i.e. first loop) + covariance_sum = device_data * np.dot( + column_reshape(data_mean) - column_reshape(general_data_mean), + (column_reshape(data_mean) - column_reshape(general_data_mean)).T, + ) + + return covariance_sum / features.shape[1] + + +def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray: + """ + Principal Component Analysis. + + For more details, see: https://en.wikipedia.org/wiki/Principal_component_analysis. 
+ Parameters: + * features: the features extracted from the dataset + * dimensions: to filter the projected data for the desired dimension + + >>> test_principal_component_analysis() + """ + + # Check if the features have been loaded + if features.any(): + data_mean = features.mean(1) + # Center the dataset + centered_data = features - np.reshape(data_mean, (data_mean.size, 1)) + covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1] + _, eigenvectors = np.linalg.eigh(covariance_matrix) + # Take all the columns in the reverse order (-1), and then takes only the first + filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions] + # Project the database on the new space + projected_data = np.dot(filtered_eigenvectors.T, features) + logging.info("Principal Component Analysis computed") + + return projected_data + else: + logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True) + logging.error("Dataset empty") + raise AssertionError + + +def linear_discriminant_analysis( + features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int +) -> np.ndarray: + """ + Linear Discriminant Analysis. + + For more details, see: https://en.wikipedia.org/wiki/Linear_discriminant_analysis. 
+ Parameters: + * features: the features extracted from the dataset + * labels: the class labels of the features + * classes: the number of classes present in the dataset + * dimensions: to filter the projected data for the desired dimension + + >>> test_linear_discriminant_analysis() + """ + + # Check if the dimension desired is less than the number of classes + assert classes > dimensions + + # Check if features have been already loaded + if features.any: + _, eigenvectors = eigh( + covariance_between_classes(features, labels, classes), + covariance_within_classes(features, labels, classes), + ) + filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions] + svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors) + filtered_svd_matrix = svd_matrix[:, 0:dimensions] + projected_data = np.dot(filtered_svd_matrix.T, features) + logging.info("Linear Discriminant Analysis computed") + + return projected_data + else: + logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True) + logging.error("Dataset empty") + raise AssertionError + + +def test_linear_discriminant_analysis() -> None: + # Create dummy dataset with 2 classes and 3 features + features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]) + labels = np.array([0, 0, 0, 1, 1]) + classes = 2 + dimensions = 2 + + # Assert that the function raises an AssertionError if dimensions > classes + with pytest.raises(AssertionError) as error_info: + projected_data = linear_discriminant_analysis( + features, labels, classes, dimensions + ) + if isinstance(projected_data, np.ndarray): + raise AssertionError( + "Did not raise AssertionError for dimensions > classes" + ) + assert error_info.type is AssertionError + + +def test_principal_component_analysis() -> None: + features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + dimensions = 2 + expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]]) + + with pytest.raises(AssertionError) as error_info: + output = 
principal_component_analysis(features, dimensions) + if not np.allclose(expected_output, output): + raise AssertionError + assert error_info.type is AssertionError + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From bcc57462ecaa5ea6102d9ad023d7281527694e3a Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 03:47:48 +0530 Subject: [PATCH 43/85] Create real_and_reactive_power.py (#8665) --- electronics/real_and_reactive_power.py | 49 ++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100644 electronics/real_and_reactive_power.py diff --git a/electronics/real_and_reactive_power.py b/electronics/real_and_reactive_power.py new file mode 100644 index 000000000000..81dcba800e82 --- /dev/null +++ b/electronics/real_and_reactive_power.py @@ -0,0 +1,49 @@ +import math + + +def real_power(apparent_power: float, power_factor: float) -> float: + """ + Calculate real power from apparent power and power factor. + + Examples: + >>> real_power(100, 0.9) + 90.0 + >>> real_power(0, 0.8) + 0.0 + >>> real_power(100, -0.9) + -90.0 + """ + if ( + not isinstance(power_factor, (int, float)) + or power_factor < -1 + or power_factor > 1 + ): + raise ValueError("power_factor must be a valid float value between -1 and 1.") + return apparent_power * power_factor + + +def reactive_power(apparent_power: float, power_factor: float) -> float: + """ + Calculate reactive power from apparent power and power factor. 
+ + Examples: + >>> reactive_power(100, 0.9) + 43.58898943540673 + >>> reactive_power(0, 0.8) + 0.0 + >>> reactive_power(100, -0.9) + 43.58898943540673 + """ + if ( + not isinstance(power_factor, (int, float)) + or power_factor < -1 + or power_factor > 1 + ): + raise ValueError("power_factor must be a valid float value between -1 and 1.") + return apparent_power * math.sqrt(1 - power_factor**2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 772f26cd77335d1cd2ebecfd14108c9a3dd71fe2 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:00:01 +0530 Subject: [PATCH 44/85] Create apparent_power.py (#8664) * Create apparent_power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update apparent_power.py * Update electronics/apparent_power.py Co-authored-by: Christian Clauss * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apparent_power.py * Update apparent_power.py * Update apparent_power.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- electronics/apparent_power.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 electronics/apparent_power.py diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py new file mode 100644 index 000000000000..a6f1a50822f7 --- /dev/null +++ b/electronics/apparent_power.py @@ -0,0 +1,35 @@ +import cmath +import math + + +def apparent_power( + voltage: float, current: float, 
voltage_angle: float, current_angle: float +) -> complex: + """ + Calculate the apparent power in a single-phase AC circuit. + + >>> apparent_power(100, 5, 0, 0) + (500+0j) + >>> apparent_power(100, 5, 90, 0) + (3.061616997868383e-14+500j) + >>> apparent_power(100, 5, -45, -60) + (-129.40952255126027-482.9629131445341j) + >>> apparent_power(200, 10, -30, -90) + (-999.9999999999998-1732.0508075688776j) + """ + # Convert angles from degrees to radians + voltage_angle_rad = math.radians(voltage_angle) + current_angle_rad = math.radians(current_angle) + + # Convert voltage and current to rectangular form + voltage_rect = cmath.rect(voltage, voltage_angle_rad) + current_rect = cmath.rect(current, current_angle_rad) + + # Calculate apparent power + return voltage_rect * current_rect + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From c35db4d1d621e250477308cf49b982450d653644 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:14:06 +0530 Subject: [PATCH 45/85] added reference link. (#8667) * added reference link. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- electronics/apparent_power.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/electronics/apparent_power.py b/electronics/apparent_power.py index a6f1a50822f7..0ce1c2aa95b9 100644 --- a/electronics/apparent_power.py +++ b/electronics/apparent_power.py @@ -8,6 +8,8 @@ def apparent_power( """ Calculate the apparent power in a single-phase AC circuit. 
+ Reference: https://en.wikipedia.org/wiki/AC_power#Apparent_power + >>> apparent_power(100, 5, 0, 0) (500+0j) >>> apparent_power(100, 5, 90, 0) From cbcaf0650e1ddb69a90bf99498af548c6b9105b6 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Tue, 18 Apr 2023 13:32:20 +0530 Subject: [PATCH 46/85] added a problem with solution on sliding window. (#8566) * added a problem with solution on sliding window. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * added hint for return type and parameter * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * removed un-necessary docs and added 2 test cases * Rename sliding_window/minimum_size_subarray_sum.py to dynamic_programming/minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * 
Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update dynamic_programming/minimum_size_subarray_sum.py Co-authored-by: Christian Clauss * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update minimum_size_subarray_sum.py * Update minimum_size_subarray_sum.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../minimum_size_subarray_sum.py | 62 +++++++++++++++++++ 1 file changed, 62 insertions(+) create mode 100644 dynamic_programming/minimum_size_subarray_sum.py diff --git a/dynamic_programming/minimum_size_subarray_sum.py b/dynamic_programming/minimum_size_subarray_sum.py new file mode 100644 index 000000000000..3868d73535fb --- /dev/null +++ b/dynamic_programming/minimum_size_subarray_sum.py @@ -0,0 +1,62 @@ +import sys + + +def minimum_subarray_sum(target: int, numbers: list[int]) -> int: + """ + Return the length of the shortest contiguous subarray in a list of numbers whose sum + is at least target. 
Reference: https://stackoverflow.com/questions/8269916 + + >>> minimum_subarray_sum(7, [2, 3, 1, 2, 4, 3]) + 2 + >>> minimum_subarray_sum(7, [2, 3, -1, 2, 4, -3]) + 4 + >>> minimum_subarray_sum(11, [1, 1, 1, 1, 1, 1, 1, 1]) + 0 + >>> minimum_subarray_sum(10, [1, 2, 3, 4, 5, 6, 7]) + 2 + >>> minimum_subarray_sum(5, [1, 1, 1, 1, 1, 5]) + 1 + >>> minimum_subarray_sum(0, []) + 0 + >>> minimum_subarray_sum(0, [1, 2, 3]) + 1 + >>> minimum_subarray_sum(10, [10, 20, 30]) + 1 + >>> minimum_subarray_sum(7, [1, 1, 1, 1, 1, 1, 10]) + 1 + >>> minimum_subarray_sum(6, []) + 0 + >>> minimum_subarray_sum(2, [1, 2, 3]) + 1 + >>> minimum_subarray_sum(-6, []) + 0 + >>> minimum_subarray_sum(-6, [3, 4, 5]) + 1 + >>> minimum_subarray_sum(8, None) + 0 + >>> minimum_subarray_sum(2, "ABC") + Traceback (most recent call last): + ... + ValueError: numbers must be an iterable of integers + """ + if not numbers: + return 0 + if target == 0 and target in numbers: + return 0 + if not isinstance(numbers, (list, tuple)) or not all( + isinstance(number, int) for number in numbers + ): + raise ValueError("numbers must be an iterable of integers") + + left = right = curr_sum = 0 + min_len = sys.maxsize + + while right < len(numbers): + curr_sum += numbers[right] + while curr_sum >= target and left <= right: + min_len = min(min_len, right - left + 1) + curr_sum -= numbers[left] + left += 1 + right += 1 + + return 0 if min_len == sys.maxsize else min_len From 27dd4f3ad99f3c113e291a531f193ba46d4a66fd Mon Sep 17 00:00:00 2001 From: JulianStiebler <68881884+JulianStiebler@users.noreply.github.com> Date: Tue, 18 Apr 2023 11:57:48 +0200 Subject: [PATCH 47/85] Create maths/pi_generator.py (#8666) * Create pi_generator.py * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * 
[pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pi_generator.py * Update pi_generator.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated commentary on line 28, added math.pi comparison & math.isclose() test * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Removed # noqa: E501 * printf() added as recommended by cclaus --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- maths/pi_generator.py | 94 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 maths/pi_generator.py diff --git a/maths/pi_generator.py b/maths/pi_generator.py new file mode 100644 index 000000000000..dcd218aae309 --- /dev/null +++ b/maths/pi_generator.py @@ -0,0 +1,94 @@ +def calculate_pi(limit: int) -> str: + """ + https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80 + Leibniz Formula for Pi + + The Leibniz formula is the special case arctan 1 = 1/4 Pi . + Leibniz's formula converges extremely slowly: it exhibits sublinear convergence. + + Convergence (https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Convergence) + + We cannot try to prove against an interrupted, uncompleted generation. + https://en.wikipedia.org/wiki/Leibniz_formula_for_%CF%80#Unusual_behaviour + The errors can in fact be predicted; + but those calculations also approach infinity for accuracy. + + Our output will always be a string since we can defintely store all digits in there. + For simplicity' sake, let's just compare against known values and since our outpit + is a string, we need to convert to float. 
+ + >>> import math + >>> float(calculate_pi(15)) == math.pi + True + + Since we cannot predict errors or interrupt any infinite alternating + series generation since they approach infinity, + or interrupt any alternating series, we are going to need math.isclose() + + >>> math.isclose(float(calculate_pi(50)), math.pi) + True + + >>> math.isclose(float(calculate_pi(100)), math.pi) + True + + Since math.pi-constant contains only 16 digits, here some test with preknown values: + + >>> calculate_pi(50) + '3.14159265358979323846264338327950288419716939937510' + >>> calculate_pi(80) + '3.14159265358979323846264338327950288419716939937510582097494459230781640628620899' + + To apply the Leibniz formula for calculating pi, + the variables q, r, t, k, n, and l are used for the iteration process. + """ + q = 1 + r = 0 + t = 1 + k = 1 + n = 3 + l = 3 + decimal = limit + counter = 0 + + result = "" + + """ + We will avoid using yield since we otherwise get a Generator-Object, + which we can't just compare against anything. We would have to make a list out of it + after the generation, so we will just stick to plain return logic: + """ + while counter != decimal + 1: + if 4 * q + r - t < n * t: + result += str(n) + if counter == 0: + result += "." 
+ + if decimal == counter: + break + + counter += 1 + nr = 10 * (r - n * t) + n = ((10 * (3 * q + r)) // t) - 10 * n + q *= 10 + r = nr + else: + nr = (2 * q + r) * l + nn = (q * (7 * k) + 2 + (r * l)) // (t * l) + q *= k + t *= l + l += 2 + k += 1 + n = nn + r = nr + return result + + +def main() -> None: + print(f"{calculate_pi(50) = }") + import doctest + + doctest.testmod() + + +if __name__ == "__main__": + main() From 5d8142d9118740b631b40c30e55060622913f2e5 Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Mon, 24 Apr 2023 10:58:30 +0530 Subject: [PATCH 48/85] Update linear_discriminant_analysis.py and rsa_cipher.py (#8680) * Update rsa_cipher.py by replacing %s with {} * Update rsa_cipher.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update linear_discriminant_analysis.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update linear_discriminant_analysis.py * Update machine_learning/linear_discriminant_analysis.py Co-authored-by: Christian Clauss * Update linear_discriminant_analysis.py * updated --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- ciphers/rsa_cipher.py | 14 ++++++++------ machine_learning/linear_discriminant_analysis.py | 2 +- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/ciphers/rsa_cipher.py b/ciphers/rsa_cipher.py index de26992f5eeb..9c41cdc5d472 100644 --- a/ciphers/rsa_cipher.py +++ b/ciphers/rsa_cipher.py @@ 
-76,10 +76,11 @@ def encrypt_and_write_to_file( key_size, n, e = read_key_file(key_filename) if key_size < block_size * 8: sys.exit( - "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " + "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " - "Either decrease the block size or use different keys." - % (block_size * 8, key_size) + "Either decrease the block size or use different keys.".format( + block_size * 8, key_size + ) ) encrypted_blocks = [str(i) for i in encrypt_message(message, (n, e), block_size)] @@ -101,10 +102,11 @@ def read_from_file_and_decrypt(message_filename: str, key_filename: str) -> str: if key_size < block_size * 8: sys.exit( - "ERROR: Block size is %s bits and key size is %s bits. The RSA cipher " + "ERROR: Block size is {} bits and key size is {} bits. The RSA cipher " "requires the block size to be equal to or greater than the key size. " - "Did you specify the correct key file and encrypted file?" 
- % (block_size * 8, key_size) + "Did you specify the correct key file and encrypted file?".format( + block_size * 8, key_size + ) ) encrypted_blocks = [] diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index f4fb5ba76b64..c0a477be10c7 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ b/machine_learning/linear_discriminant_analysis.py @@ -399,7 +399,7 @@ def main(): if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q": print("\n" + "GoodBye!".center(100, "-") + "\n") break - system("cls" if name == "nt" else "clear") + system("clear" if name == "posix" else "cls") # noqa: S605 if __name__ == "__main__": From 3021eda720359369cf558345cd8dd71909109f11 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Apr 2023 06:05:45 +0200 Subject: [PATCH 49/85] [pre-commit.ci] pre-commit autoupdate (#8691) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.261 → v0.0.262](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.261...v0.0.262) - [github.com/tox-dev/pyproject-fmt: 0.9.2 → 0.10.0](https://github.com/tox-dev/pyproject-fmt/compare/0.9.2...0.10.0) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 55345a574ce9..288473ca365f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.261 + rev: 
v0.0.262 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.9.2" + rev: "0.10.0" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 36f5a752c48b..8e67c85c6fa8 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -327,6 +327,7 @@ * [Minimum Coin Change](dynamic_programming/minimum_coin_change.py) * [Minimum Cost Path](dynamic_programming/minimum_cost_path.py) * [Minimum Partition](dynamic_programming/minimum_partition.py) + * [Minimum Size Subarray Sum](dynamic_programming/minimum_size_subarray_sum.py) * [Minimum Squares To Represent A Number](dynamic_programming/minimum_squares_to_represent_a_number.py) * [Minimum Steps To One](dynamic_programming/minimum_steps_to_one.py) * [Minimum Tickets Cost](dynamic_programming/minimum_tickets_cost.py) @@ -339,6 +340,7 @@ * [Word Break](dynamic_programming/word_break.py) ## Electronics + * [Apparent Power](electronics/apparent_power.py) * [Builtin Voltage](electronics/builtin_voltage.py) * [Carrier Concentration](electronics/carrier_concentration.py) * [Circular Convolution](electronics/circular_convolution.py) @@ -348,6 +350,7 @@ * [Electrical Impedance](electronics/electrical_impedance.py) * [Ind Reactance](electronics/ind_reactance.py) * [Ohms Law](electronics/ohms_law.py) + * [Real And Reactive Power](electronics/real_and_reactive_power.py) * [Resistor Equivalence](electronics/resistor_equivalence.py) * [Resonant Frequency](electronics/resonant_frequency.py) @@ -483,6 +486,7 @@ * [Astar](machine_learning/astar.py) * [Data Transformations](machine_learning/data_transformations.py) * [Decision Tree](machine_learning/decision_tree.py) + * [Dimensionality Reduction](machine_learning/dimensionality_reduction.py) * Forecasting * [Run](machine_learning/forecasting/run.py) * [Gradient Descent](machine_learning/gradient_descent.py) @@ -604,6 +608,7 @@ * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) * 
[Persistence](maths/persistence.py) + * [Pi Generator](maths/pi_generator.py) * [Pi Monte Carlo Estimation](maths/pi_monte_carlo_estimation.py) * [Points Are Collinear 3D](maths/points_are_collinear_3d.py) * [Pollard Rho](maths/pollard_rho.py) From 1809c2c22e57083bf08823533edfa9406896ad15 Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Tue, 25 Apr 2023 21:36:14 +0530 Subject: [PATCH 50/85] The tanh activation function is added (#8689) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- maths/tanh.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/tanh.py diff --git a/maths/tanh.py b/maths/tanh.py new file mode 100644 index 000000000000..ddab3e1ab717 --- /dev/null +++ b/maths/tanh.py @@ -0,0 +1,42 @@ +""" +This script demonstrates the implementation of the tangent hyperbolic +or tanh function. + +The function takes a vector of K real numbers as input and +then (e^x - e^(-x))/(e^x + e^(-x)). After through tanh, the +element of the vector mostly -1 between 1. + +Script inspired from its corresponding Wikipedia article +https://en.wikipedia.org/wiki/Activation_function +""" +import numpy as np + + +def tangent_hyperbolic(vector: np.array) -> np.array: + """ + Implements the tanh function + + Parameters: + vector: np.array + + Returns: + tanh (np.array): The input numpy array after applying tanh. 
+ + mathematically (e^x - e^(-x))/(e^x + e^(-x)) can be written as (2/(1+e^(-2x))-1 + + Examples: + >>> tangent_hyperbolic(np.array([1,5,6,-0.67])) + array([ 0.76159416, 0.9999092 , 0.99998771, -0.58497988]) + + >>> tangent_hyperbolic(np.array([8,10,2,-0.98,13])) + array([ 0.99999977, 1. , 0.96402758, -0.7530659 , 1. ]) + + """ + + return (2 / (1 + np.exp(-2 * vector))) - 1 + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0fe7cf19043887bad3eff36a601a6cfc4475b2fa Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 27 Apr 2023 19:32:07 +0200 Subject: [PATCH 51/85] Solving the `Top k most frequent words` problem using a max-heap (#8685) * Solving the `Top k most frequent words` problem using a max-heap * Mentioning Python standard library solution in `Top k most frequent words` docstring * ruff --fix . * updating DIRECTORY.md --------- Co-authored-by: Amos Paribocci Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 + data_structures/heap/heap.py | 31 ++++-- .../linear_discriminant_analysis.py | 2 +- strings/top_k_frequent_words.py | 101 ++++++++++++++++++ 4 files changed, 128 insertions(+), 7 deletions(-) create mode 100644 strings/top_k_frequent_words.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 8e67c85c6fa8..681d252b232d 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1167,6 +1167,7 @@ * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) * [Text Justification](strings/text_justification.py) + * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) * [Wave](strings/wave.py) * [Wildcard Pattern Matching](strings/wildcard_pattern_matching.py) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index b14c55d9db4c..c1004f349479 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -1,9 +1,28 @@ from __future__ import annotations +from abc import 
abstractmethod from collections.abc import Iterable +from typing import Generic, Protocol, TypeVar -class Heap: +class Comparable(Protocol): + @abstractmethod + def __lt__(self: T, other: T) -> bool: + pass + + @abstractmethod + def __gt__(self: T, other: T) -> bool: + pass + + @abstractmethod + def __eq__(self: T, other: object) -> bool: + pass + + +T = TypeVar("T", bound=Comparable) + + +class Heap(Generic[T]): """A Max Heap Implementation >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5] @@ -27,7 +46,7 @@ class Heap: """ def __init__(self) -> None: - self.h: list[float] = [] + self.h: list[T] = [] self.heap_size: int = 0 def __repr__(self) -> str: @@ -79,7 +98,7 @@ def max_heapify(self, index: int) -> None: # fix the subsequent violation recursively if any self.max_heapify(violation) - def build_max_heap(self, collection: Iterable[float]) -> None: + def build_max_heap(self, collection: Iterable[T]) -> None: """build max heap from an unsorted array""" self.h = list(collection) self.heap_size = len(self.h) @@ -88,7 +107,7 @@ def build_max_heap(self, collection: Iterable[float]) -> None: for i in range(self.heap_size // 2 - 1, -1, -1): self.max_heapify(i) - def extract_max(self) -> float: + def extract_max(self) -> T: """get and remove max from heap""" if self.heap_size >= 2: me = self.h[0] @@ -102,7 +121,7 @@ def extract_max(self) -> float: else: raise Exception("Empty heap") - def insert(self, value: float) -> None: + def insert(self, value: T) -> None: """insert a new value into the max heap""" self.h.append(value) idx = (self.heap_size - 1) // 2 @@ -144,7 +163,7 @@ def heap_sort(self) -> None: ]: print(f"unsorted array: {unsorted}") - heap = Heap() + heap: Heap[int] = Heap() heap.build_max_heap(unsorted) print(f"after build heap: {heap}") diff --git a/machine_learning/linear_discriminant_analysis.py b/machine_learning/linear_discriminant_analysis.py index c0a477be10c7..88c047157893 100644 --- a/machine_learning/linear_discriminant_analysis.py +++ 
b/machine_learning/linear_discriminant_analysis.py @@ -399,7 +399,7 @@ def main(): if input("Press any key to restart or 'q' for quit: ").strip().lower() == "q": print("\n" + "GoodBye!".center(100, "-") + "\n") break - system("clear" if name == "posix" else "cls") # noqa: S605 + system("cls" if name == "nt" else "clear") # noqa: S605 if __name__ == "__main__": diff --git a/strings/top_k_frequent_words.py b/strings/top_k_frequent_words.py new file mode 100644 index 000000000000..f3d1e0cd5ca7 --- /dev/null +++ b/strings/top_k_frequent_words.py @@ -0,0 +1,101 @@ +""" +Finds the top K most frequent words from the provided word list. + +This implementation aims to show how to solve the problem using the Heap class +already present in this repository. +Computing order statistics is, in fact, a typical usage of heaps. + +This is mostly shown for educational purposes, since the problem can be solved +in a few lines using collections.Counter from the Python standard library: + +from collections import Counter +def top_k_frequent_words(words, k_value): + return [x[0] for x in Counter(words).most_common(k_value)] +""" + + +from collections import Counter +from functools import total_ordering + +from data_structures.heap.heap import Heap + + +@total_ordering +class WordCount: + def __init__(self, word: str, count: int) -> None: + self.word = word + self.count = count + + def __eq__(self, other: object) -> bool: + """ + >>> WordCount('a', 1).__eq__(WordCount('b', 1)) + True + >>> WordCount('a', 1).__eq__(WordCount('a', 1)) + True + >>> WordCount('a', 1).__eq__(WordCount('a', 2)) + False + >>> WordCount('a', 1).__eq__(WordCount('b', 2)) + False + >>> WordCount('a', 1).__eq__(1) + NotImplemented + """ + if not isinstance(other, WordCount): + return NotImplemented + return self.count == other.count + + def __lt__(self, other: object) -> bool: + """ + >>> WordCount('a', 1).__lt__(WordCount('b', 1)) + False + >>> WordCount('a', 1).__lt__(WordCount('a', 1)) + False + >>> 
WordCount('a', 1).__lt__(WordCount('a', 2)) + True + >>> WordCount('a', 1).__lt__(WordCount('b', 2)) + True + >>> WordCount('a', 2).__lt__(WordCount('a', 1)) + False + >>> WordCount('a', 2).__lt__(WordCount('b', 1)) + False + >>> WordCount('a', 1).__lt__(1) + NotImplemented + """ + if not isinstance(other, WordCount): + return NotImplemented + return self.count < other.count + + +def top_k_frequent_words(words: list[str], k_value: int) -> list[str]: + """ + Returns the `k_value` most frequently occurring words, + in non-increasing order of occurrence. + In this context, a word is defined as an element in the provided list. + + In case `k_value` is greater than the number of distinct words, a value of k equal + to the number of distinct words will be considered, instead. + + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 3) + ['c', 'a', 'b'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 2) + ['c', 'a'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 1) + ['c'] + >>> top_k_frequent_words(['a', 'b', 'c', 'a', 'c', 'c'], 0) + [] + >>> top_k_frequent_words([], 1) + [] + >>> top_k_frequent_words(['a', 'a'], 2) + ['a'] + """ + heap: Heap[WordCount] = Heap() + count_by_word = Counter(words) + heap.build_max_heap( + [WordCount(word, count) for word, count in count_by_word.items()] + ) + return [heap.extract_max().word for _ in range(min(k_value, len(count_by_word)))] + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From a354342598a7ed522589fda45f00582bd312b932 Mon Sep 17 00:00:00 2001 From: Sahil Goel <55365655+sahilg13@users.noreply.github.com> Date: Sun, 30 Apr 2023 13:33:22 -0400 Subject: [PATCH 52/85] Added an algorithm to calculate the present value of cash flows (#8700) * Added an algorithm to calculate the present value of cash flows * added doctest and reference * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Resolving deprecation issues with typing 
module * Fixing argument type checks and adding doctest case * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixing failing doctest case by requiring less precision due to floating point inprecision * Updating return type * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Added test cases for more coverage * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Make improvements based on Rohan's suggestions * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update financial/present_value.py Committed first suggestion Co-authored-by: Christian Clauss * Update financial/present_value.py Committed second suggestion Co-authored-by: Christian Clauss * Update financial/present_value.py Committed third suggestion Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- financial/present_value.py | 41 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) create mode 100644 financial/present_value.py diff --git a/financial/present_value.py b/financial/present_value.py new file mode 100644 index 000000000000..dc8191a6ef53 --- /dev/null +++ b/financial/present_value.py @@ -0,0 +1,41 @@ +""" +Reference: https://www.investopedia.com/terms/p/presentvalue.asp + +An algorithm that calculates the present value of a stream of yearly cash flows given... +1. The discount rate (as a decimal, not a percent) +2. 
An array of cash flows, with the index of the cash flow being the associated year + +Note: This algorithm assumes that cash flows are paid at the end of the specified year + + +def present_value(discount_rate: float, cash_flows: list[float]) -> float: + """ + >>> present_value(0.13, [10, 20.70, -293, 297]) + 4.69 + >>> present_value(0.07, [-109129.39, 30923.23, 15098.93, 29734,39]) + -42739.63 + >>> present_value(0.07, [109129.39, 30923.23, 15098.93, 29734,39]) + 175519.15 + >>> present_value(-1, [109129.39, 30923.23, 15098.93, 29734,39]) + Traceback (most recent call last): + ... + ValueError: Discount rate cannot be negative + >>> present_value(0.03, []) + Traceback (most recent call last): + ... + ValueError: Cash flows list cannot be empty + """ + if discount_rate < 0: + raise ValueError("Discount rate cannot be negative") + if not cash_flows: + raise ValueError("Cash flows list cannot be empty") + present_value = sum( + cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows) + ) + return round(present_value, ndigits=2) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 10692402a2d815ca2cf00cedb5236fc274f898e4 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Mon, 1 May 2023 02:59:42 +0200 Subject: [PATCH 53/85] Fix docstring in present_value.py (#8702) Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 ++ financial/present_value.py | 1 + 2 files changed, 3 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 681d252b232d..167d062b4a9f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -363,6 +363,7 @@ ## Financial * [Equated Monthly Installments](financial/equated_monthly_installments.py) * [Interest](financial/interest.py) + * [Present Value](financial/present_value.py) * [Price Plus Tax](financial/price_plus_tax.py) ## Fractals @@ -655,6 +656,7 @@ * [Sum Of Harmonic Series](maths/sum_of_harmonic_series.py) * [Sumset](maths/sumset.py) * [Sylvester 
Sequence](maths/sylvester_sequence.py) + * [Tanh](maths/tanh.py) * [Test Prime Check](maths/test_prime_check.py) * [Trapezoidal Rule](maths/trapezoidal_rule.py) * [Triplet Sum](maths/triplet_sum.py) diff --git a/financial/present_value.py b/financial/present_value.py index dc8191a6ef53..f74612b923af 100644 --- a/financial/present_value.py +++ b/financial/present_value.py @@ -6,6 +6,7 @@ 2. An array of cash flows, with the index of the cash flow being the associated year Note: This algorithm assumes that cash flows are paid at the end of the specified year +""" def present_value(discount_rate: float, cash_flows: list[float]) -> float: From 4ce4dd59d4c4da786add19ada102a32239ad158a Mon Sep 17 00:00:00 2001 From: Himanshu Tomar Date: Mon, 1 May 2023 15:53:03 +0530 Subject: [PATCH 54/85] Added minimum waiting time problem solution using greedy algorithm (#8701) * Added minimum waiting time problem solution using greedy algorithm * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * ruff --fix * Add type hints * Added two more doc test * Removed unnecessary comments * updated type hints * Updated the code as per the code review --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + greedy_methods/minimum_waiting_time.py | 48 ++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) create mode 100644 greedy_methods/minimum_waiting_time.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 167d062b4a9f..021669d13b4a 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -450,6 +450,7 @@ * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) + * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py) ## Hashes * [Adler32](hashes/adler32.py) diff --git a/greedy_methods/minimum_waiting_time.py 
b/greedy_methods/minimum_waiting_time.py new file mode 100644 index 000000000000..aaae8cf8f720 --- /dev/null +++ b/greedy_methods/minimum_waiting_time.py @@ -0,0 +1,48 @@ +""" +Calculate the minimum waiting time using a greedy algorithm. +reference: https://www.youtube.com/watch?v=Sf3eiO12eJs + +For doctests run following command: +python -m doctest -v minimum_waiting_time.py + +The minimum_waiting_time function uses a greedy algorithm to calculate the minimum +time for queries to complete. It sorts the list in non-decreasing order, calculates +the waiting time for each query by multiplying its position in the list with the +sum of all remaining query times, and returns the total waiting time. A doctest +ensures that the function produces the correct output. +""" + + +def minimum_waiting_time(queries: list[int]) -> int: + """ + This function takes a list of query times and returns the minimum waiting time + for all queries to be completed. + + Args: + queries: A list of queries measured in picoseconds + + Returns: + total_waiting_time: Minimum waiting time measured in picoseconds + + Examples: + >>> minimum_waiting_time([3, 2, 1, 2, 6]) + 17 + >>> minimum_waiting_time([3, 2, 1]) + 4 + >>> minimum_waiting_time([1, 2, 3, 4]) + 10 + >>> minimum_waiting_time([5, 5, 5, 5]) + 30 + >>> minimum_waiting_time([]) + 0 + """ + n = len(queries) + if n in (0, 1): + return 0 + return sum(query * (n - i - 1) for i, query in enumerate(sorted(queries))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 22101d5a839628037341736ccb1bb0874d83c757 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 May 2023 23:48:56 +0200 Subject: [PATCH 55/85] [pre-commit.ci] pre-commit autoupdate (#8704) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.262 → 
v0.0.263](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.262...v0.0.263) - [github.com/tox-dev/pyproject-fmt: 0.10.0 → 0.11.1](https://github.com/tox-dev/pyproject-fmt/compare/0.10.0...0.11.1) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 288473ca365f..accb57da35d3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.262 + rev: v0.0.263 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.10.0" + rev: "0.11.1" hooks: - id: pyproject-fmt diff --git a/DIRECTORY.md b/DIRECTORY.md index 021669d13b4a..826bd6fd39d4 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -449,8 +449,8 @@ ## Greedy Methods * [Fractional Knapsack](greedy_methods/fractional_knapsack.py) * [Fractional Knapsack 2](greedy_methods/fractional_knapsack_2.py) + * [Minimum Waiting Time](greedy_methods/minimum_waiting_time.py) * [Optimal Merge Pattern](greedy_methods/optimal_merge_pattern.py) - * [Minimum Waiting Time ](greedy_methods/minimum_waiting_time.py) ## Hashes * [Adler32](hashes/adler32.py) From 54f1a1357f6297f62c26054f981077d919199574 Mon Sep 17 00:00:00 2001 From: Dipankar Mitra <50228537+Mitra-babu@users.noreply.github.com> Date: Tue, 2 May 2023 20:06:28 +0530 Subject: [PATCH 56/85] The ELU activation is added (#8699) * tanh function been added * tanh function been added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function is added * tanh function is added * [pre-commit.ci] auto fixes from 
pre-commit.com hooks for more information, see https://pre-commit.ci * tanh function added * tanh function added * tanh function is added * Apply suggestions from code review * ELU activation function is added * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * elu activation is added * ELU activation is added * Update maths/elu_activation.py Co-authored-by: Christian Clauss * Exponential_linear_unit activation is added * Exponential_linear_unit activation is added --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../exponential_linear_unit.py | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 neural_network/activation_functions/exponential_linear_unit.py diff --git a/neural_network/activation_functions/exponential_linear_unit.py b/neural_network/activation_functions/exponential_linear_unit.py new file mode 100644 index 000000000000..7a3cf1d84e71 --- /dev/null +++ b/neural_network/activation_functions/exponential_linear_unit.py @@ -0,0 +1,40 @@ +""" +Implements the Exponential Linear Unit or ELU function. + +The function takes a vector of K real numbers and a real number alpha as +input and then applies the ELU function to each element of the vector. + +Script inspired from its corresponding Wikipedia article +https://en.wikipedia.org/wiki/Rectifier_(neural_networks) +""" + +import numpy as np + + +def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray: + """ + Implements the ELU activation function. + Parameters: + vector: the array containing input of elu activation + alpha: hyper-parameter + return: + elu (np.array): The input numpy array after applying elu. 
+ + Mathematically, f(x) = x, x>0 else (alpha * (e^x -1)), x<=0, alpha >=0 + + Examples: + >>> exponential_linear_unit(vector=np.array([2.3,0.6,-2,-3.8]), alpha=0.3) + array([ 2.3 , 0.6 , -0.25939942, -0.29328877]) + + >>> exponential_linear_unit(vector=np.array([-9.2,-0.3,0.45,-4.56]), alpha=0.067) + array([-0.06699323, -0.01736518, 0.45 , -0.06629904]) + + + """ + return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1))) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 2602b850eada4aabaa3a8a3ab46c93f0fcc81fc8 Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Wed, 10 May 2023 15:04:36 +0530 Subject: [PATCH 57/85] Update game_of_life.py (#8703) Rectify spelling in docstring --- cellular_automata/game_of_life.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cellular_automata/game_of_life.py b/cellular_automata/game_of_life.py index 8e54702519b9..3382af7b5db6 100644 --- a/cellular_automata/game_of_life.py +++ b/cellular_automata/game_of_life.py @@ -34,7 +34,7 @@ from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap -usage_doc = "Usage of script: script_nama " +usage_doc = "Usage of script: script_name " choice = [0] * 100 + [1] * 10 random.shuffle(choice) From f98ddf8b23cfe374649767637d6c6656af48774b Mon Sep 17 00:00:00 2001 From: Rohan Anand <96521078+rohan472000@users.noreply.github.com> Date: Wed, 10 May 2023 15:08:52 +0530 Subject: [PATCH 58/85] Update and_gate.py (#8690) * Update and_gate.py addressing issue #8656 by calling `test_and_gate()` , ensuring that all the assertions are verified before the actual output is printed. 
* Update and_gate.py addressing issue #8632 --- boolean_algebra/and_gate.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/boolean_algebra/and_gate.py b/boolean_algebra/and_gate.py index cbbcfde79f33..834116772ee7 100644 --- a/boolean_algebra/and_gate.py +++ b/boolean_algebra/and_gate.py @@ -43,6 +43,8 @@ def test_and_gate() -> None: if __name__ == "__main__": + test_and_gate() + print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1)) From e67541c2d830f142bbe7cce3ae50c0b2869b4f29 Mon Sep 17 00:00:00 2001 From: shricubed Date: Wed, 10 May 2023 14:50:32 -0400 Subject: [PATCH 59/85] Working binary insertion sort in Python (#8024) --- sorts/binary_insertion_sort.py | 61 ++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) create mode 100644 sorts/binary_insertion_sort.py diff --git a/sorts/binary_insertion_sort.py b/sorts/binary_insertion_sort.py new file mode 100644 index 000000000000..8d41025583b1 --- /dev/null +++ b/sorts/binary_insertion_sort.py @@ -0,0 +1,61 @@ +""" +This is a pure Python implementation of the binary insertion sort algorithm + +For doctests run following command: +python -m doctest -v binary_insertion_sort.py +or +python3 -m doctest -v binary_insertion_sort.py + +For manual testing run: +python binary_insertion_sort.py +""" + + +def binary_insertion_sort(collection: list) -> list: + """Pure implementation of the binary insertion sort algorithm in Python + :param collection: some mutable ordered collection with heterogeneous + comparable items inside + :return: the same collection ordered by ascending + + Examples: + >>> binary_insertion_sort([0, 4, 1234, 4, 1]) + [0, 1, 4, 4, 1234] + >>> binary_insertion_sort([]) == sorted([]) + True + >>> binary_insertion_sort([-1, -2, -3]) == sorted([-1, -2, -3]) + True + >>> lst = ['d', 'a', 'b', 'e', 'c'] + >>> binary_insertion_sort(lst) == sorted(lst) + True + >>> import random + >>> collection = random.sample(range(-50, 50), 100) + >>> 
binary_insertion_sort(collection) == sorted(collection) + True + >>> import string + >>> collection = random.choices(string.ascii_letters + string.digits, k=100) + >>> binary_insertion_sort(collection) == sorted(collection) + True + """ + + n = len(collection) + for i in range(1, n): + val = collection[i] + low = 0 + high = i - 1 + + while low <= high: + mid = (low + high) // 2 + if val < collection[mid]: + high = mid - 1 + else: + low = mid + 1 + for j in range(i, low, -1): + collection[j] = collection[j - 1] + collection[low] = val + return collection + + +if __name__ == "__main__": + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + print(binary_insertion_sort(unsorted)) From c576ab9f8c574bd0b5675d9f04e367192e121ce5 Mon Sep 17 00:00:00 2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 10 May 2023 21:53:47 +0300 Subject: [PATCH 60/85] Switch case (#7995) --- strings/string_switch_case.py | 108 ++++++++++++++++++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 strings/string_switch_case.py diff --git a/strings/string_switch_case.py b/strings/string_switch_case.py new file mode 100644 index 000000000000..9a07472dfd71 --- /dev/null +++ b/strings/string_switch_case.py @@ -0,0 +1,108 @@ +import re + +""" +general info: +https://en.wikipedia.org/wiki/Naming_convention_(programming)#Python_and_Ruby + +pascal case [ an upper Camel Case ]: https://en.wikipedia.org/wiki/Camel_case + +camel case: https://en.wikipedia.org/wiki/Camel_case + +kebab case [ can be found in general info ]: +https://en.wikipedia.org/wiki/Naming_convention_(programming)#Python_and_Ruby + +snake case: https://en.wikipedia.org/wiki/Snake_case +""" + + +# assistant functions +def split_input(str_: str) -> list: + """ + >>> split_input("one two 31235three4four") + [['one', 'two', '31235three4four']] + """ + return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)] + + 
+def to_simple_case(str_: str) -> str: + """ + >>> to_simple_case("one two 31235three4four") + 'OneTwo31235three4four' + """ + string_split = split_input(str_) + return "".join( + ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split] + ) + + +def to_complex_case(text: str, upper: bool, separator: str) -> str: + """ + >>> to_complex_case("one two 31235three4four", True, "_") + 'ONE_TWO_31235THREE4FOUR' + >>> to_complex_case("one two 31235three4four", False, "-") + 'one-two-31235three4four' + """ + try: + string_split = split_input(text) + if upper: + res_str = "".join( + [ + separator.join([char.upper() for char in sub_str]) + for sub_str in string_split + ] + ) + else: + res_str = "".join( + [ + separator.join([char.lower() for char in sub_str]) + for sub_str in string_split + ] + ) + return res_str + except IndexError: + return "not valid string" + + +# main content +def to_pascal_case(text: str) -> str: + """ + >>> to_pascal_case("one two 31235three4four") + 'OneTwo31235three4four' + """ + return to_simple_case(text) + + +def to_camel_case(text: str) -> str: + """ + >>> to_camel_case("one two 31235three4four") + 'oneTwo31235three4four' + """ + try: + res_str = to_simple_case(text) + return res_str[0].lower() + res_str[1:] + except IndexError: + return "not valid string" + + +def to_snake_case(text: str, upper: bool) -> str: + """ + >>> to_snake_case("one two 31235three4four", True) + 'ONE_TWO_31235THREE4FOUR' + >>> to_snake_case("one two 31235three4four", False) + 'one_two_31235three4four' + """ + return to_complex_case(text, upper, "_") + + +def to_kebab_case(text: str, upper: bool) -> str: + """ + >>> to_kebab_case("one two 31235three4four", True) + 'ONE-TWO-31235THREE4FOUR' + >>> to_kebab_case("one two 31235three4four", False) + 'one-two-31235three4four' + """ + return to_complex_case(text, upper, "-") + + +if __name__ == "__main__": + __import__("doctest").testmod() From 71b45e10e8c0123e4b9a421c2681254bf220034a Mon Sep 17 00:00:00 
2001 From: Margaret <62753112+meg-1@users.noreply.github.com> Date: Wed, 10 May 2023 21:55:48 +0300 Subject: [PATCH 61/85] adding the remove digit algorithm (#6708) --- maths/remove_digit.py | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 maths/remove_digit.py diff --git a/maths/remove_digit.py b/maths/remove_digit.py new file mode 100644 index 000000000000..db14ac902a6f --- /dev/null +++ b/maths/remove_digit.py @@ -0,0 +1,37 @@ +def remove_digit(num: int) -> int: + """ + + returns the biggest possible result + that can be achieved by removing + one digit from the given number + + >>> remove_digit(152) + 52 + >>> remove_digit(6385) + 685 + >>> remove_digit(-11) + 1 + >>> remove_digit(2222222) + 222222 + >>> remove_digit("2222222") + Traceback (most recent call last): + TypeError: only integers accepted as input + >>> remove_digit("string input") + Traceback (most recent call last): + TypeError: only integers accepted as input + """ + + if not isinstance(num, int): + raise TypeError("only integers accepted as input") + else: + num_str = str(abs(num)) + num_transpositions = [list(num_str) for char in range(len(num_str))] + for index in range(len(num_str)): + num_transpositions[index].pop(index) + return max( + int("".join(list(transposition))) for transposition in num_transpositions + ) + + +if __name__ == "__main__": + __import__("doctest").testmod() From 47ae3addfa76d018792c1a0838eecd71fd0422fd Mon Sep 17 00:00:00 2001 From: Pronoy Mandal Date: Thu, 11 May 2023 00:30:59 +0530 Subject: [PATCH 62/85] Create maximum_subsequence.py (#7811) --- DIRECTORY.md | 1 + other/maximum_subsequence.py | 42 ++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 other/maximum_subsequence.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 826bd6fd39d4..a70ad6861d6f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -716,6 +716,7 @@ * [Lru Cache](other/lru_cache.py) * 
[Magicdiamondpattern](other/magicdiamondpattern.py) * [Maximum Subarray](other/maximum_subarray.py) + * [Maximum Subsequence](other/maximum_subsequence.py) * [Nested Brackets](other/nested_brackets.py) * [Password](other/password.py) * [Quine](other/quine.py) diff --git a/other/maximum_subsequence.py b/other/maximum_subsequence.py new file mode 100644 index 000000000000..f81717596532 --- /dev/null +++ b/other/maximum_subsequence.py @@ -0,0 +1,42 @@ +from collections.abc import Sequence + + +def max_subsequence_sum(nums: Sequence[int] | None = None) -> int: + """Return the maximum possible sum amongst all non - empty subsequences. + + Raises: + ValueError: when nums is empty. + + >>> max_subsequence_sum([1,2,3,4,-2]) + 10 + >>> max_subsequence_sum([-2, -3, -1, -4, -6]) + -1 + >>> max_subsequence_sum([]) + Traceback (most recent call last): + . . . + ValueError: Input sequence should not be empty + >>> max_subsequence_sum() + Traceback (most recent call last): + . . . + ValueError: Input sequence should not be empty + """ + if nums is None or not nums: + raise ValueError("Input sequence should not be empty") + + ans = nums[0] + for i in range(1, len(nums)): + num = nums[i] + ans = max(ans, ans + num, num) + + return ans + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + # Try on a sample input from the user + n = int(input("Enter number of elements : ").strip()) + array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] + print(max_subsequence_sum(array)) From 6285b375ada18f2c04725ab859621a747b9aa22c Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Wed, 10 May 2023 19:53:48 +0000 Subject: [PATCH 63/85] updating DIRECTORY.md --- DIRECTORY.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index a70ad6861d6f..df239f9c0003 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -632,6 +632,7 @@ * [Radians](maths/radians.py) * [Radix2 
Fft](maths/radix2_fft.py) * [Relu](maths/relu.py) + * [Remove Digit](maths/remove_digit.py) * [Runge Kutta](maths/runge_kutta.py) * [Segmented Sieve](maths/segmented_sieve.py) * Series @@ -694,6 +695,8 @@ ## Neural Network * [2 Hidden Layers Neural Network](neural_network/2_hidden_layers_neural_network.py) + * Activation Functions + * [Exponential Linear Unit](neural_network/activation_functions/exponential_linear_unit.py) * [Back Propagation Neural Network](neural_network/back_propagation_neural_network.py) * [Convolution Neural Network](neural_network/convolution_neural_network.py) * [Input Data](neural_network/input_data.py) @@ -1080,6 +1083,7 @@ ## Sorts * [Bead Sort](sorts/bead_sort.py) + * [Binary Insertion Sort](sorts/binary_insertion_sort.py) * [Bitonic Sort](sorts/bitonic_sort.py) * [Bogo Sort](sorts/bogo_sort.py) * [Bubble Sort](sorts/bubble_sort.py) @@ -1170,6 +1174,7 @@ * [Reverse Words](strings/reverse_words.py) * [Snake Case To Camel Pascal Case](strings/snake_case_to_camel_pascal_case.py) * [Split](strings/split.py) + * [String Switch Case](strings/string_switch_case.py) * [Text Justification](strings/text_justification.py) * [Top K Frequent Words](strings/top_k_frequent_words.py) * [Upper](strings/upper.py) From d50f7ea63ede87d613215dc5b06e7b08df8ca94f Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Sun, 14 May 2023 22:03:13 +0100 Subject: [PATCH 64/85] Correct ruff failures (#8732) * fix: Correct ruff problems * updating DIRECTORY.md * fix: Fix pre-commit errors * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - conversions/prefix_conversions_string.py | 4 ++-- conversions/rgb_hsv_conversion.py | 4 ++-- .../test_digital_image_processing.py | 2 +- ...ion.py => strassen_matrix_multiplication.py.BROKEN} | 2 +- dynamic_programming/fibonacci.py | 2 +- maths/euclidean_distance.py | 6 +++--- physics/horizontal_projectile_motion.py | 6 +++--- 
searches/binary_tree_traversal.py | 10 ++++------ 9 files changed, 17 insertions(+), 20 deletions(-) rename divide_and_conquer/{strassen_matrix_multiplication.py => strassen_matrix_multiplication.py.BROKEN} (99%) diff --git a/DIRECTORY.md b/DIRECTORY.md index df239f9c0003..fc6cbaf7ff41 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,7 +294,6 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) - * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic Programming * [Abbreviation](dynamic_programming/abbreviation.py) diff --git a/conversions/prefix_conversions_string.py b/conversions/prefix_conversions_string.py index 3851d7c8b993..9344c9672a1f 100644 --- a/conversions/prefix_conversions_string.py +++ b/conversions/prefix_conversions_string.py @@ -96,7 +96,7 @@ def add_si_prefix(value: float) -> str: for name_prefix, value_prefix in prefixes.items(): numerical_part = value / (10**value_prefix) if numerical_part > 1: - return f"{str(numerical_part)} {name_prefix}" + return f"{numerical_part!s} {name_prefix}" return str(value) @@ -111,7 +111,7 @@ def add_binary_prefix(value: float) -> str: for prefix in BinaryUnit: numerical_part = value / (2**prefix.value) if numerical_part > 1: - return f"{str(numerical_part)} {prefix.name}" + return f"{numerical_part!s} {prefix.name}" return str(value) diff --git a/conversions/rgb_hsv_conversion.py b/conversions/rgb_hsv_conversion.py index 081cfe1d75e0..74b3d33e49e7 100644 --- a/conversions/rgb_hsv_conversion.py +++ b/conversions/rgb_hsv_conversion.py @@ -121,8 +121,8 @@ def rgb_to_hsv(red: int, green: int, blue: int) -> list[float]: float_red = red / 255 float_green = green / 255 float_blue = blue / 255 - value = max(max(float_red, float_green), float_blue) - chroma = value - min(min(float_red, float_green), float_blue) + value = max(float_red, float_green, float_blue) + chroma = value - min(float_red, 
float_green, float_blue) saturation = 0 if value == 0 else chroma / value if chroma == 0: diff --git a/digital_image_processing/test_digital_image_processing.py b/digital_image_processing/test_digital_image_processing.py index c999464ce85e..fee7ab247b55 100644 --- a/digital_image_processing/test_digital_image_processing.py +++ b/digital_image_processing/test_digital_image_processing.py @@ -96,7 +96,7 @@ def test_nearest_neighbour( def test_local_binary_pattern(): - file_path: str = "digital_image_processing/image_data/lena.jpg" + file_path = "digital_image_processing/image_data/lena.jpg" # Reading the image and converting it to grayscale. image = imread(file_path, 0) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN similarity index 99% rename from divide_and_conquer/strassen_matrix_multiplication.py rename to divide_and_conquer/strassen_matrix_multiplication.py.BROKEN index 371605d6d4d4..2ca91c63bf4c 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN @@ -122,7 +122,7 @@ def strassen(matrix1: list, matrix2: list) -> list: if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(max(dimension1), max(dimension2)) + maximum = max(dimension1, dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 diff --git a/dynamic_programming/fibonacci.py b/dynamic_programming/fibonacci.py index 7ec5993ef38d..c102493aa00b 100644 --- a/dynamic_programming/fibonacci.py +++ b/dynamic_programming/fibonacci.py @@ -24,7 +24,7 @@ def get(self, index: int) -> list: return self.sequence[:index] -def main(): +def main() -> None: print( "Fibonacci Series Using Dynamic Programming\n", "Enter the index of the Fibonacci number you want to calculate ", diff --git a/maths/euclidean_distance.py b/maths/euclidean_distance.py index 
22012e92c9cf..9b29b37b0ce6 100644 --- a/maths/euclidean_distance.py +++ b/maths/euclidean_distance.py @@ -1,12 +1,12 @@ from __future__ import annotations +import typing from collections.abc import Iterable -from typing import Union import numpy as np -Vector = Union[Iterable[float], Iterable[int], np.ndarray] -VectorOut = Union[np.float64, int, float] +Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 +VectorOut = typing.Union[np.float64, int, float] # noqa: UP007 def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut: diff --git a/physics/horizontal_projectile_motion.py b/physics/horizontal_projectile_motion.py index dbde3660f62f..80f85a1b7146 100644 --- a/physics/horizontal_projectile_motion.py +++ b/physics/horizontal_projectile_motion.py @@ -147,6 +147,6 @@ def test_motion() -> None: # Print results print() print("Results: ") - print(f"Horizontal Distance: {str(horizontal_distance(init_vel, angle))} [m]") - print(f"Maximum Height: {str(max_height(init_vel, angle))} [m]") - print(f"Total Time: {str(total_time(init_vel, angle))} [s]") + print(f"Horizontal Distance: {horizontal_distance(init_vel, angle)!s} [m]") + print(f"Maximum Height: {max_height(init_vel, angle)!s} [m]") + print(f"Total Time: {total_time(init_vel, angle)!s} [s]") diff --git a/searches/binary_tree_traversal.py b/searches/binary_tree_traversal.py index 76e80df25a13..6fb841af4294 100644 --- a/searches/binary_tree_traversal.py +++ b/searches/binary_tree_traversal.py @@ -13,11 +13,9 @@ def __init__(self, data): self.left = None -def build_tree(): +def build_tree() -> TreeNode: print("\n********Press N to stop entering at any point of time********\n") - check = input("Enter the value of the root node: ").strip().lower() or "n" - if check == "n": - return None + check = input("Enter the value of the root node: ").strip().lower() q: queue.Queue = queue.Queue() tree_node = TreeNode(int(check)) q.put(tree_node) @@ -37,7 +35,7 @@ def build_tree(): 
right_node = TreeNode(int(check)) node_found.right = right_node q.put(right_node) - return None + raise def pre_order(node: TreeNode) -> None: @@ -272,7 +270,7 @@ def prompt(s: str = "", width=50, char="*") -> str: doctest.testmod() print(prompt("Binary Tree Traversals")) - node = build_tree() + node: TreeNode = build_tree() print(prompt("Pre Order Traversal")) pre_order(node) print(prompt() + "\n") From e5d55946e81ef3845cf3fad926138334ce2f8940 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 May 2023 22:27:59 +0100 Subject: [PATCH 65/85] [pre-commit.ci] pre-commit autoupdate (#8716) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.263 → v0.0.267](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.263...v0.0.267) - [github.com/tox-dev/pyproject-fmt: 0.11.1 → 0.11.2](https://github.com/tox-dev/pyproject-fmt/compare/0.11.1...0.11.2) - [github.com/pre-commit/mirrors-mypy: v1.2.0 → v1.3.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.2.0...v1.3.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index accb57da35d3..6bdbc7370c9c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.263 + rev: v0.0.267 hooks: - id: ruff @@ -33,7 +33,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: "0.11.1" + rev: "0.11.2" hooks: - id: pyproject-fmt @@ -51,7 +51,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.2.0 + rev: v1.3.0 hooks: - id: mypy args: From 
596422b0a6fc43e4abd0fc69d7331f02dd6efca1 Mon Sep 17 00:00:00 2001 From: Maxim Smolskiy Date: Tue, 16 May 2023 00:47:50 +0300 Subject: [PATCH 66/85] Reduce the complexity of genetic_algorithm/basic_string.py (#8606) --- genetic_algorithm/basic_string.py | 95 ++++++++++++++++--------------- 1 file changed, 50 insertions(+), 45 deletions(-) diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 45b8be651f6e..388e7219f54b 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -21,6 +21,54 @@ random.seed(random.randint(0, 1000)) +def evaluate(item: str, main_target: str) -> tuple[str, float]: + """ + Evaluate how similar the item is with the target by just + counting each char in the right position + >>> evaluate("Helxo Worlx", "Hello World") + ('Helxo Worlx', 9.0) + """ + score = len([g for position, g in enumerate(item) if g == main_target[position]]) + return (item, float(score)) + + +def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: + """Slice and combine two string at a random point.""" + random_slice = random.randint(0, len(parent_1) - 1) + child_1 = parent_1[:random_slice] + parent_2[random_slice:] + child_2 = parent_2[:random_slice] + parent_1[random_slice:] + return (child_1, child_2) + + +def mutate(child: str, genes: list[str]) -> str: + """Mutate a random gene of a child with another one from the list.""" + child_list = list(child) + if random.uniform(0, 1) < MUTATION_PROBABILITY: + child_list[random.randint(0, len(child)) - 1] = random.choice(genes) + return "".join(child_list) + + +# Select, crossover and mutate a new population. +def select( + parent_1: tuple[str, float], + population_score: list[tuple[str, float]], + genes: list[str], +) -> list[str]: + """Select the second parent and generate new population""" + pop = [] + # Generate more children proportionally to the fitness score. 
+ child_n = int(parent_1[1] * 100) + 1 + child_n = 10 if child_n >= 10 else child_n + for _ in range(child_n): + parent_2 = population_score[random.randint(0, N_SELECTED)][0] + + child_1, child_2 = crossover(parent_1[0], parent_2) + # Append new string to the population list. + pop.append(mutate(child_1, genes)) + pop.append(mutate(child_2, genes)) + return pop + + def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]: """ Verify that the target contains no genes besides the ones inside genes variable. @@ -70,17 +118,6 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, total_population += len(population) # Random population created. Now it's time to evaluate. - def evaluate(item: str, main_target: str = target) -> tuple[str, float]: - """ - Evaluate how similar the item is with the target by just - counting each char in the right position - >>> evaluate("Helxo Worlx", Hello World) - ["Helxo Worlx", 9] - """ - score = len( - [g for position, g in enumerate(item) if g == main_target[position]] - ) - return (item, float(score)) # Adding a bit of concurrency can make everything faster, # @@ -94,7 +131,7 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. - population_score = [evaluate(item) for item in population] + population_score = [evaluate(item, target) for item in population] # Check if there is a matching evolution. population_score = sorted(population_score, key=lambda x: x[1], reverse=True) @@ -121,41 +158,9 @@ def evaluate(item: str, main_target: str = target) -> tuple[str, float]: (item, score / len(target)) for item, score in population_score ] - # Select, crossover and mutate a new population. 
- def select(parent_1: tuple[str, float]) -> list[str]: - """Select the second parent and generate new population""" - pop = [] - # Generate more children proportionally to the fitness score. - child_n = int(parent_1[1] * 100) + 1 - child_n = 10 if child_n >= 10 else child_n - for _ in range(child_n): - parent_2 = population_score[ # noqa: B023 - random.randint(0, N_SELECTED) - ][0] - - child_1, child_2 = crossover(parent_1[0], parent_2) - # Append new string to the population list. - pop.append(mutate(child_1)) - pop.append(mutate(child_2)) - return pop - - def crossover(parent_1: str, parent_2: str) -> tuple[str, str]: - """Slice and combine two string at a random point.""" - random_slice = random.randint(0, len(parent_1) - 1) - child_1 = parent_1[:random_slice] + parent_2[random_slice:] - child_2 = parent_2[:random_slice] + parent_1[random_slice:] - return (child_1, child_2) - - def mutate(child: str) -> str: - """Mutate a random gene of a child with another one from the list.""" - child_list = list(child) - if random.uniform(0, 1) < MUTATION_PROBABILITY: - child_list[random.randint(0, len(child)) - 1] = random.choice(genes) - return "".join(child_list) - # This is selection for i in range(N_SELECTED): - population.extend(select(population_score[int(i)])) + population.extend(select(population_score[int(i)], population_score, genes)) # Check if the population has already reached the maximum value and if so, # break the cycle. 
If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in From f33c50c0adabcad7d508aa13047e0409017943d1 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 16 May 2023 00:40:59 -0700 Subject: [PATCH 67/85] Fix strassen_matrix_multiplication.py type error --- ...ltiplication.py.BROKEN => strassen_matrix_multiplication.py} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename divide_and_conquer/{strassen_matrix_multiplication.py.BROKEN => strassen_matrix_multiplication.py} (99%) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN b/divide_and_conquer/strassen_matrix_multiplication.py similarity index 99% rename from divide_and_conquer/strassen_matrix_multiplication.py.BROKEN rename to divide_and_conquer/strassen_matrix_multiplication.py index 2ca91c63bf4c..cfbbe7746cb4 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py.BROKEN +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -122,7 +122,7 @@ def strassen(matrix1: list, matrix2: list) -> list: if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]: return [matrix1, matrix2] - maximum = max(dimension1, dimension2) + maximum = max(*dimension1, *dimension2) maxim = int(math.pow(2, math.ceil(math.log2(maximum)))) new_matrix1 = matrix1 new_matrix2 = matrix2 From acb33c09235273d0df0e76c2dd7a079fab438dfb Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Tue, 16 May 2023 07:47:32 +0000 Subject: [PATCH 68/85] updating DIRECTORY.md --- DIRECTORY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index fc6cbaf7ff41..df239f9c0003 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -294,6 +294,7 @@ * [Mergesort](divide_and_conquer/mergesort.py) * [Peak](divide_and_conquer/peak.py) * [Power](divide_and_conquer/power.py) + * [Strassen Matrix Multiplication](divide_and_conquer/strassen_matrix_multiplication.py) ## Dynamic 
Programming * [Abbreviation](dynamic_programming/abbreviation.py) From 3414fec2597fc540735a6371d7a3ede9022518ec Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Tue, 16 May 2023 17:05:55 -0700 Subject: [PATCH 69/85] `local_weighted_learning.py`: fix `mypy` errors and more (#8073) --- .../local_weighted_learning.py | 188 +++++++++++------- 1 file changed, 112 insertions(+), 76 deletions(-) diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py index 6260e9ac6bfe..8dd0e55d41df 100644 --- a/machine_learning/local_weighted_learning/local_weighted_learning.py +++ b/machine_learning/local_weighted_learning/local_weighted_learning.py @@ -1,14 +1,55 @@ +""" +Locally weighted linear regression, also called local regression, is a type of +non-parametric linear regression that prioritizes data closest to a given +prediction point. The algorithm estimates the vector of model coefficients β +using weighted least squares regression: + +β = (XᵀWX)⁻¹(XᵀWy), + +where X is the design matrix, y is the response vector, and W is the diagonal +weight matrix. + +This implementation calculates wᵢ, the weight of the ith training sample, using +the Gaussian weight: + +wᵢ = exp(-‖xᵢ - x‖²/(2τ²)), + +where xᵢ is the ith training sample, x is the prediction point, τ is the +"bandwidth", and ‖x‖ is the Euclidean norm (also called the 2-norm or the L² +norm). The bandwidth τ controls how quickly the weight of a training sample +decreases as its distance from the prediction point increases. One can think of +the Gaussian weight as a bell curve centered around the prediction point: a +training sample is weighted lower if it's farther from the center, and τ +controls the spread of the bell curve. + +Other types of locally weighted regression such as locally estimated scatterplot +smoothing (LOESS) typically use different weight functions. 
+ +References: + - https://en.wikipedia.org/wiki/Local_regression + - https://en.wikipedia.org/wiki/Weighted_least_squares + - https://cs229.stanford.edu/notes2022fall/main_notes.pdf +""" + import matplotlib.pyplot as plt import numpy as np -def weighted_matrix( - point: np.array, training_data_x: np.array, bandwidth: float -) -> np.array: +def weight_matrix(point: np.ndarray, x_train: np.ndarray, tau: float) -> np.ndarray: """ - Calculate the weight for every point in the data set. - point --> the x value at which we want to make predictions - >>> weighted_matrix( + Calculate the weight of every point in the training data around a given + prediction point + + Args: + point: x-value at which the prediction is being made + x_train: ndarray of x-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + + Returns: + m x m weight matrix around the prediction point, where m is the size of + the training set + >>> weight_matrix( ... np.array([1., 1.]), ... np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), ... 
0.6 @@ -17,25 +58,30 @@ def weighted_matrix( [0.00000000e+000, 0.00000000e+000, 0.00000000e+000], [0.00000000e+000, 0.00000000e+000, 0.00000000e+000]]) """ - m, _ = np.shape(training_data_x) # m is the number of training samples - weights = np.eye(m) # Initializing weights as identity matrix - - # calculating weights for all training examples [x(i)'s] + m = len(x_train) # Number of training samples + weights = np.eye(m) # Initialize weights as identity matrix for j in range(m): - diff = point - training_data_x[j] - weights[j, j] = np.exp(diff @ diff.T / (-2.0 * bandwidth**2)) + diff = point - x_train[j] + weights[j, j] = np.exp(diff @ diff.T / (-2.0 * tau**2)) + return weights def local_weight( - point: np.array, - training_data_x: np.array, - training_data_y: np.array, - bandwidth: float, -) -> np.array: + point: np.ndarray, x_train: np.ndarray, y_train: np.ndarray, tau: float +) -> np.ndarray: """ - Calculate the local weights using the weight_matrix function on training data. - Return the weighted matrix. + Calculate the local weights at a given prediction point using the weight + matrix for that point + + Args: + point: x-value at which the prediction is being made + x_train: ndarray of x-values for training + y_train: ndarray of y-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + Returns: + ndarray of local weights >>> local_weight( ... np.array([1., 1.]), ... 
np.array([[16.99, 10.34], [21.01,23.68], [24.59,25.69]]), @@ -45,19 +91,28 @@ def local_weight( array([[0.00873174], [0.08272556]]) """ - weight = weighted_matrix(point, training_data_x, bandwidth) - w = np.linalg.inv(training_data_x.T @ (weight @ training_data_x)) @ ( - training_data_x.T @ weight @ training_data_y.T + weight_mat = weight_matrix(point, x_train, tau) + weight = np.linalg.inv(x_train.T @ weight_mat @ x_train) @ ( + x_train.T @ weight_mat @ y_train.T ) - return w + return weight def local_weight_regression( - training_data_x: np.array, training_data_y: np.array, bandwidth: float -) -> np.array: + x_train: np.ndarray, y_train: np.ndarray, tau: float +) -> np.ndarray: """ - Calculate predictions for each data point on axis + Calculate predictions for each point in the training data + + Args: + x_train: ndarray of x-values for training + y_train: ndarray of y-values for training + tau: bandwidth value, controls how quickly the weight of training values + decreases as the distance from the prediction point increases + + Returns: + ndarray of predictions >>> local_weight_regression( ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), ... np.array([[1.01, 1.66, 3.5]]), @@ -65,77 +120,57 @@ def local_weight_regression( ... 
) array([1.07173261, 1.65970737, 3.50160179]) """ - m, _ = np.shape(training_data_x) - ypred = np.zeros(m) + y_pred = np.zeros(len(x_train)) # Initialize array of predictions + for i, item in enumerate(x_train): + y_pred[i] = item @ local_weight(item, x_train, y_train, tau) - for i, item in enumerate(training_data_x): - ypred[i] = item @ local_weight( - item, training_data_x, training_data_y, bandwidth - ) - - return ypred + return y_pred def load_data( - dataset_name: str, cola_name: str, colb_name: str -) -> tuple[np.array, np.array, np.array, np.array]: + dataset_name: str, x_name: str, y_name: str +) -> tuple[np.ndarray, np.ndarray, np.ndarray]: """ Load data from seaborn and split it into x and y points + >>> pass # No doctests, function is for demo purposes only """ import seaborn as sns data = sns.load_dataset(dataset_name) - col_a = np.array(data[cola_name]) # total_bill - col_b = np.array(data[colb_name]) # tip - - mcol_a = col_a.copy() - mcol_b = col_b.copy() - - one = np.ones(np.shape(mcol_b)[0], dtype=int) + x_data = np.array(data[x_name]) + y_data = np.array(data[y_name]) - # pairing elements of one and mcol_a - training_data_x = np.column_stack((one, mcol_a)) + one = np.ones(len(y_data)) - return training_data_x, mcol_b, col_a, col_b + # pairing elements of one and x_data + x_train = np.column_stack((one, x_data)) - -def get_preds(training_data_x: np.array, mcol_b: np.array, tau: float) -> np.array: - """ - Get predictions with minimum error for each training data - >>> get_preds( - ... np.array([[16.99, 10.34], [21.01, 23.68], [24.59, 25.69]]), - ... np.array([[1.01, 1.66, 3.5]]), - ... 0.6 - ... 
) - array([1.07173261, 1.65970737, 3.50160179]) - """ - ypred = local_weight_regression(training_data_x, mcol_b, tau) - return ypred + return x_train, x_data, y_data def plot_preds( - training_data_x: np.array, - predictions: np.array, - col_x: np.array, - col_y: np.array, - cola_name: str, - colb_name: str, -) -> plt.plot: + x_train: np.ndarray, + preds: np.ndarray, + x_data: np.ndarray, + y_data: np.ndarray, + x_name: str, + y_name: str, +) -> None: """ Plot predictions and display the graph + >>> pass # No doctests, function is for demo purposes only """ - xsort = training_data_x.copy() - xsort.sort(axis=0) - plt.scatter(col_x, col_y, color="blue") + x_train_sorted = np.sort(x_train, axis=0) + plt.scatter(x_data, y_data, color="blue") plt.plot( - xsort[:, 1], - predictions[training_data_x[:, 1].argsort(0)], + x_train_sorted[:, 1], + preds[x_train[:, 1].argsort(0)], color="yellow", linewidth=5, ) plt.title("Local Weighted Regression") - plt.xlabel(cola_name) - plt.ylabel(colb_name) + plt.xlabel(x_name) + plt.ylabel(y_name) plt.show() @@ -144,6 +179,7 @@ def plot_preds( doctest.testmod() - training_data_x, mcol_b, col_a, col_b = load_data("tips", "total_bill", "tip") - predictions = get_preds(training_data_x, mcol_b, 0.5) - plot_preds(training_data_x, predictions, col_a, col_b, "total_bill", "tip") + # Demo with a dataset from the seaborn module + training_data_x, total_bill, tip = load_data("tips", "total_bill", "tip") + predictions = local_weight_regression(training_data_x, tip, 5) + plot_preds(training_data_x, predictions, total_bill, tip, "total_bill", "tip") From eb7042dbfd929c6c169e01865b50b9a0e1f5d7fa Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Wed, 17 May 2023 05:38:56 +0530 Subject: [PATCH 70/85] Added odd_sieve.py (#8740) --- maths/odd_sieve.py | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 maths/odd_sieve.py diff --git a/maths/odd_sieve.py 
b/maths/odd_sieve.py new file mode 100644 index 000000000000..60e92921a94c --- /dev/null +++ b/maths/odd_sieve.py @@ -0,0 +1,42 @@ +from itertools import compress, repeat +from math import ceil, sqrt + + +def odd_sieve(num: int) -> list[int]: + """ + Returns the prime numbers < `num`. The prime numbers are calculated using an + odd sieve implementation of the Sieve of Eratosthenes algorithm + (see for reference https://en.wikipedia.org/wiki/Sieve_of_Eratosthenes). + + >>> odd_sieve(2) + [] + >>> odd_sieve(3) + [2] + >>> odd_sieve(10) + [2, 3, 5, 7] + >>> odd_sieve(20) + [2, 3, 5, 7, 11, 13, 17, 19] + """ + + if num <= 2: + return [] + if num == 3: + return [2] + + # Odd sieve for numbers in range [3, num - 1] + sieve = bytearray(b"\x01") * ((num >> 1) - 1) + + for i in range(3, int(sqrt(num)) + 1, 2): + if sieve[(i >> 1) - 1]: + i_squared = i**2 + sieve[(i_squared >> 1) - 1 :: i] = repeat( + 0, ceil((num - i_squared) / (i << 1)) + ) + + return [2] + list(compress(range(3, num, 2), sieve)) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From d766d57e12ba8b81a33129ed72e0e6dc8c0c6fbc Mon Sep 17 00:00:00 2001 From: Alexander Pantyukhin Date: Wed, 17 May 2023 04:21:16 +0400 Subject: [PATCH 71/85] Add h index (#8036) --- DIRECTORY.md | 1 + other/h_index.py | 71 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 other/h_index.py diff --git a/DIRECTORY.md b/DIRECTORY.md index df239f9c0003..27057988b5c7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -713,6 +713,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) * [Linear Congruential Generator](other/linear_congruential_generator.py) diff --git a/other/h_index.py b/other/h_index.py new file mode 100644 index 000000000000..e91389675b16 --- /dev/null +++ b/other/h_index.py @@ 
-0,0 +1,71 @@ +""" +Task: +Given an array of integers citations where citations[i] is the number of +citations a researcher received for their ith paper, return compute the +researcher's h-index. + +According to the definition of h-index on Wikipedia: A scientist has an +index h if h of their n papers have at least h citations each, and the other +n - h papers have no more than h citations each. + +If there are several possible values for h, the maximum one is taken as the +h-index. + +H-Index link: https://en.wikipedia.org/wiki/H-index + +Implementation notes: +Use sorting of array + +Leetcode link: https://leetcode.com/problems/h-index/description/ + +n = len(citations) +Runtime Complexity: O(n * log(n)) +Space Complexity: O(1) + +""" + + +def h_index(citations: list[int]) -> int: + """ + Return H-index of citations + + >>> h_index([3, 0, 6, 1, 5]) + 3 + >>> h_index([1, 3, 1]) + 1 + >>> h_index([1, 2, 3]) + 2 + >>> h_index('test') + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. + >>> h_index([1,2,'3']) + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. + >>> h_index([1,2,-3]) + Traceback (most recent call last): + ... + ValueError: The citations should be a list of non negative integers. 
+ """ + + # validate: + if not isinstance(citations, list) or not all( + isinstance(item, int) and item >= 0 for item in citations + ): + raise ValueError("The citations should be a list of non negative integers.") + + citations.sort() + len_citations = len(citations) + + for i in range(len_citations): + if citations[len_citations - 1 - i] <= i: + return i + + return len_citations + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 7dfe5a434613e4f3e87136c2dec385f377b23c48 Mon Sep 17 00:00:00 2001 From: Harkishan Khuva <78949167+hakiKhuva@users.noreply.github.com> Date: Wed, 17 May 2023 05:52:24 +0530 Subject: [PATCH 72/85] Create guess_the_number_search.py (#7937) --- other/guess_the_number_search.py | 165 +++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 other/guess_the_number_search.py diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py new file mode 100644 index 000000000000..0439223f2ec9 --- /dev/null +++ b/other/guess_the_number_search.py @@ -0,0 +1,165 @@ +""" +guess the number using lower,higher and the value to find or guess + +solution works by dividing lower and higher of number guessed + +suppose lower is 0, higher is 1000 and the number to guess is 355 + +>>> guess_the_number(10, 1000, 17) +started... +guess the number : 17 +details : [505, 257, 133, 71, 40, 25, 17] + +""" + + +def temp_input_value( + min_val: int = 10, max_val: int = 1000, option: bool = True +) -> int: + """ + Temporary input values for tests + + >>> temp_input_value(option=True) + 10 + + >>> temp_input_value(option=False) + 1000 + + >>> temp_input_value(min_val=100, option=True) + 100 + + >>> temp_input_value(min_val=100, max_val=50) + Traceback (most recent call last): + ... + ValueError: Invalid value for min_val or max_val (min_value < max_value) + + >>> temp_input_value("ten","fifty",1) + Traceback (most recent call last): + ... + AssertionError: Invalid type of value(s) specified to function! 
+ + >>> temp_input_value(min_val=-100, max_val=500) + -100 + + >>> temp_input_value(min_val=-5100, max_val=-100) + -5100 + """ + assert ( + isinstance(min_val, int) + and isinstance(max_val, int) + and isinstance(option, bool) + ), "Invalid type of value(s) specified to function!" + + if min_val > max_val: + raise ValueError("Invalid value for min_val or max_val (min_value < max_value)") + return min_val if option else max_val + + +def get_avg(number_1: int, number_2: int) -> int: + """ + Return the mid-number(whole) of two integers a and b + + >>> get_avg(10, 15) + 12 + + >>> get_avg(20, 300) + 160 + + >>> get_avg("abcd", 300) + Traceback (most recent call last): + ... + TypeError: can only concatenate str (not "int") to str + + >>> get_avg(10.5,50.25) + 30 + """ + return int((number_1 + number_2) / 2) + + +def guess_the_number(lower: int, higher: int, to_guess: int) -> None: + """ + The `guess_the_number` function that guess the number by some operations + and using inner functions + + >>> guess_the_number(10, 1000, 17) + started... + guess the number : 17 + details : [505, 257, 133, 71, 40, 25, 17] + + >>> guess_the_number(-10000, 10000, 7) + started... + guess the number : 7 + details : [0, 5000, 2500, 1250, 625, 312, 156, 78, 39, 19, 9, 4, 6, 7] + + >>> guess_the_number(10, 1000, "a") + Traceback (most recent call last): + ... + AssertionError: argument values must be type of "int" + + >>> guess_the_number(10, 1000, 5) + Traceback (most recent call last): + ... + ValueError: guess value must be within the range of lower and higher value + + >>> guess_the_number(10000, 100, 5) + Traceback (most recent call last): + ... 
+ ValueError: argument value for lower and higher must be(lower > higher) + """ + assert ( + isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int) + ), 'argument values must be type of "int"' + + if lower > higher: + raise ValueError("argument value for lower and higher must be(lower > higher)") + + if not lower < to_guess < higher: + raise ValueError( + "guess value must be within the range of lower and higher value" + ) + + def answer(number: int) -> str: + """ + Returns value by comparing with entered `to_guess` number + """ + if number > to_guess: + return "high" + elif number < to_guess: + return "low" + else: + return "same" + + print("started...") + + last_lowest = lower + last_highest = higher + + last_numbers = [] + + while True: + number = get_avg(last_lowest, last_highest) + last_numbers.append(number) + + if answer(number) == "low": + last_lowest = number + elif answer(number) == "high": + last_highest = number + else: + break + + print(f"guess the number : {last_numbers[-1]}") + print(f"details : {str(last_numbers)}") + + +def main() -> None: + """ + starting point or function of script + """ + lower = int(input("Enter lower value : ").strip()) + higher = int(input("Enter high value : ").strip()) + guess = int(input("Enter value to guess : ").strip()) + guess_the_number(lower, higher, guess) + + +if __name__ == "__main__": + main() From 7a0752cae6a1c71a029687426c9f996595c6073c Mon Sep 17 00:00:00 2001 From: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Date: Wed, 17 May 2023 03:49:28 +0000 Subject: [PATCH 73/85] updating DIRECTORY.md --- DIRECTORY.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/DIRECTORY.md b/DIRECTORY.md index 27057988b5c7..9bd75d669729 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -606,6 +606,7 @@ * [Newton Raphson](maths/newton_raphson.py) * [Number Of Digits](maths/number_of_digits.py) * [Numerical Integration](maths/numerical_integration.py) + * [Odd Sieve](maths/odd_sieve.py) * 
[Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) * [Perfect Square](maths/perfect_square.py) @@ -713,6 +714,7 @@ * [Gauss Easter](other/gauss_easter.py) * [Graham Scan](other/graham_scan.py) * [Greedy](other/greedy.py) + * [Guess The Number Search](other/guess_the_number_search.py) * [H Index](other/h_index.py) * [Least Recently Used](other/least_recently_used.py) * [Lfu Cache](other/lfu_cache.py) From 115c7f5cf23e8d407ea8c56ea7a55b7fad3e5333 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Wed, 17 May 2023 07:47:23 +0100 Subject: [PATCH 74/85] Fixes broken "Create guess_the_number_search.py" (#8746) --- other/guess_the_number_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/other/guess_the_number_search.py b/other/guess_the_number_search.py index 0439223f2ec9..01e8898bbb8a 100644 --- a/other/guess_the_number_search.py +++ b/other/guess_the_number_search.py @@ -148,7 +148,7 @@ def answer(number: int) -> str: break print(f"guess the number : {last_numbers[-1]}") - print(f"details : {str(last_numbers)}") + print(f"details : {last_numbers!s}") def main() -> None: From bb9a7357ca2d1f9cf251fc0a57662884bfbce22d Mon Sep 17 00:00:00 2001 From: Rohan Saraogi <62804340+r0sa2@users.noreply.github.com> Date: Fri, 19 May 2023 05:18:22 +0530 Subject: [PATCH 75/85] Added is_palindrome.py (#8748) --- maths/is_palindrome.py | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 maths/is_palindrome.py diff --git a/maths/is_palindrome.py b/maths/is_palindrome.py new file mode 100644 index 000000000000..ba60573ab022 --- /dev/null +++ b/maths/is_palindrome.py @@ -0,0 +1,34 @@ +def is_palindrome(num: int) -> bool: + """ + Returns whether `num` is a palindrome or not + (see for reference https://en.wikipedia.org/wiki/Palindromic_number). 
+ + >>> is_palindrome(-121) + False + >>> is_palindrome(0) + True + >>> is_palindrome(10) + False + >>> is_palindrome(11) + True + >>> is_palindrome(101) + True + >>> is_palindrome(120) + False + """ + if num < 0: + return False + + num_copy: int = num + rev_num: int = 0 + while num > 0: + rev_num = rev_num * 10 + (num % 10) + num //= 10 + + return num_copy == rev_num + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From f743d95b0ef055332fe549f5e8c7b89c9e887cd7 Mon Sep 17 00:00:00 2001 From: Daniel Luo <103051750+DanielLuo7@users.noreply.github.com> Date: Thu, 18 May 2023 20:40:52 -0400 Subject: [PATCH 76/85] add __main__ around print (#8747) --- ciphers/mixed_keyword_cypher.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/ciphers/mixed_keyword_cypher.py b/ciphers/mixed_keyword_cypher.py index 806004faa079..93a0e3acb7b1 100644 --- a/ciphers/mixed_keyword_cypher.py +++ b/ciphers/mixed_keyword_cypher.py @@ -65,4 +65,5 @@ def mixed_keyword(key: str = "college", pt: str = "UNIVERSITY") -> str: return cypher -print(mixed_keyword("college", "UNIVERSITY")) +if __name__ == "__main__": + print(mixed_keyword("college", "UNIVERSITY")) From c44566d42b2e075336d197edd185b89e07c7f7a1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 May 2023 05:54:30 +0200 Subject: [PATCH 77/85] [pre-commit.ci] pre-commit autoupdate (#8759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/charliermarsh/ruff-pre-commit: v0.0.267 → v0.0.269](https://github.com/charliermarsh/ruff-pre-commit/compare/v0.0.267...v0.0.269) - [github.com/abravalheri/validate-pyproject: v0.12.2 → v0.13](https://github.com/abravalheri/validate-pyproject/compare/v0.12.2...v0.13) * updating DIRECTORY.md --------- Co-authored-by: pre-commit-ci[bot] 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- DIRECTORY.md | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6bdbc7370c9c..bd5bca8f05ab 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.267 + rev: v0.0.269 hooks: - id: ruff @@ -46,7 +46,7 @@ repos: pass_filenames: false - repo: https://github.com/abravalheri/validate-pyproject - rev: v0.12.2 + rev: v0.13 hooks: - id: validate-pyproject diff --git a/DIRECTORY.md b/DIRECTORY.md index 9bd75d669729..aa92305a8c85 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -578,6 +578,7 @@ * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) + * [Is Palindrome](maths/is_palindrome.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) From 73586e52ad523cb69c2a24447a19c1244c77d3d8 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 25 May 2023 06:59:15 +0100 Subject: [PATCH 78/85] Mark fetch anime and play as broken (#8763) * updating DIRECTORY.md * updating DIRECTORY.md * fix: Correct ruff errors * fix: Mark anime algorithm as broken * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 1 - .../{fetch_anime_and_play.py => fetch_anime_and_play.py.BROKEN} | 0 2 files changed, 1 deletion(-) rename web_programming/{fetch_anime_and_play.py => fetch_anime_and_play.py.BROKEN} (100%) diff --git a/DIRECTORY.md b/DIRECTORY.md index aa92305a8c85..53669b49d086 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1200,7 
+1200,6 @@ * [Daily Horoscope](web_programming/daily_horoscope.py) * [Download Images From Google Query](web_programming/download_images_from_google_query.py) * [Emails From Url](web_programming/emails_from_url.py) - * [Fetch Anime And Play](web_programming/fetch_anime_and_play.py) * [Fetch Bbc News](web_programming/fetch_bbc_news.py) * [Fetch Github Info](web_programming/fetch_github_info.py) * [Fetch Jobs](web_programming/fetch_jobs.py) diff --git a/web_programming/fetch_anime_and_play.py b/web_programming/fetch_anime_and_play.py.BROKEN similarity index 100% rename from web_programming/fetch_anime_and_play.py rename to web_programming/fetch_anime_and_play.py.BROKEN From 21ba90fdaf0286f522dd4443231489c7a809febf Mon Sep 17 00:00:00 2001 From: Chris O <46587501+ChrisO345@users.noreply.github.com> Date: Thu, 25 May 2023 18:04:42 +1200 Subject: [PATCH 79/85] Dual Number Automatic Differentiation (#8760) * Added dual_number_automatic_differentiation.py * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update maths/dual_number_automatic_differentiation.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + .../dual_number_automatic_differentiation.py | 141 ++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 maths/dual_number_automatic_differentiation.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 53669b49d086..cc9d631c50ce 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -550,6 +550,7 @@ * [Dodecahedron](maths/dodecahedron.py) * [Double Factorial Iterative](maths/double_factorial_iterative.py) * [Double Factorial Recursive](maths/double_factorial_recursive.py) + * [Dual Number Automatic Differentiation](maths/dual_number_automatic_differentiation.py) * [Entropy](maths/entropy.py) * [Euclidean 
Distance](maths/euclidean_distance.py) * [Euclidean Gcd](maths/euclidean_gcd.py) diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py new file mode 100644 index 000000000000..9aa75830c4a1 --- /dev/null +++ b/maths/dual_number_automatic_differentiation.py @@ -0,0 +1,141 @@ +from math import factorial + +""" +https://en.wikipedia.org/wiki/Automatic_differentiation#Automatic_differentiation_using_dual_numbers +https://blog.jliszka.org/2013/10/24/exact-numeric-nth-derivatives.html + +Note this only works for basic functions, f(x) where the power of x is positive. +""" + + +class Dual: + def __init__(self, real, rank): + self.real = real + if isinstance(rank, int): + self.duals = [1] * rank + else: + self.duals = rank + + def __repr__(self): + return ( + f"{self.real}+" + f"{'+'.join(str(dual)+'E'+str(n+1)for n,dual in enumerate(self.duals))}" + ) + + def reduce(self): + cur = self.duals.copy() + while cur[-1] == 0: + cur.pop(-1) + return Dual(self.real, cur) + + def __add__(self, other): + if not isinstance(other, Dual): + return Dual(self.real + other, self.duals) + s_dual = self.duals.copy() + o_dual = other.duals.copy() + if len(s_dual) > len(o_dual): + o_dual.extend([1] * (len(s_dual) - len(o_dual))) + elif len(s_dual) < len(o_dual): + s_dual.extend([1] * (len(o_dual) - len(s_dual))) + new_duals = [] + for i in range(len(s_dual)): + new_duals.append(s_dual[i] + o_dual[i]) + return Dual(self.real + other.real, new_duals) + + __radd__ = __add__ + + def __sub__(self, other): + return self + other * -1 + + def __mul__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i * other) + return Dual(self.real * other, new_duals) + new_duals = [0] * (len(self.duals) + len(other.duals) + 1) + for i, item in enumerate(self.duals): + for j, jtem in enumerate(other.duals): + new_duals[i + j + 1] += item * jtem + for k in range(len(self.duals)): + new_duals[k] += 
self.duals[k] * other.real + for index in range(len(other.duals)): + new_duals[index] += other.duals[index] * self.real + return Dual(self.real * other.real, new_duals) + + __rmul__ = __mul__ + + def __truediv__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i / other) + return Dual(self.real / other, new_duals) + raise ValueError() + + def __floordiv__(self, other): + if not isinstance(other, Dual): + new_duals = [] + for i in self.duals: + new_duals.append(i // other) + return Dual(self.real // other, new_duals) + raise ValueError() + + def __pow__(self, n): + if n < 0 or isinstance(n, float): + raise ValueError("power must be a positive integer") + if n == 0: + return 1 + if n == 1: + return self + x = self + for _ in range(n - 1): + x *= self + return x + + +def differentiate(func, position, order): + """ + >>> differentiate(lambda x: x**2, 2, 2) + 2 + >>> differentiate(lambda x: x**2 * x**4, 9, 2) + 196830 + >>> differentiate(lambda y: 0.5 * (y + 3) ** 6, 3.5, 4) + 7605.0 + >>> differentiate(lambda y: y ** 2, 4, 3) + 0 + >>> differentiate(8, 8, 8) + Traceback (most recent call last): + ... + ValueError: differentiate() requires a function as input for func + >>> differentiate(lambda x: x **2, "", 1) + Traceback (most recent call last): + ... + ValueError: differentiate() requires a float as input for position + >>> differentiate(lambda x: x**2, 3, "") + Traceback (most recent call last): + ... 
+ ValueError: differentiate() requires an int as input for order + """ + if not callable(func): + raise ValueError("differentiate() requires a function as input for func") + if not isinstance(position, (float, int)): + raise ValueError("differentiate() requires a float as input for position") + if not isinstance(order, int): + raise ValueError("differentiate() requires an int as input for order") + d = Dual(position, 1) + result = func(d) + if order == 0: + return result.real + return result.duals[order - 1] * factorial(order) + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + + def f(y): + return y**2 * y**4 + + print(differentiate(f, 9, 2)) From 6d128ad5458dd73fdc7181f0f25bc170e0134588 Mon Sep 17 00:00:00 2001 From: Ratnesh Kumar <89133941+ratneshrt@users.noreply.github.com> Date: Thu, 25 May 2023 16:04:11 +0530 Subject: [PATCH 80/85] Fix CI badge in the README.md (#8137) From 155924be3d525fc3a3f042dc2f9fc8d4125717e0 Mon Sep 17 00:00:00 2001 From: Caeden Perelli-Harris Date: Thu, 25 May 2023 11:56:23 +0100 Subject: [PATCH 81/85] Merge and add benchmarks to palindrome algorithms in the strings/ directory (#8749) * refactor: Merge and add benchmarks to palindrome * updating DIRECTORY.md * chore: Fix failing tests * Update strings/palindrome.py Co-authored-by: Christian Clauss * Update palindrome.py --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> Co-authored-by: Christian Clauss --- DIRECTORY.md | 1 - strings/is_palindrome.py | 41 ---------------------------------------- strings/palindrome.py | 40 ++++++++++++++++++++++++++++++++++++++- 3 files changed, 39 insertions(+), 43 deletions(-) delete mode 100644 strings/is_palindrome.py diff --git a/DIRECTORY.md b/DIRECTORY.md index cc9d631c50ce..0c1e90c4e886 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1157,7 +1157,6 @@ * [Indian Phone Validator](strings/indian_phone_validator.py) * [Is Contains Unique Chars](strings/is_contains_unique_chars.py) * [Is 
Isogram](strings/is_isogram.py) - * [Is Palindrome](strings/is_palindrome.py) * [Is Pangram](strings/is_pangram.py) * [Is Spain National Id](strings/is_spain_national_id.py) * [Is Srilankan Phone Number](strings/is_srilankan_phone_number.py) diff --git a/strings/is_palindrome.py b/strings/is_palindrome.py deleted file mode 100644 index 406aa2e8d3c3..000000000000 --- a/strings/is_palindrome.py +++ /dev/null @@ -1,41 +0,0 @@ -def is_palindrome(s: str) -> bool: - """ - Determine if the string s is a palindrome. - - >>> is_palindrome("A man, A plan, A canal -- Panama!") - True - >>> is_palindrome("Hello") - False - >>> is_palindrome("Able was I ere I saw Elba") - True - >>> is_palindrome("racecar") - True - >>> is_palindrome("Mr. Owl ate my metal worm?") - True - """ - # Since punctuation, capitalization, and spaces are often ignored while checking - # palindromes, we first remove them from our string. - s = "".join(character for character in s.lower() if character.isalnum()) - # return s == s[::-1] the slicing method - # uses extra spaces we can - # better with iteration method. - - end = len(s) // 2 - n = len(s) - - # We need to traverse till half of the length of string - # as we can get access of the i'th last element from - # i'th index. 
- # eg: [0,1,2,3,4,5] => 4th index can be accessed - # with the help of 1st index (i==n-i-1) - # where n is length of string - - return all(s[i] == s[n - i - 1] for i in range(end)) - - -if __name__ == "__main__": - s = input("Please enter a string to see if it is a palindrome: ") - if is_palindrome(s): - print(f"'{s}' is a palindrome.") - else: - print(f"'{s}' is not a palindrome.") diff --git a/strings/palindrome.py b/strings/palindrome.py index dd1fe316f479..bfdb3ddcf396 100644 --- a/strings/palindrome.py +++ b/strings/palindrome.py @@ -1,5 +1,7 @@ # Algorithms to determine if a string is palindrome +from timeit import timeit + test_data = { "MALAYALAM": True, "String": False, @@ -33,6 +35,25 @@ def is_palindrome(s: str) -> bool: return True +def is_palindrome_traversal(s: str) -> bool: + """ + Return True if s is a palindrome otherwise return False. + + >>> all(is_palindrome_traversal(key) is value for key, value in test_data.items()) + True + """ + end = len(s) // 2 + n = len(s) + + # We need to traverse till half of the length of string + # as we can get access of the i'th last element from + # i'th index. + # eg: [0,1,2,3,4,5] => 4th index can be accessed + # with the help of 1st index (i==n-i-1) + # where n is length of string + return all(s[i] == s[n - i - 1] for i in range(end)) + + def is_palindrome_recursive(s: str) -> bool: """ Return True if s is a palindrome otherwise return False. 
@@ -40,7 +61,7 @@ def is_palindrome_recursive(s: str) -> bool: >>> all(is_palindrome_recursive(key) is value for key, value in test_data.items()) True """ - if len(s) <= 1: + if len(s) <= 2: return True if s[0] == s[len(s) - 1]: return is_palindrome_recursive(s[1:-1]) @@ -58,9 +79,26 @@ def is_palindrome_slice(s: str) -> bool: return s == s[::-1] +def benchmark_function(name: str) -> None: + stmt = f"all({name}(key) is value for key, value in test_data.items())" + setup = f"from __main__ import test_data, {name}" + number = 500000 + result = timeit(stmt=stmt, setup=setup, number=number) + print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds") + + if __name__ == "__main__": for key, value in test_data.items(): assert is_palindrome(key) is is_palindrome_recursive(key) assert is_palindrome(key) is is_palindrome_slice(key) print(f"{key:21} {value}") print("a man a plan a canal panama") + + # finished 500,000 runs in 0.46793 seconds + benchmark_function("is_palindrome_slice") + # finished 500,000 runs in 0.85234 seconds + benchmark_function("is_palindrome") + # finished 500,000 runs in 1.32028 seconds + benchmark_function("is_palindrome_recursive") + # finished 500,000 runs in 2.08679 seconds + benchmark_function("is_palindrome_traversal") From 7333b518a94843ee985c492b1069439e70fba944 Mon Sep 17 00:00:00 2001 From: Juyoung Kim <61103343+JadeKim042386@users.noreply.github.com> Date: Thu, 25 May 2023 21:54:18 +0900 Subject: [PATCH 82/85] fix: graphs/greedy_best_first typo (#8766) #8764 --- graphs/greedy_best_first.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py index d49e65b9d814..35f7ca9feeef 100644 --- a/graphs/greedy_best_first.py +++ b/graphs/greedy_best_first.py @@ -58,8 +58,8 @@ def calculate_heuristic(self) -> float: The heuristic here is the Manhattan Distance Could elaborate to offer more than one choice """ - dy = abs(self.pos_x - self.goal_x) - dx = abs(self.pos_y 
- self.goal_y) + dx = abs(self.pos_x - self.goal_x) + dy = abs(self.pos_y - self.goal_y) return dx + dy def __lt__(self, other) -> bool: From 9af6da8affbc40ba7c7295965618c5ad307ab5c7 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 26 May 2023 08:50:33 +0200 Subject: [PATCH 83/85] Rename is_palindrome.py to is_int_palindrome.py (#8768) * Rename is_palindrome.py to is_int_palindrome.py * updating DIRECTORY.md --------- Co-authored-by: github-actions <${GITHUB_ACTOR}@users.noreply.github.com> --- DIRECTORY.md | 2 +- maths/{is_palindrome.py => is_int_palindrome.py} | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) rename maths/{is_palindrome.py => is_int_palindrome.py} (67%) diff --git a/DIRECTORY.md b/DIRECTORY.md index 0c1e90c4e886..231b0e2f1d2f 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -578,8 +578,8 @@ * [Hardy Ramanujanalgo](maths/hardy_ramanujanalgo.py) * [Hexagonal Number](maths/hexagonal_number.py) * [Integration By Simpson Approx](maths/integration_by_simpson_approx.py) + * [Is Int Palindrome](maths/is_int_palindrome.py) * [Is Ip V4 Address Valid](maths/is_ip_v4_address_valid.py) - * [Is Palindrome](maths/is_palindrome.py) * [Is Square Free](maths/is_square_free.py) * [Jaccard Similarity](maths/jaccard_similarity.py) * [Juggler Sequence](maths/juggler_sequence.py) diff --git a/maths/is_palindrome.py b/maths/is_int_palindrome.py similarity index 67% rename from maths/is_palindrome.py rename to maths/is_int_palindrome.py index ba60573ab022..63dc9e2138e8 100644 --- a/maths/is_palindrome.py +++ b/maths/is_int_palindrome.py @@ -1,19 +1,19 @@ -def is_palindrome(num: int) -> bool: +def is_int_palindrome(num: int) -> bool: """ Returns whether `num` is a palindrome or not (see for reference https://en.wikipedia.org/wiki/Palindromic_number). 
- >>> is_palindrome(-121) + >>> is_int_palindrome(-121) False - >>> is_palindrome(0) + >>> is_int_palindrome(0) True - >>> is_palindrome(10) + >>> is_int_palindrome(10) False - >>> is_palindrome(11) + >>> is_int_palindrome(11) True - >>> is_palindrome(101) + >>> is_int_palindrome(101) True - >>> is_palindrome(120) + >>> is_int_palindrome(120) False """ if num < 0: From 58eddb4762bc835ab5045f79afb2bd3c86c4abf1 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Fri, 26 May 2023 09:34:17 +0200 Subject: [PATCH 84/85] Add more ruff rules (#8767) * Add more ruff rules * Add more ruff rules * pre-commit: Update ruff v0.0.269 -> v0.0.270 * Apply suggestions from code review * Fix doctest * Fix doctest (ignore whitespace) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Dhruv Manilawala Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- .../jacobi_iteration_method.py | 30 ++-- arithmetic_analysis/lu_decomposition.py | 5 +- audio_filters/iir_filter.py | 14 +- backtracking/knight_tour.py | 3 +- bit_manipulation/reverse_bits.py | 3 +- ciphers/base64.py | 12 +- ciphers/beaufort_cipher.py | 2 +- ciphers/cryptomath_module.py | 3 +- ciphers/enigma_machine2.py | 30 ++-- ciphers/hill_cipher.py | 7 +- .../astronomical_length_scale_conversion.py | 6 +- conversions/length_conversion.py | 6 +- conversions/speed_conversions.py | 3 +- conversions/weight_conversion.py | 3 +- .../binary_search_tree_recursive.py | 6 +- .../binary_tree/binary_tree_mirror.py | 3 +- data_structures/disjoint_set/disjoint_set.py | 3 +- .../linked_list/circular_linked_list.py | 8 +- .../linked_list/doubly_linked_list.py | 4 +- .../linked_list/singly_linked_list.py | 4 +- data_structures/stacks/stack.py | 6 +- digital_image_processing/dithering/burkes.py | 3 +- divide_and_conquer/convex_hull.py | 8 +- dynamic_programming/knapsack.py | 15 +- 
dynamic_programming/minimum_steps_to_one.py | 3 +- dynamic_programming/rod_cutting.py | 10 +- dynamic_programming/viterbi.py | 17 ++- electronics/resistor_equivalence.py | 6 +- genetic_algorithm/basic_string.py | 8 +- graphics/vector3_for_2d_rendering.py | 8 +- graphs/breadth_first_search_shortest_path.py | 3 +- linear_algebra/src/schur_complement.py | 14 +- machine_learning/similarity_search.py | 21 +-- machine_learning/support_vector_machines.py | 3 +- maths/3n_plus_1.py | 6 +- maths/automorphic_number.py | 3 +- maths/catalan_number.py | 6 +- .../dual_number_automatic_differentiation.py | 4 +- maths/hexagonal_number.py | 3 +- maths/juggler_sequence.py | 6 +- maths/liouville_lambda.py | 3 +- maths/manhattan_distance.py | 18 +-- maths/pronic_number.py | 3 +- maths/proth_number.py | 6 +- maths/radix2_fft.py | 2 +- maths/sieve_of_eratosthenes.py | 3 +- maths/sylvester_sequence.py | 3 +- maths/twin_prime.py | 3 +- matrix/matrix_operation.py | 12 +- matrix/sherman_morrison.py | 3 +- neural_network/input_data.py | 12 +- other/nested_brackets.py | 2 +- other/scoring_algorithm.py | 3 +- project_euler/problem_054/sol1.py | 6 +- project_euler/problem_068/sol1.py | 3 +- project_euler/problem_131/sol1.py | 5 +- pyproject.toml | 139 +++++++++++++----- scripts/build_directory_md.py | 2 +- sorts/dutch_national_flag_sort.py | 5 +- strings/barcode_validator.py | 3 +- strings/capitalize.py | 2 +- strings/is_spain_national_id.py | 3 +- strings/snake_case_to_camel_pascal_case.py | 8 +- web_programming/reddit.py | 3 +- web_programming/search_books_by_isbn.py | 3 +- web_programming/slack_message.py | 7 +- 67 files changed, 349 insertions(+), 223 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bd5bca8f05ab..4c70ae219f74 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/charliermarsh/ruff-pre-commit - rev: v0.0.269 + rev: v0.0.270 hooks: - id: ruff diff --git 
a/arithmetic_analysis/jacobi_iteration_method.py b/arithmetic_analysis/jacobi_iteration_method.py index fe506a94a65d..17edf4bf4b8b 100644 --- a/arithmetic_analysis/jacobi_iteration_method.py +++ b/arithmetic_analysis/jacobi_iteration_method.py @@ -49,7 +49,9 @@ def jacobi_iteration_method( >>> constant = np.array([[2], [-6]]) >>> init_val = [0.5, -0.5, -0.5] >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... ValueError: Coefficient and constant matrices dimensions must be nxn and nx1 but @@ -59,7 +61,9 @@ def jacobi_iteration_method( >>> constant = np.array([[2], [-6], [-4]]) >>> init_val = [0.5, -0.5] >>> iterations = 3 - >>> jacobi_iteration_method(coefficient, constant, init_val, iterations) + >>> jacobi_iteration_method( + ... coefficient, constant, init_val, iterations + ... ) # doctest: +NORMALIZE_WHITESPACE Traceback (most recent call last): ... 
ValueError: Number of initial values must be equal to number of rows in coefficient @@ -79,24 +83,26 @@ def jacobi_iteration_method( rows2, cols2 = constant_matrix.shape if rows1 != cols1: - raise ValueError( - f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" - ) + msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}" + raise ValueError(msg) if cols2 != 1: - raise ValueError(f"Constant matrix must be nx1 but received {rows2}x{cols2}") + msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}" + raise ValueError(msg) if rows1 != rows2: - raise ValueError( - f"""Coefficient and constant matrices dimensions must be nxn and nx1 but - received {rows1}x{cols1} and {rows2}x{cols2}""" + msg = ( + "Coefficient and constant matrices dimensions must be nxn and nx1 but " + f"received {rows1}x{cols1} and {rows2}x{cols2}" ) + raise ValueError(msg) if len(init_val) != rows1: - raise ValueError( - f"""Number of initial values must be equal to number of rows in coefficient - matrix but received {len(init_val)} and {rows1}""" + msg = ( + "Number of initial values must be equal to number of rows in coefficient " + f"matrix but received {len(init_val)} and {rows1}" ) + raise ValueError(msg) if iterations <= 0: raise ValueError("Iterations must be at least 1") diff --git a/arithmetic_analysis/lu_decomposition.py b/arithmetic_analysis/lu_decomposition.py index 941c1dadf556..eaabce5449c5 100644 --- a/arithmetic_analysis/lu_decomposition.py +++ b/arithmetic_analysis/lu_decomposition.py @@ -80,10 +80,11 @@ def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray # Ensure that table is a square array rows, columns = np.shape(table) if rows != columns: - raise ValueError( - f"'table' has to be of square shaped array but got a " + msg = ( + "'table' has to be of square shaped array but got a " f"{rows}x{columns} array:\n{table}" ) + raise ValueError(msg) lower = np.zeros((rows, columns)) upper = 
np.zeros((rows, columns)) diff --git a/audio_filters/iir_filter.py b/audio_filters/iir_filter.py index bd448175f6f3..f3c1ad43b001 100644 --- a/audio_filters/iir_filter.py +++ b/audio_filters/iir_filter.py @@ -50,16 +50,18 @@ def set_coefficients(self, a_coeffs: list[float], b_coeffs: list[float]) -> None a_coeffs = [1.0, *a_coeffs] if len(a_coeffs) != self.order + 1: - raise ValueError( - f"Expected a_coeffs to have {self.order + 1} elements for {self.order}" - f"-order filter, got {len(a_coeffs)}" + msg = ( + f"Expected a_coeffs to have {self.order + 1} elements " + f"for {self.order}-order filter, got {len(a_coeffs)}" ) + raise ValueError(msg) if len(b_coeffs) != self.order + 1: - raise ValueError( - f"Expected b_coeffs to have {self.order + 1} elements for {self.order}" - f"-order filter, got {len(a_coeffs)}" + msg = ( + f"Expected b_coeffs to have {self.order + 1} elements " + f"for {self.order}-order filter, got {len(a_coeffs)}" ) + raise ValueError(msg) self.a_coeffs = a_coeffs self.b_coeffs = b_coeffs diff --git a/backtracking/knight_tour.py b/backtracking/knight_tour.py index bb650ece3f5e..cc88307b7fe8 100644 --- a/backtracking/knight_tour.py +++ b/backtracking/knight_tour.py @@ -91,7 +91,8 @@ def open_knight_tour(n: int) -> list[list[int]]: return board board[i][j] = 0 - raise ValueError(f"Open Kight Tour cannot be performed on a board of size {n}") + msg = f"Open Kight Tour cannot be performed on a board of size {n}" + raise ValueError(msg) if __name__ == "__main__": diff --git a/bit_manipulation/reverse_bits.py b/bit_manipulation/reverse_bits.py index 55608ae12908..a8c77c11bfdd 100644 --- a/bit_manipulation/reverse_bits.py +++ b/bit_manipulation/reverse_bits.py @@ -14,10 +14,11 @@ def get_reverse_bit_string(number: int) -> str: TypeError: operation can not be conducted on a object of type str """ if not isinstance(number, int): - raise TypeError( + msg = ( "operation can not be conducted on a object of type " f"{type(number).__name__}" ) + raise 
TypeError(msg) bit_string = "" for _ in range(0, 32): bit_string += str(number % 2) diff --git a/ciphers/base64.py b/ciphers/base64.py index 38a952acc307..2b950b1be37d 100644 --- a/ciphers/base64.py +++ b/ciphers/base64.py @@ -34,9 +34,8 @@ def base64_encode(data: bytes) -> bytes: """ # Make sure the supplied data is a bytes-like object if not isinstance(data, bytes): - raise TypeError( - f"a bytes-like object is required, not '{data.__class__.__name__}'" - ) + msg = f"a bytes-like object is required, not '{data.__class__.__name__}'" + raise TypeError(msg) binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data) @@ -88,10 +87,11 @@ def base64_decode(encoded_data: str) -> bytes: """ # Make sure encoded_data is either a string or a bytes-like object if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str): - raise TypeError( - "argument should be a bytes-like object or ASCII string, not " - f"'{encoded_data.__class__.__name__}'" + msg = ( + "argument should be a bytes-like object or ASCII string, " + f"not '{encoded_data.__class__.__name__}'" ) + raise TypeError(msg) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object diff --git a/ciphers/beaufort_cipher.py b/ciphers/beaufort_cipher.py index 8eae847a7ff7..788fc72b89c3 100644 --- a/ciphers/beaufort_cipher.py +++ b/ciphers/beaufort_cipher.py @@ -5,7 +5,7 @@ from string import ascii_uppercase dict1 = {char: i for i, char in enumerate(ascii_uppercase)} -dict2 = {i: char for i, char in enumerate(ascii_uppercase)} +dict2 = dict(enumerate(ascii_uppercase)) # This function generates the key in diff --git a/ciphers/cryptomath_module.py b/ciphers/cryptomath_module.py index be8764ff38c3..6f15f7b733e6 100644 --- a/ciphers/cryptomath_module.py +++ b/ciphers/cryptomath_module.py @@ -6,7 +6,8 @@ def gcd(a: int, b: int) -> int: def find_mod_inverse(a: int, m: int) -> int: if gcd(a, m) != 1: - raise ValueError(f"mod inverse of 
{a!r} and {m!r} does not exist") + msg = f"mod inverse of {a!r} and {m!r} does not exist" + raise ValueError(msg) u1, u2, u3 = 1, 0, a v1, v2, v3 = 0, 1, m while v3 != 0: diff --git a/ciphers/enigma_machine2.py b/ciphers/enigma_machine2.py index 07d21893f192..ec0d44e4a6c6 100644 --- a/ciphers/enigma_machine2.py +++ b/ciphers/enigma_machine2.py @@ -87,22 +87,20 @@ def _validator( # Checks if there are 3 unique rotors if (unique_rotsel := len(set(rotsel))) < 3: - raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})") + msg = f"Please use 3 unique rotors (not {unique_rotsel})" + raise Exception(msg) # Checks if rotor positions are valid rotorpos1, rotorpos2, rotorpos3 = rotpos if not 0 < rotorpos1 <= len(abc): - raise ValueError( - "First rotor position is not within range of 1..26 (" f"{rotorpos1}" - ) + msg = f"First rotor position is not within range of 1..26 ({rotorpos1}" + raise ValueError(msg) if not 0 < rotorpos2 <= len(abc): - raise ValueError( - "Second rotor position is not within range of 1..26 (" f"{rotorpos2})" - ) + msg = f"Second rotor position is not within range of 1..26 ({rotorpos2})" + raise ValueError(msg) if not 0 < rotorpos3 <= len(abc): - raise ValueError( - "Third rotor position is not within range of 1..26 (" f"{rotorpos3})" - ) + msg = f"Third rotor position is not within range of 1..26 ({rotorpos3})" + raise ValueError(msg) # Validates string and returns dict pbdict = _plugboard(pb) @@ -130,9 +128,11 @@ def _plugboard(pbstring: str) -> dict[str, str]: # a) is type string # b) has even length (so pairs can be made) if not isinstance(pbstring, str): - raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})") + msg = f"Plugboard setting isn't type string ({type(pbstring)})" + raise TypeError(msg) elif len(pbstring) % 2 != 0: - raise Exception(f"Odd number of symbols ({len(pbstring)})") + msg = f"Odd number of symbols ({len(pbstring)})" + raise Exception(msg) elif pbstring == "": return {} @@ -142,9 +142,11 @@ def 
_plugboard(pbstring: str) -> dict[str, str]: tmppbl = set() for i in pbstring: if i not in abc: - raise Exception(f"'{i}' not in list of symbols") + msg = f"'{i}' not in list of symbols" + raise Exception(msg) elif i in tmppbl: - raise Exception(f"Duplicate symbol ({i})") + msg = f"Duplicate symbol ({i})" + raise Exception(msg) else: tmppbl.add(i) del tmppbl diff --git a/ciphers/hill_cipher.py b/ciphers/hill_cipher.py index f646d567b4c8..b4424e82298e 100644 --- a/ciphers/hill_cipher.py +++ b/ciphers/hill_cipher.py @@ -104,10 +104,11 @@ def check_determinant(self) -> None: req_l = len(self.key_string) if greatest_common_divisor(det, len(self.key_string)) != 1: - raise ValueError( - f"determinant modular {req_l} of encryption key({det}) is not co prime " - f"w.r.t {req_l}.\nTry another key." + msg = ( + f"determinant modular {req_l} of encryption key({det}) " + f"is not co prime w.r.t {req_l}.\nTry another key." ) + raise ValueError(msg) def process_text(self, text: str) -> str: """ diff --git a/conversions/astronomical_length_scale_conversion.py b/conversions/astronomical_length_scale_conversion.py index 804d82487a25..0f413644906d 100644 --- a/conversions/astronomical_length_scale_conversion.py +++ b/conversions/astronomical_length_scale_conversion.py @@ -77,15 +77,17 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: to_sanitized = UNIT_SYMBOL.get(to_sanitized, to_sanitized) if from_sanitized not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) if to_sanitized not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) from_exponent = METRIC_CONVERSION[from_sanitized] to_exponent = METRIC_CONVERSION[to_sanitized] exponent = 1 diff --git a/conversions/length_conversion.py 
b/conversions/length_conversion.py index 790d9c116845..d8f39515255e 100644 --- a/conversions/length_conversion.py +++ b/conversions/length_conversion.py @@ -104,15 +104,17 @@ def length_conversion(value: float, from_type: str, to_type: str) -> float: new_to = to_type.lower().rstrip("s") new_to = TYPE_CONVERSION.get(new_to, new_to) if new_from not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'from_type' value: {from_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) if new_to not in METRIC_CONVERSION: - raise ValueError( + msg = ( f"Invalid 'to_type' value: {to_type!r}.\n" f"Conversion abbreviations are: {', '.join(METRIC_CONVERSION)}" ) + raise ValueError(msg) return value * METRIC_CONVERSION[new_from].from_ * METRIC_CONVERSION[new_to].to diff --git a/conversions/speed_conversions.py b/conversions/speed_conversions.py index 62da9e137bc7..ba497119d3f5 100644 --- a/conversions/speed_conversions.py +++ b/conversions/speed_conversions.py @@ -57,10 +57,11 @@ def convert_speed(speed: float, unit_from: str, unit_to: str) -> float: 115.078 """ if unit_to not in speed_chart or unit_from not in speed_chart_inverse: - raise ValueError( + msg = ( f"Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n" f"Valid values are: {', '.join(speed_chart_inverse)}" ) + raise ValueError(msg) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to], 3) diff --git a/conversions/weight_conversion.py b/conversions/weight_conversion.py index 5c032a497a7b..e8326e0b688f 100644 --- a/conversions/weight_conversion.py +++ b/conversions/weight_conversion.py @@ -299,10 +299,11 @@ def weight_conversion(from_type: str, to_type: str, value: float) -> float: 1.999999998903455 """ if to_type not in KILOGRAM_CHART or from_type not in WEIGHT_TYPE_CHART: - raise ValueError( + msg = ( f"Invalid 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n" f"Supported values are: {', 
'.join(WEIGHT_TYPE_CHART)}" ) + raise ValueError(msg) return value * KILOGRAM_CHART[to_type] * WEIGHT_TYPE_CHART[from_type] diff --git a/data_structures/binary_tree/binary_search_tree_recursive.py b/data_structures/binary_tree/binary_search_tree_recursive.py index 97eb8e25bedd..b5b983b9ba4c 100644 --- a/data_structures/binary_tree/binary_search_tree_recursive.py +++ b/data_structures/binary_tree/binary_search_tree_recursive.py @@ -77,7 +77,8 @@ def _put(self, node: Node | None, label: int, parent: Node | None = None) -> Nod elif label > node.label: node.right = self._put(node.right, label, node) else: - raise Exception(f"Node with label {label} already exists") + msg = f"Node with label {label} already exists" + raise Exception(msg) return node @@ -100,7 +101,8 @@ def search(self, label: int) -> Node: def _search(self, node: Node | None, label: int) -> Node: if node is None: - raise Exception(f"Node with label {label} does not exist") + msg = f"Node with label {label} does not exist" + raise Exception(msg) else: if label < node.label: node = self._search(node.left, label) diff --git a/data_structures/binary_tree/binary_tree_mirror.py b/data_structures/binary_tree/binary_tree_mirror.py index 1ef950ad62d7..b8548f4ec515 100644 --- a/data_structures/binary_tree/binary_tree_mirror.py +++ b/data_structures/binary_tree/binary_tree_mirror.py @@ -31,7 +31,8 @@ def binary_tree_mirror(binary_tree: dict, root: int = 1) -> dict: if not binary_tree: raise ValueError("binary tree cannot be empty") if root not in binary_tree: - raise ValueError(f"root {root} is not present in the binary_tree") + msg = f"root {root} is not present in the binary_tree" + raise ValueError(msg) binary_tree_mirror_dictionary = dict(binary_tree) binary_tree_mirror_dict(binary_tree_mirror_dictionary, root) return binary_tree_mirror_dictionary diff --git a/data_structures/disjoint_set/disjoint_set.py b/data_structures/disjoint_set/disjoint_set.py index f8500bf2c3af..12dafb2d935e 100644 --- 
a/data_structures/disjoint_set/disjoint_set.py +++ b/data_structures/disjoint_set/disjoint_set.py @@ -56,7 +56,8 @@ def find_python_set(node: Node) -> set: for s in sets: if node.data in s: return s - raise ValueError(f"{node.data} is not in {sets}") + msg = f"{node.data} is not in {sets}" + raise ValueError(msg) def test_disjoint_set() -> None: diff --git a/data_structures/linked_list/circular_linked_list.py b/data_structures/linked_list/circular_linked_list.py index 9092fb29e3ff..325d91026137 100644 --- a/data_structures/linked_list/circular_linked_list.py +++ b/data_structures/linked_list/circular_linked_list.py @@ -94,25 +94,25 @@ def test_circular_linked_list() -> None: try: circular_linked_list.delete_front() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1) - raise AssertionError() + raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0) - raise AssertionError() + raise AssertionError except IndexError: assert True diff --git a/data_structures/linked_list/doubly_linked_list.py b/data_structures/linked_list/doubly_linked_list.py index 69763d12da15..1a6c48191c4e 100644 --- a/data_structures/linked_list/doubly_linked_list.py +++ b/data_structures/linked_list/doubly_linked_list.py @@ -198,13 +198,13 @@ def test_doubly_linked_list() -> None: try: linked_list.delete_head() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. 
diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index a8f9e8ebb977..890e21c9b404 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -353,13 +353,13 @@ def test_singly_linked_list() -> None: try: linked_list.delete_head() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. try: linked_list.delete_tail() - raise AssertionError() # This should not happen. + raise AssertionError # This should not happen. except IndexError: assert True # This should happen. diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 55d424d5018b..a14f4648a399 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -92,13 +92,13 @@ def test_stack() -> None: try: _ = stack.pop() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackUnderflowError: assert True # This should happen try: _ = stack.peek() - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackUnderflowError: assert True # This should happen @@ -118,7 +118,7 @@ def test_stack() -> None: try: stack.push(200) - raise AssertionError() # This should not happen + raise AssertionError # This should not happen except StackOverflowError: assert True # This should happen diff --git a/digital_image_processing/dithering/burkes.py b/digital_image_processing/dithering/burkes.py index 2bf0bbe03225..0804104abe58 100644 --- a/digital_image_processing/dithering/burkes.py +++ b/digital_image_processing/dithering/burkes.py @@ -21,7 +21,8 @@ def __init__(self, input_img, threshold: int): self.max_threshold = int(self.get_greyscale(255, 255, 255)) if not self.min_threshold < threshold < self.max_threshold: - raise ValueError(f"Factor value should be 
from 0 to {self.max_threshold}") + msg = f"Factor value should be from 0 to {self.max_threshold}" + raise ValueError(msg) self.input_img = input_img self.threshold = threshold diff --git a/divide_and_conquer/convex_hull.py b/divide_and_conquer/convex_hull.py index 39e78be04a71..1ad933417da6 100644 --- a/divide_and_conquer/convex_hull.py +++ b/divide_and_conquer/convex_hull.py @@ -174,12 +174,12 @@ def _validate_input(points: list[Point] | list[list[float]]) -> list[Point]: """ if not hasattr(points, "__iter__"): - raise ValueError( - f"Expecting an iterable object but got an non-iterable type {points}" - ) + msg = f"Expecting an iterable object but got an non-iterable type {points}" + raise ValueError(msg) if not points: - raise ValueError(f"Expecting a list of points but got {points}") + msg = f"Expecting a list of points but got {points}" + raise ValueError(msg) return _construct_points(points) diff --git a/dynamic_programming/knapsack.py b/dynamic_programming/knapsack.py index b12d30313e31..489b5ada450a 100644 --- a/dynamic_programming/knapsack.py +++ b/dynamic_programming/knapsack.py @@ -78,17 +78,18 @@ def knapsack_with_example_solution(w: int, wt: list, val: list): num_items = len(wt) if num_items != len(val): - raise ValueError( - "The number of weights must be the " - "same as the number of values.\nBut " - f"got {num_items} weights and {len(val)} values" + msg = ( + "The number of weights must be the same as the number of values.\n" + f"But got {num_items} weights and {len(val)} values" ) + raise ValueError(msg) for i in range(num_items): if not isinstance(wt[i], int): - raise TypeError( - "All weights must be integers but " - f"got weight of type {type(wt[i])} at index {i}" + msg = ( + "All weights must be integers but got weight of " + f"type {type(wt[i])} at index {i}" ) + raise TypeError(msg) optimal_val, dp_table = knapsack(w, wt, val, num_items) example_optional_set: set = set() diff --git a/dynamic_programming/minimum_steps_to_one.py 
b/dynamic_programming/minimum_steps_to_one.py index f4eb7033dd20..8785027fbff3 100644 --- a/dynamic_programming/minimum_steps_to_one.py +++ b/dynamic_programming/minimum_steps_to_one.py @@ -42,7 +42,8 @@ def min_steps_to_one(number: int) -> int: """ if number <= 0: - raise ValueError(f"n must be greater than 0. Got n = {number}") + msg = f"n must be greater than 0. Got n = {number}" + raise ValueError(msg) table = [number + 1] * (number + 1) diff --git a/dynamic_programming/rod_cutting.py b/dynamic_programming/rod_cutting.py index 79104d8f4044..f80fa440ae86 100644 --- a/dynamic_programming/rod_cutting.py +++ b/dynamic_programming/rod_cutting.py @@ -177,13 +177,15 @@ def _enforce_args(n: int, prices: list): the rod """ if n < 0: - raise ValueError(f"n must be greater than or equal to 0. Got n = {n}") + msg = f"n must be greater than or equal to 0. Got n = {n}" + raise ValueError(msg) if n > len(prices): - raise ValueError( - "Each integral piece of rod must have a corresponding " - f"price. Got n = {n} but length of prices = {len(prices)}" + msg = ( + "Each integral piece of rod must have a corresponding price. 
" + f"Got n = {n} but length of prices = {len(prices)}" ) + raise ValueError(msg) def main(): diff --git a/dynamic_programming/viterbi.py b/dynamic_programming/viterbi.py index 93ab845e2ae8..764d45dc2c05 100644 --- a/dynamic_programming/viterbi.py +++ b/dynamic_programming/viterbi.py @@ -297,11 +297,13 @@ def _validate_list(_object: Any, var_name: str) -> None: """ if not isinstance(_object, list): - raise ValueError(f"{var_name} must be a list") + msg = f"{var_name} must be a list" + raise ValueError(msg) else: for x in _object: if not isinstance(x, str): - raise ValueError(f"{var_name} must be a list of strings") + msg = f"{var_name} must be a list of strings" + raise ValueError(msg) def _validate_dicts( @@ -384,14 +386,15 @@ def _validate_dict( ValueError: mock_name nested dictionary all values must be float """ if not isinstance(_object, dict): - raise ValueError(f"{var_name} must be a dict") + msg = f"{var_name} must be a dict" + raise ValueError(msg) if not all(isinstance(x, str) for x in _object): - raise ValueError(f"{var_name} all keys must be strings") + msg = f"{var_name} all keys must be strings" + raise ValueError(msg) if not all(isinstance(x, value_type) for x in _object.values()): nested_text = "nested dictionary " if nested else "" - raise ValueError( - f"{var_name} {nested_text}all values must be {value_type.__name__}" - ) + msg = f"{var_name} {nested_text}all values must be {value_type.__name__}" + raise ValueError(msg) if __name__ == "__main__": diff --git a/electronics/resistor_equivalence.py b/electronics/resistor_equivalence.py index 7142f838a065..55e7f2d6b5d2 100644 --- a/electronics/resistor_equivalence.py +++ b/electronics/resistor_equivalence.py @@ -23,7 +23,8 @@ def resistor_parallel(resistors: list[float]) -> float: index = 0 for resistor in resistors: if resistor <= 0: - raise ValueError(f"Resistor at index {index} has a negative or zero value!") + msg = f"Resistor at index {index} has a negative or zero value!" 
+ raise ValueError(msg) first_sum += 1 / float(resistor) index += 1 return 1 / first_sum @@ -47,7 +48,8 @@ def resistor_series(resistors: list[float]) -> float: for resistor in resistors: sum_r += resistor if resistor < 0: - raise ValueError(f"Resistor at index {index} has a negative value!") + msg = f"Resistor at index {index} has a negative value!" + raise ValueError(msg) index += 1 return sum_r diff --git a/genetic_algorithm/basic_string.py b/genetic_algorithm/basic_string.py index 388e7219f54b..089c5c99a1ec 100644 --- a/genetic_algorithm/basic_string.py +++ b/genetic_algorithm/basic_string.py @@ -96,13 +96,13 @@ def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: - raise ValueError(f"{N_POPULATION} must be bigger than {N_SELECTED}") + msg = f"{N_POPULATION} must be bigger than {N_SELECTED}" + raise ValueError(msg) # Verify that the target contains no genes besides the ones inside genes variable. not_in_genes_list = sorted({c for c in target if c not in genes}) if not_in_genes_list: - raise ValueError( - f"{not_in_genes_list} is not in genes list, evolution cannot converge" - ) + msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge" + raise ValueError(msg) # Generate random starting population. 
population = [] diff --git a/graphics/vector3_for_2d_rendering.py b/graphics/vector3_for_2d_rendering.py index dfa22262a8d8..a332206e67b6 100644 --- a/graphics/vector3_for_2d_rendering.py +++ b/graphics/vector3_for_2d_rendering.py @@ -28,9 +28,8 @@ def convert_to_2d( TypeError: Input values must either be float or int: ['1', 2, 3, 10, 10] """ if not all(isinstance(val, (float, int)) for val in locals().values()): - raise TypeError( - "Input values must either be float or int: " f"{list(locals().values())}" - ) + msg = f"Input values must either be float or int: {list(locals().values())}" + raise TypeError(msg) projected_x = ((x * distance) / (z + distance)) * scale projected_y = ((y * distance) / (z + distance)) * scale return projected_x, projected_y @@ -71,10 +70,11 @@ def rotate( input_variables = locals() del input_variables["axis"] if not all(isinstance(val, (float, int)) for val in input_variables.values()): - raise TypeError( + msg = ( "Input values except axis must either be float or int: " f"{list(input_variables.values())}" ) + raise TypeError(msg) angle = (angle % 360) / 450 * 180 / math.pi if axis == "z": new_x = x * math.cos(angle) - y * math.sin(angle) diff --git a/graphs/breadth_first_search_shortest_path.py b/graphs/breadth_first_search_shortest_path.py index cb21076f91d2..d489b110b3a7 100644 --- a/graphs/breadth_first_search_shortest_path.py +++ b/graphs/breadth_first_search_shortest_path.py @@ -73,9 +73,10 @@ def shortest_path(self, target_vertex: str) -> str: target_vertex_parent = self.parent.get(target_vertex) if target_vertex_parent is None: - raise ValueError( + msg = ( f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}" ) + raise ValueError(msg) return self.shortest_path(target_vertex_parent) + f"->{target_vertex}" diff --git a/linear_algebra/src/schur_complement.py b/linear_algebra/src/schur_complement.py index 3a5f4443afd3..750f4de5e397 100644 --- a/linear_algebra/src/schur_complement.py +++ 
b/linear_algebra/src/schur_complement.py @@ -31,16 +31,18 @@ def schur_complement( shape_c = np.shape(mat_c) if shape_a[0] != shape_b[0]: - raise ValueError( - f"Expected the same number of rows for A and B. \ - Instead found A of size {shape_a} and B of size {shape_b}" + msg = ( + "Expected the same number of rows for A and B. " + f"Instead found A of size {shape_a} and B of size {shape_b}" ) + raise ValueError(msg) if shape_b[1] != shape_c[1]: - raise ValueError( - f"Expected the same number of columns for B and C. \ - Instead found B of size {shape_b} and C of size {shape_c}" + msg = ( + "Expected the same number of columns for B and C. " + f"Instead found B of size {shape_b} and C of size {shape_c}" ) + raise ValueError(msg) a_inv = pseudo_inv if a_inv is None: diff --git a/machine_learning/similarity_search.py b/machine_learning/similarity_search.py index 72979181f67c..7a23ec463c8f 100644 --- a/machine_learning/similarity_search.py +++ b/machine_learning/similarity_search.py @@ -97,26 +97,29 @@ def similarity_search( """ if dataset.ndim != value_array.ndim: - raise ValueError( - f"Wrong input data's dimensions... dataset : {dataset.ndim}, " - f"value_array : {value_array.ndim}" + msg = ( + "Wrong input data's dimensions... " + f"dataset : {dataset.ndim}, value_array : {value_array.ndim}" ) + raise ValueError(msg) try: if dataset.shape[1] != value_array.shape[1]: - raise ValueError( - f"Wrong input data's shape... dataset : {dataset.shape[1]}, " - f"value_array : {value_array.shape[1]}" + msg = ( + "Wrong input data's shape... " + f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}" ) + raise ValueError(msg) except IndexError: if dataset.ndim != value_array.ndim: raise TypeError("Wrong shape") if dataset.dtype != value_array.dtype: - raise TypeError( - f"Input data have different datatype... dataset : {dataset.dtype}, " - f"value_array : {value_array.dtype}" + msg = ( + "Input data have different datatype... 
" + f"dataset : {dataset.dtype}, value_array : {value_array.dtype}" ) + raise TypeError(msg) answer = [] diff --git a/machine_learning/support_vector_machines.py b/machine_learning/support_vector_machines.py index df854cc850b1..24046115ebc4 100644 --- a/machine_learning/support_vector_machines.py +++ b/machine_learning/support_vector_machines.py @@ -74,7 +74,8 @@ def __init__( # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: - raise ValueError(f"Unknown kernel: {kernel}") + msg = f"Unknown kernel: {kernel}" + raise ValueError(msg) # kernels def __linear(self, vector1: ndarray, vector2: ndarray) -> float: diff --git a/maths/3n_plus_1.py b/maths/3n_plus_1.py index 59fdec48e100..f9f6dfeb9faa 100644 --- a/maths/3n_plus_1.py +++ b/maths/3n_plus_1.py @@ -9,9 +9,11 @@ def n31(a: int) -> tuple[list[int], int]: """ if not isinstance(a, int): - raise TypeError(f"Must be int, not {type(a).__name__}") + msg = f"Must be int, not {type(a).__name__}" + raise TypeError(msg) if a < 1: - raise ValueError(f"Given integer must be positive, not {a}") + msg = f"Given integer must be positive, not {a}" + raise ValueError(msg) path = [a] while a != 1: diff --git a/maths/automorphic_number.py b/maths/automorphic_number.py index 103fc7301831..8ed9375632a4 100644 --- a/maths/automorphic_number.py +++ b/maths/automorphic_number.py @@ -40,7 +40,8 @@ def is_automorphic_number(number: int) -> bool: TypeError: Input value of [number=5.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 0: return False number_square = number * number diff --git a/maths/catalan_number.py b/maths/catalan_number.py index 85607dc1eca4..20c2cfb17c06 100644 --- a/maths/catalan_number.py +++ b/maths/catalan_number.py @@ -31,10 +31,12 @@ def catalan(number: int) -> int: """ if not isinstance(number, 
int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be > 0") + msg = f"Input value of [number={number}] must be > 0" + raise ValueError(msg) current_number = 1 diff --git a/maths/dual_number_automatic_differentiation.py b/maths/dual_number_automatic_differentiation.py index 9aa75830c4a1..f98997c8be4d 100644 --- a/maths/dual_number_automatic_differentiation.py +++ b/maths/dual_number_automatic_differentiation.py @@ -71,7 +71,7 @@ def __truediv__(self, other): for i in self.duals: new_duals.append(i / other) return Dual(self.real / other, new_duals) - raise ValueError() + raise ValueError def __floordiv__(self, other): if not isinstance(other, Dual): @@ -79,7 +79,7 @@ def __floordiv__(self, other): for i in self.duals: new_duals.append(i // other) return Dual(self.real // other, new_duals) - raise ValueError() + raise ValueError def __pow__(self, n): if n < 0 or isinstance(n, float): diff --git a/maths/hexagonal_number.py b/maths/hexagonal_number.py index 28735c638f80..3677ab95ee00 100644 --- a/maths/hexagonal_number.py +++ b/maths/hexagonal_number.py @@ -36,7 +36,8 @@ def hexagonal(number: int) -> int: TypeError: Input value of [number=11.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: raise ValueError("Input must be a positive integer") return number * (2 * number - 1) diff --git a/maths/juggler_sequence.py b/maths/juggler_sequence.py index 9daba8bc0e8a..7f65d1dff925 100644 --- a/maths/juggler_sequence.py +++ b/maths/juggler_sequence.py @@ -40,9 +40,11 @@ def juggler_sequence(number: int) -> list[int]: ValueError: Input value of [number=-1] must be a positive integer """ if not isinstance(number, 
int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be a positive integer") + msg = f"Input value of [number={number}] must be a positive integer" + raise ValueError(msg) sequence = [number] while number != 1: if number % 2 == 0: diff --git a/maths/liouville_lambda.py b/maths/liouville_lambda.py index 5993efa42d66..1ed228dd5434 100644 --- a/maths/liouville_lambda.py +++ b/maths/liouville_lambda.py @@ -33,7 +33,8 @@ def liouville_lambda(number: int) -> int: TypeError: Input value of [number=11.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: raise ValueError("Input must be a positive integer") return -1 if len(prime_factors(number)) % 2 else 1 diff --git a/maths/manhattan_distance.py b/maths/manhattan_distance.py index 2711d4c8ccd6..413991468a49 100644 --- a/maths/manhattan_distance.py +++ b/maths/manhattan_distance.py @@ -15,15 +15,15 @@ def manhattan_distance(point_a: list, point_b: list) -> float: 9.0 >>> manhattan_distance([1,1], None) Traceback (most recent call last): - ... + ... ValueError: Missing an input >>> manhattan_distance([1,1], [2, 2, 2]) Traceback (most recent call last): - ... + ... ValueError: Both points must be in the same n-dimensional space >>> manhattan_distance([1,"one"], [2, 2, 2]) Traceback (most recent call last): - ... + ... 
TypeError: Expected a list of numbers as input, found str >>> manhattan_distance(1, [2, 2, 2]) Traceback (most recent call last): @@ -66,14 +66,14 @@ def _validate_point(point: list[float]) -> None: if isinstance(point, list): for item in point: if not isinstance(item, (int, float)): - raise TypeError( - f"Expected a list of numbers as input, " - f"found {type(item).__name__}" + msg = ( + "Expected a list of numbers as input, found " + f"{type(item).__name__}" ) + raise TypeError(msg) else: - raise TypeError( - f"Expected a list of numbers as input, found {type(point).__name__}" - ) + msg = f"Expected a list of numbers as input, found {type(point).__name__}" + raise TypeError(msg) else: raise ValueError("Missing an input") diff --git a/maths/pronic_number.py b/maths/pronic_number.py index 8b554dbbd602..cf4d3d2eb24b 100644 --- a/maths/pronic_number.py +++ b/maths/pronic_number.py @@ -41,7 +41,8 @@ def is_pronic(number: int) -> bool: TypeError: Input value of [number=6.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 0 or number % 2 == 1: return False number_sqrt = int(number**0.5) diff --git a/maths/proth_number.py b/maths/proth_number.py index ce911473a2d2..47747ed260f7 100644 --- a/maths/proth_number.py +++ b/maths/proth_number.py @@ -29,10 +29,12 @@ def proth(number: int) -> int: """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if number < 1: - raise ValueError(f"Input value of [number={number}] must be > 0") + msg = f"Input value of [number={number}] must be > 0" + raise ValueError(msg) elif number == 1: return 3 elif number == 2: diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index af98f24f9538..2c5cdc004d1d 100644 --- 
a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -167,7 +167,7 @@ def __str__(self): f"{coef}*x^{i}" for coef, i in enumerate(self.product) ) - return "\n".join((a, b, c)) + return f"{a}\n{b}\n{c}" # Unit tests diff --git a/maths/sieve_of_eratosthenes.py b/maths/sieve_of_eratosthenes.py index 3cd6ce0b4d9d..a0520aa5cf50 100644 --- a/maths/sieve_of_eratosthenes.py +++ b/maths/sieve_of_eratosthenes.py @@ -34,7 +34,8 @@ def prime_sieve(num: int) -> list[int]: """ if num <= 0: - raise ValueError(f"{num}: Invalid input, please enter a positive integer.") + msg = f"{num}: Invalid input, please enter a positive integer." + raise ValueError(msg) sieve = [True] * (num + 1) prime = [] diff --git a/maths/sylvester_sequence.py b/maths/sylvester_sequence.py index 114c9dd58582..607424c6a90b 100644 --- a/maths/sylvester_sequence.py +++ b/maths/sylvester_sequence.py @@ -31,7 +31,8 @@ def sylvester(number: int) -> int: if number == 1: return 2 elif number < 1: - raise ValueError(f"The input value of [n={number}] has to be > 0") + msg = f"The input value of [n={number}] has to be > 0" + raise ValueError(msg) else: num = sylvester(number - 1) lower = num - 1 diff --git a/maths/twin_prime.py b/maths/twin_prime.py index e6ac0cc7805b..912b10b366c0 100644 --- a/maths/twin_prime.py +++ b/maths/twin_prime.py @@ -32,7 +32,8 @@ def twin_prime(number: int) -> int: TypeError: Input value of [number=6.0] must be an integer """ if not isinstance(number, int): - raise TypeError(f"Input value of [number={number}] must be an integer") + msg = f"Input value of [number={number}] must be an integer" + raise TypeError(msg) if is_prime(number) and is_prime(number + 2): return number + 2 else: diff --git a/matrix/matrix_operation.py b/matrix/matrix_operation.py index 576094902af4..f189f1898d33 100644 --- a/matrix/matrix_operation.py +++ b/matrix/matrix_operation.py @@ -70,10 +70,11 @@ def multiply(matrix_a: list[list[int]], matrix_b: list[list[int]]) -> list[list[ rows, cols = 
_verify_matrix_sizes(matrix_a, matrix_b) if cols[0] != rows[1]: - raise ValueError( - f"Cannot multiply matrix of dimensions ({rows[0]},{cols[0]}) " - f"and ({rows[1]},{cols[1]})" + msg = ( + "Cannot multiply matrix of dimensions " + f"({rows[0]},{cols[0]}) and ({rows[1]},{cols[1]})" ) + raise ValueError(msg) return [ [sum(m * n for m, n in zip(i, j)) for j in zip(*matrix_b)] for i in matrix_a ] @@ -174,10 +175,11 @@ def _verify_matrix_sizes( ) -> tuple[tuple[int, int], tuple[int, int]]: shape = _shape(matrix_a) + _shape(matrix_b) if shape[0] != shape[3] or shape[1] != shape[2]: - raise ValueError( - f"operands could not be broadcast together with shape " + msg = ( + "operands could not be broadcast together with shape " f"({shape[0], shape[1]}), ({shape[2], shape[3]})" ) + raise ValueError(msg) return (shape[0], shape[2]), (shape[1], shape[3]) diff --git a/matrix/sherman_morrison.py b/matrix/sherman_morrison.py index 39eddfed81f3..256271e8a87d 100644 --- a/matrix/sherman_morrison.py +++ b/matrix/sherman_morrison.py @@ -173,7 +173,8 @@ def __mul__(self, another: int | float | Matrix) -> Matrix: result[r, c] += self[r, i] * another[i, c] return result else: - raise TypeError(f"Unsupported type given for another ({type(another)})") + msg = f"Unsupported type given for another ({type(another)})" + raise TypeError(msg) def transpose(self) -> Matrix: """ diff --git a/neural_network/input_data.py b/neural_network/input_data.py index 2a32f0b82c37..94c018ece9ba 100644 --- a/neural_network/input_data.py +++ b/neural_network/input_data.py @@ -198,10 +198,7 @@ def next_batch(self, batch_size, fake_data=False, shuffle=True): """Return the next `batch_size` examples from this data set.""" if fake_data: fake_image = [1] * 784 - if self.one_hot: - fake_label = [1] + [0] * 9 - else: - fake_label = 0 + fake_label = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(batch_size)], [fake_label for _ in range(batch_size)], @@ -324,10 +321,11 @@ def fake(): 
test_labels = _extract_labels(f, one_hot=one_hot) if not 0 <= validation_size <= len(train_images): - raise ValueError( - f"Validation size should be between 0 and {len(train_images)}. " - f"Received: {validation_size}." + msg = ( + "Validation size should be between 0 and " + f"{len(train_images)}. Received: {validation_size}." ) + raise ValueError(msg) validation_images = train_images[:validation_size] validation_labels = train_labels[:validation_size] diff --git a/other/nested_brackets.py b/other/nested_brackets.py index ea48c0a5f532..19c6dd53c8b2 100644 --- a/other/nested_brackets.py +++ b/other/nested_brackets.py @@ -18,7 +18,7 @@ def is_balanced(s): stack = [] open_brackets = set({"(", "[", "{"}) closed_brackets = set({")", "]", "}"}) - open_to_closed = dict({"{": "}", "[": "]", "(": ")"}) + open_to_closed = {"{": "}", "[": "]", "(": ")"} for i in range(len(s)): if s[i] in open_brackets: diff --git a/other/scoring_algorithm.py b/other/scoring_algorithm.py index 8e04a8f30dd7..af04f432e433 100644 --- a/other/scoring_algorithm.py +++ b/other/scoring_algorithm.py @@ -68,7 +68,8 @@ def calculate_each_score( # weight not 0 or 1 else: - raise ValueError(f"Invalid weight of {weight:f} provided") + msg = f"Invalid weight of {weight:f} provided" + raise ValueError(msg) score_lists.append(score) diff --git a/project_euler/problem_054/sol1.py b/project_euler/problem_054/sol1.py index 9af7aef5a716..74409f32c712 100644 --- a/project_euler/problem_054/sol1.py +++ b/project_euler/problem_054/sol1.py @@ -119,10 +119,12 @@ def __init__(self, hand: str) -> None: For example: "6S 4C KC AS TH" """ if not isinstance(hand, str): - raise TypeError(f"Hand should be of type 'str': {hand!r}") + msg = f"Hand should be of type 'str': {hand!r}" + raise TypeError(msg) # split removes duplicate whitespaces so no need of strip if len(hand.split(" ")) != 5: - raise ValueError(f"Hand should contain only 5 cards: {hand!r}") + msg = f"Hand should contain only 5 cards: {hand!r}" + raise 
ValueError(msg) self._hand = hand self._first_pair = 0 self._second_pair = 0 diff --git a/project_euler/problem_068/sol1.py b/project_euler/problem_068/sol1.py index 772be359f630..cf814b001d57 100644 --- a/project_euler/problem_068/sol1.py +++ b/project_euler/problem_068/sol1.py @@ -73,7 +73,8 @@ def solution(gon_side: int = 5) -> int: if is_magic_gon(numbers): return int("".join(str(n) for n in numbers)) - raise ValueError(f"Magic {gon_side}-gon ring is impossible") + msg = f"Magic {gon_side}-gon ring is impossible" + raise ValueError(msg) def generate_gon_ring(gon_side: int, perm: list[int]) -> list[int]: diff --git a/project_euler/problem_131/sol1.py b/project_euler/problem_131/sol1.py index f5302aac8644..be3ea9c81ae4 100644 --- a/project_euler/problem_131/sol1.py +++ b/project_euler/problem_131/sol1.py @@ -26,10 +26,7 @@ def is_prime(number: int) -> bool: False """ - for divisor in range(2, isqrt(number) + 1): - if number % divisor == 0: - return False - return True + return all(number % divisor != 0 for divisor in range(2, isqrt(number) + 1)) def solution(max_prime: int = 10**6) -> int: diff --git a/pyproject.toml b/pyproject.toml index 48c3fbd4009d..a526196685f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -17,45 +17,88 @@ ignore-words-list = "3rt,ans,crate,damon,fo,followings,hist,iff,kwanza,mater,sec skip = "./.*,*.json,ciphers/prehistoric_men.txt,project_euler/problem_022/p022_names.txt,pyproject.toml,strings/dictionary.txt,strings/words.txt" [tool.ruff] -ignore = [ # `ruff rule S101` for a description of that rule - "B904", # B904: Within an `except` clause, raise exceptions with `raise ... 
from err` - "B905", # B905: `zip()` without an explicit `strict=` parameter - "E741", # E741: Ambiguous variable name 'l' - "G004", # G004 Logging statement uses f-string - "N999", # N999: Invalid module name - "PLC1901", # PLC1901: `{}` can be simplified to `{}` as an empty string is falsey - "PLR2004", # PLR2004: Magic value used in comparison - "PLR5501", # PLR5501: Consider using `elif` instead of `else` - "PLW0120", # PLW0120: `else` clause on loop without a `break` statement - "PLW060", # PLW060: Using global for `{name}` but no assignment is done -- DO NOT FIX - "PLW2901", # PLW2901: Redefined loop variable - "RUF00", # RUF00: Ambiguous unicode character -- DO NOT FIX - "RUF100", # RUF100: Unused `noqa` directive - "S101", # S101: Use of `assert` detected -- DO NOT FIX - "S105", # S105: Possible hardcoded password: 'password' - "S113", # S113: Probable use of requests call without timeout - "S311", # S311: Standard pseudo-random generators are not suitable for cryptographic purposes - "UP038", # UP038: Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX +ignore = [ # `ruff rule S101` for a description of that rule + "ARG001", # Unused function argument `amount` -- FIX ME? + "B904", # Within an `except` clause, raise exceptions with `raise ... from err` -- FIX ME + "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME + "DTZ001", # The use of `datetime.datetime()` without `tzinfo` argument is not allowed -- FIX ME + "DTZ005", # The use of `datetime.datetime.now()` without `tzinfo` argument is not allowed -- FIX ME + "E741", # Ambiguous variable name 'l' -- FIX ME + "EM101", # Exception must not use a string literal, assign to variable first + "EXE001", # Shebang is present but file is not executable" -- FIX ME + "G004", # Logging statement uses f-string + "ICN001", # `matplotlib.pyplot` should be imported as `plt` -- FIX ME + "INP001", # File `x/y/z.py` is part of an implicit namespace package. Add an `__init__.py`. 
-- FIX ME + "N999", # Invalid module name -- FIX ME + "NPY002", # Replace legacy `np.random.choice` call with `np.random.Generator` -- FIX ME + "PGH003", # Use specific rule codes when ignoring type issues -- FIX ME + "PLC1901", # `{}` can be simplified to `{}` as an empty string is falsey + "PLR5501", # Consider using `elif` instead of `else` -- FIX ME + "PLW0120", # `else` clause on loop without a `break` statement -- FIX ME + "PLW060", # Using global for `{name}` but no assignment is done -- DO NOT FIX + "PLW2901", # PLW2901: Redefined loop variable -- FIX ME + "RUF00", # Ambiguous unicode character and other rules + "RUF100", # Unused `noqa` directive -- FIX ME + "S101", # Use of `assert` detected -- DO NOT FIX + "S105", # Possible hardcoded password: 'password' + "S113", # Probable use of requests call without timeout -- FIX ME + "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME + "SIM102", # Use a single `if` statement instead of nested `if` statements -- FIX ME + "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] -select = [ # https://beta.ruff.rs/docs/rules - "A", # A: builtins - "B", # B: bugbear - "C40", # C40: comprehensions - "C90", # C90: mccabe code complexity - "E", # E: pycodestyle errors - "F", # F: pyflakes - "G", # G: logging format - "I", # I: isort - "N", # N: pep8 naming - "PL", # PL: pylint - "PIE", # PIE: pie - "PYI", # PYI: type hinting stub files - "RUF", # RUF: ruff - "S", # S: bandit - "TID", # TID: tidy imports - "UP", # UP: pyupgrade - "W", # W: pycodestyle warnings - "YTT", # YTT: year 2020 +select = [ # https://beta.ruff.rs/docs/rules + "A", # flake8-builtins + "ARG", # flake8-unused-arguments + "ASYNC", # flake8-async + "B", # flake8-bugbear + "BLE", # flake8-blind-except + "C4", # flake8-comprehensions + "C90", # McCabe cyclomatic complexity + "DTZ", # flake8-datetimez + "E", # pycodestyle + "EM", # 
flake8-errmsg + "EXE", # flake8-executable + "F", # Pyflakes + "FA", # flake8-future-annotations + "FLY", # flynt + "G", # flake8-logging-format + "I", # isort + "ICN", # flake8-import-conventions + "INP", # flake8-no-pep420 + "INT", # flake8-gettext + "N", # pep8-naming + "NPY", # NumPy-specific rules + "PGH", # pygrep-hooks + "PIE", # flake8-pie + "PL", # Pylint + "PYI", # flake8-pyi + "RSE", # flake8-raise + "RUF", # Ruff-specific rules + "S", # flake8-bandit + "SIM", # flake8-simplify + "SLF", # flake8-self + "T10", # flake8-debugger + "TD", # flake8-todos + "TID", # flake8-tidy-imports + "UP", # pyupgrade + "W", # pycodestyle + "YTT", # flake8-2020 + # "ANN", # flake8-annotations # FIX ME? + # "COM", # flake8-commas + # "D", # pydocstyle -- FIX ME? + # "DJ", # flake8-django + # "ERA", # eradicate -- DO NOT FIX + # "FBT", # flake8-boolean-trap # FIX ME + # "ISC", # flake8-implicit-str-concat # FIX ME + # "PD", # pandas-vet + # "PT", # flake8-pytest-style + # "PTH", # flake8-use-pathlib # FIX ME + # "Q", # flake8-quotes + # "RET", # flake8-return # FIX ME? 
+ # "T20", # flake8-print + # "TCH", # flake8-type-checking + # "TRY", # tryceratops ] show-source = true target-version = "py311" @@ -63,7 +106,27 @@ target-version = "py311" [tool.ruff.mccabe] # DO NOT INCREASE THIS VALUE max-complexity = 17 # default: 10 +[tool.ruff.per-file-ignores] +"arithmetic_analysis/newton_raphson.py" = ["PGH001"] +"audio_filters/show_response.py" = ["ARG002"] +"data_structures/binary_tree/binary_search_tree_recursive.py" = ["BLE001"] +"data_structures/binary_tree/treap.py" = ["SIM114"] +"data_structures/hashing/hash_table.py" = ["ARG002"] +"data_structures/hashing/quadratic_probing.py" = ["ARG002"] +"data_structures/hashing/tests/test_hash_map.py" = ["BLE001"] +"data_structures/heap/max_heap.py" = ["SIM114"] +"graphs/minimum_spanning_tree_prims.py" = ["SIM114"] +"hashes/enigma_machine.py" = ["BLE001"] +"machine_learning/decision_tree.py" = ["SIM114"] +"machine_learning/linear_discriminant_analysis.py" = ["ARG005"] +"machine_learning/sequential_minimum_optimization.py" = ["SIM115"] +"matrix/sherman_morrison.py" = ["SIM103", "SIM114"] +"physics/newtons_second_law_of_motion.py" = ["BLE001"] +"project_euler/problem_099/sol1.py" = ["SIM115"] +"sorts/external_sort.py" = ["SIM115"] + [tool.ruff.pylint] # DO NOT INCREASE THESE VALUES +allow-magic-value-types = ["float", "int", "str"] max-args = 10 # default: 5 max-branches = 20 # default: 12 max-returns = 8 # default: 6 diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index b95be9ebc254..24bc00cd036f 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -33,7 +33,7 @@ def print_directory_md(top_dir: str = ".") -> None: if filepath != old_path: old_path = print_path(old_path, filepath) indent = (filepath.count(os.sep) + 1) if filepath else 0 - url = "/".join((filepath, filename)).replace(" ", "%20") + url = f"{filepath}/{filename}".replace(" ", "%20") filename = os.path.splitext(filename.replace("_", " ").title())[0] print(f"{md_prefix(indent)} 
[{filename}]({url})") diff --git a/sorts/dutch_national_flag_sort.py b/sorts/dutch_national_flag_sort.py index 79afefa73afe..758e3a887b84 100644 --- a/sorts/dutch_national_flag_sort.py +++ b/sorts/dutch_national_flag_sort.py @@ -84,9 +84,8 @@ def dutch_national_flag_sort(sequence: list) -> list: sequence[mid], sequence[high] = sequence[high], sequence[mid] high -= 1 else: - raise ValueError( - f"The elements inside the sequence must contains only {colors} values" - ) + msg = f"The elements inside the sequence must contains only {colors} values" + raise ValueError(msg) return sequence diff --git a/strings/barcode_validator.py b/strings/barcode_validator.py index e050cd337d74..b4f3864e2642 100644 --- a/strings/barcode_validator.py +++ b/strings/barcode_validator.py @@ -65,7 +65,8 @@ def get_barcode(barcode: str) -> int: ValueError: Barcode 'dwefgiweuf' has alphabetic characters. """ if str(barcode).isalpha(): - raise ValueError(f"Barcode '{barcode}' has alphabetic characters.") + msg = f"Barcode '{barcode}' has alphabetic characters." + raise ValueError(msg) elif int(barcode) < 0: raise ValueError("The entered barcode has a negative value. 
Try again.") else: diff --git a/strings/capitalize.py b/strings/capitalize.py index 63603aa07e2d..e7e97c2beb53 100644 --- a/strings/capitalize.py +++ b/strings/capitalize.py @@ -17,7 +17,7 @@ def capitalize(sentence: str) -> str: """ if not sentence: return "" - lower_to_upper = {lc: uc for lc, uc in zip(ascii_lowercase, ascii_uppercase)} + lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase)) return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:] diff --git a/strings/is_spain_national_id.py b/strings/is_spain_national_id.py index 67f49755f412..60d06e123aae 100644 --- a/strings/is_spain_national_id.py +++ b/strings/is_spain_national_id.py @@ -48,7 +48,8 @@ def is_spain_national_id(spanish_id: str) -> bool: """ if not isinstance(spanish_id, str): - raise TypeError(f"Expected string as input, found {type(spanish_id).__name__}") + msg = f"Expected string as input, found {type(spanish_id).__name__}" + raise TypeError(msg) spanish_id_clean = spanish_id.replace("-", "").upper() if len(spanish_id_clean) != 9: diff --git a/strings/snake_case_to_camel_pascal_case.py b/strings/snake_case_to_camel_pascal_case.py index 28a28b517a01..8219337a63b0 100644 --- a/strings/snake_case_to_camel_pascal_case.py +++ b/strings/snake_case_to_camel_pascal_case.py @@ -27,11 +27,11 @@ def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str: """ if not isinstance(input_str, str): - raise ValueError(f"Expected string as input, found {type(input_str)}") + msg = f"Expected string as input, found {type(input_str)}" + raise ValueError(msg) if not isinstance(use_pascal, bool): - raise ValueError( - f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" - ) + msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}" + raise ValueError(msg) words = input_str.split("_") diff --git a/web_programming/reddit.py b/web_programming/reddit.py index 6a31c81c34bd..5ca5f828c0fb 100644 --- a/web_programming/reddit.py +++ 
b/web_programming/reddit.py @@ -26,7 +26,8 @@ def get_subreddit_data( """ wanted_data = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)): - raise ValueError(f"Invalid search term: {invalid_search_terms}") + msg = f"Invalid search term: {invalid_search_terms}" + raise ValueError(msg) response = requests.get( f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}", headers={"User-agent": "A random string"}, diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py index abac3c70b22e..d5d4cfe92f20 100644 --- a/web_programming/search_books_by_isbn.py +++ b/web_programming/search_books_by_isbn.py @@ -22,7 +22,8 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict: """ new_olid = olid.strip().strip("/") # Remove leading/trailing whitespace & slashes if new_olid.count("/") != 1: - raise ValueError(f"{olid} is not a valid Open Library olid") + msg = f"{olid} is not a valid Open Library olid" + raise ValueError(msg) return requests.get(f"https://openlibrary.org/{new_olid}.json").json() diff --git a/web_programming/slack_message.py b/web_programming/slack_message.py index f35aa3ca587e..5e97d6b64c75 100644 --- a/web_programming/slack_message.py +++ b/web_programming/slack_message.py @@ -7,10 +7,11 @@ def send_slack_message(message_body: str, slack_url: str) -> None: headers = {"Content-Type": "application/json"} response = requests.post(slack_url, json={"text": message_body}, headers=headers) if response.status_code != 200: - raise ValueError( - f"Request to slack returned an error {response.status_code}, " - f"the response is:\n{response.text}" + msg = ( + "Request to slack returned an error " + f"{response.status_code}, the response is:\n{response.text}" ) + raise ValueError(msg) if __name__ == "__main__": From 41c74b60a3c77640dc5fbec775daff5b518893f4 Mon Sep 17 00:00:00 2001 From: Tianyi Zheng Date: Sun, 28 May 2023 19:07:33 -0700 Subject: [PATCH 85/85] Fix ruff errors --- 
divide_and_conquer/strassen_matrix_multiplication.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/divide_and_conquer/strassen_matrix_multiplication.py b/divide_and_conquer/strassen_matrix_multiplication.py index cfbbe7746cb4..cbfc7e5655db 100644 --- a/divide_and_conquer/strassen_matrix_multiplication.py +++ b/divide_and_conquer/strassen_matrix_multiplication.py @@ -112,10 +112,12 @@ def strassen(matrix1: list, matrix2: list) -> list: [[139, 163], [121, 134], [100, 121]] """ if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]: - raise Exception( - "Unable to multiply these matrices, please check the dimensions. \n" - f"Matrix A:{matrix1} \nMatrix B:{matrix2}" + msg = ( + "Unable to multiply these matrices, please check the dimensions.\n" + f"Matrix A: {matrix1}\n" + f"Matrix B: {matrix2}" ) + raise Exception(msg) dimension1 = matrix_dimensions(matrix1) dimension2 = matrix_dimensions(matrix2)