diff --git a/backtracking/hamiltonian_cycle.py b/backtracking/hamiltonian_cycle.py
index 4c6ae46799f4..4a4156d70b32 100644
--- a/backtracking/hamiltonian_cycle.py
+++ b/backtracking/hamiltonian_cycle.py
@@ -71,7 +71,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int)
     >>> curr_ind = 1
     >>> util_hamilton_cycle(graph, path, curr_ind)
     True
-    >>> print(path)
+    >>> path
     [0, 1, 2, 4, 3, 0]
 
     Case 2: Use exact graph as in previous case, but in the properties taken from
@@ -85,7 +85,7 @@ def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int)
     >>> curr_ind = 3
     >>> util_hamilton_cycle(graph, path, curr_ind)
     True
-    >>> print(path)
+    >>> path
     [0, 1, 2, 4, 3, 0]
     """
diff --git a/computer_vision/flip_augmentation.py b/computer_vision/flip_augmentation.py
index 1272357fd03e..93b4e3f6da79 100644
--- a/computer_vision/flip_augmentation.py
+++ b/computer_vision/flip_augmentation.py
@@ -22,7 +22,6 @@ def main() -> None:
     Get images list and annotations list from input dir.
     Update new images and annotations.
     Save images and annotations in output dir.
-    >>> pass # A doctest is not possible for this function.
     """
     img_paths, annos = get_dataset(LABEL_DIR, IMAGE_DIR)
     print("Processing...")
@@ -48,7 +47,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
     - label_dir : Path to label include annotation of images
     - img_dir : Path to folder contain images
     Return : List of images path and labels
-    >>> pass # A doctest is not possible for this function.
     """
     img_paths = []
     labels = []
@@ -88,7 +86,6 @@ def update_image_and_anno(
     - new_imgs_list : image after resize
     - new_annos_lists : list of new annotation after scale
     - path_list : list the name of image file
-    >>> pass # A doctest is not possible for this function.
     """
     new_annos_lists = []
     path_list = []
diff --git a/computer_vision/mosaic_augmentation.py b/computer_vision/mosaic_augmentation.py
index 4fd81957ce2a..e2953749753f 100644
--- a/computer_vision/mosaic_augmentation.py
+++ b/computer_vision/mosaic_augmentation.py
@@ -23,7 +23,6 @@ def main() -> None:
     Get images list and annotations list from input dir.
     Update new images and annotations.
     Save images and annotations in output dir.
-    >>> pass # A doctest is not possible for this function.
     """
     img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
     for index in range(NUMBER_IMAGES):
@@ -60,7 +59,6 @@ def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
     - label_dir : Path to label include annotation of images
     - img_dir : Path to folder contain images
     Return : List of images path and labels
-    >>> pass # A doctest is not possible for this function.
     """
     img_paths = []
     labels = []
@@ -105,7 +103,6 @@ def update_image_and_anno(
     - output_img : image after resize
     - new_anno : list of new annotation after scale
     - path[0] : get the name of image file
-    >>> pass # A doctest is not possible for this function.
     """
     output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
     scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
diff --git a/data_structures/heap/binomial_heap.py b/data_structures/heap/binomial_heap.py
index 334b444eaaff..6398c99439cd 100644
--- a/data_structures/heap/binomial_heap.py
+++ b/data_structures/heap/binomial_heap.py
@@ -71,7 +71,7 @@ class BinomialHeap:
     ...     first_heap.insert(number)
 
     Size test
-    >>> print(first_heap.size)
+    >>> first_heap.size
     30
 
     Deleting - delete() test
@@ -97,7 +97,7 @@ class BinomialHeap:
                  #       #     #       #
 
     preOrder() test
-    >>> print(second_heap.preOrder())
+    >>> second_heap.preOrder()
     [(17, 0), ('#', 1), (31, 1), (20, 2), ('#', 3), ('#', 3), (34, 2), ('#', 3), ('#', 3)]
 
     printing Heap - __str__() test
diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py
index 4c19747ec823..071790d18448 100644
--- a/data_structures/heap/heap.py
+++ b/data_structures/heap/heap.py
@@ -9,20 +9,20 @@ class Heap:
     >>> unsorted = [103, 9, 1, 7, 11, 15, 25, 201, 209, 107, 5]
     >>> h = Heap()
     >>> h.build_max_heap(unsorted)
-    >>> print(h)
+    >>> h
     [209, 201, 25, 103, 107, 15, 1, 9, 7, 11, 5]
     >>>
     >>> h.extract_max()
     209
-    >>> print(h)
+    >>> h
     [201, 107, 25, 103, 11, 15, 1, 9, 7, 5]
     >>>
     >>> h.insert(100)
-    >>> print(h)
+    >>> h
     [201, 107, 25, 103, 100, 15, 1, 9, 7, 5, 11]
     >>>
     >>> h.heap_sort()
-    >>> print(h)
+    >>> h
     [1, 5, 7, 9, 11, 15, 25, 100, 103, 107, 201]
     """
diff --git a/data_structures/heap/min_heap.py b/data_structures/heap/min_heap.py
index d8975eb2dcc7..0403624f285a 100644
--- a/data_structures/heap/min_heap.py
+++ b/data_structures/heap/min_heap.py
@@ -27,7 +27,7 @@ class MinHeap:
     >>> myMinHeap.decrease_key(b, -17)
     >>> print(b)
     Node(B, -17)
-    >>> print(myMinHeap["B"])
+    >>> myMinHeap["B"]
     -17
     """
diff --git a/data_structures/linked_list/skip_list.py b/data_structures/linked_list/skip_list.py
index a667e3e9bc84..96b0db7c896b 100644
--- a/data_structures/linked_list/skip_list.py
+++ b/data_structures/linked_list/skip_list.py
@@ -443,4 +443,7 @@ def main():
 
 
 if __name__ == "__main__":
+    import doctest
+
+    doctest.testmod()
     main()
diff --git a/graphs/gale_shapley_bigraph.py b/graphs/gale_shapley_bigraph.py
index 56b8c6c77bcb..f4b3153817c4 100644
--- a/graphs/gale_shapley_bigraph.py
+++ b/graphs/gale_shapley_bigraph.py
@@ -17,7 +17,7 @@ def stable_matching(
 
     >>> donor_pref = [[0, 1, 3, 2], [0, 2, 3, 1], [1, 0, 2, 3], [0, 3, 1, 2]]
    >>> recipient_pref = [[3, 1, 2, 0], [3, 1, 0, 2], [0, 3, 1, 2], [1, 0, 3, 2]]
-    >>> print(stable_matching(donor_pref, recipient_pref))
+    >>> stable_matching(donor_pref, recipient_pref)
     [1, 2, 3, 0]
     """
     assert len(donor_pref) == len(recipient_pref)
diff --git a/graphs/graph_list.py b/graphs/graph_list.py
index f04b7a92390d..e871f3b8a9d6 100644
--- a/graphs/graph_list.py
+++ b/graphs/graph_list.py
@@ -18,7 +18,7 @@ class GraphAdjacencyList(Generic[T]):
 
     Directed graph example:
     >>> d_graph = GraphAdjacencyList()
-    >>> d_graph
+    >>> print(d_graph)
     {}
     >>> d_graph.add_edge(0, 1)
     {0: [1], 1: []}
@@ -26,7 +26,7 @@ class GraphAdjacencyList(Generic[T]):
     {0: [1], 1: [2, 4, 5], 2: [], 4: [], 5: []}
     >>> d_graph.add_edge(2, 0).add_edge(2, 6).add_edge(2, 7)
     {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
-    >>> print(d_graph)
+    >>> d_graph
     {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
     >>> print(repr(d_graph))
     {0: [1], 1: [2, 4, 5], 2: [0, 6, 7], 4: [], 5: [], 6: [], 7: []}
@@ -68,7 +68,7 @@ class GraphAdjacencyList(Generic[T]):
     {'a': ['b'], 'b': ['a']}
     >>> char_graph.add_edge('b', 'c').add_edge('b', 'e').add_edge('b', 'f')
     {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
-    >>> print(char_graph)
+    >>> char_graph
     {'a': ['b'], 'b': ['a', 'c', 'e', 'f'], 'c': ['b'], 'e': ['b'], 'f': ['b']}
     """
diff --git a/graphs/minimum_spanning_tree_prims2.py b/graphs/minimum_spanning_tree_prims2.py
index d924ee3db1e5..707be783d087 100644
--- a/graphs/minimum_spanning_tree_prims2.py
+++ b/graphs/minimum_spanning_tree_prims2.py
@@ -69,16 +69,16 @@ class MinPriorityQueue(Generic[T]):
     >>> queue.push(3, 4000)
     >>> queue.push(4, 3000)
 
-    >>> print(queue.extract_min())
+    >>> queue.extract_min()
     2
 
     >>> queue.update_key(4, 50)
 
-    >>> print(queue.extract_min())
+    >>> queue.extract_min()
     4
-    >>> print(queue.extract_min())
+    >>> queue.extract_min()
     1
-    >>> print(queue.extract_min())
+    >>> queue.extract_min()
     3
     """
diff --git a/graphs/random_graph_generator.py b/graphs/random_graph_generator.py
index 15ccee5b399c..0e7e18bc8fd9 100644
--- a/graphs/random_graph_generator.py
+++ b/graphs/random_graph_generator.py
@@ -53,7 +53,7 @@ def complete_graph(vertices_number: int) -> dict:
     @input: vertices_number (number of vertices),
             directed (False if the graph is undirected, True otherwise)
     @example:
-    >>> print(complete_graph(3))
+    >>> complete_graph(3)
     {0: [1, 2], 1: [0, 2], 2: [0, 1]}
     """
     return {
diff --git a/machine_learning/local_weighted_learning/local_weighted_learning.py b/machine_learning/local_weighted_learning/local_weighted_learning.py
index 6c542ab825aa..df03fe0a178d 100644
--- a/machine_learning/local_weighted_learning/local_weighted_learning.py
+++ b/machine_learning/local_weighted_learning/local_weighted_learning.py
@@ -71,7 +71,6 @@ def local_weight_regression(
 def load_data(dataset_name: str, cola_name: str, colb_name: str) -> np.mat:
     """
     Function used for loading data from the seaborn splitting into x and y points
-    >>> pass # this function has no doctest
     """
     import seaborn as sns
@@ -112,7 +111,6 @@ def plot_preds(
 ) -> plt.plot:
     """
     This function used to plot predictions and display the graph
-    >>> pass #this function has no doctest
     """
     xsort = training_data_x.copy()
     xsort.sort(axis=0)
diff --git a/maths/polynomial_evaluation.py b/maths/polynomial_evaluation.py
index 8ee82467efa1..90a51f521e01 100644
--- a/maths/polynomial_evaluation.py
+++ b/maths/polynomial_evaluation.py
@@ -45,7 +45,7 @@ def horner(poly: Sequence[float], x: float) -> float:
     >>> poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # f(x) = 7.0x^4 + 9.3x^3 + 5.0x^2
     >>> x = -13.0
     >>> # f(-13) = 7.0(-13)^4 + 9.3(-13)^3 + 5.0(-13)^2 = 180339.9
-    >>> print(evaluate_poly(poly, x))
+    >>> evaluate_poly(poly, x)
     180339.9
     """
     poly = (0.0, 0.0, 5.0, 9.3, 7.0)
diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py
index 52442134de59..1def58e1f226 100644
--- a/maths/radix2_fft.py
+++ b/maths/radix2_fft.py
@@ -39,7 +39,7 @@ class FFT:
     >>> x = FFT(A, B)
 
     Print product
-    >>> print(x.product)  # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
+    >>> x.product  # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5
     [(-0+0j), (2+0j), (3+0j), (8+0j), (6+0j), (8+0j)]
 
     __str__ test
diff --git a/matrix/matrix_class.py b/matrix/matrix_class.py
index 8b6fefa2124b..0c3078fe6dc8 100644
--- a/matrix/matrix_class.py
+++ b/matrix/matrix_class.py
@@ -21,9 +21,9 @@ class Matrix:
      [7. 8. 9.]]
 
     Matrix rows and columns are available as 2D arrays
-    >>> print(matrix.rows)
+    >>> matrix.rows
     [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
-    >>> print(matrix.columns())
+    >>> matrix.columns()
     [[1, 4, 7], [2, 5, 8], [3, 6, 9]]
 
     Order is returned as a tuple
@@ -55,7 +55,7 @@ class Matrix:
     [[-3. 6. -3.]
     [6. -12. 6.]
     [-3. 6. -3.]]
-    >>> print(matrix.inverse())
+    >>> matrix.inverse()
     Traceback (most recent call last):
     ...
     TypeError: Only matrices with a non-zero determinant have an inverse
diff --git a/searches/simple_binary_search.py b/searches/simple_binary_search.py
index d1f7f7a51cbc..ff043d7369af 100644
--- a/searches/simple_binary_search.py
+++ b/searches/simple_binary_search.py
@@ -13,25 +13,25 @@ def binary_search(a_list: list[int], item: int) -> bool:
     """
     >>> test_list = [0, 1, 2, 8, 13, 17, 19, 32, 42]
-    >>> print(binary_search(test_list, 3))
+    >>> binary_search(test_list, 3)
     False
-    >>> print(binary_search(test_list, 13))
+    >>> binary_search(test_list, 13)
     True
-    >>> print(binary_search([4, 4, 5, 6, 7], 4))
+    >>> binary_search([4, 4, 5, 6, 7], 4)
     True
-    >>> print(binary_search([4, 4, 5, 6, 7], -10))
+    >>> binary_search([4, 4, 5, 6, 7], -10)
     False
-    >>> print(binary_search([-18, 2], -18))
+    >>> binary_search([-18, 2], -18)
     True
-    >>> print(binary_search([5], 5))
+    >>> binary_search([5], 5)
     True
-    >>> print(binary_search(['a', 'c', 'd'], 'c'))
+    >>> binary_search(['a', 'c', 'd'], 'c')
     True
-    >>> print(binary_search(['a', 'c', 'd'], 'f'))
+    >>> binary_search(['a', 'c', 'd'], 'f')
     False
-    >>> print(binary_search([], 1))
+    >>> binary_search([], 1)
     False
-    >>> print(binary_search([-.1, .1 , .8], .1))
+    >>> binary_search([-.1, .1 , .8], .1)
     True
     >>> binary_search(range(-5000, 5000, 10), 80)
     True
diff --git a/sorts/bitonic_sort.py b/sorts/bitonic_sort.py
index 201fecd2ce86..b65f877a45e3 100644
--- a/sorts/bitonic_sort.py
+++ b/sorts/bitonic_sort.py
@@ -16,19 +16,19 @@ def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) ->
 
     >>> arr = [12, 42, -21, 1]
     >>> comp_and_swap(arr, 1, 2, 1)
-    >>> print(arr)
+    >>> arr
     [12, -21, 42, 1]
 
     >>> comp_and_swap(arr, 1, 2, 0)
-    >>> print(arr)
+    >>> arr
     [12, 42, -21, 1]
 
     >>> comp_and_swap(arr, 0, 3, 1)
-    >>> print(arr)
+    >>> arr
     [1, 42, -21, 12]
 
     >>> comp_and_swap(arr, 0, 3, 0)
-    >>> print(arr)
+    >>> arr
     [12, 42, -21, 1]
     """
     if (direction == 1 and array[index1] > array[index2]) or (
@@ -46,11 +46,11 @@ def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> No
 
     >>> arr = [12, 42, -21, 1]
     >>> bitonic_merge(arr, 0, 4, 1)
-    >>> print(arr)
+    >>> arr
     [-21, 1, 12, 42]
 
     >>> bitonic_merge(arr, 0, 4, 0)
-    >>> print(arr)
+    >>> arr
     [42, 12, 1, -21]
     """
     if length > 1:
diff --git a/sorts/normal_distribution_quick_sort.md b/sorts/normal_distribution_quick_sort.md
index c073f2cbc81c..27aca340fb3b 100644
--- a/sorts/normal_distribution_quick_sort.md
+++ b/sorts/normal_distribution_quick_sort.md
@@ -17,8 +17,8 @@ The array elements are taken from a Standard Normal Distribution, having mean =
 >>> mu, sigma = 0, 1 # mean and standard deviation
 >>> X = np.random.normal(mu, sigma, p)
 >>> np.save(outfile, X)
->>> print('The array is')
->>> print(X)
+>>> 'The array is'
+>>> X
 ```
diff --git a/sorts/recursive_insertion_sort.py b/sorts/recursive_insertion_sort.py
index ab2716f8eae5..297dbe9457e6 100644
--- a/sorts/recursive_insertion_sort.py
+++ b/sorts/recursive_insertion_sort.py
@@ -14,17 +14,17 @@ def rec_insertion_sort(collection: list, n: int):
 
     >>> col = [1, 2, 1]
     >>> rec_insertion_sort(col, len(col))
-    >>> print(col)
+    >>> col
     [1, 1, 2]
 
     >>> col = [2, 1, 0, -1, -2]
     >>> rec_insertion_sort(col, len(col))
-    >>> print(col)
+    >>> col
     [-2, -1, 0, 1, 2]
 
     >>> col = [1]
     >>> rec_insertion_sort(col, len(col))
-    >>> print(col)
+    >>> col
     [1]
     """
     # Checks if the entire collection has been sorted
@@ -41,17 +41,17 @@ def insert_next(collection: list, index: int):
 
     >>> col = [3, 2, 4, 2]
     >>> insert_next(col, 1)
-    >>> print(col)
+    >>> col
     [2, 3, 4, 2]
 
     >>> col = [3, 2, 3]
     >>> insert_next(col, 2)
-    >>> print(col)
+    >>> col
     [3, 2, 3]
 
     >>> col = []
     >>> insert_next(col, 1)
-    >>> print(col)
+    >>> col
     []
     """
     # Checks order between adjacent elements
diff --git a/web_programming/reddit.py b/web_programming/reddit.py
index 672109f1399d..6a31c81c34bd 100644
--- a/web_programming/reddit.py
+++ b/web_programming/reddit.py
@@ -23,8 +23,6 @@ def get_subreddit_data(
     limit : Number of posts to fetch
     age : ["new", "top", "hot"]
     wanted_data : Get only the required data in the list
-
-    >>> pass
     """
     wanted_data = wanted_data or []
     if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
diff --git a/web_programming/search_books_by_isbn.py b/web_programming/search_books_by_isbn.py
index 22a31dcb1db4..abac3c70b22e 100644
--- a/web_programming/search_books_by_isbn.py
+++ b/web_programming/search_books_by_isbn.py
@@ -19,7 +19,6 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
     {'publishers': ['Puffin'], 'number_of_pages': 96, 'isbn_10': ['0140328726'], ...
     # >>> get_openlibrary_data(olid='/authors/OL7353617A')  # doctest: +ELLIPSIS
     {'name': 'Adrian Brisku', 'created': {'type': '/type/datetime', ...
-    >>> pass  # Placate https://github.com/apps/algorithms-keeper
     """
     new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
     if new_olid.count("/") != 1:
@@ -29,9 +28,7 @@ def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
 
 def summarize_book(ol_book_data: dict) -> dict:
     """
-    Given Open Library book data, return a summary as a Python dict.
-
-    >>> pass  # Placate https://github.com/apps/algorithms-keeper
+    Given Open Library book data, return a summary as a Python dict.
     """
     desired_keys = {
         "title": "Title",