From e6e5f4b301a82dfbb8245add087789019c516c9c Mon Sep 17 00:00:00 2001 From: Reshad Hasan Date: Sat, 23 Feb 2019 20:18:21 +0600 Subject: [PATCH 01/51] Added naive string search algorithm (#715) --- strings/naiveStringSearch.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 strings/naiveStringSearch.py diff --git a/strings/naiveStringSearch.py b/strings/naiveStringSearch.py new file mode 100644 index 000000000000..04c0d8157b24 --- /dev/null +++ b/strings/naiveStringSearch.py @@ -0,0 +1,29 @@ +""" +this algorithm tries to find the pattern from every position of +the mainString if pattern is found from position i it add it to +the answer and does the same for position i+1 + +Complexity : O(n*m) + n=length of main string + m=length of pattern string +""" +def naivePatternSearch(mainString,pattern): + patLen=len(pattern) + strLen=len(mainString) + position=[] + for i in range(strLen-patLen+1): + match_found=True + for j in range(patLen): + if mainString[i+j]!=pattern[j]: + match_found=False + break + if match_found: + position.append(i) + return position + +mainString="ABAAABCDBBABCDDEBCABC" +pattern="ABC" +position=naivePatternSearch(mainString,pattern) +print("Pattern found in position ") +for x in position: + print(x) \ No newline at end of file From 9a44eb44798900e5d9b8a307a78ab8057f9e3b92 Mon Sep 17 00:00:00 2001 From: Reshad Hasan Date: Mon, 25 Feb 2019 15:35:24 +0600 Subject: [PATCH 02/51] Organize graph algorithms (#719) * organized graph algorithms * all graph algorithms in Graphs/ folder * all graph algorithms are in one folder * Rename number theory/factorial_python.py to maths/factorial_python.py --- {graphs => Graphs}/Directed and Undirected (Weighted) Graph.py | 0 {graphs => Graphs}/a_star.py | 0 {graphs => Graphs}/articulation_points.py | 0 {graphs => Graphs}/basic_graphs.py | 0 {data_structures/graph => Graphs}/bellman_ford.py | 0 {data_structures/graph => Graphs}/breadth_first_search.py | 0 {graphs => Graphs}/check_bipartite_graph_bfs.py | 0 {data_structures/graph => Graphs}/depth_first_search.py | 0 {graphs => Graphs}/dijkstra.py | 0 data_structures/graph/dijkstra.py => Graphs/dijkstra_2.py | 0 {data_structures/graph => Graphs}/dijkstra_algorithm.py | 0 {data_structures/graph => Graphs}/even_tree.py | 0 {graphs => Graphs}/finding_bridges.py | 0 {data_structures/graph => Graphs}/floyd_warshall.py | 0 {data_structures/graph => Graphs}/graph.py | 0 {data_structures/graph => Graphs}/graph_list.py | 0 {data_structures/graph => Graphs}/graph_matrix.py | 0 {graphs => Graphs}/kahns_algorithm_long.py | 0 {graphs => Graphs}/kahns_algorithm_topo.py | 0 {graphs => Graphs}/minimum_spanning_tree_kruskal.py | 0 {graphs => Graphs}/minimum_spanning_tree_prims.py | 0 {graphs => Graphs}/multi_hueristic_astar.py | 0 {graphs => Graphs}/scc_kosaraju.py | 0 {graphs => Graphs}/tarjans_scc.py | 0 factorial_python.py => maths/factorial_python.py | 0 25 files changed, 0 insertions(+), 0 deletions(-) rename {graphs => Graphs}/Directed and Undirected (Weighted) Graph.py (100%) rename {graphs => Graphs}/a_star.py (100%) rename {graphs => Graphs}/articulation_points.py (100%) rename {graphs => Graphs}/basic_graphs.py (100%) rename {data_structures/graph => Graphs}/bellman_ford.py (100%) rename {data_structures/graph => Graphs}/breadth_first_search.py (100%) rename {graphs => Graphs}/check_bipartite_graph_bfs.py (100%) rename {data_structures/graph => Graphs}/depth_first_search.py (100%) rename {graphs => Graphs}/dijkstra.py (100%) rename 
data_structures/graph/dijkstra.py => Graphs/dijkstra_2.py (100%) rename {data_structures/graph => Graphs}/dijkstra_algorithm.py (100%) rename {data_structures/graph => Graphs}/even_tree.py (100%) rename {graphs => Graphs}/finding_bridges.py (100%) rename {data_structures/graph => Graphs}/floyd_warshall.py (100%) rename {data_structures/graph => Graphs}/graph.py (100%) rename {data_structures/graph => Graphs}/graph_list.py (100%) rename {data_structures/graph => Graphs}/graph_matrix.py (100%) rename {graphs => Graphs}/kahns_algorithm_long.py (100%) rename {graphs => Graphs}/kahns_algorithm_topo.py (100%) rename {graphs => Graphs}/minimum_spanning_tree_kruskal.py (100%) rename {graphs => Graphs}/minimum_spanning_tree_prims.py (100%) rename {graphs => Graphs}/multi_hueristic_astar.py (100%) rename {graphs => Graphs}/scc_kosaraju.py (100%) rename {graphs => Graphs}/tarjans_scc.py (100%) rename factorial_python.py => maths/factorial_python.py (100%) diff --git a/graphs/Directed and Undirected (Weighted) Graph.py b/Graphs/Directed and Undirected (Weighted) Graph.py similarity index 100% rename from graphs/Directed and Undirected (Weighted) Graph.py rename to Graphs/Directed and Undirected (Weighted) Graph.py diff --git a/graphs/a_star.py b/Graphs/a_star.py similarity index 100% rename from graphs/a_star.py rename to Graphs/a_star.py diff --git a/graphs/articulation_points.py b/Graphs/articulation_points.py similarity index 100% rename from graphs/articulation_points.py rename to Graphs/articulation_points.py diff --git a/graphs/basic_graphs.py b/Graphs/basic_graphs.py similarity index 100% rename from graphs/basic_graphs.py rename to Graphs/basic_graphs.py diff --git a/data_structures/graph/bellman_ford.py b/Graphs/bellman_ford.py similarity index 100% rename from data_structures/graph/bellman_ford.py rename to Graphs/bellman_ford.py diff --git a/data_structures/graph/breadth_first_search.py b/Graphs/breadth_first_search.py similarity index 100% rename from data_structures/graph/breadth_first_search.py rename to Graphs/breadth_first_search.py diff --git a/graphs/check_bipartite_graph_bfs.py b/Graphs/check_bipartite_graph_bfs.py similarity index 100% rename from graphs/check_bipartite_graph_bfs.py rename to Graphs/check_bipartite_graph_bfs.py diff --git a/data_structures/graph/depth_first_search.py b/Graphs/depth_first_search.py similarity index 100% rename from data_structures/graph/depth_first_search.py rename to Graphs/depth_first_search.py diff --git a/graphs/dijkstra.py b/Graphs/dijkstra.py similarity index 100% rename from graphs/dijkstra.py rename to Graphs/dijkstra.py diff --git a/data_structures/graph/dijkstra.py b/Graphs/dijkstra_2.py similarity index 100% rename from data_structures/graph/dijkstra.py rename to Graphs/dijkstra_2.py diff --git a/data_structures/graph/dijkstra_algorithm.py b/Graphs/dijkstra_algorithm.py similarity index 100% rename from data_structures/graph/dijkstra_algorithm.py rename to Graphs/dijkstra_algorithm.py diff --git a/data_structures/graph/even_tree.py b/Graphs/even_tree.py similarity index 100% rename from data_structures/graph/even_tree.py rename to Graphs/even_tree.py diff --git a/graphs/finding_bridges.py b/Graphs/finding_bridges.py similarity index 100% rename from graphs/finding_bridges.py rename to Graphs/finding_bridges.py diff --git a/data_structures/graph/floyd_warshall.py b/Graphs/floyd_warshall.py similarity index 100% rename from data_structures/graph/floyd_warshall.py rename to Graphs/floyd_warshall.py diff --git a/data_structures/graph/graph.py 
b/Graphs/graph.py similarity index 100% rename from data_structures/graph/graph.py rename to Graphs/graph.py diff --git a/data_structures/graph/graph_list.py b/Graphs/graph_list.py similarity index 100% rename from data_structures/graph/graph_list.py rename to Graphs/graph_list.py diff --git a/data_structures/graph/graph_matrix.py b/Graphs/graph_matrix.py similarity index 100% rename from data_structures/graph/graph_matrix.py rename to Graphs/graph_matrix.py diff --git a/graphs/kahns_algorithm_long.py b/Graphs/kahns_algorithm_long.py similarity index 100% rename from graphs/kahns_algorithm_long.py rename to Graphs/kahns_algorithm_long.py diff --git a/graphs/kahns_algorithm_topo.py b/Graphs/kahns_algorithm_topo.py similarity index 100% rename from graphs/kahns_algorithm_topo.py rename to Graphs/kahns_algorithm_topo.py diff --git a/graphs/minimum_spanning_tree_kruskal.py b/Graphs/minimum_spanning_tree_kruskal.py similarity index 100% rename from graphs/minimum_spanning_tree_kruskal.py rename to Graphs/minimum_spanning_tree_kruskal.py diff --git a/graphs/minimum_spanning_tree_prims.py b/Graphs/minimum_spanning_tree_prims.py similarity index 100% rename from graphs/minimum_spanning_tree_prims.py rename to Graphs/minimum_spanning_tree_prims.py diff --git a/graphs/multi_hueristic_astar.py b/Graphs/multi_hueristic_astar.py similarity index 100% rename from graphs/multi_hueristic_astar.py rename to Graphs/multi_hueristic_astar.py diff --git a/graphs/scc_kosaraju.py b/Graphs/scc_kosaraju.py similarity index 100% rename from graphs/scc_kosaraju.py rename to Graphs/scc_kosaraju.py diff --git a/graphs/tarjans_scc.py b/Graphs/tarjans_scc.py similarity index 100% rename from graphs/tarjans_scc.py rename to Graphs/tarjans_scc.py diff --git a/factorial_python.py b/maths/factorial_python.py similarity index 100% rename from factorial_python.py rename to maths/factorial_python.py From 2bbf8cd10935c620400ba213bba4b92c8b2cb5c0 Mon Sep 17 00:00:00 2001 From: "S. Sharma" <36388139+s1l3ntcat@users.noreply.github.com> Date: Wed, 27 Feb 2019 08:28:59 -0600 Subject: [PATCH 03/51] Added extended euclidean algorithm (#720) * Added extended euclidean algorithm * Fixed extended euclidean algorithm --- Maths/extended_euclidean_algorithm.py | 51 +++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) create mode 100644 Maths/extended_euclidean_algorithm.py diff --git a/Maths/extended_euclidean_algorithm.py b/Maths/extended_euclidean_algorithm.py new file mode 100644 index 000000000000..f5a3cc88e474 --- /dev/null +++ b/Maths/extended_euclidean_algorithm.py @@ -0,0 +1,51 @@ +# @Author: S. 
Sharma +# @Date: 2019-02-25T12:08:53-06:00 +# @Email: silentcat@protonmail.com +# @Last modified by: silentcat +# @Last modified time: 2019-02-26T07:07:38-06:00 + +import sys + +# Finds 2 numbers a and b such that it satisfies +# the equation am + bn = gcd(m, n) (a.k.a Bezout's Identity) +def extended_euclidean_algorithm(m, n): + a = 0; aprime = 1; b = 1; bprime = 0 + q = 0; r = 0 + if m > n: + c = m; d = n + else: + c = n; d = m + + while True: + q = int(c / d) + r = c % d + if r == 0: + break + c = d + d = r + + t = aprime + aprime = a + a = t - q*a + + t = bprime + bprime = b + b = t - q*b + + pair = None + if m > n: + pair = (a,b) + else: + pair = (b,a) + return pair + +def main(): + if len(sys.argv) < 3: + print('2 integer arguments required') + exit(1) + m = int(sys.argv[1]) + n = int(sys.argv[2]) + print(extended_euclidean_algorithm(m, n)) + +if __name__ == '__main__': + main() From dd9f0b3f2e859d24a2dd442fe2bd45fd08a9ac86 Mon Sep 17 00:00:00 2001 From: overclockedllama Date: Wed, 27 Feb 2019 15:33:29 +0100 Subject: [PATCH 04/51] fix comma spelling from coma to comma (#722) --- searches/linear_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/searches/linear_search.py b/searches/linear_search.py index 50c6eaad5e9b..058322f21d09 100644 --- a/searches/linear_search.py +++ b/searches/linear_search.py @@ -43,7 +43,7 @@ def linear_search(sequence, target): if __name__ == '__main__': - user_input = raw_input('Enter numbers separated by coma:\n').strip() + user_input = raw_input('Enter numbers separated by comma:\n').strip() sequence = [int(item) for item in user_input.split(',')] target_input = raw_input('Enter a single number to be found in the list:\n') From e6eb6dbb82623c8377230419a8633c3b2a0b3b63 Mon Sep 17 00:00:00 2001 From: Sanders Lin <45224617+SandersLin@users.noreply.github.com> Date: Thu, 28 Feb 2019 01:49:13 +0800 Subject: [PATCH 05/51] Delete Maths/find_hcf.py (#636) --- Maths/find_hcf.py | 22 ---------------------- 1 file changed, 22 deletions(-) delete mode 100644 Maths/find_hcf.py diff --git a/Maths/find_hcf.py b/Maths/find_hcf.py deleted file mode 100644 index e4315d8d37a7..000000000000 --- a/Maths/find_hcf.py +++ /dev/null @@ -1,22 +0,0 @@ -# Program to find the HCF of two Numbers -def find_hcf(num_1, num_2): - if num_1 == 0: - return num_2 - if num_2 == 0: - return num_1 - # Base Case - if num_1 == num_2: - return num_1 - if num_1 > num_2: - return find_hcf(num_1 - num_2, num_2) - return find_hcf(num_1, num_2 - num_1) - - -def main(): - num_1 = 24 - num_2 = 34 - print('HCF of %s and %s is %s:' % (num_1, num_2, find_hcf(num_1, num_2))) - - -if __name__ == '__main__': - main() From 88b6caa30aeb2d0e780039c13703af56c8ffdaa7 Mon Sep 17 00:00:00 2001 From: Ashwek Swamy <39827514+ashwek@users.noreply.github.com> Date: Fri, 1 Mar 2019 22:23:29 +0530 Subject: [PATCH 06/51] fixed balanced_parentheses, Added infix-prefix & postfix evaluation (#621) * Create infix_to_prefix_conversion.py * Create postfix_evaluation.py * Update balanced_parentheses.py --- .../stacks/infix_to_prefix_conversion.py | 61 +++++++++++++++++++ data_structures/stacks/postfix_evaluation.py | 50 +++++++++++++++ 2 files changed, 111 insertions(+) create mode 100644 data_structures/stacks/infix_to_prefix_conversion.py create mode 100644 data_structures/stacks/postfix_evaluation.py diff --git a/data_structures/stacks/infix_to_prefix_conversion.py b/data_structures/stacks/infix_to_prefix_conversion.py new file mode 100644 index 000000000000..da5fc261fb9f --- /dev/null +++ 
b/data_structures/stacks/infix_to_prefix_conversion.py @@ -0,0 +1,61 @@ +""" +Output: + +Enter an Infix Equation = a + b ^c + Symbol | Stack | Postfix +---------------------------- + c | | c + ^ | ^ | c + b | ^ | cb + + | + | cb^ + a | + | cb^a + | | cb^a+ + + a+b^c (Infix) -> +a^bc (Prefix) +""" + +def infix_2_postfix(Infix): + Stack = [] + Postfix = [] + priority = {'^':3, '*':2, '/':2, '%':2, '+':1, '-':1} # Priority of each operator + print_width = len(Infix) if(len(Infix)>7) else 7 + + # Print table header for output + print('Symbol'.center(8), 'Stack'.center(print_width), 'Postfix'.center(print_width), sep = " | ") + print('-'*(print_width*3+7)) + + for x in Infix: + if(x.isalpha() or x.isdigit()): Postfix.append(x) # if x is Alphabet / Digit, add it to Postfix + elif(x == '('): Stack.append(x) # if x is "(" push to Stack + elif(x == ')'): # if x is ")" pop stack until "(" is encountered + while(Stack[-1] != '('): + Postfix.append( Stack.pop() ) #Pop stack & add the content to Postfix + Stack.pop() + else: + if(len(Stack)==0): Stack.append(x) #If stack is empty, push x to stack + else: + while( len(Stack) > 0 and priority[x] <= priority[Stack[-1]]): # while priority of x is not greater than priority of element in the stack + Postfix.append( Stack.pop() ) # pop stack & add to Postfix + Stack.append(x) # push x to stack + + print(x.center(8), (''.join(Stack)).ljust(print_width), (''.join(Postfix)).ljust(print_width), sep = " | ") # Output in tabular format + + while(len(Stack) > 0): # while stack is not empty + Postfix.append( Stack.pop() ) # pop stack & add to Postfix + print(' '.center(8), (''.join(Stack)).ljust(print_width), (''.join(Postfix)).ljust(print_width), sep = " | ") # Output in tabular format + + return "".join(Postfix) # return Postfix as str + +def infix_2_prefix(Infix): + Infix = list(Infix[::-1]) # reverse the infix equation + + for i in range(len(Infix)): + if(Infix[i] == '('): Infix[i] = ')' # change "(" to ")" + elif(Infix[i] == ')'): Infix[i] = '(' # change ")" to "(" + + return (infix_2_postfix("".join(Infix)))[::-1] # call infix_2_postfix on Infix, return reverse of Postfix + +if __name__ == "__main__": + Infix = input("\nEnter an Infix Equation = ") #Input an Infix equation + Infix = "".join(Infix.split()) #Remove spaces from the input + print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)") diff --git a/data_structures/stacks/postfix_evaluation.py b/data_structures/stacks/postfix_evaluation.py new file mode 100644 index 000000000000..1786e71dd383 --- /dev/null +++ b/data_structures/stacks/postfix_evaluation.py @@ -0,0 +1,50 @@ +""" +Output: + +Enter a Postfix Equation (space separated) = 5 6 9 * + + Symbol | Action | Stack +----------------------------------- + 5 | push(5) | 5 + 6 | push(6) | 5,6 + 9 | push(9) | 5,6,9 + | pop(9) | 5,6 + | pop(6) | 5 + * | push(6*9) | 5,54 + | pop(54) | 5 + | pop(5) | + + | push(5+54) | 59 + + Result = 59 +""" + +import operator as op + +def Solve(Postfix): + Stack = [] + Div = lambda x, y: int(x/y) # integer division operation + Opr = {'^':op.pow, '*':op.mul, '/':Div, '+':op.add, '-':op.sub} # operators & their respective operation + + # print table header + print('Symbol'.center(8), 'Action'.center(12), 'Stack', sep = " | ") + print('-'*(30+len(Postfix))) + + for x in Postfix: + if( x.isdigit() ): # if x in digit + Stack.append(x) # append x to stack + print(x.rjust(8), ('push('+x+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format + else: + B = Stack.pop() # pop stack + print("".rjust(8), 
('pop('+B+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format + + A = Stack.pop() # pop stack + print("".rjust(8), ('pop('+A+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format + + Stack.append( str(Opr[x](int(A), int(B))) ) # evaluate the 2 values poped from stack & push result to stack + print(x.rjust(8), ('push('+A+x+B+')').ljust(12), ','.join(Stack), sep = " | ") # output in tabular format + + return int(Stack[0]) + + +if __name__ == "__main__": + Postfix = input("\n\nEnter a Postfix Equation (space separated) = ").split(' ') + print("\n\tResult = ", Solve(Postfix)) From 6f6510623c7250ebea78afbd3d6eab1bfe467ada Mon Sep 17 00:00:00 2001 From: Akash Ali <45498607+AkashAli506@users.noreply.github.com> Date: Mon, 4 Mar 2019 00:49:36 -0800 Subject: [PATCH 07/51] Update heap.py (#726) Added comments for the better understanding of heap. --- data_structures/heap/heap.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/data_structures/heap/heap.py b/data_structures/heap/heap.py index 8187af101308..39778f725c3a 100644 --- a/data_structures/heap/heap.py +++ b/data_structures/heap/heap.py @@ -7,8 +7,9 @@ except NameError: raw_input = input # Python 3 +#This heap class start from here. class Heap: - def __init__(self): + def __init__(self): #Default constructor of heap class. self.h = [] self.currsize = 0 @@ -37,13 +38,13 @@ def maxHeapify(self,node): self.h[m] = temp self.maxHeapify(m) - def buildHeap(self,a): + def buildHeap(self,a): #This function is used to build the heap from the data container 'a'. self.currsize = len(a) self.h = list(a) for i in range(self.currsize//2,-1,-1): self.maxHeapify(i) - def getMax(self): + def getMax(self): #This function is used to get maximum value from the heap. if self.currsize >= 1: me = self.h[0] temp = self.h[0] @@ -54,7 +55,7 @@ def getMax(self): return me return None - def heapSort(self): + def heapSort(self): #This function is used to sort the heap. size = self.currsize while self.currsize-1 >= 0: temp = self.h[0] @@ -64,7 +65,7 @@ def heapSort(self): self.maxHeapify(0) self.currsize = size - def insert(self,data): + def insert(self,data): #This function is used to insert data in the heap. self.h.append(data) curr = self.currsize self.currsize+=1 @@ -74,7 +75,7 @@ def insert(self,data): self.h[curr] = temp curr = curr/2 - def display(self): + def display(self): #This function is used to print the heap. print(self.h) def main(): From 2c67f6161ca4385d9728951f71c4d11fda2ef7df Mon Sep 17 00:00:00 2001 From: Akash Ali <45498607+AkashAli506@users.noreply.github.com> Date: Thu, 7 Mar 2019 20:53:29 +0500 Subject: [PATCH 08/51] Update basic_binary_tree.py (#725) I have added the comments for better understanding. --- binary_tree/basic_binary_tree.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/binary_tree/basic_binary_tree.py b/binary_tree/basic_binary_tree.py index 6cdeb1a6938c..5738e4ee114f 100644 --- a/binary_tree/basic_binary_tree.py +++ b/binary_tree/basic_binary_tree.py @@ -1,11 +1,11 @@ -class Node: +class Node: # This is the Class Node with constructor that contains data variable to type data and left,right pointers. def __init__(self, data): self.data = data self.left = None self.right = None -def depth_of_tree(tree): +def depth_of_tree(tree): #This is the recursive function to find the depth of binary tree. 
if tree is None: return 0 else: @@ -17,7 +17,7 @@ def depth_of_tree(tree): return 1 + depth_r_tree -def is_full_binary_tree(tree): +def is_full_binary_tree(tree): # This functions returns that is it full binary tree or not? if tree is None: return True if (tree.left is None) and (tree.right is None): @@ -28,7 +28,7 @@ def is_full_binary_tree(tree): return False -def main(): +def main(): # Main func for testing. tree = Node(1) tree.left = Node(2) tree.right = Node(3) From 8e67ac3b7672c1aa05c59bef29ad1b2c5520e07d Mon Sep 17 00:00:00 2001 From: Maxim Semenyuk <33791308+semenuk@users.noreply.github.com> Date: Sun, 10 Mar 2019 07:10:29 +0500 Subject: [PATCH 09/51] Fix '__bool__' method (#735) The method returns the truth when the stack is empty --- data_structures/stacks/stack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data_structures/stacks/stack.py b/data_structures/stacks/stack.py index 66af8c025d8c..7f979d927d08 100644 --- a/data_structures/stacks/stack.py +++ b/data_structures/stacks/stack.py @@ -17,7 +17,7 @@ def __init__(self, limit=10): self.limit = limit def __bool__(self): - return not bool(self.stack) + return bool(self.stack) def __str__(self): return str(self.stack) From 96c36f828689ab1c610311c7f191f4289fd7ff6c Mon Sep 17 00:00:00 2001 From: Ishani Date: Sun, 17 Mar 2019 23:42:22 +0530 Subject: [PATCH 10/51] added wiggle_sort.py (#734) * Wiggle_sort * Rename Wiggle_Sort to wiggle_sort.py --- sorts/wiggle_sort.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 sorts/wiggle_sort.py diff --git a/sorts/wiggle_sort.py b/sorts/wiggle_sort.py new file mode 100644 index 000000000000..cc83487bdeb1 --- /dev/null +++ b/sorts/wiggle_sort.py @@ -0,0 +1,21 @@ +""" +Given an unsorted array nums, reorder it such that nums[0] < nums[1] > nums[2] < nums[3].... +For example: +if input numbers = [3, 5, 2, 1, 6, 4] +one possible Wiggle Sorted answer is [3, 5, 1, 6, 2, 4]. 
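One way to sanity-check the wiggle property of an output list is sketched below (illustrative only; the is_wiggle helper is hypothetical and not part of this patch):

>>> def is_wiggle(nums):
...     return all(nums[i - 1] <= nums[i] if i % 2 else nums[i - 1] >= nums[i]
...                for i in range(1, len(nums)))
>>> is_wiggle([3, 5, 1, 6, 2, 4])
True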
+""" +def wiggle_sort(nums): + for i in range(len(nums)): + if (i % 2 == 1) == (nums[i-1] > nums[i]): + nums[i-1], nums[i] = nums[i], nums[i-1] + + +print("Enter the array elements:\n") +array=list(map(int,input().split())) +print("The unsorted array is:\n") +print(array) +wiggle_sort(array) +print("Array after Wiggle sort:\n") +print(array) + + From d27968b78d721193f3dd7e11c8d7c50b3160167c Mon Sep 17 00:00:00 2001 From: Ishani Date: Wed, 20 Mar 2019 21:29:35 +0530 Subject: [PATCH 11/51] Create Searching in sorted matrix (#738) * Create Searching in sorted matrix * Rename Searching in sorted matrix to searching_in_sorted_matrix.py --- matrix/searching_in_sorted_matrix.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 matrix/searching_in_sorted_matrix.py diff --git a/matrix/searching_in_sorted_matrix.py b/matrix/searching_in_sorted_matrix.py new file mode 100644 index 000000000000..54913b350803 --- /dev/null +++ b/matrix/searching_in_sorted_matrix.py @@ -0,0 +1,27 @@ +def search_in_a_sorted_matrix(mat, m, n, key): + i, j = m - 1, 0 + while i >= 0 and j < n: + if key == mat[i][j]: + print('Key %s found at row- %s column- %s' % (key, i + 1, j + 1)) + return + if key < mat[i][j]: + i -= 1 + else: + j += 1 + print('Key %s not found' % (key)) + + +def main(): + mat = [ + [2, 5, 7], + [4, 8, 13], + [9, 11, 15], + [12, 17, 20] + ] + x = int(input("Enter the element to be searched:")) + print(mat) + search_in_a_sorted_matrix(mat, len(mat), len(mat[0]), x) + + +if __name__ == '__main__': + main() From 8b8a6d881cbda0e6d64aacf5284fbccf27772eec Mon Sep 17 00:00:00 2001 From: louisparis <48298425+louisparis@users.noreply.github.com> Date: Wed, 27 Mar 2019 19:46:46 +0200 Subject: [PATCH 12/51] reduce indentation (#741) --- file_transfer_protocol/ftp_send_receive.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/file_transfer_protocol/ftp_send_receive.py b/file_transfer_protocol/ftp_send_receive.py index 6050c83f2253..6a9819ef3f21 100644 --- a/file_transfer_protocol/ftp_send_receive.py +++ b/file_transfer_protocol/ftp_send_receive.py @@ -1,11 +1,11 @@ """ - File transfer protocol used to send and receive files using FTP server. - Use credentials to provide access to the FTP client +File transfer protocol used to send and receive files using FTP server. 
+Use credentials to provide access to the FTP client - Note: Do not use root username & password for security reasons - Create a seperate user and provide access to a home directory of the user - Use login id and password of the user created - cwd here stands for current working directory +Note: Do not use root username & password for security reasons +Create a seperate user and provide access to a home directory of the user +Use login id and password of the user created +cwd here stands for current working directory """ from ftplib import FTP @@ -14,8 +14,8 @@ ftp.cwd('/Enter the directory here/') """ - The file which will be received via the FTP server - Enter the location of the file where the file is received +The file which will be received via the FTP server +Enter the location of the file where the file is received """ def ReceiveFile(): @@ -25,8 +25,8 @@ def ReceiveFile(): ftp.quit() """ - The file which will be sent via the FTP server - The file send will be send to the current working directory +The file which will be sent via the FTP server +The file send will be send to the current working directory """ def SendFile(): From 441b82a95f38827cee550215f7cf6018d0258c57 Mon Sep 17 00:00:00 2001 From: RayCurse <38709018+RayCurse@users.noreply.github.com> Date: Wed, 27 Mar 2019 13:50:43 -0400 Subject: [PATCH 13/51] More matrix algorithms (#745) * added matrix minor * added matrix determinant * added inverse,scalar multiply, identity, transpose --- matrix/matrix_multiplication_addition.py | 41 +++++++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/matrix/matrix_multiplication_addition.py b/matrix/matrix_multiplication_addition.py index c387c43d4a85..dd50db729e43 100644 --- a/matrix/matrix_multiplication_addition.py +++ b/matrix/matrix_multiplication_addition.py @@ -10,6 +10,8 @@ def add(matrix_a, matrix_b): matrix_c.append(list_1) return matrix_c +def scalarMultiply(matrix , n): + return [[x * n for x in row] for row in matrix] def multiply(matrix_a, matrix_b): matrix_c = [] @@ -24,13 +26,50 @@ def multiply(matrix_a, matrix_b): matrix_c.append(list_1) return matrix_c +def identity(n): + return [[int(row == column) for column in range(n)] for row in range(n)] + +def transpose(matrix): + return map(list , zip(*matrix)) + +def minor(matrix, row, column): + minor = matrix[:row] + matrix[row + 1:] + minor = [row[:column] + row[column + 1:] for row in minor] + return minor + +def determinant(matrix): + if len(matrix) == 1: return matrix[0][0] + + res = 0 + for x in range(len(matrix)): + res += matrix[0][x] * determinant(minor(matrix , 0 , x)) * (-1) ** x + return res + +def inverse(matrix): + det = determinant(matrix) + if det == 0: return None + + matrixMinor = [[] for _ in range(len(matrix))] + for i in range(len(matrix)): + for j in range(len(matrix)): + matrixMinor[i].append(determinant(minor(matrix , i , j))) + + cofactors = [[x * (-1) ** (row + col) for col, x in enumerate(matrixMinor[row])] for row in range(len(matrix))] + adjugate = transpose(cofactors) + return scalarMultiply(adjugate , 1/det) def main(): matrix_a = [[12, 10], [3, 9]] matrix_b = [[3, 4], [7, 4]] + matrix_c = [[11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34], [41, 42, 43, 44]] + matrix_d = [[3, 0, 2], [2, 0, -2], [0, 1, 1]] + print(add(matrix_a, matrix_b)) print(multiply(matrix_a, matrix_b)) - + print(identity(5)) + print(minor(matrix_c , 1 , 2)) + print(determinant(matrix_b)) + print(inverse(matrix_d)) if __name__ == '__main__': main() From bb29dc55faf5b98b1e9f0b1f11a9dd15525386a3 
Mon Sep 17 00:00:00 2001 From: Aditya Haridas Menon Date: Wed, 27 Mar 2019 23:29:31 +0530 Subject: [PATCH 14/51] Bitmasking and DP added (#705) --- dynamic_programming/bitmask.py | 90 ++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 dynamic_programming/bitmask.py diff --git a/dynamic_programming/bitmask.py b/dynamic_programming/bitmask.py new file mode 100644 index 000000000000..213b22fe9051 --- /dev/null +++ b/dynamic_programming/bitmask.py @@ -0,0 +1,90 @@ +""" + +This is a python implementation for questions involving task assignments between people. +Here Bitmasking and DP are used for solving this. + +Question :- +We have N tasks and M people. Each person in M can do only certain of these tasks. Also a person can do only one task and a task is performed only by one person. +Find the total no of ways in which the tasks can be distributed. + + +""" +from __future__ import print_function +from collections import defaultdict + + +class AssignmentUsingBitmask: + def __init__(self,task_performed,total): + + self.total_tasks = total #total no of tasks (N) + + # DP table will have a dimension of (2^M)*N + # initially all values are set to -1 + self.dp = [[-1 for i in range(total+1)] for j in range(2**len(task_performed))] + + self.task = defaultdict(list) #stores the list of persons for each task + + #finalmask is used to check if all persons are included by setting all bits to 1 + self.finalmask = (1< self.total_tasks: + return 0 + + #if case already considered + if self.dp[mask][taskno]!=-1: + return self.dp[mask][taskno] + + # Number of ways when we dont this task in the arrangement + total_ways_util = self.CountWaysUtil(mask,taskno+1) + + # now assign the tasks one by one to all possible persons and recursively assign for the remaining tasks. 
+ if taskno in self.task: + for p in self.task[taskno]: + + # if p is already given a task + if mask & (1< Date: Wed, 3 Apr 2019 21:27:36 +0200 Subject: [PATCH 15/51] Added Trafid Cipher (#746) --- ciphers/trafid_cipher.py | 86 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) create mode 100644 ciphers/trafid_cipher.py diff --git a/ciphers/trafid_cipher.py b/ciphers/trafid_cipher.py new file mode 100644 index 000000000000..0453272f26a0 --- /dev/null +++ b/ciphers/trafid_cipher.py @@ -0,0 +1,86 @@ +#https://en.wikipedia.org/wiki/Trifid_cipher + +def __encryptPart(messagePart, character2Number): + one, two, three = "", "", "" + tmp = [] + + for character in messagePart: + tmp.append(character2Number[character]) + + for each in tmp: + one += each[0] + two += each[1] + three += each[2] + + return one+two+three + +def __decryptPart(messagePart, character2Number): + tmp, thisPart = "", "" + result = [] + + for character in messagePart: + thisPart += character2Number[character] + + for digit in thisPart: + tmp += digit + if len(tmp) == len(messagePart): + result.append(tmp) + tmp = "" + + return result[0], result[1], result[2] + +def __prepare(message, alphabet): + #Validate message and alphabet, set to upper and remove spaces + alphabet = alphabet.replace(" ", "").upper() + message = message.replace(" ", "").upper() + + #Check length and characters + if len(alphabet) != 27: + raise KeyError("Length of alphabet has to be 27.") + for each in message: + if each not in alphabet: + raise ValueError("Each message character has to be included in alphabet!") + + #Generate dictionares + numbers = ("111","112","113","121","122","123","131","132","133","211","212","213","221","222","223","231","232","233","311","312","313","321","322","323","331","332","333") + character2Number = {} + number2Character = {} + for letter, number in zip(alphabet, numbers): + character2Number[letter] = number + number2Character[number] = letter + + return message, alphabet, character2Number, number2Character + +def encryptMessage(message, alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period=5): + message, alphabet, character2Number, number2Character = __prepare(message, alphabet) + encrypted, encrypted_numeric = "", "" + + for i in range(0, len(message)+1, period): + encrypted_numeric += __encryptPart(message[i:i+period], character2Number) + + for i in range(0, len(encrypted_numeric), 3): + encrypted += number2Character[encrypted_numeric[i:i+3]] + + return encrypted + +def decryptMessage(message, alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.", period=5): + message, alphabet, character2Number, number2Character = __prepare(message, alphabet) + decrypted_numeric = [] + decrypted = "" + + for i in range(0, len(message)+1, period): + a,b,c = __decryptPart(message[i:i+period], character2Number) + + for j in range(0, len(a)): + decrypted_numeric.append(a[j]+b[j]+c[j]) + + for each in decrypted_numeric: + decrypted += number2Character[each] + + return decrypted + +if __name__ == '__main__': + msg = "DEFEND THE EAST WALL OF THE CASTLE." 
+ encrypted = encryptMessage(msg,"EPSDUCVWYM.ZLKXNBTFGORIJHAQ") + decrypted = decryptMessage(encrypted, "EPSDUCVWYM.ZLKXNBTFGORIJHAQ") + print ("Encrypted: {}\nDecrypted: {}".format(encrypted, decrypted)) \ No newline at end of file From 15bc87fb416908230315a5f18adc94f950080c13 Mon Sep 17 00:00:00 2001 From: Ishani Date: Thu, 4 Apr 2019 16:40:11 +0530 Subject: [PATCH 16/51] Create is_Palindrome (#740) --- data_structures/linked_list/is_Palindrome | 77 +++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 data_structures/linked_list/is_Palindrome diff --git a/data_structures/linked_list/is_Palindrome b/data_structures/linked_list/is_Palindrome new file mode 100644 index 000000000000..acc87c1c272b --- /dev/null +++ b/data_structures/linked_list/is_Palindrome @@ -0,0 +1,77 @@ +def is_palindrome(head): + if not head: + return True + # split the list to two parts + fast, slow = head.next, head + while fast and fast.next: + fast = fast.next.next + slow = slow.next + second = slow.next + slow.next = None # Don't forget here! But forget still works! + # reverse the second part + node = None + while second: + nxt = second.next + second.next = node + node = second + second = nxt + # compare two parts + # second part has the same or one less node + while node: + if node.val != head.val: + return False + node = node.next + head = head.next + return True + + +def is_palindrome_stack(head): + if not head or not head.next: + return True + + # 1. Get the midpoint (slow) + slow = fast = cur = head + while fast and fast.next: + fast, slow = fast.next.next, slow.next + + # 2. Push the second half into the stack + stack = [slow.val] + while slow.next: + slow = slow.next + stack.append(slow.val) + + # 3. Comparison + while stack: + if stack.pop() != cur.val: + return False + cur = cur.next + + return True + + +def is_palindrome_dict(head): + if not head or not head.next: + return True + d = {} + pos = 0 + while head: + if head.val in d.keys(): + d[head.val].append(pos) + else: + d[head.val] = [pos] + head = head.next + pos += 1 + checksum = pos - 1 + middle = 0 + for v in d.values(): + if len(v) % 2 != 0: + middle += 1 + else: + step = 0 + for i in range(0, len(v)): + if v[i] + v[len(v) - 1 - step] != checksum: + return False + step += 1 + if middle > 1: + return False + return True From 56de3df784a8ff2bca54946b9218ca039776a2d7 Mon Sep 17 00:00:00 2001 From: Ahish Date: Sun, 7 Apr 2019 21:23:50 +0530 Subject: [PATCH 17/51] Update basic_binary_tree.py (#748) --- binary_tree/basic_binary_tree.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/binary_tree/basic_binary_tree.py b/binary_tree/basic_binary_tree.py index 5738e4ee114f..7c6240fb4dd4 100644 --- a/binary_tree/basic_binary_tree.py +++ b/binary_tree/basic_binary_tree.py @@ -4,6 +4,20 @@ def __init__(self, data): self.left = None self.right = None +def display(tree): #In Order traversal of the tree + + if tree is None: + return + + if tree.left is not None: + display(tree.left) + + print(tree.data) + + if tree.right is not None: + display(tree.right) + + return def depth_of_tree(tree): #This is the recursive function to find the depth of binary tree. if tree is None: @@ -41,6 +55,8 @@ def main(): # Main func for testing. 
print(is_full_binary_tree(tree)) print(depth_of_tree(tree)) + print("Tree is: ") + display(tree) if __name__ == '__main__': From 137871bfef005f8c53d9c2f20cc8ba7b5d944eeb Mon Sep 17 00:00:00 2001 From: rohan11074 <34051577+rohan11074@users.noreply.github.com> Date: Sun, 7 Apr 2019 21:25:32 +0530 Subject: [PATCH 18/51] feature to add input (#749) --- .../linked_list/singly_linked_list.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/data_structures/linked_list/singly_linked_list.py b/data_structures/linked_list/singly_linked_list.py index 0b9e44768e15..5ae97523b9a1 100644 --- a/data_structures/linked_list/singly_linked_list.py +++ b/data_structures/linked_list/singly_linked_list.py @@ -70,16 +70,20 @@ def reverse(self): def main(): A = Linked_List() - print("Inserting 10 at Head") - A.insert_head(10) - print("Inserting 0 at Head") - A.insert_head(0) + print("Inserting 1st at Head") + a1=input() + A.insert_head(a1) + print("Inserting 2nd at Head") + a2=input() + A.insert_head(a2) print("\nPrint List : ") A.printList() - print("\nInserting 100 at Tail") - A.insert_tail(100) - print("Inserting 1000 at Tail") - A.insert_tail(1000) + print("\nInserting 1st at Tail") + a3=input() + A.insert_tail(a3) + print("Inserting 2nd at Tail") + a4=input() + A.insert_tail(a4) print("\nPrint List : ") A.printList() print("\nDelete Head") From 52d2fbf3cfd646b2dafd5a451c8863887a261e85 Mon Sep 17 00:00:00 2001 From: Reshad Hasan Date: Wed, 10 Apr 2019 21:59:49 +0600 Subject: [PATCH 19/51] Add lowest common ancestor to data structures (#732) * add longest common ancestor in data structures * add lowest common ancestor to data structures --- data_structures/LCA.py | 91 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 data_structures/LCA.py diff --git a/data_structures/LCA.py b/data_structures/LCA.py new file mode 100644 index 000000000000..9c9d8ca629c7 --- /dev/null +++ b/data_structures/LCA.py @@ -0,0 +1,91 @@ +import queue + + +def swap(a, b): + a ^= b + b ^= a + a ^= b + return a, b + + +# creating sparse table which saves each nodes 2^ith parent +def creatSparse(max_node, parent): + j = 1 + while (1 << j) < max_node: + for i in range(1, max_node + 1): + parent[j][i] = parent[j - 1][parent[j - 1][i]] + j += 1 + return parent + + +# returns lca of node u,v +def LCA(u, v, level, parent): + # u must be deeper in the tree than v + if level[u] < level[v]: + u, v = swap(u, v) + # making depth of u same as depth of v + for i in range(18, -1, -1): + if level[u] - (1 << i) >= level[v]: + u = parent[i][u] + # at the same depth if u==v that mean lca is found + if u == v: + return u + # moving both nodes upwards till lca in found + for i in range(18, -1, -1): + if parent[i][u] != 0 and parent[i][u] != parent[i][v]: + u, v = parent[i][u], parent[i][v] + # returning longest common ancestor of u,v + return parent[0][u] + + +# runs a breadth first search from root node of the tree +# sets every nodes direct parent +# parent of root node is set to 0 +# calculates depth of each node from root node +def bfs(level, parent, max_node, graph, root=1): + level[root] = 0 + q = queue.Queue(maxsize=max_node) + q.put(root) + while q.qsize() != 0: + u = q.get() + for v in graph[u]: + if level[v] == -1: + level[v] = level[u] + 1 + q.put(v) + parent[0][v] = u + return level, parent + + +def main(): + max_node = 13 + # initializing with 0 + parent = [[0 for _ in range(max_node + 10)] for _ in range(20)] + # initializing with -1 which means every node is 
unvisited + level = [-1 for _ in range(max_node + 10)] + graph = { + 1: [2, 3, 4], + 2: [5], + 3: [6, 7], + 4: [8], + 5: [9, 10], + 6: [11], + 7: [], + 8: [12, 13], + 9: [], + 10: [], + 11: [], + 12: [], + 13: [] + } + level, parent = bfs(level, parent, max_node, graph, 1) + parent = creatSparse(max_node, parent) + print("LCA of node 1 and 3 is: ", LCA(1, 3, level, parent)) + print("LCA of node 5 and 6 is: ", LCA(5, 6, level, parent)) + print("LCA of node 7 and 11 is: ", LCA(7, 11, level, parent)) + print("LCA of node 6 and 7 is: ", LCA(6, 7, level, parent)) + print("LCA of node 4 and 12 is: ", LCA(4, 12, level, parent)) + print("LCA of node 8 and 8 is: ", LCA(8, 8, level, parent)) + + +if __name__ == "__main__": + main() From b2f1d9c337a104f1f2f52d87f03f53c5431313f2 Mon Sep 17 00:00:00 2001 From: WILFRIED NJANGUI Date: Sun, 14 Apr 2019 13:58:16 +0200 Subject: [PATCH 20/51] implementation of tower_of_hanoi algorithm (#756) --- maths/Hanoi.py | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 maths/Hanoi.py diff --git a/maths/Hanoi.py b/maths/Hanoi.py new file mode 100644 index 000000000000..dd04d0fa58d8 --- /dev/null +++ b/maths/Hanoi.py @@ -0,0 +1,24 @@ +# @author willx75 +# Tower of Hanoi recursion game algorithm is a game, it consists of three rods and a number of disks of different sizes, which can slide onto any rod + +import logging + +log = logging.getLogger() +logging.basicConfig(level=logging.DEBUG) + + +def Tower_Of_Hanoi(n, source, dest, by, mouvement): + if n == 0: + return n + elif n == 1: + mouvement += 1 + # no print statement (you could make it an optional flag for printing logs) + logging.debug('Move the plate from', source, 'to', dest) + return mouvement + else: + + mouvement = mouvement + Tower_Of_Hanoi(n-1, source, by, dest, 0) + logging.debug('Move the plate from', source, 'to', dest) + + mouvement = mouvement + 1 + Tower_Of_Hanoi(n-1, by, dest, source, 0) + return mouvement From a170997eafc15733baa70a858600a47c34daacf2 Mon Sep 17 00:00:00 2001 From: jfeng43 Date: Fri, 19 Apr 2019 11:31:06 -0400 Subject: [PATCH 21/51] Add animation for heap sort --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 7cf1f4f64ffd..faebd313507a 100644 --- a/README.md +++ b/README.md @@ -89,6 +89,7 @@ __Properties__ ### Heap +![alt text][heapsort-image] **Heapsort** is a _comparison-based_ sorting algorithm. It can be thought of as an improved selection sort. It divides its input into a sorted and an unsorted region, and it iteratively shrinks the unsorted region by extracting the largest element and moving that to the sorted region. 
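To make the heap sort description above concrete, here is a minimal, self-contained Python sketch of the idea (an illustration only, assuming a max-heap sift-down; it is not the implementation shipped in this repository's sorts/ directory):

    # Build a max-heap, then repeatedly move the root (largest element) to the
    # end of the shrinking unsorted region.
    def heap_sort(arr):
        def sift_down(heap, start, end):
            root = start
            while 2 * root + 1 <= end:
                child = 2 * root + 1
                if child + 1 <= end and heap[child] < heap[child + 1]:
                    child += 1          # pick the larger child
                if heap[root] < heap[child]:
                    heap[root], heap[child] = heap[child], heap[root]
                    root = child
                else:
                    return

        n = len(arr)
        for start in range(n // 2 - 1, -1, -1):   # heapify the whole array
            sift_down(arr, start, n - 1)
        for end in range(n - 1, 0, -1):           # extract the max into the sorted region
            arr[0], arr[end] = arr[end], arr[0]
            sift_down(arr, 0, end - 1)
        return arr

    print(heap_sort([35, 12, 43, 8, 51]))  # [8, 12, 35, 43, 51]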
@@ -311,6 +312,7 @@ where {\displaystyle \oplus } \oplus denotes the exclusive disjunction (XOR) op [quick-wiki]: https://en.wikipedia.org/wiki/Quicksort [quick-image]: https://upload.wikimedia.org/wikipedia/commons/6/6a/Sorting_quicksort_anim.gif "Quick Sort" +[heapsort-image]: https://upload.wikimedia.org/wikipedia/commons/4/4d/Heapsort-example.gif "Heap Sort" [heap-wiki]: https://en.wikipedia.org/wiki/Heapsort [radix-wiki]: https://en.wikipedia.org/wiki/Radix_sort From a91f0e7ca07fbd176af6eb4f89d0d592a6fff620 Mon Sep 17 00:00:00 2001 From: Sanders Lin <45224617+SandersLin@users.noreply.github.com> Date: Sat, 20 Apr 2019 00:00:40 +0800 Subject: [PATCH 22/51] Updated Euler problem 21 sol1.py --- project_euler/problem_21/sol1.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/project_euler/problem_21/sol1.py b/project_euler/problem_21/sol1.py index 6d137a7d4332..da29a5c7b631 100644 --- a/project_euler/problem_21/sol1.py +++ b/project_euler/problem_21/sol1.py @@ -24,19 +24,7 @@ def sum_of_divisors(n): total += i + n//i elif i == sqrt(n): total += i - return total-n -sums = [] -total = 0 - -for i in xrange(1, 10000): - n = sum_of_divisors(i) - - if n < len(sums): - if sums[n-1] == i: - total += n + i - - sums.append(n) - -print(total) \ No newline at end of file +total = [i for i in range(1,10000) if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i] +print(sum(total)) From 48bba495ae1007c9e21a0c11d5b95585825f9a9e Mon Sep 17 00:00:00 2001 From: John Law Date: Sat, 20 Apr 2019 15:13:02 +0800 Subject: [PATCH 23/51] Rename is_Palindrome to is_Palindrome.py (#752) --- data_structures/linked_list/{is_Palindrome => is_Palindrome.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename data_structures/linked_list/{is_Palindrome => is_Palindrome.py} (100%) diff --git a/data_structures/linked_list/is_Palindrome b/data_structures/linked_list/is_Palindrome.py similarity index 100% rename from data_structures/linked_list/is_Palindrome rename to data_structures/linked_list/is_Palindrome.py From df04d9454332dc0ea916a6dbe6da2b71d7ae9782 Mon Sep 17 00:00:00 2001 From: Vysor Date: Tue, 23 Apr 2019 00:53:56 +1000 Subject: [PATCH 24/51] Some directories had a capital in their name [fixed]. Added a recursive factorial algorithm. 
(#763) * Renaming directories * Adding a recursive factorial algorithm --- {Graphs => graphs}/BFS.py | 0 {Graphs => graphs}/DFS.py | 0 .../Directed and Undirected (Weighted) Graph.py | 0 {Graphs => graphs}/a_star.py | 0 {Graphs => graphs}/articulation_points.py | 0 {Graphs => graphs}/basic_graphs.py | 0 {Graphs => graphs}/bellman_ford.py | 0 {Graphs => graphs}/breadth_first_search.py | 0 {Graphs => graphs}/check_bipartite_graph_bfs.py | 0 {Graphs => graphs}/depth_first_search.py | 0 {Graphs => graphs}/dijkstra.py | 0 {Graphs => graphs}/dijkstra_2.py | 0 {Graphs => graphs}/dijkstra_algorithm.py | 0 {Graphs => graphs}/even_tree.py | 0 {Graphs => graphs}/finding_bridges.py | 0 {Graphs => graphs}/floyd_warshall.py | 0 {Graphs => graphs}/graph.py | 0 {Graphs => graphs}/graph_list.py | 0 {Graphs => graphs}/graph_matrix.py | 0 {Graphs => graphs}/kahns_algorithm_long.py | 0 {Graphs => graphs}/kahns_algorithm_topo.py | 0 {Graphs => graphs}/minimum_spanning_tree_kruskal.py | 0 {Graphs => graphs}/minimum_spanning_tree_prims.py | 0 {Graphs => graphs}/multi_hueristic_astar.py | 0 {Graphs => graphs}/scc_kosaraju.py | 0 {Graphs => graphs}/tarjans_scc.py | 0 {Maths => maths}/3n+1.py | 0 {Maths => maths}/FindMax.py | 0 {Maths => maths}/FindMin.py | 0 {Maths => maths}/abs.py | 0 {Maths => maths}/absMax.py | 0 {Maths => maths}/absMin.py | 0 {Maths => maths}/average.py | 0 {Maths => maths}/extended_euclidean_algorithm.py | 0 maths/factorial_recursive.py | 13 +++++++++++++ {Maths => maths}/find_lcm.py | 0 36 files changed, 13 insertions(+) rename {Graphs => graphs}/BFS.py (100%) rename {Graphs => graphs}/DFS.py (100%) rename {Graphs => graphs}/Directed and Undirected (Weighted) Graph.py (100%) rename {Graphs => graphs}/a_star.py (100%) rename {Graphs => graphs}/articulation_points.py (100%) rename {Graphs => graphs}/basic_graphs.py (100%) rename {Graphs => graphs}/bellman_ford.py (100%) rename {Graphs => graphs}/breadth_first_search.py (100%) rename {Graphs => graphs}/check_bipartite_graph_bfs.py (100%) rename {Graphs => graphs}/depth_first_search.py (100%) rename {Graphs => graphs}/dijkstra.py (100%) rename {Graphs => graphs}/dijkstra_2.py (100%) rename {Graphs => graphs}/dijkstra_algorithm.py (100%) rename {Graphs => graphs}/even_tree.py (100%) rename {Graphs => graphs}/finding_bridges.py (100%) rename {Graphs => graphs}/floyd_warshall.py (100%) rename {Graphs => graphs}/graph.py (100%) rename {Graphs => graphs}/graph_list.py (100%) rename {Graphs => graphs}/graph_matrix.py (100%) rename {Graphs => graphs}/kahns_algorithm_long.py (100%) rename {Graphs => graphs}/kahns_algorithm_topo.py (100%) rename {Graphs => graphs}/minimum_spanning_tree_kruskal.py (100%) rename {Graphs => graphs}/minimum_spanning_tree_prims.py (100%) rename {Graphs => graphs}/multi_hueristic_astar.py (100%) rename {Graphs => graphs}/scc_kosaraju.py (100%) rename {Graphs => graphs}/tarjans_scc.py (100%) rename {Maths => maths}/3n+1.py (100%) rename {Maths => maths}/FindMax.py (100%) rename {Maths => maths}/FindMin.py (100%) rename {Maths => maths}/abs.py (100%) rename {Maths => maths}/absMax.py (100%) rename {Maths => maths}/absMin.py (100%) rename {Maths => maths}/average.py (100%) rename {Maths => maths}/extended_euclidean_algorithm.py (100%) create mode 100644 maths/factorial_recursive.py rename {Maths => maths}/find_lcm.py (100%) diff --git a/Graphs/BFS.py b/graphs/BFS.py similarity index 100% rename from Graphs/BFS.py rename to graphs/BFS.py diff --git a/Graphs/DFS.py b/graphs/DFS.py similarity index 100% rename from Graphs/DFS.py 
rename to graphs/DFS.py diff --git a/Graphs/Directed and Undirected (Weighted) Graph.py b/graphs/Directed and Undirected (Weighted) Graph.py similarity index 100% rename from Graphs/Directed and Undirected (Weighted) Graph.py rename to graphs/Directed and Undirected (Weighted) Graph.py diff --git a/Graphs/a_star.py b/graphs/a_star.py similarity index 100% rename from Graphs/a_star.py rename to graphs/a_star.py diff --git a/Graphs/articulation_points.py b/graphs/articulation_points.py similarity index 100% rename from Graphs/articulation_points.py rename to graphs/articulation_points.py diff --git a/Graphs/basic_graphs.py b/graphs/basic_graphs.py similarity index 100% rename from Graphs/basic_graphs.py rename to graphs/basic_graphs.py diff --git a/Graphs/bellman_ford.py b/graphs/bellman_ford.py similarity index 100% rename from Graphs/bellman_ford.py rename to graphs/bellman_ford.py diff --git a/Graphs/breadth_first_search.py b/graphs/breadth_first_search.py similarity index 100% rename from Graphs/breadth_first_search.py rename to graphs/breadth_first_search.py diff --git a/Graphs/check_bipartite_graph_bfs.py b/graphs/check_bipartite_graph_bfs.py similarity index 100% rename from Graphs/check_bipartite_graph_bfs.py rename to graphs/check_bipartite_graph_bfs.py diff --git a/Graphs/depth_first_search.py b/graphs/depth_first_search.py similarity index 100% rename from Graphs/depth_first_search.py rename to graphs/depth_first_search.py diff --git a/Graphs/dijkstra.py b/graphs/dijkstra.py similarity index 100% rename from Graphs/dijkstra.py rename to graphs/dijkstra.py diff --git a/Graphs/dijkstra_2.py b/graphs/dijkstra_2.py similarity index 100% rename from Graphs/dijkstra_2.py rename to graphs/dijkstra_2.py diff --git a/Graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py similarity index 100% rename from Graphs/dijkstra_algorithm.py rename to graphs/dijkstra_algorithm.py diff --git a/Graphs/even_tree.py b/graphs/even_tree.py similarity index 100% rename from Graphs/even_tree.py rename to graphs/even_tree.py diff --git a/Graphs/finding_bridges.py b/graphs/finding_bridges.py similarity index 100% rename from Graphs/finding_bridges.py rename to graphs/finding_bridges.py diff --git a/Graphs/floyd_warshall.py b/graphs/floyd_warshall.py similarity index 100% rename from Graphs/floyd_warshall.py rename to graphs/floyd_warshall.py diff --git a/Graphs/graph.py b/graphs/graph.py similarity index 100% rename from Graphs/graph.py rename to graphs/graph.py diff --git a/Graphs/graph_list.py b/graphs/graph_list.py similarity index 100% rename from Graphs/graph_list.py rename to graphs/graph_list.py diff --git a/Graphs/graph_matrix.py b/graphs/graph_matrix.py similarity index 100% rename from Graphs/graph_matrix.py rename to graphs/graph_matrix.py diff --git a/Graphs/kahns_algorithm_long.py b/graphs/kahns_algorithm_long.py similarity index 100% rename from Graphs/kahns_algorithm_long.py rename to graphs/kahns_algorithm_long.py diff --git a/Graphs/kahns_algorithm_topo.py b/graphs/kahns_algorithm_topo.py similarity index 100% rename from Graphs/kahns_algorithm_topo.py rename to graphs/kahns_algorithm_topo.py diff --git a/Graphs/minimum_spanning_tree_kruskal.py b/graphs/minimum_spanning_tree_kruskal.py similarity index 100% rename from Graphs/minimum_spanning_tree_kruskal.py rename to graphs/minimum_spanning_tree_kruskal.py diff --git a/Graphs/minimum_spanning_tree_prims.py b/graphs/minimum_spanning_tree_prims.py similarity index 100% rename from Graphs/minimum_spanning_tree_prims.py rename to 
graphs/minimum_spanning_tree_prims.py diff --git a/Graphs/multi_hueristic_astar.py b/graphs/multi_hueristic_astar.py similarity index 100% rename from Graphs/multi_hueristic_astar.py rename to graphs/multi_hueristic_astar.py diff --git a/Graphs/scc_kosaraju.py b/graphs/scc_kosaraju.py similarity index 100% rename from Graphs/scc_kosaraju.py rename to graphs/scc_kosaraju.py diff --git a/Graphs/tarjans_scc.py b/graphs/tarjans_scc.py similarity index 100% rename from Graphs/tarjans_scc.py rename to graphs/tarjans_scc.py diff --git a/Maths/3n+1.py b/maths/3n+1.py similarity index 100% rename from Maths/3n+1.py rename to maths/3n+1.py diff --git a/Maths/FindMax.py b/maths/FindMax.py similarity index 100% rename from Maths/FindMax.py rename to maths/FindMax.py diff --git a/Maths/FindMin.py b/maths/FindMin.py similarity index 100% rename from Maths/FindMin.py rename to maths/FindMin.py diff --git a/Maths/abs.py b/maths/abs.py similarity index 100% rename from Maths/abs.py rename to maths/abs.py diff --git a/Maths/absMax.py b/maths/absMax.py similarity index 100% rename from Maths/absMax.py rename to maths/absMax.py diff --git a/Maths/absMin.py b/maths/absMin.py similarity index 100% rename from Maths/absMin.py rename to maths/absMin.py diff --git a/Maths/average.py b/maths/average.py similarity index 100% rename from Maths/average.py rename to maths/average.py diff --git a/Maths/extended_euclidean_algorithm.py b/maths/extended_euclidean_algorithm.py similarity index 100% rename from Maths/extended_euclidean_algorithm.py rename to maths/extended_euclidean_algorithm.py diff --git a/maths/factorial_recursive.py b/maths/factorial_recursive.py new file mode 100644 index 000000000000..41391a2718f6 --- /dev/null +++ b/maths/factorial_recursive.py @@ -0,0 +1,13 @@ +def fact(n): + """ + Return 1, if n is 1 or below, + otherwise, return n * fact(n-1). + """ + return 1 if n <= 1 else n * fact(n-1) + +""" +Shown factorial for i, +where i ranges from 1 to 20. +""" +for i in range(1,21): + print(i, ": ", fact(i), sep='') diff --git a/Maths/find_lcm.py b/maths/find_lcm.py similarity index 100% rename from Maths/find_lcm.py rename to maths/find_lcm.py From 2fc2ae3f32fad16226c88358cb7c9e4e5c790a8f Mon Sep 17 00:00:00 2001 From: Viraat Das Date: Thu, 25 Apr 2019 07:48:14 -0400 Subject: [PATCH 25/51] Created a generalized algo to edmonds karp (#724) Edmonds Karp algorithm is traditionally with only one source and one sink. What do you do if you have multiple sources and sinks? This algorithm is a generalized algorithm that regardless of however many sinks and sources you have, will allow you to use this algorithm. It does this by using the traditional algorithm but adding an artificial source and sink that allows with "infinite" weight. 
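The reduction described above can be sketched independently of the FlowNetwork class this patch adds. Given a capacity matrix with several sources and sinks, wrap it with one artificial source and one artificial sink whose edges are "infinite" (i.e., larger than any possible real flow), then run any single-source/single-sink max-flow algorithm on the wrapped network. A hedged sketch (the add_super_source_sink helper is hypothetical, not part of this patch):

    def add_super_source_sink(capacity, sources, sinks):
        n = len(capacity)
        big = sum(sum(row) for row in capacity) + 1   # effectively infinite capacity
        new_n = n + 2
        wrapped = [[0] * new_n for _ in range(new_n)]
        for i in range(n):                            # copy original capacities, shifted by one
            for j in range(n):
                wrapped[i + 1][j + 1] = capacity[i][j]
        for s in sources:                             # artificial source (node 0) feeds every real source
            wrapped[0][s + 1] = big
        for t in sinks:                               # every real sink feeds the artificial sink (last node)
            wrapped[t + 1][new_n - 1] = big
        return wrapped

    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    wrapped = add_super_source_sink(graph, sources=[0], sinks=[3])
    # The max flow from node 0 to node 5 in `wrapped` equals the multi-source answer.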
--- Graphs/edmonds_karp_Multiple_SourceAndSink.py | 182 ++++++++++++++++++ 1 file changed, 182 insertions(+) create mode 100644 Graphs/edmonds_karp_Multiple_SourceAndSink.py diff --git a/Graphs/edmonds_karp_Multiple_SourceAndSink.py b/Graphs/edmonds_karp_Multiple_SourceAndSink.py new file mode 100644 index 000000000000..d231ac2c4cc3 --- /dev/null +++ b/Graphs/edmonds_karp_Multiple_SourceAndSink.py @@ -0,0 +1,182 @@ +class FlowNetwork: + def __init__(self, graph, sources, sinks): + self.sourceIndex = None + self.sinkIndex = None + self.graph = graph + + self._normalizeGraph(sources, sinks) + self.verticesCount = len(graph) + self.maximumFlowAlgorithm = None + + # make only one source and one sink + def _normalizeGraph(self, sources, sinks): + if sources is int: + sources = [sources] + if sinks is int: + sinks = [sinks] + + if len(sources) == 0 or len(sinks) == 0: + return + + self.sourceIndex = sources[0] + self.sinkIndex = sinks[0] + + # make fake vertex if there are more + # than one source or sink + if len(sources) > 1 or len(sinks) > 1: + maxInputFlow = 0 + for i in sources: + maxInputFlow += sum(self.graph[i]) + + + size = len(self.graph) + 1 + for room in self.graph: + room.insert(0, 0) + self.graph.insert(0, [0] * size) + for i in sources: + self.graph[0][i + 1] = maxInputFlow + self.sourceIndex = 0 + + size = len(self.graph) + 1 + for room in self.graph: + room.append(0) + self.graph.append([0] * size) + for i in sinks: + self.graph[i + 1][size - 1] = maxInputFlow + self.sinkIndex = size - 1 + + + def findMaximumFlow(self): + if self.maximumFlowAlgorithm is None: + raise Exception("You need to set maximum flow algorithm before.") + if self.sourceIndex is None or self.sinkIndex is None: + return 0 + + self.maximumFlowAlgorithm.execute() + return self.maximumFlowAlgorithm.getMaximumFlow() + + def setMaximumFlowAlgorithm(self, Algorithm): + self.maximumFlowAlgorithm = Algorithm(self) + + +class FlowNetworkAlgorithmExecutor(object): + def __init__(self, flowNetwork): + self.flowNetwork = flowNetwork + self.verticesCount = flowNetwork.verticesCount + self.sourceIndex = flowNetwork.sourceIndex + self.sinkIndex = flowNetwork.sinkIndex + # it's just a reference, so you shouldn't change + # it in your algorithms, use deep copy before doing that + self.graph = flowNetwork.graph + self.executed = False + + def execute(self): + if not self.executed: + self._algorithm() + self.executed = True + + # You should override it + def _algorithm(self): + pass + + + +class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor): + def __init__(self, flowNetwork): + super(MaximumFlowAlgorithmExecutor, self).__init__(flowNetwork) + # use this to save your result + self.maximumFlow = -1 + + def getMaximumFlow(self): + if not self.executed: + raise Exception("You should execute algorithm before using its result!") + + return self.maximumFlow + +class PushRelabelExecutor(MaximumFlowAlgorithmExecutor): + def __init__(self, flowNetwork): + super(PushRelabelExecutor, self).__init__(flowNetwork) + + self.preflow = [[0] * self.verticesCount for i in range(self.verticesCount)] + + self.heights = [0] * self.verticesCount + self.excesses = [0] * self.verticesCount + + def _algorithm(self): + self.heights[self.sourceIndex] = self.verticesCount + + # push some substance to graph + for nextVertexIndex, bandwidth in enumerate(self.graph[self.sourceIndex]): + self.preflow[self.sourceIndex][nextVertexIndex] += bandwidth + self.preflow[nextVertexIndex][self.sourceIndex] -= bandwidth + self.excesses[nextVertexIndex] += 
bandwidth + + # Relabel-to-front selection rule + verticesList = [i for i in range(self.verticesCount) + if i != self.sourceIndex and i != self.sinkIndex] + + # move through list + i = 0 + while i < len(verticesList): + vertexIndex = verticesList[i] + previousHeight = self.heights[vertexIndex] + self.processVertex(vertexIndex) + if self.heights[vertexIndex] > previousHeight: + # if it was relabeled, swap elements + # and start from 0 index + verticesList.insert(0, verticesList.pop(i)) + i = 0 + else: + i += 1 + + self.maximumFlow = sum(self.preflow[self.sourceIndex]) + + def processVertex(self, vertexIndex): + while self.excesses[vertexIndex] > 0: + for neighbourIndex in range(self.verticesCount): + # if it's neighbour and current vertex is higher + if self.graph[vertexIndex][neighbourIndex] - self.preflow[vertexIndex][neighbourIndex] > 0\ + and self.heights[vertexIndex] > self.heights[neighbourIndex]: + self.push(vertexIndex, neighbourIndex) + + self.relabel(vertexIndex) + + def push(self, fromIndex, toIndex): + preflowDelta = min(self.excesses[fromIndex], + self.graph[fromIndex][toIndex] - self.preflow[fromIndex][toIndex]) + self.preflow[fromIndex][toIndex] += preflowDelta + self.preflow[toIndex][fromIndex] -= preflowDelta + self.excesses[fromIndex] -= preflowDelta + self.excesses[toIndex] += preflowDelta + + def relabel(self, vertexIndex): + minHeight = None + for toIndex in range(self.verticesCount): + if self.graph[vertexIndex][toIndex] - self.preflow[vertexIndex][toIndex] > 0: + if minHeight is None or self.heights[toIndex] < minHeight: + minHeight = self.heights[toIndex] + + if minHeight is not None: + self.heights[vertexIndex] = minHeight + 1 + +if __name__ == '__main__': + entrances = [0] + exits = [3] + # graph = [ + # [0, 0, 4, 6, 0, 0], + # [0, 0, 5, 2, 0, 0], + # [0, 0, 0, 0, 4, 4], + # [0, 0, 0, 0, 6, 6], + # [0, 0, 0, 0, 0, 0], + # [0, 0, 0, 0, 0, 0], + # ] + graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]] + + # prepare our network + flowNetwork = FlowNetwork(graph, entrances, exits) + # set algorithm + flowNetwork.setMaximumFlowAlgorithm(PushRelabelExecutor) + # and calculate + maximumFlow = flowNetwork.findMaximumFlow() + + print("maximum flow is {}".format(maximumFlow)) From 48553da785f9233311ae59dd6eb93f79cf675965 Mon Sep 17 00:00:00 2001 From: sakuralethe Date: Fri, 26 Apr 2019 17:43:51 +0800 Subject: [PATCH 26/51] variable in function should be lowercase (#768) --- sorts/quick_sort.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/sorts/quick_sort.py b/sorts/quick_sort.py index 136cbc021669..e01d319a4b29 100644 --- a/sorts/quick_sort.py +++ b/sorts/quick_sort.py @@ -12,7 +12,7 @@ from __future__ import print_function -def quick_sort(ARRAY): +def quick_sort(collection): """Pure implementation of quick sort algorithm in Python :param collection: some mutable ordered collection with heterogeneous @@ -29,14 +29,14 @@ def quick_sort(ARRAY): >>> quick_sort([-2, -5, -45]) [-45, -5, -2] """ - ARRAY_LENGTH = len(ARRAY) - if( ARRAY_LENGTH <= 1): - return ARRAY + length = len(collection) + if length <= 1: + return collection else: - PIVOT = ARRAY[0] - GREATER = [ element for element in ARRAY[1:] if element > PIVOT ] - LESSER = [ element for element in ARRAY[1:] if element <= PIVOT ] - return quick_sort(LESSER) + [PIVOT] + quick_sort(GREATER) + pivot = collection[0] + greater = [element for element in collection[1:] if element > pivot] + lesser = [element for element in collection[1:] if element <= pivot] + return 
quick_sort(lesser) + [pivot] + quick_sort(greater) if __name__ == '__main__': From 06dbef04a0700095b156c26b113bf08466a46c90 Mon Sep 17 00:00:00 2001 From: Gattlin Walker Date: Tue, 30 Apr 2019 08:16:42 -0500 Subject: [PATCH 27/51] Adding quick sort where random pivot point is chosen (#774) --- sorts/random_pivot_quick_sort.py | 33 ++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 sorts/random_pivot_quick_sort.py diff --git a/sorts/random_pivot_quick_sort.py b/sorts/random_pivot_quick_sort.py new file mode 100644 index 000000000000..fc8f90486ee6 --- /dev/null +++ b/sorts/random_pivot_quick_sort.py @@ -0,0 +1,33 @@ +""" +Picks the random index as the pivot +""" +import random + +def partition(A, left_index, right_index): + pivot = A[left_index] + i = left_index + 1 + for j in range(left_index + 1, right_index): + if A[j] < pivot: + A[j], A[i] = A[i], A[j] + i += 1 + A[left_index], A[i - 1] = A[i - 1], A[left_index] + return i - 1 + +def quick_sort_random(A, left, right): + if left < right: + pivot = random.randint(left, right - 1) + A[pivot], A[left] = A[left], A[pivot] #switches the pivot with the left most bound + pivot_index = partition(A, left, right) + quick_sort_random(A, left, pivot_index) #recursive quicksort to the left of the pivot point + quick_sort_random(A, pivot_index + 1, right) #recursive quicksort to the right of the pivot point + +def main(): + user_input = input('Enter numbers separated by a comma:\n').strip() + arr = [int(item) for item in user_input.split(',')] + + quick_sort_random(arr, 0, len(arr)) + + print(arr) + +if __name__ == "__main__": + main() \ No newline at end of file From 7b89d03dd7d80087fa95bcf7a1983fe3d8b424ca Mon Sep 17 00:00:00 2001 From: yolstatrisch Date: Thu, 2 May 2019 00:44:21 +0800 Subject: [PATCH 28/51] Added an O(1) solution to problem 002 (#776) * Added an O(1) solution to problem 002 * Removed comments from sol3.py that were accidentally added to sol4.py --- project_euler/problem_02/sol4.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 project_euler/problem_02/sol4.py diff --git a/project_euler/problem_02/sol4.py b/project_euler/problem_02/sol4.py new file mode 100644 index 000000000000..64bae65f49b4 --- /dev/null +++ b/project_euler/problem_02/sol4.py @@ -0,0 +1,13 @@ +import math +from decimal import * + +getcontext().prec = 100 +phi = (Decimal(5) ** Decimal(0.5) + 1) / Decimal(2) + +n = Decimal(int(input()) - 1) + +index = (math.floor(math.log(n * (phi + 2), phi) - 1) // 3) * 3 + 2 +num = round(phi ** Decimal(index + 1)) / (phi + 2) +sum = num // 2 + +print(int(sum)) From c5c3a74f8fbeed522288a63099a2121f9fe6bddb Mon Sep 17 00:00:00 2001 From: Anup Kumar Panwar <1anuppanwar@gmail.com> Date: Sat, 4 May 2019 15:43:37 +0530 Subject: [PATCH 29/51] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index faebd313507a..1e43deb6bdef 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ # The Algorithms - Python +[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=JP3BLXA6KMDGW) + ### All algorithms implemented in Python (for education) From e22ea7e380d75012990c8bb1648081b229cdd6fa Mon Sep 17 00:00:00 2001 From: Anup Kumar Panwar <1anuppanwar@gmail.com> Date: Sat, 4 May 2019 21:53:06 +0530 Subject: [PATCH 30/51] Update Directed and Undirected (Weighted) Graph.py --- graphs/Directed and Undirected (Weighted) Graph.py | 4 ++++ 1 file changed, 4 
insertions(+) diff --git a/graphs/Directed and Undirected (Weighted) Graph.py b/graphs/Directed and Undirected (Weighted) Graph.py index 68977de8d311..a31a4a96d6d0 100644 --- a/graphs/Directed and Undirected (Weighted) Graph.py +++ b/graphs/Directed and Undirected (Weighted) Graph.py @@ -152,6 +152,7 @@ def cycle_nodes(self): parent = -2 indirect_parents = [] ss = s + on_the_way_back = False anticipating_nodes = set() while True: @@ -199,6 +200,7 @@ def has_cycle(self): parent = -2 indirect_parents = [] ss = s + on_the_way_back = False anticipating_nodes = set() while True: @@ -367,6 +369,7 @@ def cycle_nodes(self): parent = -2 indirect_parents = [] ss = s + on_the_way_back = False anticipating_nodes = set() while True: @@ -414,6 +417,7 @@ def has_cycle(self): parent = -2 indirect_parents = [] ss = s + on_the_way_back = False anticipating_nodes = set() while True: From 7677c370115faa49759b72f5d7c9debfe1081e35 Mon Sep 17 00:00:00 2001 From: weixuanhu <44716380+weixuanhu@users.noreply.github.com> Date: Mon, 6 May 2019 17:54:31 +0800 Subject: [PATCH 31/51] update 'sorted' to 'ascending sorted' in comments (#789) To avoid confusion all 'sorted' to 'ascending sorted' in comments --- searches/binary_search.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/searches/binary_search.py b/searches/binary_search.py index 7df45883c09a..1d5da96586cd 100644 --- a/searches/binary_search.py +++ b/searches/binary_search.py @@ -21,10 +21,10 @@ def binary_search(sorted_collection, item): """Pure implementation of binary search algorithm in Python - Be careful collection must be sorted, otherwise result will be + Be careful collection must be ascending sorted, otherwise result will be unpredictable - :param sorted_collection: some sorted collection with comparable items + :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found @@ -60,10 +60,10 @@ def binary_search(sorted_collection, item): def binary_search_std_lib(sorted_collection, item): """Pure implementation of binary search algorithm in Python using stdlib - Be careful collection must be sorted, otherwise result will be + Be careful collection must be ascending sorted, otherwise result will be unpredictable - :param sorted_collection: some sorted collection with comparable items + :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found @@ -89,11 +89,11 @@ def binary_search_by_recursion(sorted_collection, item, left, right): """Pure implementation of binary search algorithm in Python by recursion - Be careful collection must be sorted, otherwise result will be + Be careful collection must be ascending sorted, otherwise result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) - :param sorted_collection: some sorted collection with comparable items + :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found @@ -123,11 +123,11 @@ def binary_search_by_recursion(sorted_collection, item, left, right): return binary_search_by_recursion(sorted_collection, item, midpoint+1, right) def __assert_sorted(collection): - """Check if collection is sorted, if not - raises :py:class:`ValueError` + """Check if collection is ascending 
sorted, if not - raises :py:class:`ValueError` :param collection: collection - :return: True if collection is sorted - :raise: :py:class:`ValueError` if collection is not sorted + :return: True if collection is ascending sorted + :raise: :py:class:`ValueError` if collection is not ascending sorted Examples: >>> __assert_sorted([0, 1, 2, 4]) @@ -136,10 +136,10 @@ def __assert_sorted(collection): >>> __assert_sorted([10, -1, 5]) Traceback (most recent call last): ... - ValueError: Collection must be sorted + ValueError: Collection must be ascending sorted """ if collection != sorted(collection): - raise ValueError('Collection must be sorted') + raise ValueError('Collection must be ascending sorted') return True @@ -150,7 +150,7 @@ def __assert_sorted(collection): try: __assert_sorted(collection) except ValueError: - sys.exit('Sequence must be sorted to apply binary search') + sys.exit('Sequence must be ascending sorted to apply binary search') target_input = raw_input('Enter a single number to be found in the list:\n') target = int(target_input) From 30a358298385e0e29b70a841d0b4019dc235f3a3 Mon Sep 17 00:00:00 2001 From: Lorenz Nickel Date: Wed, 8 May 2019 21:48:30 +0200 Subject: [PATCH 32/51] fix: replaced outdated url (#791) http://www.lpb-riannetrujillo.com/blog/python-fractal/ moved to http://www.riannetrujillo.com/blog/python-fractal/ --- other/sierpinski_triangle.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/other/sierpinski_triangle.py b/other/sierpinski_triangle.py index 6a06058fe03e..329a8ce5c43f 100644 --- a/other/sierpinski_triangle.py +++ b/other/sierpinski_triangle.py @@ -21,7 +21,7 @@ Usage: - $python sierpinski_triangle.py -Credits: This code was written by editing the code from http://www.lpb-riannetrujillo.com/blog/python-fractal/ +Credits: This code was written by editing the code from http://www.riannetrujillo.com/blog/python-fractal/ ''' import turtle @@ -64,4 +64,4 @@ def triangle(points,depth): depth-1) -triangle(points,int(sys.argv[1])) \ No newline at end of file +triangle(points,int(sys.argv[1])) From 56513cb21f759ac26b31ac1edcb45d886a97f715 Mon Sep 17 00:00:00 2001 From: Junth Basnet <25685098+Junth19@users.noreply.github.com> Date: Fri, 10 May 2019 16:48:05 +0545 Subject: [PATCH 33/51] add-binary-exponentiation (#790) --- maths/BinaryExponentiation.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 maths/BinaryExponentiation.py diff --git a/maths/BinaryExponentiation.py b/maths/BinaryExponentiation.py new file mode 100644 index 000000000000..2411cd58a76b --- /dev/null +++ b/maths/BinaryExponentiation.py @@ -0,0 +1,25 @@ +#Author : Junth Basnet +#Time Complexity : O(logn) + +def binary_exponentiation(a, n): + + if (n == 0): + return 1 + + elif (n % 2 == 1): + return binary_exponentiation(a, n - 1) * a + + else: + b = binary_exponentiation(a, n / 2) + return b * b + + +try: + base = int(input('Enter Base : ')) + power = int(input("Enter Power : ")) +except ValueError: + print ("Invalid literal for integer") + +result = binary_exponentiation(base, power) +print("{}^({}) : {}".format(base, power, result)) + From 36828b106f7905ecc0c0776e40c99929728a91a9 Mon Sep 17 00:00:00 2001 From: Julien Castiaux Date: Sat, 11 May 2019 13:20:25 +0200 Subject: [PATCH 34/51] [FIX] maths/PrimeCheck (#796) Current implementation is buggy and hard to read. * Negative values were raising a TypeError due to `math.sqrt` * 1 was considered prime, it is not. * 2 was considered not prime, it is. 
The implementation has been corrected to fix the bugs and to enhance readability. A docstring has been added with the definition of a prime number. A complete test suite has been written, it tests the 10 first primes, a negative value, 0, 1 and some not prime numbers. closes #795 --- maths/PrimeCheck.py | 55 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 7 deletions(-) diff --git a/maths/PrimeCheck.py b/maths/PrimeCheck.py index e0c51d77a038..8c5c181689dd 100644 --- a/maths/PrimeCheck.py +++ b/maths/PrimeCheck.py @@ -1,13 +1,54 @@ import math +import unittest + + def primeCheck(number): - if number % 2 == 0 and number > 2: + """ + A number is prime if it has exactly two dividers: 1 and itself. + """ + if number < 2: + # Negatives, 0 and 1 are not primes return False - return all(number % i for i in range(3, int(math.sqrt(number)) + 1, 2)) + if number < 4: + # 2 and 3 are primes + return True + if number % 2 == 0: + # Even values are not primes + return False + + # Except 2, all primes are odd. If any odd value divide + # the number, then that number is not prime. + odd_numbers = range(3, int(math.sqrt(number)) + 1, 2) + return not any(number % i == 0 for i in odd_numbers) + + +class Test(unittest.TestCase): + def test_primes(self): + self.assertTrue(primeCheck(2)) + self.assertTrue(primeCheck(3)) + self.assertTrue(primeCheck(5)) + self.assertTrue(primeCheck(7)) + self.assertTrue(primeCheck(11)) + self.assertTrue(primeCheck(13)) + self.assertTrue(primeCheck(17)) + self.assertTrue(primeCheck(19)) + self.assertTrue(primeCheck(23)) + self.assertTrue(primeCheck(29)) + + def test_not_primes(self): + self.assertFalse(primeCheck(-19), + "Negative numbers are not prime.") + self.assertFalse(primeCheck(0), + "Zero doesn't have any divider, primes must have two") + self.assertFalse(primeCheck(1), + "One just have 1 divider, primes must have two.") + self.assertFalse(primeCheck(2 * 2)) + self.assertFalse(primeCheck(2 * 3)) + self.assertFalse(primeCheck(3 * 3)) + self.assertFalse(primeCheck(3 * 5)) + self.assertFalse(primeCheck(3 * 5 * 7)) -def main(): - print(primeCheck(37)) - print(primeCheck(100)) - print(primeCheck(77)) if __name__ == '__main__': - main() + unittest.main() + From d8badcc6d5568e3ed8b060305f6d02e74019f1a4 Mon Sep 17 00:00:00 2001 From: Anup Kumar Panwar <1anuppanwar@gmail.com> Date: Sun, 12 May 2019 09:10:56 +0530 Subject: [PATCH 35/51] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 1e43deb6bdef..9b61f1b63287 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ # The Algorithms - Python -[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=JP3BLXA6KMDGW) +[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.me/TheAlgorithms/100) ### All algorithms implemented in Python (for education) From 3f7bec6c00c089490c8b5d38686373ca6e1ea97b Mon Sep 17 00:00:00 2001 From: Bhushan Borole <37565807+bhushan-borole@users.noreply.github.com> Date: Sun, 12 May 2019 17:16:47 +0530 Subject: [PATCH 36/51] Added page-rank algorithm implementation (#780) * Added page-rank algorithm implementation * changed init variables --- Graphs/pagerank.py | 72 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 Graphs/pagerank.py diff --git a/Graphs/pagerank.py b/Graphs/pagerank.py new file mode 100644 index 000000000000..59f15a99e6b2 --- /dev/null 
+++ b/Graphs/pagerank.py @@ -0,0 +1,72 @@ +''' +Author: https://github.com/bhushan-borole +''' +''' +The input graph for the algorithm is: + + A B C +A 0 1 1 +B 0 0 1 +C 1 0 0 + +''' + +graph = [[0, 1, 1], + [0, 0, 1], + [1, 0, 0]] + + +class Node: + def __init__(self, name): + self.name = name + self.inbound = [] + self.outbound = [] + + def add_inbound(self, node): + self.inbound.append(node) + + def add_outbound(self, node): + self.outbound.append(node) + + def __repr__(self): + return 'Node {}: Inbound: {} ; Outbound: {}'.format(self.name, + self.inbound, + self.outbound) + + +def page_rank(nodes, limit=3, d=0.85): + ranks = {} + for node in nodes: + ranks[node.name] = 1 + + outbounds = {} + for node in nodes: + outbounds[node.name] = len(node.outbound) + + for i in range(limit): + print("======= Iteration {} =======".format(i+1)) + for j, node in enumerate(nodes): + ranks[node.name] = (1 - d) + d * sum([ ranks[ib]/outbounds[ib] for ib in node.inbound ]) + print(ranks) + + +def main(): + names = list(input('Enter Names of the Nodes: ').split()) + + nodes = [Node(name) for name in names] + + for ri, row in enumerate(graph): + for ci, col in enumerate(row): + if col == 1: + nodes[ci].add_inbound(names[ri]) + nodes[ri].add_outbound(names[ci]) + + print("======= Nodes =======") + for node in nodes: + print(node) + + page_rank(nodes) + + +if __name__ == '__main__': + main() \ No newline at end of file From 70bb6b2f18bec6cadca052d96e526d014d18ff32 Mon Sep 17 00:00:00 2001 From: Ravi Patel Date: Mon, 13 May 2019 01:15:27 -0400 Subject: [PATCH 37/51] Added Huffman Coding Algorithm (#798) --- compression/huffman.py | 87 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 compression/huffman.py diff --git a/compression/huffman.py b/compression/huffman.py new file mode 100644 index 000000000000..b6238b66e9fd --- /dev/null +++ b/compression/huffman.py @@ -0,0 +1,87 @@ +import sys + +class Letter: + def __init__(self, letter, freq): + self.letter = letter + self.freq = freq + self.bitstring = "" + + def __repr__(self): + return f'{self.letter}:{self.freq}' + + +class TreeNode: + def __init__(self, freq, left, right): + self.freq = freq + self.left = left + self.right = right + + +def parse_file(file_path): + """ + Read the file and build a dict of all letters and their + frequences, then convert the dict into a list of Letters. + """ + chars = {} + with open(file_path) as f: + while True: + c = f.read(1) + if not c: + break + chars[c] = chars[c] + 1 if c in chars.keys() else 1 + letters = [] + for char, freq in chars.items(): + letter = Letter(char, freq) + letters.append(letter) + letters.sort(key=lambda l: l.freq) + return letters + +def build_tree(letters): + """ + Run through the list of Letters and build the min heap + for the Huffman Tree. 
+ """ + while len(letters) > 1: + left = letters.pop(0) + right = letters.pop(0) + total_freq = left.freq + right.freq + node = TreeNode(total_freq, left, right) + letters.append(node) + letters.sort(key=lambda l: l.freq) + return letters[0] + +def traverse_tree(root, bitstring): + """ + Recursively traverse the Huffman Tree to set each + Letter's bitstring, and return the list of Letters + """ + if type(root) is Letter: + root.bitstring = bitstring + return [root] + letters = [] + letters += traverse_tree(root.left, bitstring + "0") + letters += traverse_tree(root.right, bitstring + "1") + return letters + +def huffman(file_path): + """ + Parse the file, build the tree, then run through the file + again, using the list of Letters to find and print out the + bitstring for each letter. + """ + letters_list = parse_file(file_path) + root = build_tree(letters_list) + letters = traverse_tree(root, "") + print(f'Huffman Coding of {file_path}: ') + with open(file_path) as f: + while True: + c = f.read(1) + if not c: + break + le = list(filter(lambda l: l.letter == c, letters))[0] + print(le.bitstring, end=" ") + print() + +if __name__ == "__main__": + # pass the file path to the huffman function + huffman(sys.argv[1]) From 3c40fda6a3ed8f59f1afc11b653be505557a41ef Mon Sep 17 00:00:00 2001 From: "Tommy.Liu" <447569003@qq.com> Date: Tue, 14 May 2019 18:17:25 +0800 Subject: [PATCH 38/51] More elegant coding for merge_sort_fastest (#804) * More elegant coding for merge_sort_fastest * More elegant coding for merge_sort --- sorts/merge_sort.py | 46 +++++++++++--------------------- sorts/merge_sort_fastest.py | 53 ++++++++++++++++++++++++++++--------- 2 files changed, 55 insertions(+), 44 deletions(-) diff --git a/sorts/merge_sort.py b/sorts/merge_sort.py index ca4d319fa7f1..4a6201a40cb4 100644 --- a/sorts/merge_sort.py +++ b/sorts/merge_sort.py @@ -29,36 +29,20 @@ def merge_sort(collection): >>> merge_sort([-2, -5, -45]) [-45, -5, -2] """ - length = len(collection) - if length > 1: - midpoint = length // 2 - left_half = merge_sort(collection[:midpoint]) - right_half = merge_sort(collection[midpoint:]) - i = 0 - j = 0 - k = 0 - left_length = len(left_half) - right_length = len(right_half) - while i < left_length and j < right_length: - if left_half[i] < right_half[j]: - collection[k] = left_half[i] - i += 1 - else: - collection[k] = right_half[j] - j += 1 - k += 1 - - while i < left_length: - collection[k] = left_half[i] - i += 1 - k += 1 - - while j < right_length: - collection[k] = right_half[j] - j += 1 - k += 1 - - return collection + def merge(left, right): + '''merge left and right + :param left: left collection + :param right: right collection + :return: merge result + ''' + result = [] + while left and right: + result.append(left.pop(0) if left[0] <= right[0] else right.pop(0)) + return result + left + right + if len(collection) <= 1: + return collection + mid = len(collection) // 2 + return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:])) if __name__ == '__main__': @@ -69,4 +53,4 @@ def merge_sort(collection): user_input = raw_input('Enter numbers separated by a comma:\n').strip() unsorted = [int(item) for item in user_input.split(',')] - print(merge_sort(unsorted)) + print(*merge_sort(unsorted), sep=',') diff --git a/sorts/merge_sort_fastest.py b/sorts/merge_sort_fastest.py index 9fc9275aacba..86cb8ae1a699 100644 --- a/sorts/merge_sort_fastest.py +++ b/sorts/merge_sort_fastest.py @@ -1,19 +1,46 @@ ''' -Python implementation of merge sort algorithm. 
+Python implementation of the fastest merge sort algorithm. Takes an average of 0.6 microseconds to sort a list of length 1000 items. Best Case Scenario : O(n) Worst Case Scenario : O(n) ''' -def merge_sort(LIST): - start = [] - end = [] - while len(LIST) > 1: - a = min(LIST) - b = max(LIST) - start.append(a) - end.append(b) - LIST.remove(a) - LIST.remove(b) - if LIST: start.append(LIST[0]) +from __future__ import print_function + + +def merge_sort(collection): + """Pure implementation of the fastest merge sort algorithm in Python + + :param collection: some mutable ordered collection with heterogeneous + comparable items inside + :return: a collection ordered by ascending + + Examples: + >>> merge_sort([0, 5, 3, 2, 2]) + [0, 2, 2, 3, 5] + + >>> merge_sort([]) + [] + + >>> merge_sort([-2, -5, -45]) + [-45, -5, -2] + """ + start, end = [], [] + while len(collection) > 1: + min_one, max_one = min(collection), max(collection) + start.append(min_one) + end.append(max_one) + collection.remove(min_one) + collection.remove(max_one) end.reverse() - return (start + end) + return start + collection + end + + +if __name__ == '__main__': + try: + raw_input # Python 2 + except NameError: + raw_input = input # Python 3 + + user_input = raw_input('Enter numbers separated by a comma:\n').strip() + unsorted = [int(item) for item in user_input.split(',')] + print(*merge_sort(unsorted), sep=',') From c4d16820bc062ebb1f311e74885e7ca0e2fa5973 Mon Sep 17 00:00:00 2001 From: Erfan Alimohammadi Date: Tue, 14 May 2019 21:45:53 +0430 Subject: [PATCH 39/51] Fix typo (#806) --- strings/manacher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/strings/manacher.py b/strings/manacher.py index 9a44b19ba77a..e73e173b43e0 100644 --- a/strings/manacher.py +++ b/strings/manacher.py @@ -1,4 +1,4 @@ -# calculate palindromic length from center with incresmenting difference +# calculate palindromic length from center with incrementing difference def palindromic_length( center, diff, string): if center-diff == -1 or center+diff == len(string) or string[center-diff] != string[center+diff] : return 0 From 76061ab2cc7f4e07fa7b2c952ca715cc6d09d7c2 Mon Sep 17 00:00:00 2001 From: Reshad Hasan Date: Thu, 16 May 2019 17:20:27 +0600 Subject: [PATCH 40/51] added eulerian path and circuit finding algorithm (#787) --- ...n path and circuit for undirected graph.py | 93 +++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 Graphs/Eulerian path and circuit for undirected graph.py diff --git a/Graphs/Eulerian path and circuit for undirected graph.py b/Graphs/Eulerian path and circuit for undirected graph.py new file mode 100644 index 000000000000..c6c6a1a25f03 --- /dev/null +++ b/Graphs/Eulerian path and circuit for undirected graph.py @@ -0,0 +1,93 @@ +# Eulerian Path is a path in graph that visits every edge exactly once. +# Eulerian Circuit is an Eulerian Path which starts and ends on the same +# vertex. 
+# time complexity is O(V+E) +# space complexity is O(VE) + + +# using dfs for finding eulerian path traversal +def dfs(u, graph, visited_edge, path=[]): + path = path + [u] + for v in graph[u]: + if visited_edge[u][v] == False: + visited_edge[u][v], visited_edge[v][u] = True, True + path = dfs(v, graph, visited_edge, path) + return path + + +# for checking in graph has euler path or circuit +def check_circuit_or_path(graph, max_node): + odd_degree_nodes = 0 + odd_node = -1 + for i in range(max_node): + if i not in graph.keys(): + continue + if len(graph[i]) % 2 == 1: + odd_degree_nodes += 1 + odd_node = i + if odd_degree_nodes == 0: + return 1, odd_node + if odd_degree_nodes == 2: + return 2, odd_node + return 3, odd_node + + +def check_euler(graph, max_node): + visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)] + check, odd_node = check_circuit_or_path(graph, max_node) + if check == 3: + print("graph is not Eulerian") + print("no path") + return + start_node = 1 + if check == 2: + start_node = odd_node + print("graph has a Euler path") + if check == 1: + print("graph has a Euler cycle") + path = dfs(start_node, graph, visited_edge) + print(path) + + +def main(): + G1 = { + 1: [2, 3, 4], + 2: [1, 3], + 3: [1, 2], + 4: [1, 5], + 5: [4] + } + G2 = { + 1: [2, 3, 4, 5], + 2: [1, 3], + 3: [1, 2], + 4: [1, 5], + 5: [1, 4] + } + G3 = { + 1: [2, 3, 4], + 2: [1, 3, 4], + 3: [1, 2], + 4: [1, 2, 5], + 5: [4] + } + G4 = { + 1: [2, 3], + 2: [1, 3], + 3: [1, 2], + } + G5 = { + 1: [], + 2: [] + # all degree is zero + } + max_node = 10 + check_euler(G1, max_node) + check_euler(G2, max_node) + check_euler(G3, max_node) + check_euler(G4, max_node) + check_euler(G5, max_node) + + +if __name__ == "__main__": + main() From c47c1ab03ce80963d5dcd2136d03555f3b283055 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=ADkolas=20Vargas?= Date: Thu, 16 May 2019 08:20:42 -0300 Subject: [PATCH 41/51] enhancement (#803) --- compression/huffman.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/compression/huffman.py b/compression/huffman.py index b6238b66e9fd..7417551ba209 100644 --- a/compression/huffman.py +++ b/compression/huffman.py @@ -18,7 +18,7 @@ def __init__(self, freq, left, right): def parse_file(file_path): - """ + """ Read the file and build a dict of all letters and their frequences, then convert the dict into a list of Letters. """ @@ -29,15 +29,10 @@ def parse_file(file_path): if not c: break chars[c] = chars[c] + 1 if c in chars.keys() else 1 - letters = [] - for char, freq in chars.items(): - letter = Letter(char, freq) - letters.append(letter) - letters.sort(key=lambda l: l.freq) - return letters + return sorted([Letter(c, f) for c, f in chars.items()], key=lambda l: l.freq) def build_tree(letters): - """ + """ Run through the list of Letters and build the min heap for the Huffman Tree. """ @@ -51,7 +46,7 @@ def build_tree(letters): return letters[0] def traverse_tree(root, bitstring): - """ + """ Recursively traverse the Huffman Tree to set each Letter's bitstring, and return the list of Letters """ @@ -64,9 +59,9 @@ def traverse_tree(root, bitstring): return letters def huffman(file_path): - """ + """ Parse the file, build the tree, then run through the file - again, using the list of Letters to find and print out the + again, using the list of Letters to find and print out the bitstring for each letter. 
""" letters_list = parse_file(file_path) From 13c0c166d8f80398de39ab41632fd54be86ae2cc Mon Sep 17 00:00:00 2001 From: ImNandha <49323522+ImNandha@users.noreply.github.com> Date: Thu, 16 May 2019 16:53:23 +0530 Subject: [PATCH 42/51] Update graph.py (#809) --- graphs/graph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/graphs/graph.py b/graphs/graph.py index 9bd61559dcbf..0c981c39d320 100644 --- a/graphs/graph.py +++ b/graphs/graph.py @@ -4,7 +4,7 @@ from __future__ import print_function # Author: OMKAR PATHAK -# We can use Python's dictionary for constructing the graph +# We can use Python's dictionary for constructing the graph. class AdjacencyList(object): def __init__(self): From a65efd42c4683b628338b84c822d22eab199c058 Mon Sep 17 00:00:00 2001 From: Erfan Alimohammadi Date: Thu, 16 May 2019 15:54:56 +0430 Subject: [PATCH 43/51] Implement check_bipartite_graph using DFS. (#808) --- graphs/check_bipartite_graph_dfs.py | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 graphs/check_bipartite_graph_dfs.py diff --git a/graphs/check_bipartite_graph_dfs.py b/graphs/check_bipartite_graph_dfs.py new file mode 100644 index 000000000000..eeb3a84b7a15 --- /dev/null +++ b/graphs/check_bipartite_graph_dfs.py @@ -0,0 +1,33 @@ +# Check whether Graph is Bipartite or Not using DFS + +# A Bipartite Graph is a graph whose vertices can be divided into two independent sets, +# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex +# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V, +# or u belongs to V and v to U. We can also say that there is no edge that connects +# vertices of same set. +def check_bipartite_dfs(l): + visited = [False] * len(l) + color = [-1] * len(l) + + def dfs(v, c): + visited[v] = True + color[v] = c + for u in l[v]: + if not visited[u]: + dfs(u, 1 - c) + + for i in range(len(l)): + if not visited[i]: + dfs(i, 0) + + for i in range(len(l)): + for j in l[i]: + if color[i] == color[j]: + return False + + return True + + +# Adjacency list of graph +l = {0:[1,3], 1:[0,2], 2:[1,3], 3:[0,2], 4: []} +print(check_bipartite_dfs(l)) From 5b86928c4b6ab23cbff51ddf9023ac230d4dff26 Mon Sep 17 00:00:00 2001 From: cclauss Date: Thu, 16 May 2019 13:26:46 +0200 Subject: [PATCH 44/51] Use ==/!= to compare str, bytes, and int literals (#767) * Travis CI: Add more flake8 tests * Use ==/!= to compare str, bytes, and int literals ./project_euler/problem_17/sol1.py:25:7: F632 use ==/!= to compare str, bytes, and int literals if i%100 is not 0: ^ * Use ==/!= to compare str, bytes, and int literals * Update sol1.py --- .travis.yml | 2 +- project_euler/problem_17/sol1.py | 4 ++-- project_euler/problem_19/sol1.py | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 5fba6987bb66..2440899e4f25 100644 --- a/.travis.yml +++ b/.travis.yml @@ -16,7 +16,7 @@ install: - pip install flake8 # pytest # add another testing frameworks later before_script: # stop the build if there are Python syntax errors or undefined names - - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics + - flake8 . --count --select=E9,F63,F72,F82 --show-source --statistics # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics script: diff --git a/project_euler/problem_17/sol1.py b/project_euler/problem_17/sol1.py index 9de5d80b9b29..8dd6f1af2093 100644 --- a/project_euler/problem_17/sol1.py +++ b/project_euler/problem_17/sol1.py @@ -22,7 +22,7 @@ if i >= 100: count += ones_counts[i/100] + 7 #add number of letters for "n hundred" - if i%100 is not 0: + if i%100 != 0: count += 3 #add number of letters for "and" if number is not multiple of 100 if 0 < i%100 < 20: @@ -32,4 +32,4 @@ else: count += ones_counts[i/1000] + 8 -print(count) \ No newline at end of file +print(count) diff --git a/project_euler/problem_19/sol1.py b/project_euler/problem_19/sol1.py index 94cf117026a4..13e520ca76e4 100644 --- a/project_euler/problem_19/sol1.py +++ b/project_euler/problem_19/sol1.py @@ -30,10 +30,10 @@ day += 7 if (year%4 == 0 and not year%100 == 0) or (year%400 == 0): - if day > days_per_month[month-1] and month is not 2: + if day > days_per_month[month-1] and month != 2: month += 1 day = day-days_per_month[month-2] - elif day > 29 and month is 2: + elif day > 29 and month == 2: month += 1 day = day-29 else: @@ -45,7 +45,7 @@ year += 1 month = 1 - if year < 2001 and day is 1: + if year < 2001 and day == 1: sundays += 1 -print(sundays) \ No newline at end of file +print(sundays) From f3608acfd5c3c66531942434769c8260c983e877 Mon Sep 17 00:00:00 2001 From: Sarvesh Dubey <38752758+dubesar@users.noreply.github.com> Date: Fri, 17 May 2019 08:42:06 +0530 Subject: [PATCH 45/51] Created shortest path using bfs (#794) * Created shortest path using bfs --- graphs/bfs-shortestpath.py | 43 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 graphs/bfs-shortestpath.py diff --git a/graphs/bfs-shortestpath.py b/graphs/bfs-shortestpath.py new file mode 100644 index 000000000000..5853351a53a3 --- /dev/null +++ b/graphs/bfs-shortestpath.py @@ -0,0 +1,43 @@ +graph = {'A': ['B', 'C', 'E'], + 'B': ['A','D', 'E'], + 'C': ['A', 'F', 'G'], + 'D': ['B'], + 'E': ['A', 'B','D'], + 'F': ['C'], + 'G': ['C']} + +def bfs_shortest_path(graph, start, goal): + # keep track of explored nodes + explored = [] + # keep track of all the paths to be checked + queue = [[start]] + + # return path if start is goal + if start == goal: + return "That was easy! Start = goal" + + # keeps looping until all possible paths have been checked + while queue: + # pop the first path from the queue + path = queue.pop(0) + # get the last node from the path + node = path[-1] + if node not in explored: + neighbours = graph[node] + # go through all neighbour nodes, construct a new path and + # push it into the queue + for neighbour in neighbours: + new_path = list(path) + new_path.append(neighbour) + queue.append(new_path) + # return path if neighbour is goal + if neighbour == goal: + return new_path + + # mark node as explored + explored.append(node) + + # in case there's no path between the 2 nodes + return "So sorry, but a connecting path doesn't exist :(" + +bfs_shortest_path(graph, 'G', 'D') # returns ['G', 'C', 'A', 'B', 'D'] From b6c3fa8992e1f3430e623b6c4b1268c89e26f71f Mon Sep 17 00:00:00 2001 From: weixuanhu <44716380+weixuanhu@users.noreply.github.com> Date: Sat, 18 May 2019 10:59:12 +0800 Subject: [PATCH 46/51] Interpolation search - fix endless loop bug, divide 0 bug and update description (#793) * fix endless loop bug, divide 0 bug and update description fix an endless bug, for example, if collection = [10,30,40,45,50,66,77,93], item = 67. 
fix divide 0 bug, when right=left it is not OK to compute point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left]) update 'sorted' to 'ascending sorted' in description to avoid confusion * delete swap files * delete 'address' and add input validation --- searches/interpolation_search.py | 80 +++++++++++++++++------ searches/quick_select.py | 11 +++- searches/test_interpolation_search.py | 93 +++++++++++++++++++++++++++ 3 files changed, 160 insertions(+), 24 deletions(-) create mode 100644 searches/test_interpolation_search.py diff --git a/searches/interpolation_search.py b/searches/interpolation_search.py index db9893bdb5d4..329596d340a5 100644 --- a/searches/interpolation_search.py +++ b/searches/interpolation_search.py @@ -11,9 +11,9 @@ def interpolation_search(sorted_collection, item): """Pure implementation of interpolation search algorithm in Python - Be careful collection must be sorted, otherwise result will be + Be careful collection must be ascending sorted, otherwise result will be unpredictable - :param sorted_collection: some sorted collection with comparable items + :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found """ @@ -21,6 +21,13 @@ def interpolation_search(sorted_collection, item): right = len(sorted_collection) - 1 while left <= right: + #avoid divided by 0 during interpolation + if sorted_collection[left]==sorted_collection[right]: + if sorted_collection[left]==item: + return left + else: + return None + point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left]) #out of range check @@ -31,66 +38,97 @@ def interpolation_search(sorted_collection, item): if current_item == item: return point else: - if item < current_item: - right = point - 1 - else: - left = point + 1 + if point<left: + right = left + left = point + elif point>right: + left = right + right = point + else: + if item < current_item: + right = point - 1 + else: + left = point + 1 return None - def interpolation_search_by_recursion(sorted_collection, item, left, right): """Pure implementation of interpolation search algorithm in Python by recursion - Be careful collection must be sorted, otherwise result will be + Be careful collection must be ascending sorted, otherwise result will be unpredictable First recursion should be started with left=0 and right=(len(sorted_collection)-1) - :param sorted_collection: some sorted collection with comparable items + :param sorted_collection: some ascending sorted collection with comparable items :param item: item value to search :return: index of found item or None if item is not found """ - point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left]) + #avoid divided by 0 during interpolation + if sorted_collection[left]==sorted_collection[right]: + if sorted_collection[left]==item: + return left + else: + return None + + point = left + ((item - sorted_collection[left]) * (right - left)) // (sorted_collection[right] - sorted_collection[left]) + #out of range check if point<0 or point>=len(sorted_collection): return None if sorted_collection[point] == item: return point - elif sorted_collection[point] > item: - return interpolation_search_by_recursion(sorted_collection, item, left, point-1) + elif point<left: + return interpolation_search_by_recursion(sorted_collection, item, point, left) + elif point>right: + return interpolation_search_by_recursion(sorted_collection, item, right, left) else: - return
interpolation_search_by_recursion(sorted_collection, item, point+1, right) + if sorted_collection[point] > item: + return interpolation_search_by_recursion(sorted_collection, item, left, point-1) + else: + return interpolation_search_by_recursion(sorted_collection, item, point+1, right) def __assert_sorted(collection): - """Check if collection is sorted, if not - raises :py:class:`ValueError` + """Check if collection is ascending sorted, if not - raises :py:class:`ValueError` :param collection: collection - :return: True if collection is sorted - :raise: :py:class:`ValueError` if collection is not sorted + :return: True if collection is ascending sorted + :raise: :py:class:`ValueError` if collection is not ascending sorted Examples: >>> __assert_sorted([0, 1, 2, 4]) True >>> __assert_sorted([10, -1, 5]) Traceback (most recent call last): ... - ValueError: Collection must be sorted + ValueError: Collection must be ascending sorted """ if collection != sorted(collection): - raise ValueError('Collection must be sorted') + raise ValueError('Collection must be ascending sorted') return True if __name__ == '__main__': import sys - - user_input = raw_input('Enter numbers separated by comma:\n').strip() + + """ + user_input = raw_input('Enter numbers separated by comma:\n').strip() collection = [int(item) for item in user_input.split(',')] try: __assert_sorted(collection) except ValueError: - sys.exit('Sequence must be sorted to apply interpolation search') + sys.exit('Sequence must be ascending sorted to apply interpolation search') target_input = raw_input('Enter a single number to be found in the list:\n') target = int(target_input) + """ + + debug = 0 + if debug == 1: + collection = [10,30,40,45,50,66,77,93] + try: + __assert_sorted(collection) + except ValueError: + sys.exit('Sequence must be ascending sorted to apply interpolation search') + target = 67 + result = interpolation_search(collection, target) if result is not None: print('{} found at positions: {}'.format(target, result)) diff --git a/searches/quick_select.py b/searches/quick_select.py index 1596cf040e0c..76d09cb97f97 100644 --- a/searches/quick_select.py +++ b/searches/quick_select.py @@ -14,9 +14,9 @@ def _partition(data, pivot): """ less, equal, greater = [], [], [] for element in data: - if element.address < pivot.address: + if element < pivot: less.append(element) - elif element.address > pivot.address: + elif element > pivot: greater.append(element) else: equal.append(element) @@ -24,6 +24,11 @@ def _partition(data, pivot): def quickSelect(list, k): #k = len(list) // 2 when trying to find the median (index that value would be when list is sorted) + + #invalid input + if k>=len(list) or k<0: + return None + smaller = [] larger = [] pivot = random.randint(0, len(list) - 1) @@ -41,4 +46,4 @@ def quickSelect(list, k): return quickSelect(smaller, k) #must be in larger else: - return quickSelect(larger, k - (m + count)) + return quickSelect(larger, k - (m + count)) \ No newline at end of file diff --git a/searches/test_interpolation_search.py b/searches/test_interpolation_search.py new file mode 100644 index 000000000000..60bb3af22e0f --- /dev/null +++ b/searches/test_interpolation_search.py @@ -0,0 +1,93 @@ +import unittest +from interpolation_search import interpolation_search, interpolation_search_by_recursion + +class Test_interpolation_search(unittest.TestCase): + def setUp(self): + # un-sorted case + self.collection1 = [5,3,4,6,7] + self.item1 = 4 + # sorted case, result exists + self.collection2 = [10,30,40,45,50,66,77,93] 
+ self.item2 = 66 + # sorted case, result doesn't exist + self.collection3 = [10,30,40,45,50,66,77,93] + self.item3 = 67 + # equal elements case, result exists + self.collection4 = [10,10,10,10,10] + self.item4 = 10 + # equal elements case, result doesn't exist + self.collection5 = [10,10,10,10,10] + self.item5 = 3 + # 1 element case, result exists + self.collection6 = [10] + self.item6 = 10 + # 1 element case, result doesn't exists + self.collection7 = [10] + self.item7 = 1 + + def tearDown(self): + pass + + def test_interpolation_search(self): + self.assertEqual(interpolation_search(self.collection1, self.item1), None) + + self.assertEqual(interpolation_search(self.collection2, self.item2), self.collection2.index(self.item2)) + + self.assertEqual(interpolation_search(self.collection3, self.item3), None) + + self.assertEqual(interpolation_search(self.collection4, self.item4), self.collection4.index(self.item4)) + + self.assertEqual(interpolation_search(self.collection5, self.item5), None) + + self.assertEqual(interpolation_search(self.collection6, self.item6), self.collection6.index(self.item6)) + + self.assertEqual(interpolation_search(self.collection7, self.item7), None) + + + +class Test_interpolation_search_by_recursion(unittest.TestCase): + def setUp(self): + # un-sorted case + self.collection1 = [5,3,4,6,7] + self.item1 = 4 + # sorted case, result exists + self.collection2 = [10,30,40,45,50,66,77,93] + self.item2 = 66 + # sorted case, result doesn't exist + self.collection3 = [10,30,40,45,50,66,77,93] + self.item3 = 67 + # equal elements case, result exists + self.collection4 = [10,10,10,10,10] + self.item4 = 10 + # equal elements case, result doesn't exist + self.collection5 = [10,10,10,10,10] + self.item5 = 3 + # 1 element case, result exists + self.collection6 = [10] + self.item6 = 10 + # 1 element case, result doesn't exists + self.collection7 = [10] + self.item7 = 1 + + def tearDown(self): + pass + + def test_interpolation_search_by_recursion(self): + self.assertEqual(interpolation_search_by_recursion(self.collection1, self.item1, 0, len(self.collection1)-1), None) + + self.assertEqual(interpolation_search_by_recursion(self.collection2, self.item2, 0, len(self.collection2)-1), self.collection2.index(self.item2)) + + self.assertEqual(interpolation_search_by_recursion(self.collection3, self.item3, 0, len(self.collection3)-1), None) + + self.assertEqual(interpolation_search_by_recursion(self.collection4, self.item4, 0, len(self.collection4)-1), self.collection4.index(self.item4)) + + self.assertEqual(interpolation_search_by_recursion(self.collection5, self.item5, 0, len(self.collection5)-1), None) + + self.assertEqual(interpolation_search_by_recursion(self.collection6, self.item6, 0, len(self.collection6)-1), self.collection6.index(self.item6)) + + self.assertEqual(interpolation_search_by_recursion(self.collection7, self.item7, 0, len(self.collection7)-1), None) + + + +if __name__ == '__main__': + unittest.main() From f5abc04176b9635da963ca701f643acde5da24dc Mon Sep 17 00:00:00 2001 From: Andy Lau Date: Sun, 19 May 2019 17:00:54 +0800 Subject: [PATCH 47/51] Update bucket_sort.py (#821) * Some simplification --- sorts/bucket_sort.py | 64 +++++++++++++++++++------------------------- 1 file changed, 27 insertions(+), 37 deletions(-) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index bd4281e463eb..5c17703c26f0 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -13,45 +13,35 @@ # Time Complexity of Solution: # Best Case O(n); Average Case O(n); Worst Case O(n) 
-from __future__ import print_function -from insertion_sort import insertion_sort -import math - -DEFAULT_BUCKET_SIZE = 5 - -def bucketSort(myList, bucketSize=DEFAULT_BUCKET_SIZE): - if(len(myList) == 0): - print('You don\'t have any elements in array!') - - minValue = myList[0] - maxValue = myList[0] - - # For finding minimum and maximum values - for i in range(0, len(myList)): - if myList[i] < minValue: - minValue = myList[i] - elif myList[i] > maxValue: - maxValue = myList[i] - - # Initialize buckets - bucketCount = math.floor((maxValue - minValue) / bucketSize) + 1 - buckets = [] - for i in range(0, bucketCount): +DEFAULT_BUCKET_SIZE=5 +def bucket_sort(my_list,bucket_size=DEFAULT_BUCKET_SIZE): + if(my_list==0): + print("you don't have any elements in array!") + + + min_value=min(my_list) + max_value=max(my_list) + + bucket_count=(max_value-min_value)//bucket_size+1 + buckets=[] + for i in range(bucket_count): buckets.append([]) + for i in range(len(my_list)): + buckets[(my_list[i]-min_value)//bucket_size].append(my_list[i]) + + + sorted_array=[] + for i in range(len(buckets)): + buckets[i].sort() + for j in range(len(buckets[i])): + sorted_array.append(buckets[i][j]) + return sorted_array - # For putting values in buckets - for i in range(0, len(myList)): - buckets[math.floor((myList[i] - minValue) / bucketSize)].append(myList[i]) - # Sort buckets and place back into input array - sortedArray = [] - for i in range(0, len(buckets)): - insertion_sort(buckets[i]) - for j in range(0, len(buckets[i])): - sortedArray.append(buckets[i][j]) - return sortedArray -if __name__ == '__main__': - sortedArray = bucketSort([12, 23, 4, 5, 3, 2, 12, 81, 56, 95]) - print(sortedArray) +#test +#besd on python 3.7.3 +user_input =input('Enter numbers separated by a comma:').strip() +unsorted =[int(item) for item in user_input.split(',')] +print(bucket_sort(unsorted)) From 316d5ffa374a35cb1a0237a7da1e12309da7aece Mon Sep 17 00:00:00 2001 From: DaveAxiom Date: Sun, 19 May 2019 16:36:46 -0400 Subject: [PATCH 48/51] Add NQueens backtracking search implementation (#504) --- other/nqueens.py | 77 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100644 other/nqueens.py diff --git a/other/nqueens.py b/other/nqueens.py new file mode 100644 index 000000000000..1b1c75878ae6 --- /dev/null +++ b/other/nqueens.py @@ -0,0 +1,77 @@ +#! 
/usr/bin/python3 +import sys + +def nqueens(board_width): + board = [0] + current_row = 0 + while True: + conflict = False + + for review_index in range(0, current_row): + left = board[review_index] - (current_row - review_index) + right = board[review_index] + (current_row - review_index); + if (board[current_row] == board[review_index] or (left >= 0 and left == board[current_row]) or (right < board_width and right == board[current_row])): + conflict = True; + break + + if (current_row == 0 and conflict == False): + board.append(0) + current_row = 1 + continue + + if (conflict == True): + board[current_row] += 1 + + if (current_row == 0 and board[current_row] == board_width): + print("No solution exists for specificed board size.") + return None + + while True: + if (board[current_row] == board_width): + board[current_row] = 0 + if (current_row == 0): + print("No solution exists for specificed board size.") + return None + + board.pop() + current_row -= 1 + board[current_row] += 1 + + if board[current_row] != board_width: + break + else: + current_row += 1 + if (current_row == board_width): + break + + board.append(0) + return board + +def print_board(board): + if (board == None): + return + + board_width = len(board) + for row in range(board_width): + line_print = [] + for column in range(board_width): + if column == board[row]: + line_print.append("Q") + else: + line_print.append(".") + print(line_print) + + +if __name__ == '__main__': + default_width = 8 + for arg in sys.argv: + if (arg.isdecimal() and int(arg) > 3): + default_width = int(arg) + break + + if (default_width == 8): + print("Running algorithm with board size of 8. Specify an alternative Chess board size for N-Queens as a command line argument.") + + board = nqueens(default_width) + print(board) + print_board(board) \ No newline at end of file From c1130490d7534412bea66cb3864e2bb7f7e13dd7 Mon Sep 17 00:00:00 2001 From: Adam <34916469+coderpower0@users.noreply.github.com> Date: Mon, 20 May 2019 21:22:20 +0800 Subject: [PATCH 49/51] fix spelling on line 44 of bucket sort (#824) * change besd to best --- sorts/bucket_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index 5c17703c26f0..aba0124ad909 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -41,7 +41,7 @@ def bucket_sort(my_list,bucket_size=DEFAULT_BUCKET_SIZE): #test -#besd on python 3.7.3 +#best on python 3.7.3 user_input =input('Enter numbers separated by a comma:').strip() unsorted =[int(item) for item in user_input.split(',')] print(bucket_sort(unsorted)) From b5667e5ee98f9f68c8f40dd9691bb9006a5ac832 Mon Sep 17 00:00:00 2001 From: Anirudh Ajith Date: Tue, 21 May 2019 11:36:05 +0530 Subject: [PATCH 50/51] Removed the (incorrectly named) redundant file graph_list.py and renamed graph.py to graph_list.py (#820) --- graphs/graph.py | 44 -------------------------- graphs/graph_list.py | 73 ++++++++++++++++++++++++++------------------ 2 files changed, 43 insertions(+), 74 deletions(-) delete mode 100644 graphs/graph.py diff --git a/graphs/graph.py b/graphs/graph.py deleted file mode 100644 index 0c981c39d320..000000000000 --- a/graphs/graph.py +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# encoding=utf8 - -from __future__ import print_function -# Author: OMKAR PATHAK - -# We can use Python's dictionary for constructing the graph. 
- -class AdjacencyList(object): - def __init__(self): - self.List = {} - - def addEdge(self, fromVertex, toVertex): - # check if vertex is already present - if fromVertex in self.List.keys(): - self.List[fromVertex].append(toVertex) - else: - self.List[fromVertex] = [toVertex] - - def printList(self): - for i in self.List: - print((i,'->',' -> '.join([str(j) for j in self.List[i]]))) - -if __name__ == '__main__': - al = AdjacencyList() - al.addEdge(0, 1) - al.addEdge(0, 4) - al.addEdge(4, 1) - al.addEdge(4, 3) - al.addEdge(1, 0) - al.addEdge(1, 4) - al.addEdge(1, 3) - al.addEdge(1, 2) - al.addEdge(2, 3) - al.addEdge(3, 4) - - al.printList() - - # OUTPUT: - # 0 -> 1 -> 4 - # 1 -> 0 -> 4 -> 3 -> 2 - # 2 -> 3 - # 3 -> 4 - # 4 -> 1 -> 3 diff --git a/graphs/graph_list.py b/graphs/graph_list.py index d67bc96c4a81..0c981c39d320 100644 --- a/graphs/graph_list.py +++ b/graphs/graph_list.py @@ -1,31 +1,44 @@ -from __future__ import print_function - - -class Graph: - def __init__(self, vertex): - self.vertex = vertex - self.graph = [[0] for i in range(vertex)] - - def add_edge(self, u, v): - self.graph[u - 1].append(v - 1) - - def show(self): - for i in range(self.vertex): - print('%d: '% (i + 1), end=' ') - for j in self.graph[i]: - print('%d-> '% (j + 1), end=' ') - print(' ') - - - -g = Graph(100) - -g.add_edge(1,3) -g.add_edge(2,3) -g.add_edge(3,4) -g.add_edge(3,5) -g.add_edge(4,5) - - -g.show() +#!/usr/bin/python +# encoding=utf8 +from __future__ import print_function +# Author: OMKAR PATHAK + +# We can use Python's dictionary for constructing the graph. + +class AdjacencyList(object): + def __init__(self): + self.List = {} + + def addEdge(self, fromVertex, toVertex): + # check if vertex is already present + if fromVertex in self.List.keys(): + self.List[fromVertex].append(toVertex) + else: + self.List[fromVertex] = [toVertex] + + def printList(self): + for i in self.List: + print((i,'->',' -> '.join([str(j) for j in self.List[i]]))) + +if __name__ == '__main__': + al = AdjacencyList() + al.addEdge(0, 1) + al.addEdge(0, 4) + al.addEdge(4, 1) + al.addEdge(4, 3) + al.addEdge(1, 0) + al.addEdge(1, 4) + al.addEdge(1, 3) + al.addEdge(1, 2) + al.addEdge(2, 3) + al.addEdge(3, 4) + + al.printList() + + # OUTPUT: + # 0 -> 1 -> 4 + # 1 -> 0 -> 4 -> 3 -> 2 + # 2 -> 3 + # 3 -> 4 + # 4 -> 1 -> 3 From 023f5e092d38f7e220ae68a23f7183eeb8fd9e04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?N=C3=ADkolas=20Vargas?= Date: Wed, 22 May 2019 09:09:36 -0300 Subject: [PATCH 51/51] fix empty list validation and code data structures (#826) * fix empty list validation and code data structures * Update bucket_sort.py https://github.com/TheAlgorithms/Python/pull/826#pullrequestreview-240357549 --- sorts/bucket_sort.py | 38 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 25 deletions(-) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index aba0124ad909..cca913328e40 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -14,34 +14,22 @@ # Best Case O(n); Average Case O(n); Worst Case O(n) DEFAULT_BUCKET_SIZE=5 -def bucket_sort(my_list,bucket_size=DEFAULT_BUCKET_SIZE): - if(my_list==0): - print("you don't have any elements in array!") +def bucket_sort(my_list, bucket_size=DEFAULT_BUCKET_SIZE): + if len(my_list) == 0: + raise Exception("Please add some elements in the array.") - min_value=min(my_list) - max_value=max(my_list) + min_value, max_value = (min(my_list), max(my_list)) + bucket_count = ((max_value - min_value) // bucket_size + 1) + buckets = [[] for _ in range(int(bucket_count))] 
- bucket_count=(max_value-min_value)//bucket_size+1 - buckets=[] - for i in range(bucket_count): - buckets.append([]) for i in range(len(my_list)): - buckets[(my_list[i]-min_value)//bucket_size].append(my_list[i]) + buckets[int((my_list[i] - min_value) // bucket_size)].append(my_list[i]) + return sorted([buckets[i][j] for i in range(len(buckets)) + for j in range(len(buckets[i]))]) - sorted_array=[] - for i in range(len(buckets)): - buckets[i].sort() - for j in range(len(buckets[i])): - sorted_array.append(buckets[i][j]) - return sorted_array - - - - -#test -#best on python 3.7.3 -user_input =input('Enter numbers separated by a comma:').strip() -unsorted =[int(item) for item in user_input.split(',')] -print(bucket_sort(unsorted)) +if __name__ == "__main__": + user_input = input('Enter numbers separated by a comma:').strip() + unsorted = [float(n) for n in user_input.split(',') if len(user_input) > 0] + print(bucket_sort(unsorted))