From 9a3f46be35ab18e63afb69d69acf79061b0d6446 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Wed, 19 Apr 2023 23:30:00 +0530
Subject: [PATCH 01/17] Pass: Refactor code into a function

---
 src/libasr/pass/pass_array_by_data.cpp | 68 +++++++++++++-------------
 1 file changed, 35 insertions(+), 33 deletions(-)

diff --git a/src/libasr/pass/pass_array_by_data.cpp b/src/libasr/pass/pass_array_by_data.cpp
index d3f1e82c48..07f11ee7db 100644
--- a/src/libasr/pass/pass_array_by_data.cpp
+++ b/src/libasr/pass/pass_array_by_data.cpp
@@ -358,45 +358,47 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedureCallsVisitor>
 
+    template <typename T>
+    void check_and_update_args_for_pass_arr_by_data_passed_as_callback(const T& x) {
+        bool args_updated = false;
+        Vec<ASR::call_arg_t> new_args;
+        new_args.reserve(al, x.n_args);
+        for ( size_t i = 0; i < x.n_args; i++ ) {
+            ASR::call_arg_t arg = x.m_args[i];
+            ASR::expr_t* expr = arg.m_value;
+            if (expr) {
+                if (ASR::is_a<ASR::Var_t>(*expr)) {
+                    ASR::Var_t* var = ASR::down_cast<ASR::Var_t>(expr);
+                    ASR::symbol_t* sym = var->m_v;
+                    if ( v.proc2newproc.find(sym) != v.proc2newproc.end() ) {
+                        ASR::symbol_t* new_var_sym = v.proc2newproc[sym].first;
+                        ASR::expr_t* new_var = ASRUtils::EXPR(ASR::make_Var_t(al, var->base.base.loc, new_var_sym));
+                        {
+                            // update existing arg
+                            arg.m_value = new_var;
+                            arg.loc = arg.loc;
+                        }
+                        args_updated = true;
+                    }
+                }
+            }
+            new_args.push_back(al, arg);
+        }
+        if (args_updated) {
+            T& xx = const_cast<T&>(x);
+            xx.m_args = new_args.p;
+            xx.n_args = new_args.size();
+        }
+    }
+
     template <typename T>
     void visit_Call(const T& x) {
         ASR::symbol_t* subrout_sym = x.m_name;
         bool is_external = ASR::is_a<ASR::ExternalSymbol_t>(*subrout_sym);
         subrout_sym = ASRUtils::symbol_get_past_external(subrout_sym);
         if( v.proc2newproc.find(subrout_sym) == v.proc2newproc.end() ) {
-            bool args_updated = false;
-            Vec<ASR::call_arg_t> new_args;
-            new_args.reserve(al, x.n_args);
-            for ( size_t i = 0; i < x.n_args; i++ ) {
-                ASR::call_arg_t arg = x.m_args[i];
-                ASR::expr_t* expr = arg.m_value;
-                bool use_original_arg = true;
-                if (expr) {
-                    if (ASR::is_a<ASR::Var_t>(*expr)) {
-                        ASR::Var_t* var = ASR::down_cast<ASR::Var_t>(expr);
-                        ASR::symbol_t* sym = var->m_v;
-                        if ( v.proc2newproc.find(sym) != v.proc2newproc.end() ) {
-                            ASR::symbol_t* new_var_sym = v.proc2newproc[sym].first;
-                            ASR::expr_t* new_var = ASRUtils::EXPR(ASR::make_Var_t(al, var->base.base.loc, new_var_sym));
-                            ASR::call_arg_t new_arg;
-                            new_arg.m_value = new_var;
-                            new_arg.loc = arg.loc;
-                            new_args.push_back(al, new_arg);
-                            args_updated = true;
-                            use_original_arg = false;
-                        }
-                    }
-                }
-                if( use_original_arg ) {
-                    new_args.push_back(al, arg);
-                }
-            }
-            if (args_updated) {
-                T& xx = const_cast<T&>(x);
-                xx.m_args = new_args.p;
-                xx.n_args = new_args.size();
-            }
-            return ;
+            check_and_update_args_for_pass_arr_by_data_passed_as_callback(x);
+            return;
         }
 
         ASR::symbol_t* new_func_sym = v.proc2newproc[subrout_sym].first;

From e7683385292a50c61b0e58eddd1001cb9aad6dcd Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Wed, 19 Apr 2023 23:32:58 +0530
Subject: [PATCH 02/17] Pass: Refactor: Check for nullptr earlier

This could improve performance, since the null check runs in constant
time. If it evaluates to true, the call to std::find() is skipped
entirely.

---
 src/libasr/pass/pass_array_by_data.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/libasr/pass/pass_array_by_data.cpp b/src/libasr/pass/pass_array_by_data.cpp
index 07f11ee7db..0f635142d6 100644
--- a/src/libasr/pass/pass_array_by_data.cpp
+++ b/src/libasr/pass/pass_array_by_data.cpp
@@ -408,9 +408,9 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedureCallsVisitor>
         for( size_t i = 0; i < x.n_args; i++ ) {
             new_args.push_back(al, x.m_args[i]);
-            if (std::find(indices.begin(), indices.end(), i) == indices.end() ||
-                x.m_args[i].m_value == nullptr) {
+            if (x.m_args[i].m_value == nullptr ||
+                std::find(indices.begin(), indices.end(), i) == indices.end()) {
                 continue;
             }
             Vec<ASR::expr_t*> dim_vars;
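Note on PATCH 02: the reordering relies on left-to-right short-circuit
evaluation: the constant-time nullptr test now guards the linear scan that
std::find() performs over `indices`. A minimal sketch of the same idea in
Python (the values are made up for illustration and are not LCompilers data):

    indices = [2, 5]                 # argument positions needing dimension args
    args = [None, "a", "b", None]    # call arguments; None models a missing value

    for i, arg in enumerate(args):
        # The cheap None test runs first, so the linear scan of `indices`
        # is skipped entirely whenever the argument is absent.
        if arg is None or i not in indices:
            continue
        print("argument", i, "gets dimension variables appended")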
From deed90ef89619982a777f7de523e3a7c6714aa87 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Wed, 19 Apr 2023 23:42:11 +0530
Subject: [PATCH 03/17] Pass: Refactor code into a function

---
 src/libasr/pass/pass_array_by_data.cpp | 41 +++++++++++++++-----------
 1 file changed, 23 insertions(+), 18 deletions(-)

diff --git a/src/libasr/pass/pass_array_by_data.cpp b/src/libasr/pass/pass_array_by_data.cpp
index 0f635142d6..95802c406d 100644
--- a/src/libasr/pass/pass_array_by_data.cpp
+++ b/src/libasr/pass/pass_array_by_data.cpp
@@ -391,31 +391,19 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedureCallsVisitor>
         }
     }
 
-    template <typename T>
-    void visit_Call(const T& x) {
-        ASR::symbol_t* subrout_sym = x.m_name;
-        bool is_external = ASR::is_a<ASR::ExternalSymbol_t>(*subrout_sym);
-        subrout_sym = ASRUtils::symbol_get_past_external(subrout_sym);
-        if( v.proc2newproc.find(subrout_sym) == v.proc2newproc.end() ) {
-            check_and_update_args_for_pass_arr_by_data_passed_as_callback(x);
-            return;
-        }
-
-        ASR::symbol_t* new_func_sym = v.proc2newproc[subrout_sym].first;
-        std::vector<size_t>& indices = v.proc2newproc[subrout_sym].second;
-
+    Vec<ASR::call_arg_t> construct_new_args(size_t n_args, ASR::call_arg_t* orig_args, std::vector<size_t>& indices) {
         Vec<ASR::call_arg_t> new_args;
-        new_args.reserve(al, x.n_args);
-        for( size_t i = 0; i < x.n_args; i++ ) {
-            new_args.push_back(al, x.m_args[i]);
-            if (x.m_args[i].m_value == nullptr ||
+        new_args.reserve(al, n_args);
+        for( size_t i = 0; i < n_args; i++ ) {
+            new_args.push_back(al, orig_args[i]);
+            if (orig_args[i].m_value == nullptr ||
                 std::find(indices.begin(), indices.end(), i) == indices.end()) {
                 continue;
             }
 
             Vec<ASR::expr_t*> dim_vars;
             dim_vars.reserve(al, 2);
-            ASRUtils::get_dimensions(x.m_args[i].m_value, dim_vars, al);
+            ASRUtils::get_dimensions(orig_args[i].m_value, dim_vars, al);
             for( size_t j = 0; j < dim_vars.size(); j++ ) {
                 ASR::call_arg_t dim_var;
                 dim_var.loc = dim_vars[j]->base.loc;
@@ -423,6 +411,23 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedureCallsVisitor>
             }
         }
 
+        return new_args;
+    }
+
+    template <typename T>
+    void visit_Call(const T& x) {
+        ASR::symbol_t* subrout_sym = x.m_name;
+        bool is_external = ASR::is_a<ASR::ExternalSymbol_t>(*subrout_sym);
+        subrout_sym = ASRUtils::symbol_get_past_external(subrout_sym);
+        if( v.proc2newproc.find(subrout_sym) == v.proc2newproc.end() ) {
+            check_and_update_args_for_pass_arr_by_data_passed_as_callback(x);
+            return;
+        }
+
+        ASR::symbol_t* new_func_sym = v.proc2newproc[subrout_sym].first;
+        std::vector<size_t>& indices = v.proc2newproc[subrout_sym].second;
+
+        Vec<ASR::call_arg_t> new_args = construct_new_args(x.n_args, x.m_args, indices);
 
         {
             ASR::Function_t* new_func_ = ASR::down_cast<ASR::Function_t>(new_func_sym);
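Note on PATCH 03: construct_new_args() now isolates the core of the
pass_array_by_data rewrite: for every argument whose position is listed in
`indices`, the call keeps the original array argument and then appends one
extra argument per array dimension. A rough model of that transformation in
Python (the helper names are illustrative, not the actual ASR API):

    def construct_new_args(orig_args, indices, dims_of):
        new_args = []
        for i, arg in enumerate(orig_args):
            new_args.append(arg)
            if arg is None or i not in indices:
                continue
            new_args.extend(dims_of(arg))  # append the array's extents
        return new_args

    # A call like f(matrix) becomes f(matrix, rows, cols):
    print(construct_new_args(["matrix"], [0], lambda a: ["rows", "cols"]))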
From 9696125effde0ab16513798f3761c684c9690ac3 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 10:58:31 +0530
Subject: [PATCH 04/17] PKG: Basic package ready

---
 integration_tests/lnn/__init__.py             |   0
 integration_tests/lnn/perceptron/__init__.py  |   1 +
 .../lnn/perceptron/perceptron_main.py         | 100 ++++++++++++++++++
 3 files changed, 101 insertions(+)
 create mode 100644 integration_tests/lnn/__init__.py
 create mode 100644 integration_tests/lnn/perceptron/__init__.py
 create mode 100644 integration_tests/lnn/perceptron/perceptron_main.py

diff --git a/integration_tests/lnn/__init__.py b/integration_tests/lnn/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/integration_tests/lnn/perceptron/__init__.py b/integration_tests/lnn/perceptron/__init__.py
new file mode 100644
index 0000000000..c9826fb608
--- /dev/null
+++ b/integration_tests/lnn/perceptron/__init__.py
@@ -0,0 +1 @@
+from .perceptron_main import init_perceptron, train_dataset, test_perceptron
diff --git a/integration_tests/lnn/perceptron/perceptron_main.py b/integration_tests/lnn/perceptron/perceptron_main.py
new file mode 100644
index 0000000000..2c0c3940a1
--- /dev/null
+++ b/integration_tests/lnn/perceptron/perceptron_main.py
@@ -0,0 +1,100 @@
+from lpython import dataclass, i32, f64, TypeVar
+from numpy import empty, float64
+from sys import exit
+
+# from utils import init_zeros, dot_product
+
+@dataclass
+class Perceptron:
+    no_of_inputs: i32
+    weights: f64[1001]
+    learn_rate: f64
+    iterations_limit: i32
+    des_accuracy: f64
+    cur_accuracy: f64
+    epochs_cnt: i32
+
+n: i32
+n = TypeVar("n")
+
+def dot_product(a: f64[:], b: i32[:], n: i32) -> f64:
+    result: f64 = 0.0
+    i: i32 = 0
+    for i in range(n):
+        result = result + a[i] * f64(b[i])
+    return result
+
+def get_inp_vec_with_bias(a: i32[:, :], i: i32, n: i32) -> i32[n + 1]:
+    b: i32[:] = empty([n + 1])
+    j: i32
+    for j in range(n):
+        b[j] = a[i, j]
+    b[n] = 1
+    return b
+
+def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, des_accuracy: f64):
+    if (n < 1 or n > 1000):
+        print("no_of_inputs must be between [1, 1000]")
+        exit(1)
+    p.no_of_inputs = n
+    p.weights = empty(n + 1) # last element is bias
+    i: i32
+    for i in range(n + 1): p.weights[i] = 0.0
+    p.learn_rate = rate
+    p.iterations_limit = iterations_limit
+    p.des_accuracy = des_accuracy
+    p.cur_accuracy = 0.0
+    p.epochs_cnt = 0
+
+def train_perceptron(p: Perceptron, input_vector: i32[:], actual_output: i32):
+    predicted_output: i32 = predict_perceptron(p, input_vector)
+    error: i32 = actual_output - predicted_output
+    i: i32
+    for i in range(p.no_of_inputs + 1):
+        p.weights[i] += p.learn_rate * f64(error) * f64(input_vector[i])
+
+def predict_perceptron(p: Perceptron, input_vector: i32[:]) -> i32:
+    weighted_sum: f64 = dot_product(p.weights, input_vector, p.no_of_inputs + 1)
+    return activation_function(weighted_sum)
+
+def activation_function(value: f64) -> i32:
+    if value >= 0.0:
+        return 1
+    return -1
+
+def train_epoch(p: Perceptron, no_of_inp_vecs: i32, input_vectors: i32[:, :], outputs: i32[:]):
+    i: i32
+    for i in range(no_of_inp_vecs):
+        input_vector: i32[:] = get_inp_vec_with_bias(input_vectors, i, p.no_of_inputs)
+        if predict_perceptron(p, input_vector) != outputs[i]:
+            train_perceptron(p, input_vector, outputs[i])
+
+def train_dataset(p: Perceptron, no_of_inp_vecs: i32, input_vectors: i32[:, :], outputs: i32[:]):
+    p.cur_accuracy = 0.0
+    p.epochs_cnt = 0
+    while p.cur_accuracy < p.des_accuracy and p.epochs_cnt < p.iterations_limit:
+        p.epochs_cnt += 1
+        train_epoch(p, no_of_inp_vecs, input_vectors, outputs)
+        p.cur_accuracy = test_perceptron(p, no_of_inp_vecs, input_vectors, outputs)
+
+def test_perceptron(p: Perceptron, no_of_inp_vecs: i32, input_vectors: i32[:, :], outputs: i32[:]) -> f64:
+    correctly_classified_cnt: i32 = 0
+    i: i32
+    for i in range(no_of_inp_vecs):
+        input_vector: i32[:] = get_inp_vec_with_bias(input_vectors, i, p.no_of_inputs)
+        if predict_perceptron(p, input_vector) == outputs[i]:
+            correctly_classified_cnt += 1
+    return (correctly_classified_cnt / no_of_inp_vecs) * 100.0
+
+def print_perceptron(p: Perceptron):
+    print("weights = [", end = "")
+    i: i32
+    for i in range(p.no_of_inputs):
+        print(p.weights[i], end = ", ")
+    print(p.weights[p.no_of_inputs], end = "(bias)]\n")
+    print("learn_rate = ", end = "")
+    print(p.learn_rate)
+    print("accuracy = ", end = "")
+    print(p.cur_accuracy)
+    print("epochs_cnt = ", end = "")
+    print(p.epochs_cnt)
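Note on PATCH 04: the package trains with the classic perceptron rule
w_i <- w_i + learn_rate * (target - predicted) * x_i, where each sample is
augmented with a constant 1 so the last weight acts as the bias. One hand-run
update in plain Python (sample values are made up):

    weights = [0.0, 0.0, 0.0]   # two inputs + bias, as in init_perceptron
    learn_rate = 0.05
    x = [1, -1, 1]              # input vector with the bias term appended
    target = -1

    weighted_sum = sum(w * xi for w, xi in zip(weights, x))
    predicted = 1 if weighted_sum >= 0.0 else -1   # activation_function
    error = target - predicted                     # -1 - 1 = -2
    weights = [w + learn_rate * error * xi for w, xi in zip(weights, x)]
    print(weights)              # [-0.1, 0.1, -0.1]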
From 9696125effde0ab16513798f3761c684c9690ac3 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 00:28:59 +0530
Subject: [PATCH 05/17] PKG: Use list in place of numpy arrays

---
 .../lnn/perceptron/perceptron_main.py         | 52 +++++++++----------
 1 file changed, 24 insertions(+), 28 deletions(-)

diff --git a/integration_tests/lnn/perceptron/perceptron_main.py b/integration_tests/lnn/perceptron/perceptron_main.py
index 2c0c3940a1..43a8255cef 100644
--- a/integration_tests/lnn/perceptron/perceptron_main.py
+++ b/integration_tests/lnn/perceptron/perceptron_main.py
@@ -7,29 +7,26 @@
 @dataclass
 class Perceptron:
     no_of_inputs: i32
-    weights: f64[1001]
+    weights: list[f64]
     learn_rate: f64
     iterations_limit: i32
     des_accuracy: f64
     cur_accuracy: f64
     epochs_cnt: i32
 
-n: i32
-n = TypeVar("n")
-
-def dot_product(a: f64[:], b: i32[:], n: i32) -> f64:
+def dot_product(a: list[f64], b: list[i32]) -> f64:
     result: f64 = 0.0
     i: i32 = 0
-    for i in range(n):
+    for i in range(len(a)):
         result = result + a[i] * f64(b[i])
     return result
 
-def get_inp_vec_with_bias(a: i32[:, :], i: i32, n: i32) -> i32[n + 1]:
-    b: i32[:] = empty([n + 1])
-    j: i32
-    for j in range(n):
-        b[j] = a[i, j]
-    b[n] = 1
+def get_inp_vec_with_bias(a: list[i32]) -> list[i32]:
+    b: list[i32]
+    i: i32
+    for i in range(len(a)):
+        b.append(a[i])
+    b.append(1)
     return b
 
 def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, des_accuracy: f64):
@@ -37,24 +34,23 @@ def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, des
         print("no_of_inputs must be between [1, 1000]")
         exit(1)
     p.no_of_inputs = n
-    p.weights = empty(n + 1) # last element is bias
     i: i32
-    for i in range(n + 1): p.weights[i] = 0.0
+    for i in range(n + 1): p.weights.append(0.0) # last element is bias
     p.learn_rate = rate
     p.iterations_limit = iterations_limit
     p.des_accuracy = des_accuracy
     p.cur_accuracy = 0.0
     p.epochs_cnt = 0
 
-def train_perceptron(p: Perceptron, input_vector: i32[:], actual_output: i32):
+def train_perceptron(p: Perceptron, input_vector: list[i32], actual_output: i32):
     predicted_output: i32 = predict_perceptron(p, input_vector)
     error: i32 = actual_output - predicted_output
     i: i32
-    for i in range(p.no_of_inputs + 1):
+    for i in range(len(input_vector)):
         p.weights[i] += p.learn_rate * f64(error) * f64(input_vector[i])
 
-def predict_perceptron(p: Perceptron, input_vector: i32[:]) -> i32:
-    weighted_sum: f64 = dot_product(p.weights, input_vector, p.no_of_inputs + 1)
+def predict_perceptron(p: Perceptron, input_vector: list[i32]) -> i32:
+    weighted_sum: f64 = dot_product(p.weights, input_vector)
     return activation_function(weighted_sum)
 
 def activation_function(value: f64) -> i32:
@@ -62,29 +58,29 @@ def activation_function(value: f64) -> i32:
         return 1
     return -1
 
-def train_epoch(p: Perceptron, no_of_inp_vecs: i32, input_vectors: i32[:, :], outputs: i32[:]):
+def train_epoch(p: Perceptron, input_vectors: list[list[i32]], outputs: list[i32]):
     i: i32
-    for i in range(no_of_inp_vecs):
-        input_vector: i32[:] = get_inp_vec_with_bias(input_vectors, i, p.no_of_inputs)
+    for i in range(len(input_vectors)):
+        input_vector: list[i32] = get_inp_vec_with_bias(input_vectors[i])
         if predict_perceptron(p, input_vector) != outputs[i]:
             train_perceptron(p, input_vector, outputs[i])
 
-def train_dataset(p: Perceptron, no_of_inp_vecs: i32, input_vectors: i32[:, :], outputs: i32[:]):
+def train_dataset(p: Perceptron, input_vectors: list[list[i32]], outputs: list[i32]):
     p.cur_accuracy = 0.0
     p.epochs_cnt = 0
     while p.cur_accuracy < p.des_accuracy and p.epochs_cnt < p.iterations_limit:
         p.epochs_cnt += 1
-        train_epoch(p, no_of_inp_vecs, input_vectors, outputs)
-        p.cur_accuracy = test_perceptron(p, no_of_inp_vecs, input_vectors, outputs)
+        train_epoch(p, input_vectors, outputs)
+        p.cur_accuracy = test_perceptron(p, input_vectors, outputs)
 
-def test_perceptron(p: Perceptron, no_of_inp_vecs: i32, input_vectors: i32[:, :], outputs: i32[:]) -> f64:
+def test_perceptron(p: Perceptron, input_vectors: list[list[i32]], outputs: list[i32]) -> f64:
     correctly_classified_cnt: i32 = 0
     i: i32
-    for i in range(no_of_inp_vecs):
-        input_vector: i32[:] = get_inp_vec_with_bias(input_vectors, i, p.no_of_inputs)
+    for i in range(len(input_vectors)):
+        input_vector: list[i32] = get_inp_vec_with_bias(input_vectors[i])
         if predict_perceptron(p, input_vector) == outputs[i]:
             correctly_classified_cnt += 1
-    return (correctly_classified_cnt / no_of_inp_vecs) * 100.0
+    return (correctly_classified_cnt / len(input_vectors)) * 100.0
 
 def print_perceptron(p: Perceptron):
     print("weights = [", end = "")
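Note on PATCH 05: get_inp_vec_with_bias() uses the standard bias-augmentation
trick: appending a constant 1 to every sample makes the bias just another
learned weight, since w . [x, 1] = w[0..n-1] . x + b. In plain Python:

    def with_bias(a):
        # copy the sample and append the constant bias input
        return list(a) + [1]

    print(with_bias([3, 4]))   # [3, 4, 1]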
From c9a572713008f9191a278699fa486055821b6e20 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 00:29:33 +0530
Subject: [PATCH 06/17] PKG: Fix weights initialization

---
 integration_tests/lnn/perceptron/perceptron_main.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/integration_tests/lnn/perceptron/perceptron_main.py b/integration_tests/lnn/perceptron/perceptron_main.py
index 43a8255cef..7fd44a23b0 100644
--- a/integration_tests/lnn/perceptron/perceptron_main.py
+++ b/integration_tests/lnn/perceptron/perceptron_main.py
@@ -29,13 +29,20 @@ def get_inp_vec_with_bias(a: list[i32]) -> list[i32]:
     b.append(1)
     return b
 
+def init_weights(size: i32) -> list[f64]:
+    weights: list[f64]
+    i: i32
+    for i in range(size):
+        weights.append(0.0)
+    weights.append(0.0) # append bias
+    return weights
+
 def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, des_accuracy: f64):
     if (n < 1 or n > 1000):
         print("no_of_inputs must be between [1, 1000]")
         exit(1)
     p.no_of_inputs = n
-    i: i32
-    for i in range(n + 1): p.weights.append(0.0) # last element is bias
+    p.weights = init_weights(n)
     p.learn_rate = rate
     p.iterations_limit = iterations_limit
     p.des_accuracy = des_accuracy
From 397124d7fa1d71eaac4cf82769ae4b166327f624 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 01:19:21 +0530
Subject: [PATCH 07/17] PKG: Use floating-point values

Also define normalizing functions.

---
 integration_tests/lnn/perceptron/__init__.py  |  2 +-
 .../lnn/perceptron/perceptron_main.py         | 66 +++++++++++++------
 2 files changed, 46 insertions(+), 22 deletions(-)

diff --git a/integration_tests/lnn/perceptron/__init__.py b/integration_tests/lnn/perceptron/__init__.py
index c9826fb608..8941bd659e 100644
--- a/integration_tests/lnn/perceptron/__init__.py
+++ b/integration_tests/lnn/perceptron/__init__.py
@@ -1 +1 @@
-from .perceptron_main import init_perceptron, train_dataset, test_perceptron
+from .perceptron_main import init_perceptron, train_dataset, test_perceptron, normalize_input_vectors, print_perceptron
diff --git a/integration_tests/lnn/perceptron/perceptron_main.py b/integration_tests/lnn/perceptron/perceptron_main.py
index 7fd44a23b0..fadb6a7a44 100644
--- a/integration_tests/lnn/perceptron/perceptron_main.py
+++ b/integration_tests/lnn/perceptron/perceptron_main.py
@@ -1,9 +1,6 @@
-from lpython import dataclass, i32, f64, TypeVar
-from numpy import empty, float64
+from lpython import dataclass, i32, f64
 from sys import exit
 
-# from utils import init_zeros, dot_product
-
 @dataclass
 class Perceptron:
     no_of_inputs: i32
@@ -14,19 +11,43 @@ class Perceptron:
     cur_accuracy: f64
     epochs_cnt: i32
 
+def normalize(value: f64, leftMin: f64, leftMax: f64, rightMin: f64, rightMax: f64) -> f64:
+    # Figure out how 'wide' each range is
+    leftSpan: f64 = leftMax - leftMin
+    rightSpan: f64 = rightMax - rightMin
+
+    # Convert the left range into a 0-1 range (float)
+    valueScaled: f64 = (value - leftMin) / leftSpan
+
+    # Convert the 0-1 range into a value in the right range.
+    return rightMin + (valueScaled * rightSpan)
+
+def normalize_input_vectors(input_vectors: list[list[f64]]):
+    rows: i32 = len(input_vectors)
+    cols: i32 = len(input_vectors[0])
 
-def dot_product(a: list[f64], b: list[i32]) -> f64:
-    result: f64 = 0.0
-    i: i32 = 0
-    for i in range(len(a)):
-        result = result + a[i] * f64(b[i])
-    return result
+    j: i32
+    for j in range(cols):
+        colMinVal: f64 = input_vectors[0][j]
+        colMaxVal: f64 = input_vectors[0][j]
+        i: i32
+        for i in range(rows):
+            if input_vectors[i][j] > colMaxVal:
+                colMaxVal = input_vectors[i][j]
+            if input_vectors[i][j] < colMinVal:
+                colMinVal = input_vectors[i][j]
+
+        for i in range(rows):
+            input_vectors[i][j] = normalize(input_vectors[i][j], colMinVal, colMaxVal, -1.0, 1.0)
+
+
 
-def get_inp_vec_with_bias(a: list[i32]) -> list[i32]:
-    b: list[i32]
+def get_inp_vec_with_bias(a: list[f64]) -> list[f64]:
+    b: list[f64]
     i: i32
     for i in range(len(a)):
         b.append(a[i])
-    b.append(1)
+    b.append(1.0)
     return b
 
 def init_weights(size: i32) -> list[f64]:
@@ -49,15 +70,18 @@ def init_perceptron(p: Perceptron, n: i32, rate: f64, iterations_limit: i32, des
     p.cur_accuracy = 0.0
     p.epochs_cnt = 0
 
-def train_perceptron(p: Perceptron, input_vector: list[i32], actual_output: i32):
+def train_perceptron(p: Perceptron, input_vector: list[f64], actual_output: i32):
     predicted_output: i32 = predict_perceptron(p, input_vector)
     error: i32 = actual_output - predicted_output
     i: i32
     for i in range(len(input_vector)):
         p.weights[i] += p.learn_rate * f64(error) * f64(input_vector[i])
 
-def predict_perceptron(p: Perceptron, input_vector: list[i32]) -> i32:
-    weighted_sum: f64 = dot_product(p.weights, input_vector)
+def predict_perceptron(p: Perceptron, input_vector: list[f64]) -> i32:
+    weighted_sum: f64 = 0.0
+    i: i32 = 0
+    for i in range(len(input_vector)):
+        weighted_sum = weighted_sum + p.weights[i] * f64(input_vector[i])
     return activation_function(weighted_sum)
 
 def activation_function(value: f64) -> i32:
@@ -65,14 +89,14 @@ def activation_function(value: f64) -> i32:
         return 1
     return -1
 
-def train_epoch(p: Perceptron, input_vectors: list[list[i32]], outputs: list[i32]):
+def train_epoch(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]):
     i: i32
     for i in range(len(input_vectors)):
-        input_vector: list[i32] = get_inp_vec_with_bias(input_vectors[i])
+        input_vector: list[f64] = get_inp_vec_with_bias(input_vectors[i])
         if predict_perceptron(p, input_vector) != outputs[i]:
             train_perceptron(p, input_vector, outputs[i])
 
-def train_dataset(p: Perceptron, input_vectors: list[list[i32]], outputs: list[i32]):
+def train_dataset(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]):
     p.cur_accuracy = 0.0
     p.epochs_cnt = 0
     while p.cur_accuracy < p.des_accuracy and p.epochs_cnt < p.iterations_limit:
@@ -80,11 +104,11 @@ def train_dataset(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i
         train_epoch(p, input_vectors, outputs)
         p.cur_accuracy = test_perceptron(p, input_vectors, outputs)
 
-def test_perceptron(p: Perceptron, input_vectors: list[list[i32]], outputs: list[i32]) -> f64:
+def test_perceptron(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]) -> f64:
     correctly_classified_cnt: i32 = 0
     i: i32
     for i in range(len(input_vectors)):
-        input_vector: list[i32] = get_inp_vec_with_bias(input_vectors[i])
+        input_vector: list[f64] = get_inp_vec_with_bias(input_vectors[i])
         if predict_perceptron(p, input_vector) == outputs[i]:
             correctly_classified_cnt += 1
     return (correctly_classified_cnt / len(input_vectors)) * 100.0
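Note on PATCH 07: normalize() is a linear rescale from [leftMin, leftMax]
onto [rightMin, rightMax]; normalize_input_vectors() applies it column-wise
with each column's min/max so every feature lands in [-1, 1]. A worked check
in plain Python:

    def normalize(value, left_min, left_max, right_min, right_max):
        scaled = (value - left_min) / (left_max - left_min)  # map into [0, 1]
        return right_min + scaled * (right_max - right_min)  # map into target

    print(normalize(5.0, 0.0, 10.0, -1.0, 1.0))   # 0.0: midpoint -> midpoint
    print(normalize(10.0, 0.0, 10.0, -1.0, 1.0))  # 1.0: maximum -> maximum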
From 4f5d2bc7610541ce53066d85be674c625abda6b9 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 02:11:26 +0530
Subject: [PATCH 08/17] Support importing StructType

---
 src/lpython/semantics/python_ast_to_asr.cpp | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/src/lpython/semantics/python_ast_to_asr.cpp b/src/lpython/semantics/python_ast_to_asr.cpp
index 4163413d11..ac1801eb2c 100644
--- a/src/lpython/semantics/python_ast_to_asr.cpp
+++ b/src/lpython/semantics/python_ast_to_asr.cpp
@@ -454,6 +454,22 @@ ASR::symbol_t* import_from_module(Allocator &al, ASR::Module_t *m, SymbolTable *
             ASR::accessType::Public
             );
         return ASR::down_cast<ASR::symbol_t>(fn);
+    } else if (ASR::is_a<ASR::StructType_t>(*t)) {
+        ASR::StructType_t *st = ASR::down_cast<ASR::StructType_t>(t);
+        // `st` is the StructType in a module. Now we construct
+        // an ExternalSymbol that points to it.
+        Str name;
+        name.from_str(al, new_sym_name);
+        char *cname = name.c_str(al);
+        ASR::asr_t *est = ASR::make_ExternalSymbol_t(
+            al, st->base.base.loc,
+            /* a_symtab */ current_scope,
+            /* a_name */ cname,
+            (ASR::symbol_t*)st,
+            m->m_name, nullptr, 0, st->m_name,
+            ASR::accessType::Public
+            );
+        return ASR::down_cast<ASR::symbol_t>(est);
     } else if (ASR::is_a<ASR::Variable_t>(*t)) {
         ASR::Variable_t *mv = ASR::down_cast<ASR::Variable_t>(t);
         // `mv` is the Variable in a module. Now we construct
@@ -501,7 +517,7 @@ ASR::symbol_t* import_from_module(Allocator &al, ASR::Module_t *m, SymbolTable *
         return import_from_module(al, mt, current_scope, std::string(mt->m_name),
                     cur_sym_name, new_sym_name, loc);
     } else {
-        throw SemanticError("Only Subroutines, Functions, Variables and "
+        throw SemanticError("Only Subroutines, Functions, StructType, Variables and "
             "ExternalSymbol are currently supported in 'import'",
             loc);
     }
     LCOMPILERS_ASSERT(false);

From b2d085359b47560d7e0d7258929b879f4f65799a Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 02:13:31 +0530
Subject: [PATCH 09/17] TEST: Add package test

---
 integration_tests/test_pkg_lnn.py | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)
 create mode 100644 integration_tests/test_pkg_lnn.py

diff --git a/integration_tests/test_pkg_lnn.py b/integration_tests/test_pkg_lnn.py
new file mode 100644
index 0000000000..9f3e4ad3a2
--- /dev/null
+++ b/integration_tests/test_pkg_lnn.py
@@ -0,0 +1,17 @@
+from lnn.perceptron import init_perceptron, print_perceptron, normalize_input_vectors
+
+from lpython import i32, f64
+
+
+
+def main0():
+    print("hi")
+    # p: Perceptron
+    # init_perceptron(p, 5, 0.05, 100000, 90.0)
+    # print_perceptron(p)
+    input_vectors: list[list[f64]] = [[-15.0, -10.0], [-10.0, 10.0], [15.0, -10.0], [10.0, 10.0]]
+    outputs: list[i32] = [1, 1, 1, -1]
+
+    normalize_input_vectors(input_vectors)
+    print(input_vectors)
+main0()
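Note on PATCH 08: with StructType importable, a @dataclass defined in one
module can be named in another; the compiler resolves the import to an
ExternalSymbol that points back at the original StructType. A minimal usage
sketch (the module and names here are hypothetical, not from this test suite):

    # shapes/point.py
    from lpython import dataclass, i32

    @dataclass
    class Point:
        x: i32
        y: i32

    # main.py (in a separate file):
    # from shapes.point import Point   # resolved via an ExternalSymbol
    # p: Point = Point(1, 2)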
From 779ff705734fcd278c03d6c1598fedb6d915e5d8 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 10:07:36 +0530
Subject: [PATCH 10/17] ASR: Fix derived_type pointing outside its symtab

Fixes the error:
ASR verify: Struct::m_derived_type cannot point outside of its symbol table

---
 src/lpython/semantics/python_ast_to_asr.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/lpython/semantics/python_ast_to_asr.cpp b/src/lpython/semantics/python_ast_to_asr.cpp
index ac1801eb2c..60b4cd7438 100644
--- a/src/lpython/semantics/python_ast_to_asr.cpp
+++ b/src/lpython/semantics/python_ast_to_asr.cpp
@@ -958,11 +958,11 @@ class CommonVisitor : public AST::BaseVisitor<Derived> {
         ASR::symbol_t *der_sym = ASRUtils::symbol_get_past_external(s);
         if( der_sym ) {
             if ( ASR::is_a<ASR::StructType_t>(*der_sym) ) {
-                return ASRUtils::TYPE(ASR::make_Struct_t(al, loc, der_sym, dims.p, dims.size()));
+                return ASRUtils::TYPE(ASR::make_Struct_t(al, loc, s, dims.p, dims.size()));
             } else if( ASR::is_a<ASR::EnumType_t>(*der_sym) ) {
-                return ASRUtils::TYPE(ASR::make_Enum_t(al, loc, der_sym, dims.p, dims.size()));
+                return ASRUtils::TYPE(ASR::make_Enum_t(al, loc, s, dims.p, dims.size()));
             } else if( ASR::is_a<ASR::UnionType_t>(*der_sym) ) {
-                return ASRUtils::TYPE(ASR::make_Union_t(al, loc, der_sym, dims.p, dims.size()));
+                return ASRUtils::TYPE(ASR::make_Union_t(al, loc, s, dims.p, dims.size()));
             }
         }

From a507c7fd52a4a83ba62ed770ffe104b82137a294 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 10:10:41 +0530
Subject: [PATCH 11/17] TEST: Add assert and finalize test_pkg_lnn.py

---
 integration_tests/lnn/perceptron/__init__.py |  2 +-
 integration_tests/test_pkg_lnn.py            | 23 +++++++++++---------
 2 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/integration_tests/lnn/perceptron/__init__.py b/integration_tests/lnn/perceptron/__init__.py
index 8941bd659e..a88f2db560 100644
--- a/integration_tests/lnn/perceptron/__init__.py
+++ b/integration_tests/lnn/perceptron/__init__.py
@@ -1 +1 @@
-from .perceptron_main import init_perceptron, train_dataset, test_perceptron, normalize_input_vectors, print_perceptron
+from .perceptron_main import init_perceptron, train_dataset, test_perceptron, normalize_input_vectors, print_perceptron, Perceptron
diff --git a/integration_tests/test_pkg_lnn.py b/integration_tests/test_pkg_lnn.py
index 9f3e4ad3a2..c778eb34b5 100644
--- a/integration_tests/test_pkg_lnn.py
+++ b/integration_tests/test_pkg_lnn.py
@@ -1,17 +1,20 @@
-from lnn.perceptron import init_perceptron, print_perceptron, normalize_input_vectors
-
+from lnn.perceptron import init_perceptron, print_perceptron, normalize_input_vectors, Perceptron, train_dataset
 from lpython import i32, f64
 
-
-
 def main0():
-    print("hi")
-    # p: Perceptron
-    # init_perceptron(p, 5, 0.05, 100000, 90.0)
-    # print_perceptron(p)
-    input_vectors: list[list[f64]] = [[-15.0, -10.0], [-10.0, 10.0], [15.0, -10.0], [10.0, 10.0]]
+    p: Perceptron
+    init_perceptron(p, 2, 0.05, 10000, 90.0)
+    print_perceptron(p)
+    print("=================================")
+
+    input_vectors: list[list[f64]] = [[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0]]
     outputs: list[i32] = [1, 1, 1, -1]
 
     normalize_input_vectors(input_vectors)
-    print(input_vectors)
+    train_dataset(p, input_vectors, outputs)
+    print_perceptron(p)
+
+    assert p.cur_accuracy > 50.0
+    assert p.epochs_cnt > 1
+
 main0()
From 5265a015ab8687503d2621aa3b011cda3b66ea8b Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 11:53:22 +0530
Subject: [PATCH 12/17] lpdraw pkg: flip graph along y-axis

Also draw a pixel only when it lies within the screen bounds.

---
 integration_tests/lpdraw/draw.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/integration_tests/lpdraw/draw.py b/integration_tests/lpdraw/draw.py
index aa74941a71..977b6742fe 100644
--- a/integration_tests/lpdraw/draw.py
+++ b/integration_tests/lpdraw/draw.py
@@ -5,7 +5,8 @@
 W = TypeVar("W")
 
 def Pixel(H: i32, W: i32, Screen: i32[H, W], x: i32, y: i32) -> None:
-    Screen[y, x] = 255
+    if x >= 0 and y >= 0 and x < W and y < H:
+        Screen[H - 1 - y, x] = 255
 
 def Clear(H: i32, W: i32, Screen: i32[H, W]):
     i: i32

From ca9af5133362afa801f9ade61cb1d34f23b04c80 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 11:53:54 +0530
Subject: [PATCH 13/17] TEST: Plot results in test_pkg_lnn.py

---
 integration_tests/test_pkg_lnn.py | 52 ++++++++++++++++++++++++++++++-
 1 file changed, 51 insertions(+), 1 deletion(-)

diff --git a/integration_tests/test_pkg_lnn.py b/integration_tests/test_pkg_lnn.py
index c778eb34b5..be8e72b2be 100644
--- a/integration_tests/test_pkg_lnn.py
+++ b/integration_tests/test_pkg_lnn.py
@@ -1,5 +1,53 @@
 from lnn.perceptron import init_perceptron, print_perceptron, normalize_input_vectors, Perceptron, train_dataset
-from lpython import i32, f64
+from lpdraw import Line, Circle, Display, Clear
+from lpython import i32, f64, Const
+from numpy import empty, int32
+
+
+def compute_decision_boundary(p: Perceptron, x: f64) -> f64:
+    bias: f64 = p.weights[-1]
+    slope: f64 = (-p.weights[0] / p.weights[1])
+    intercept: f64 = (-bias / p.weights[1])
+    return slope * x + intercept
+
+def plot_graph(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]):
+    Width: Const[i32] = 500 # x-axis limits [0, 499]
+    Height: Const[i32] = 500 # y-axis limits [0, 499]
+    Screen: i32[Height, Width] = empty((Height, Width), dtype=int32)
+    Clear(Height, Width, Screen)
+
+    x1: f64 = 2.0
+    y1: f64 = compute_decision_boundary(p, x1)
+    x2: f64 = -2.0
+    y2: f64 = compute_decision_boundary(p, x2)
+
+    # center the graph using the following offset
+    scale_offset: f64 = Width / 4
+    shift_offset: f64 = Width / 2
+    x1 *= scale_offset
+    y1 *= scale_offset
+    x2 *= scale_offset
+    y2 *= scale_offset
+
+    # print (x1, y1, x2, y2)
+    Line(Height, Width, Screen, i32(x1 + shift_offset), i32(y1 + shift_offset), i32(x2 + shift_offset), i32(y2 + shift_offset))
+
+    i: i32
+    point_size: i32 = 5
+    for i in range(len(input_vectors)):
+        input_vectors[i][0] *= scale_offset
+        input_vectors[i][1] *= scale_offset
+        input_vectors[i][0] += shift_offset
+        input_vectors[i][1] += shift_offset
+        if outputs[i] == 1:
+            x: i32 = i32(input_vectors[i][0])
+            y: i32 = i32(input_vectors[i][1])
+            Line(Height, Width, Screen, x - point_size, y, x + point_size, y)
+            Line(Height, Width, Screen, x, y - point_size, x, y + point_size)
+        else:
+            Circle(Height, Width, Screen, i32(input_vectors[i][0]), i32(input_vectors[i][1]), f64(point_size))
+
+    Display(Height, Width, Screen)
 
 def main0():
     p: Perceptron
@@ -17,4 +65,6 @@ def main0():
     assert p.cur_accuracy > 50.0
     assert p.epochs_cnt > 1
 
+    plot_graph(p, input_vectors, outputs)
+
 main0()
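Note on PATCH 13: compute_decision_boundary() comes from solving the
separating line w0*x + w1*y + bias = 0 for y, which gives
y = (-w0/w1)*x + (-bias/w1). A quick numeric check in plain Python (the
weights below are made up, not trained values):

    w0, w1, bias = 0.1, -0.2, 0.05   # hypothetical trained weights; bias is last

    def boundary_y(x):
        return (-w0 / w1) * x + (-bias / w1)

    print(boundary_y(2.0))   # 0.5 * 2.0 + 0.25 = 1.25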
From f8fe29668582fcea4ba5fc197d12c11a60b134d0 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 12:16:31 +0530
Subject: [PATCH 14/17] TEST: Make test case work with both lpython and python

---
 integration_tests/lnn/perceptron/perceptron_main.py | 4 ++--
 integration_tests/lpdraw/draw.py                    | 2 +-
 integration_tests/test_pkg_lnn.py                   | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/integration_tests/lnn/perceptron/perceptron_main.py b/integration_tests/lnn/perceptron/perceptron_main.py
index fadb6a7a44..2e7ecbd73a 100644
--- a/integration_tests/lnn/perceptron/perceptron_main.py
+++ b/integration_tests/lnn/perceptron/perceptron_main.py
@@ -43,7 +43,7 @@ def normalize_input_vectors(input_vectors: list[list[f64]]):
 
 def get_inp_vec_with_bias(a: list[f64]) -> list[f64]:
-    b: list[f64]
+    b: list[f64] = []
     i: i32
     for i in range(len(a)):
         b.append(a[i])
@@ -51,7 +51,7 @@ def get_inp_vec_with_bias(a: list[f64]) -> list[f64]:
     return b
 
 def init_weights(size: i32) -> list[f64]:
-    weights: list[f64]
+    weights: list[f64] = []
     i: i32
     for i in range(size):
         weights.append(0.0)
diff --git a/integration_tests/lpdraw/draw.py b/integration_tests/lpdraw/draw.py
index 977b6742fe..5ea4b67c84 100644
--- a/integration_tests/lpdraw/draw.py
+++ b/integration_tests/lpdraw/draw.py
@@ -6,7 +6,7 @@
 
 def Pixel(H: i32, W: i32, Screen: i32[H, W], x: i32, y: i32) -> None:
     if x >= 0 and y >= 0 and x < W and y < H:
-        Screen[H - 1 - y, x] = 255
+        Screen[i32(int(H - 1 - y)), i32(int(x))] = 255
 
 def Clear(H: i32, W: i32, Screen: i32[H, W]):
     i: i32
diff --git a/integration_tests/test_pkg_lnn.py b/integration_tests/test_pkg_lnn.py
index be8e72b2be..63dee7eb24 100644
--- a/integration_tests/test_pkg_lnn.py
+++ b/integration_tests/test_pkg_lnn.py
@@ -50,7 +50,7 @@ def plot_graph(p: Perceptron, input_vectors: list[list[f64]], outputs: list[i32]
     Display(Height, Width, Screen)
 
 def main0():
-    p: Perceptron
+    p: Perceptron = Perceptron(0, [0.0], 0.0, 0, 0.0, 0.0, 0)
     init_perceptron(p, 2, 0.05, 10000, 90.0)
     print_perceptron(p)
     print("=================================")

From 6b4bf077d5dc313f9c622645978f8c38e405c690 Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 12:49:31 +0530
Subject: [PATCH 15/17] TEST: Add another test

---
 integration_tests/test_pkg_lnn.py | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/integration_tests/test_pkg_lnn.py b/integration_tests/test_pkg_lnn.py
index 63dee7eb24..151e4ab2dc 100644
--- a/integration_tests/test_pkg_lnn.py
+++ b/integration_tests/test_pkg_lnn.py
@@ -67,4 +67,23 @@ def main0():
 
     plot_graph(p, input_vectors, outputs)
 
+def main1():
+    p: Perceptron = Perceptron(0, [0.0], 0.0, 0, 0.0, 0.0, 0)
+    init_perceptron(p, 2, 0.05, 10000, 90.0)
+    print_perceptron(p)
+    print("=================================")
+
+    input_vectors: list[list[f64]] = [[-1.0, -1.0], [-1.0, 1.0], [1.0, -1.0], [1.0, 1.0], [1.5, 1.0]]
+    outputs: list[i32] = [1, 1, -1, 1, -1]
+
+    normalize_input_vectors(input_vectors)
+    train_dataset(p, input_vectors, outputs)
+    print_perceptron(p)
+
+    assert p.cur_accuracy > 50.0
+    assert p.epochs_cnt > 1
+
+    plot_graph(p, input_vectors, outputs)
+
 main0()
+main1()

From 1feb3bb4f19baaf7d0d9b689a158f20a4414f2ed Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 12:55:19 +0530
Subject: [PATCH 16/17] TEST: Enable added test case

---
 integration_tests/CMakeLists.txt | 1 +
 1 file changed, 1 insertion(+)

diff --git a/integration_tests/CMakeLists.txt b/integration_tests/CMakeLists.txt
index 0dbcdc71bf..3b5d8edce3 100644
--- a/integration_tests/CMakeLists.txt
+++ b/integration_tests/CMakeLists.txt
@@ -419,6 +419,7 @@ RUN(NAME str_to_list_cast LABELS cpython llvm c)
 
 RUN(NAME test_package_01 LABELS cpython llvm)
 RUN(NAME test_pkg_lpdraw LABELS cpython llvm wasm)
+RUN(NAME test_pkg_lnn LABELS cpython llvm)
 
 RUN(NAME generics_01 LABELS cpython llvm c)
 RUN(NAME generics_02 LABELS cpython llvm c)
From fb669ddc273326be0033d547d337a95bbf7dde3a Mon Sep 17 00:00:00 2001
From: Shaikh Ubaid
Date: Thu, 20 Apr 2023 20:04:57 +0530
Subject: [PATCH 17/17] PASS: Refactor: Rename to
 update_args_for_pass_arr_by_data_funcs_passed_as_callback()

---
 src/libasr/pass/pass_array_by_data.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/libasr/pass/pass_array_by_data.cpp b/src/libasr/pass/pass_array_by_data.cpp
index 95802c406d..6840841225 100644
--- a/src/libasr/pass/pass_array_by_data.cpp
+++ b/src/libasr/pass/pass_array_by_data.cpp
@@ -359,7 +359,7 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedureCallsVisitor>
 
     template <typename T>
-    void check_and_update_args_for_pass_arr_by_data_passed_as_callback(const T& x) {
+    void update_args_for_pass_arr_by_data_funcs_passed_as_callback(const T& x) {
         bool args_updated = false;
         Vec<ASR::call_arg_t> new_args;
         new_args.reserve(al, x.n_args);
@@ -420,7 +420,7 @@ class EditProcedureCallsVisitor : public ASR::ASRPassBaseWalkVisitor<EditProcedureCallsVisitor>
         bool is_external = ASR::is_a<ASR::ExternalSymbol_t>(*subrout_sym);
         subrout_sym = ASRUtils::symbol_get_past_external(subrout_sym);
         if( v.proc2newproc.find(subrout_sym) == v.proc2newproc.end() ) {
-            check_and_update_args_for_pass_arr_by_data_passed_as_callback(x);
+            update_args_for_pass_arr_by_data_funcs_passed_as_callback(x);
             return;
         }