Commit 09f6252

Merge pull request #10 from kvedala/machine_learning/adaline
[fix] Updates to Machine learning/adaline
2 parents 48e24af + 5e6c374 commit 09f6252

machine_learning/adaline_learning.cpp

Lines changed: 78 additions & 12 deletions
@@ -26,6 +26,7 @@
 #include <cstdlib>
 #include <ctime>
 #include <iostream>
+#include <numeric>
 #include <vector>

 #define MAX_ITER 500  // INT_MAX  ///< Maximum number of iterations to learn
@@ -52,8 +53,8 @@ class adaline {
             1);  // additional weight is for the constant bias term

         // initialize with random weights in the range [-50, 49]
-        for (int i = 0; i < weights.size(); i++)
-            weights[i] = (static_cast<double>(std::rand() % 100) - 50);
+        for (int i = 0; i < weights.size(); i++) weights[i] = 1.f;
+        // weights[i] = (static_cast<double>(std::rand() % 100) - 50);
     }

     /**
@@ -73,17 +74,23 @@ class adaline {
     /**
      * predict the output of the model for given set of features
      * \param[in] x input vector
+     * \param[out] out optional argument to return neuron output before applying
+     * activation function (optional, `nullptr` to ignore)
      * \returns model prediction output
      */
-    int predict(const std::vector<double> &x) {
+    int predict(const std::vector<double> &x, double *out = nullptr) {
         if (!check_size_match(x))
             return 0;

         double y = weights.back();  // assign bias value

-        for (int i = 0; i < x.size(); i++) y += x[i] * weights[i];
+        // for (int i = 0; i < x.size(); i++) y += x[i] * weights[i];
+        y = std::inner_product(x.begin(), x.end(), weights.begin(), y);

-        return y >= 0 ? 1 : -1;  // quantizer: apply ADALINE threshold function
+        if (out != nullptr)  // if out variable is provided
+            *out = y;
+
+        return activation(y);  // quantizer: apply ADALINE threshold function
     }

     /**
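
As an aside (not part of the commit), here is a minimal sketch of the computation the reworked predict() now performs. The weight layout with the bias stored last mirrors the file above; the concrete numbers are made up for illustration:

#include <iostream>
#include <numeric>  // std::inner_product
#include <vector>

int main() {
    std::vector<double> weights = {0.5, -0.25, 1.0};  // {w0, w1, bias} - illustrative values only
    std::vector<double> x = {2.0, 4.0};               // one 2-feature sample

    double y = weights.back();  // start from the bias term
    // accumulate the weighted sum, exactly as the new predict() does
    y = std::inner_product(x.begin(), x.end(), weights.begin(), y);

    int cls = y > 0 ? 1 : -1;  // same thresholding as the new activation()
    std::cout << "linear output: " << y << ", class: " << cls << std::endl;
    return 0;
}

Returning y through the optional out pointer lets callers inspect the pre-threshold neuron output without changing the existing prediction interface.
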
@@ -148,6 +155,8 @@ class adaline {
                   << std::endl;
     }

+    int activation(double x) { return x > 0 ? 1 : -1; }
+
  private:
     /**
      * convenient function to check if input feature vector size matches the
@@ -207,7 +216,7 @@ void test1(double eta = 0.01) {
 
 /**
  * test function to predict points in a 2D coordinate system above the line
- * \f$x+y=-1\f$ as +1 and others as -1.
+ * \f$x+3y=-1\f$ as +1 and others as -1.
  * Note that each point is defined by 2 values or 2 features.
  * The function will create random sample points for training and test purposes.
  * \param[in] eta learning rate (optional, default=0.01)
@@ -220,16 +229,18 @@ void test2(double eta = 0.01) {
     std::vector<double> X[N];
     int Y[N];  // corresponding y-values

-    int range = 500;  // sample points range
-    int range2 = range >> 1;
+    // generate sample points in the interval
+    // [-range2/100 , (range2-1)/100]
+    int range = 500;          // sample points full-range
+    int range2 = range >> 1;  // sample points half-range
     for (int i = 0; i < N; i++) {
         double x0 = ((std::rand() % range) - range2) / 100.f;
         double x1 = ((std::rand() % range) - range2) / 100.f;
         X[i] = {x0, x1};
-        Y[i] = (x0 + x1) > -1 ? 1 : -1;
+        Y[i] = (x0 + 3. * x1) > -1 ? 1 : -1;
     }

-    std::cout << "------- Test 1 -------" << std::endl;
+    std::cout << "------- Test 2 -------" << std::endl;
     std::cout << "Model before fit: " << ada << std::endl;

     ada.fit(X, Y);
@@ -244,7 +255,57 @@ void test2(double eta = 0.01) {
 
         std::cout << "Predict for x=(" << x0 << "," << x1 << "): " << predict;

-        int expected_val = (x0 + x1) > -1 ? 1 : -1;
+        int expected_val = (x0 + 3. * x1) > -1 ? 1 : -1;
+        assert(predict == expected_val);
+        std::cout << " ...passed" << std::endl;
+    }
+}
+
+/**
+ * test function to predict points in a 3D coordinate system lying within the
+ * sphere of radius 1 and centre at origin as +1 and others as -1. Note that
+ * each point is defined by 3 values but we use 6 features. The function will
+ * create random sample points for training and test purposes.
+ * \param[in] eta learning rate (optional, default=0.01)
+ */
+void test3(double eta = 0.01) {
+    adaline ada(6, eta);  // 6 features
+
+    const int N = 100;  // number of sample points
+
+    std::vector<double> X[N];
+    int Y[N];  // corresponding y-values
+
+    // generate sample points in the interval
+    // [-range2/100 , (range2-1)/100]
+    int range = 200;          // sample points full-range
+    int range2 = range >> 1;  // sample points half-range
+    for (int i = 0; i < N; i++) {
+        double x0 = ((std::rand() % range) - range2) / 100.f;
+        double x1 = ((std::rand() % range) - range2) / 100.f;
+        double x2 = ((std::rand() % range) - range2) / 100.f;
+        X[i] = {x0, x1, x2, x0 * x0, x1 * x1, x2 * x2};
+        Y[i] = ((x0 * x0) + (x1 * x1) + (x2 * x2)) <= 1.f ? 1 : -1;
+    }
+
+    std::cout << "------- Test 3 -------" << std::endl;
+    std::cout << "Model before fit: " << ada << std::endl;
+
+    ada.fit(X, Y);
+    std::cout << "Model after fit: " << ada << std::endl;
+
+    int N_test_cases = 5;
+    for (int i = 0; i < N_test_cases; i++) {
+        double x0 = ((std::rand() % range) - range2) / 100.f;
+        double x1 = ((std::rand() % range) - range2) / 100.f;
+        double x2 = ((std::rand() % range) - range2) / 100.f;
+
+        int predict = ada.predict({x0, x1, x2, x0 * x0, x1 * x1, x2 * x2});
+
+        std::cout << "Predict for x=(" << x0 << "," << x1 << "," << x2
+                  << "): " << predict;
+
+        int expected_val = ((x0 * x0) + (x1 * x1) + (x2 * x2)) <= 1.f ? 1 : -1;
         assert(predict == expected_val);
         std::cout << " ...passed" << std::endl;
     }
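
A short note on why the new test3 can work at all (my own gloss, not text from the commit): ADALINE is a linear classifier, so the unit sphere is only separable because each point is lifted into the 6-dimensional feature vector (x0, x1, x2, x0^2, x1^2, x2^2). In that space one exact separator is

    w = (0, 0, 0, -1, -1, -1),  bias = 1,

which gives w·x + bias = 1 - x0^2 - x1^2 - x2^2, non-negative precisely inside the sphere of radius 1. The weights the model actually learns will generally differ, but a solution of this form exists for training to converge towards.
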
@@ -254,7 +315,7 @@ void test2(double eta = 0.01) {
 int main(int argc, char **argv) {
     std::srand(std::time(nullptr));  // initialize random number generator

-    double eta = 0.2;  // default value of eta
+    double eta = 0.1;  // default value of eta
     if (argc == 2)  // read eta value from commandline argument if present
         eta = strtof(argv[1], nullptr);

@@ -265,5 +326,10 @@ int main(int argc, char **argv) {
 
     test2(eta);

+    std::cout << "Press ENTER to continue..." << std::endl;
+    std::cin.get();
+
+    test3(eta);
+
     return 0;
 }
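
To try the change locally (a usage sketch with assumed tooling, not taken from the repository), the file is a single translation unit with its own main(), so a plain C++11 build along these lines should be enough:

    g++ -std=c++11 -O2 machine_learning/adaline_learning.cpp -o adaline
    ./adaline 0.1    # the optional argument overrides the default eta of 0.1

Note that the program now pauses on "Press ENTER to continue..." between test2 and test3, so it expects an interactive terminal.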
