Skip to content
This repository was archived by the owner on Aug 15, 2019. It is now read-only.

Centralize precision in tests - remove explicit precision from tests unless needed #157

Merged
merged 1 commit into from
Sep 27, 2017
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 13 additions & 15 deletions src/data/dataset_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -55,49 +55,47 @@ describe('Dataset', () => {

test_util.expectArraysClose(
new Float32Array([0, 0, 0, 1, .25, .75]),
normalizedInputs[0].getValues(), 1e-5);
normalizedInputs[0].getValues());
test_util.expectArraysClose(
new Float32Array([1 / 3, 1 / 3, 1 / 3, 2 / 3, .75, .5]),
normalizedInputs[1].getValues(), 1e-5);
normalizedInputs[1].getValues());
test_util.expectArraysClose(
new Float32Array([2 / 3, 2 / 3, 2 / 3, 1 / 3, 0, 0]),
normalizedInputs[2].getValues(), 1e-5);
normalizedInputs[2].getValues());
test_util.expectArraysClose(
new Float32Array([1, 1, 1, 0, 1, 1]), normalizedInputs[3].getValues(),
1e-5);
new Float32Array([1, 1, 1, 0, 1, 1]), normalizedInputs[3].getValues());

dataset.normalizeWithinBounds(dataIndex, -1, 1);

normalizedInputs = dataset.getData()[0];

test_util.expectArraysClose(
new Float32Array([-1, -1, -1, 1, -.5, .5]),
normalizedInputs[0].getValues(), 1e-5);
normalizedInputs[0].getValues());
test_util.expectArraysClose(
new Float32Array([-1 / 3, -1 / 3, -1 / 3, 1 / 3, .5, .0]),
normalizedInputs[1].getValues(), 1e-5);
normalizedInputs[1].getValues());
test_util.expectArraysClose(
new Float32Array([1 / 3, 1 / 3, 1 / 3, -1 / 3, -1, -1]),
normalizedInputs[2].getValues(), 1e-5);
normalizedInputs[2].getValues());
test_util.expectArraysClose(
new Float32Array([1, 1, 1, -1, 1, 1]), normalizedInputs[3].getValues(),
1e-5);
new Float32Array([1, 1, 1, -1, 1, 1]), normalizedInputs[3].getValues());

dataset.removeNormalization(dataIndex);

normalizedInputs = dataset.getData()[0];

test_util.expectArraysClose(
new Float32Array([1, 2, 10, -1, -2, .75]),
normalizedInputs[0].getValues(), 1e-5);
normalizedInputs[0].getValues());
test_util.expectArraysClose(
new Float32Array([2, 3, 20, -2, 2, .5]),
normalizedInputs[1].getValues(), 1e-5);
normalizedInputs[1].getValues());
test_util.expectArraysClose(
new Float32Array([3, 4, 30, -3, -4, 0]),
normalizedInputs[2].getValues(), 1e-5);
normalizedInputs[2].getValues());
test_util.expectArraysClose(
new Float32Array([4, 5, 40, -4, 4, 1]), normalizedInputs[3].getValues(),
1e-5);
new Float32Array([4, 5, 40, -4, 4, 1]),
normalizedInputs[3].getValues());
});
});
6 changes: 2 additions & 4 deletions src/graph/ops/max_pool_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -105,8 +105,7 @@ describe('Max pool', () => {
const y = activations.get(yTensor);
const expectedResult =
Array3D.new([2, 2, 2], [6, 66, 8, 88, 14, 140, 16, 160]);
test_util.expectArraysClose(
y.getValues(), expectedResult.getValues(), 1e-6);
test_util.expectArraysClose(y.getValues(), expectedResult.getValues());
});

it('MaxPool depth = 2, with some negative numbers', () => {
Expand All @@ -133,8 +132,7 @@ describe('Max pool', () => {
const expectedResult =
Array3D.new([2, 2, 2], [6, 55, 8, 88, 14, 140, 16, 150]);

test_util.expectArraysClose(
y.getValues(), expectedResult.getValues(), 1e-6);
test_util.expectArraysClose(y.getValues(), expectedResult.getValues());
});

it('MaxPool downsampling depth is preserved', () => {
Expand Down
43 changes: 20 additions & 23 deletions src/graph/session_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -19,14 +19,14 @@ import {InputProvider} from '../data/input_provider';
import {NDArrayMathCPU} from '../math/math_cpu';
import {NDArrayMathGPU} from '../math/math_gpu';
import {Array1D, NDArray, Scalar} from '../math/ndarray';
import * as test_util from '../test_util';

import {Graph, Tensor} from './graph';
import {AdagradOptimizer} from './optimizers/adagrad_optimizer';
import {MomentumOptimizer} from './optimizers/momentum_optimizer';
import {RMSPropOptimizer} from './optimizers/rmsprop_optimizer';
import {FeedDictionary, FeedEntry, Session} from './session';
import {SGDOptimizer} from './optimizers/sgd_optimizer';
import * as test_util from '../test_util';
import {FeedDictionary, FeedEntry, Session} from './session';


describe('FeedDictionary', () => {
Expand Down Expand Up @@ -95,7 +95,7 @@ describe('Session', () => {
const session = new Session(g, new NDArrayMathCPU());
const yVal = session.eval(y, [{tensor: x, data: Array1D.new([5, 4])}]);
const expected = new Float32Array([28, 19]);
test_util.expectArraysClose(yVal.getValues(), expected, 1e-5);
test_util.expectArraysClose(yVal.getValues(), expected);
});

it('y=x^2 + 3: GPU', () => {
Expand All @@ -107,7 +107,7 @@ describe('Session', () => {
math.scope(() => {
const yVal = session.eval(y, [{tensor: x, data: Array1D.new([5, 4])}]);
const expected = new Float32Array([28, 19]);
test_util.expectArraysClose(yVal.getValues(), expected, 1e-5);
test_util.expectArraysClose(yVal.getValues(), expected);
});
});

Expand All @@ -122,7 +122,7 @@ describe('Session', () => {
const yVal =
session.eval(y, [{tensor: xSquared, data: Array1D.new([25, 16])}]);
const expected = new Float32Array([28, 19]);
test_util.expectArraysClose(yVal.getValues(), expected, 1e-5);
test_util.expectArraysClose(yVal.getValues(), expected);
});
});

Expand All @@ -139,8 +139,8 @@ describe('Session', () => {
session.evalAll([y, z], [{tensor: x, data: Array1D.new([5, 4])}]);
const expectedY = new Float32Array([28, 19]);
const expectedZ = new Float32Array([27, 18]);
test_util.expectArraysClose(result[0].getValues(), expectedY, 1e-5);
test_util.expectArraysClose(result[1].getValues(), expectedZ, 1e-5);
test_util.expectArraysClose(result[0].getValues(), expectedY);
test_util.expectArraysClose(result[1].getValues(), expectedZ);
});
});

Expand All @@ -155,11 +155,11 @@ describe('Session', () => {
math.scope(() => {
const result1 = session.eval(y, [{tensor: x, data: Array1D.new([5, 4])}]);
const expectedY = new Float32Array([30, 20]);
test_util.expectArraysClose(result1.getValues(), expectedY, 1e-5);
test_util.expectArraysClose(result1.getValues(), expectedY);

const result2 = session.eval(z, [{tensor: x, data: Array1D.new([5, 4])}]);
const expectedZ = new Float32Array([31, 21]);
test_util.expectArraysClose(result2.getValues(), expectedZ, 1e-5);
test_util.expectArraysClose(result2.getValues(), expectedZ);
});
});

Expand Down Expand Up @@ -214,7 +214,7 @@ describe('Session', () => {
// dw/dx = [2*x_1 + 1, 2*x_2 + 1]
session.train(w, [{tensor: x, data: inputProvider}], 1, optimizer);
const dwdx = session.gradientArrayMap.get(x).getValues();
test_util.expectArraysClose(dwdx, new Float32Array([5, 9]), 1e-5);
test_util.expectArraysClose(dwdx, new Float32Array([5, 9]));
});

it('Specify which variables to update (var_list)', () => {
Expand Down Expand Up @@ -248,8 +248,8 @@ describe('Session', () => {
const b0After1 = session.activationArrayMap.get(b0).getValues();
const b1After1 = session.activationArrayMap.get(b1).getValues();

test_util.expectArraysClose(b0After1, new Float32Array([-0.8, -1.6]), 1e-5);
test_util.expectArraysClose(b1After1, new Float32Array([0, 0]), 1e-5);
test_util.expectArraysClose(b0After1, new Float32Array([-0.8, -1.6]));
test_util.expectArraysClose(b1After1, new Float32Array([0, 0]));

// Update both b0 and b1
const optimizerAll = new SGDOptimizer(0.1);
Expand Down Expand Up @@ -283,7 +283,7 @@ describe('Session', () => {
math.scope(() => {
const yVal = session.eval(y, [{tensor: x, data: Array1D.new([5, 4])}]);
const expected = new Float32Array([25, 16]);
test_util.expectArraysClose(yVal.getValues(), expected, 1e-5);
test_util.expectArraysClose(yVal.getValues(), expected);
});
});

Expand All @@ -309,7 +309,7 @@ describe('Session', () => {
// dw/dx = [2*x_1 + 1, 2*x_2 + 1]
session.train(w, [{tensor: x, data: inputProvider}], 1, optimizer);
const dwdx = session.gradientArrayMap.get(x).getValues();
test_util.expectArraysClose(dwdx, new Float32Array([5, 9]), 1e-5);
test_util.expectArraysClose(dwdx, new Float32Array([5, 9]));
});
});

Expand Down Expand Up @@ -337,13 +337,13 @@ describe('Session', () => {
// w = [ w_old - lr*vel_w1, w_old - lr*vel_w2] = [-0.2, -0.4]
session.train(y, [{tensor: x, data: inputProvider}], 1, optimizer);
const dydw = session.activationArrayMap.get(w).getValues();
test_util.expectArraysClose(dydw, new Float32Array([-.2, -0.4]), 1e-5);
test_util.expectArraysClose(dydw, new Float32Array([-.2, -0.4]));
// velocity_w = [momentum* old_vel_w1 + x_1,
// momentum* old_vel_w2 + x_2] = [3,6]
// w = [ w_old - lr*vel_w1, w_old - lr*vel_w2] = [-0.5, -1.0]
session.train(y, [{tensor: x, data: inputProvider}], 1, optimizer);
const dydw2 = session.activationArrayMap.get(w).getValues();
test_util.expectArraysClose(dydw2, new Float32Array([-.5, -1.0]), 2e-5);
test_util.expectArraysClose(dydw2, new Float32Array([-.5, -1.0]));
});
});

Expand Down Expand Up @@ -374,16 +374,15 @@ describe('Session', () => {
// = [-0.1, -0.1]
session.train(y, [{tensor: x, data: inputProvider}], 1, optimizer);
const dydw = session.activationArrayMap.get(w).getValues();
test_util.expectArraysClose(dydw, new Float32Array([-.1, -0.1]), 1e-5);
test_util.expectArraysClose(dydw, new Float32Array([-.1, -0.1]));
// cache = [old_cache_w1 + grad_w1**2,
// old_cache_w2 + grad_w2**2] = [4,16]
// w = [ w1_old - lr*grad_w1/sqrt(cache_w1 + eps),
// w2_old - lr*grad_w2/sqrt(cache_w2 + eps)]
// = [-0.1707, -0.1707]
session.train(y, [{tensor: x, data: inputProvider}], 1, optimizer);
const dydw2 = session.activationArrayMap.get(w).getValues();
test_util.expectArraysClose(
dydw2, new Float32Array([-.1707, -.1707]), 2e-5);
test_util.expectArraysClose(dydw2, new Float32Array([-.1707, -.1707]));
});
});

Expand Down Expand Up @@ -413,8 +412,7 @@ describe('Session', () => {
// = [-0.2236, -0.2236]
session.train(y, [{tensor: x, data: inputProvider}], 1, optimizer);
const dydw = session.activationArrayMap.get(w).getValues();
test_util.expectArraysClose(
dydw, new Float32Array([-.2236, -0.2236]), 1e-5);
test_util.expectArraysClose(dydw, new Float32Array([-.2236, -0.2236]));
// cache = [gamma*old_cache_w1 + (1-gamma)*grad_w1**2,
// gamma*old_cache_w2 + (1-gamma)*grad_w2**2]
// = [1.44, 5.76]
Expand All @@ -423,8 +421,7 @@ describe('Session', () => {
// = [-.39027, -.39027]
session.train(y, [{tensor: x, data: inputProvider}], 1, optimizer);
const dydw2 = session.activationArrayMap.get(w).getValues();
test_util.expectArraysClose(
dydw2, new Float32Array([-.39027, -.39027]), 2e-5);
test_util.expectArraysClose(dydw2, new Float32Array([-.39027, -.39027]));
});
});

Expand Down
32 changes: 12 additions & 20 deletions src/math/math_cpu_test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ describe('NDArrayMathCPU slice2D', () => {
const aValues = a.getValues();
const expected =
new Float32Array([aValues[0], aValues[1], aValues[10], aValues[11]]);
test_util.expectArraysClose(b.getValues(), expected, 0);
test_util.expectArraysClose(b.getValues(), expected);
});

it('returns the rectangle specified', () => {
Expand Down Expand Up @@ -863,9 +863,9 @@ describe('NDArrayMathCPU argmin/max, argmaxequals, min/max', () => {
it('topk', () => {
const topk = math.topK(Array1D.new([1, -1, 100, -5, -10.6, 3.3, 5]), 3);
test_util.expectArraysClose(
topk.values.getValues(), new Float32Array([100, 5, 3.3]), 1e-6);
topk.values.getValues(), new Float32Array([100, 5, 3.3]));
test_util.expectArraysClose(
topk.indices.getValues(), new Float32Array([2, 6, 5]), 1e-6);
topk.indices.getValues(), new Float32Array([2, 6, 5]));
});

it('Arg min', () => {
Expand Down Expand Up @@ -1560,16 +1560,15 @@ describe('NDArrayMathCPU resizeBilinear', () => {

test_util.expectArraysClose(
output.getValues(),
new Float32Array([2, 2, 2, 10 / 3, 10 / 3, 10 / 3, 4, 4, 4]), 1e-4);
new Float32Array([2, 2, 2, 10 / 3, 10 / 3, 10 / 3, 4, 4, 4]));
});

it('simple alignCorners=true', () => {
const input = Array3D.new([2, 2, 1], [2, 2, 4, 4]);
const output = math.resizeBilinear3D(input, [3, 3], true);

test_util.expectArraysClose(
output.getValues(), new Float32Array([2, 2, 2, 3, 3, 3, 4, 4, 4]),
1e-4);
output.getValues(), new Float32Array([2, 2, 2, 3, 3, 3, 4, 4, 4]));
});

it('matches tensorflow w/ random numbers alignCorners=false', () => {
Expand All @@ -1589,8 +1588,7 @@ describe('NDArrayMathCPU resizeBilinear', () => {
0.69152176, 0.44905344, 1.07186723, 0.03823943, 1.19864893,
0.6183514, 3.49600649, 1.50272655, 1.73724651, 1.68149579,
0.69152176, 0.44905344, 1.07186723, 0.03823943, 1.19864893
]),
1e-4);
]));
});

it('matches tensorflow w/ random numbers alignCorners=true', () => {
Expand All @@ -1610,8 +1608,7 @@ describe('NDArrayMathCPU resizeBilinear', () => {
1.70539713, 1.3923912, 1.68282723, 1.54382229, 1.66025746,
1.62451875, 1.83673346, 1.38198328, 1.92833281, 1.13944793,
2.01993227, 1.57932377, 2.34758639, 2.01919961, 2.67524052
]),
1e-4);
]));
});
});

Expand Down Expand Up @@ -1640,8 +1637,7 @@ describe('NDArrayMathCPU batchNorm', () => {
Math.sqrt(variance.get(0) + varianceEpsilon),
(x.get(1, 0, 1) - mean.get(1)) * 1 /
Math.sqrt(variance.get(1) + varianceEpsilon)
]),
1e-6);
]));
});

it('simple batchnorm, no offset, 2x1x2', () => {
Expand All @@ -1664,8 +1660,7 @@ describe('NDArrayMathCPU batchNorm', () => {
Math.sqrt(variance.get(0) + varianceEpsilon),
(x.get(1, 0, 1) - mean.get(1)) * scale.get(1) /
Math.sqrt(variance.get(1) + varianceEpsilon)
]),
1e-6);
]));
});

it('simple batchnorm, no scale, 2x1x2', () => {
Expand Down Expand Up @@ -1693,8 +1688,7 @@ describe('NDArrayMathCPU batchNorm', () => {
offset.get(1) +
(x.get(1, 0, 1) - mean.get(1)) * 1 /
Math.sqrt(variance.get(1) + varianceEpsilon)
]),
1e-6);
]));
});

it('simple batchnorm, 2x1x2', () => {
Expand Down Expand Up @@ -1723,8 +1717,7 @@ describe('NDArrayMathCPU batchNorm', () => {
offset.get(1) +
(x.get(1, 0, 1) - mean.get(1)) * scale.get(1) /
Math.sqrt(variance.get(1) + varianceEpsilon)
]),
1e-6);
]));
});

it('batchnorm matches tensorflow, 2x3x3', () => {
Expand All @@ -1750,7 +1743,6 @@ describe('NDArrayMathCPU batchNorm', () => {
1.52106473, -0.07704776, 0.26144429, 1.28010017, -1.14422404,
-1.15776136, 1.15425493, 1.82644104, -0.52249442, 1.04803919,
0.74932291, 0.40568101, 1.2844412
]),
1e-5);
]));
});
});
Loading