diff --git a/.circleci/config.yml b/.circleci/config.yml index fa6a97669..ba0497c2c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -126,6 +126,7 @@ jobs: command: | mkdir -p ~/workspace/tests docker run --gpus all -v $HOME/workspace/tests:/build/test/logs -it --rm redisai-gpu:latest-x64-bionic-test + no_output_timeout: 30m - store_test_results: path: ~/workspace/tests deploy_package: diff --git a/opt/Makefile b/opt/Makefile index aac388dbd..fb4b4dfca 100755 --- a/opt/Makefile +++ b/opt/Makefile @@ -55,7 +55,7 @@ BINDIR=$(BINROOT)/src # INSTALL_DIR=$(ROOT)/install-$(DEVICE) DEPS_DIR=$(ROOT)/deps/$(OS)-$(ARCH)-$(DEVICE) INSTALL_DIR=$(ROOT)/bin/$(OS)-$(ARCH)-$(DEVICE)/install -REDIS_VALGRID_SUPRESS=./redis_valgrind.sup +REDIS_VALGRID_SUPRESS=$(ROOT)/opt/redis_valgrind.sup TARGET=$(BINDIR)/redisai.so BACKENDS_PATH ?= $(INSTALL_DIR)/backends @@ -147,22 +147,23 @@ ifeq ($(VERBOSE),1) TEST_ARGS += -v endif ifeq ($(TEST),) -TEST=basic_tests.py +TEST= PYDEBUG= else -TEST_ARGS += -s +TEST_ARGS += -s --test $(TEST) PYDEBUG=1 endif -TEST_PREFIX=set -e; cd $(ROOT)/test -TEST_CMD=\ - DEVICE=$(DEVICE) PYDEBUG=$(PYDEBUG) \ - python3 -m RLTest $(TEST_ARGS) --test $(TEST) --module $(INSTALL_DIR)/redisai.so - GEN ?= 1 SLAVES ?= 1 AOF ?= 1 +TEST_PREFIX=set -e; cd $(ROOT)/test +# TODO: --errors-for-leak-kinds=definite +VALGRIND_OPTIONS="--leak-check=full -q --show-reachable=no --show-possibly-lost=no" +TEST_CMD= DEVICE=$(DEVICE) PYDEBUG=$(PYDEBUG) python3 -m RLTest $(TEST_ARGS) --module $(INSTALL_DIR)/redisai.so +VALGRIND_TEST_CMD= DEVICE=$(DEVICE) PYDEBUG=$(PYDEBUG) python3 -m RLTest $(TEST_ARGS) --module $(INSTALL_DIR)/redisai.so --no-output-catch --use-valgrind --vg-no-fail-on-errors --vg-verbose --vg-options $(VALGRIND_OPTIONS) --vg-suppressions $(realpath $(REDIS_VALGRID_SUPRESS)) + test: ifneq ($(NO_LFS),1) $(SHOW)if [ "$(git lfs env > /dev/null 2>&1 ; echo $?)" != "0" ]; then cd $(ROOT); git lfs install; fi @@ -179,6 +180,10 @@ ifeq ($(SLAVES),1) $(SHOW)$(TEST_PREFIX); printf "\nTests with --use-slaves:\n\n" ;\ $(TEST_CMD) --use-slaves endif +ifeq ($(VALGRIND),1) + $(SHOW)$(TEST_PREFIX); printf "\nTests with valgrind:\n\n" ;\ + $(VALGRIND_TEST_CMD) +endif #---------------------------------------------------------------------------------------------- @@ -192,10 +197,7 @@ MODULE_ARGS=\ TF redisai_tensorflow.so VALGRIND_ARGS=\ - --leak-check=full \ - --show-reachable=no \ - --show-possibly-lost=no \ - --leak-check=full \ + $(VALGRIND_OPTIONS) \ --suppressions=$(realpath $(REDIS_VALGRID_SUPRESS)) \ -v redis-server --protected-mode no --save "" --appendonly no diff --git a/opt/redis_valgrind.sup b/opt/redis_valgrind.sup index 3024d63bc..ab43f68fa 100644 --- a/opt/redis_valgrind.sup +++ b/opt/redis_valgrind.sup @@ -1,3 +1,31 @@ +{ + ignore_unversioned_libs + Memcheck:Leak + ... + obj:*/libtensorflow.so.* +} + +{ + ignore_unversioned_libs + Memcheck:Leak + ... + obj:*/libonnxruntime.so.* +} + +{ + ignore_unversioned_libs + Memcheck:Leak + ... + obj:*/libtorch.so.* +} + +{ + ignore_unversioned_libs + Memcheck:Leak + ... 
+ obj:*/libtorch.so* +} + { Memcheck:Cond diff --git a/src/backends/torch.c b/src/backends/torch.c index 176cfb025..f34ce8cda 100644 --- a/src/backends/torch.c +++ b/src/backends/torch.c @@ -61,6 +61,9 @@ RAI_Model *RAI_ModelCreateTorch(RAI_Backend backend, const char* devicestr, } void RAI_ModelFreeTorch(RAI_Model* model, RAI_Error *error) { + if(model->devicestr){ + RedisModule_Free(model->devicestr); + } torchDeallocContext(model->model); } diff --git a/src/redisai.c b/src/redisai.c index c4009e114..64e958f6f 100644 --- a/src/redisai.c +++ b/src/redisai.c @@ -831,9 +831,9 @@ void RedisAI_ReplicateTensorSet(RedisModuleCtx *ctx, RedisModuleString *key, RAI RedisModule_Replicate(ctx, "AI.TENSORSET", "scvcb", key, dtypestr, dims, ndims, "BLOB", data, size); - // for (long long i=0; i= 1) + + +# Ensures the command is sent and the connection is +# forcibly closed right after, without waiting for the +# reply to be parsed. Useful for checking the behaviour +# of commands that run on background threads +def send_and_disconnect(cmd, red): + pool = red.connection_pool + con = pool.get_connection(cmd[0]) + ret = con.send_command(*cmd) + con.disconnect() + return ret + + +def check_cuda(): + return os.system('which nvcc') + + +def info_to_dict(info): + info = [el.decode('utf-8') if type(el) is bytes else el for el in info] + return dict(zip(info[::2], info[1::2])) + + +def load_mobilenet_test_data(): + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + labels_filename = os.path.join(test_data_path, 'imagenet_class_index.json') + image_filename = os.path.join(test_data_path, 'panda.jpg') + model_filename = os.path.join(test_data_path, 'mobilenet_v2_1.4_224_frozen.pb') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(labels_filename, 'r') as f: + labels = json.load(f) + + img_height, img_width = 224, 224 + + img = imread(image_filename) + img = resize(img, (img_height, img_width), mode='constant', anti_aliasing=True) + img = img.astype(np.float32) + + return model_pb, labels, img + + +def run_mobilenet(con, img, input_var, output_var): + time.sleep(0.5 * random.randint(0, 10)) + con.execute_command('AI.TENSORSET', 'input', + 'FLOAT', 1, img.shape[1], img.shape[0], img.shape[2], + 'BLOB', img.tobytes()) + + con.execute_command('AI.MODELRUN', 'mobilenet', + 'INPUTS', 'input', 'OUTPUTS', 'output') + + +def run_test_multiproc(env, n_procs, fn, args=tuple()): + procs = [] + + def tmpfn(): + con = env.getConnection() + fn(con, *args) + return 1 + + for _ in range(n_procs): + p = Process(target=tmpfn) + p.start() + procs.append(p) + + [p.join() for p in procs]
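The two helpers above are the heart of the new suites and compose as follows; a minimal sketch of the pattern (the test name and tensor payload are illustrative, not part of this patch):

def test_sketch_parallel_disconnects(env):
    def fire_and_drop(con):
        # send_and_disconnect never reads the reply; it returns None
        send_and_disconnect(('AI.TENSORSET', 't', 'FLOAT', 2, 'VALUES', 1, 2), con)

    # ten processes, each with its own connection from env.getConnection()
    run_test_multiproc(env, 10, fire_and_drop)

    con = env.getConnection()
    ensureSlaveSynced(con, env)
    # the writes landed even though no client waited for the replies
    env.assertEqual(con.execute_command('AI.TENSORGET', 't', 'VALUES')[-1], [b'1', b'2'])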
diff --git a/test/tests_common.py b/test/tests_common.py new file mode 100644 index 000000000..518dd9630 --- /dev/null +++ b/test/tests_common.py @@ -0,0 +1,191 @@ +import redis + +from includes import * + +''' +python -m RLTest --test tests_common.py --module path/to/redisai.so +''' + + +def test_common_tensorset(env): + con = env.getConnection() + + tested_datatypes = ["FLOAT", "DOUBLE", "INT8", "INT16", "INT32", "INT64", "UINT8", "UINT16"] + for datatype in tested_datatypes: + ret = con.execute_command('AI.TENSORSET', 'tensor_{0}'.format(datatype), datatype, 2, 'VALUES', 1, 1) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + # AI.TENSORGET in BLOB format and set in a new key + for datatype in tested_datatypes: + tensor_dtype, tensor_dim, tensor_blob = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), + 'BLOB') + ret = con.execute_command('AI.TENSORSET', 'tensor_blob_{0}'.format(datatype), datatype, 2, 'BLOB', tensor_blob) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + reply_types = ["META", "VALUES", "BLOB"] + # Confirm that tensor_{0} and tensor_blob_{0} are equal for META, VALUES and BLOB + for datatype in tested_datatypes: + for reply_type in reply_types: + tensor_1_reply = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), reply_type) + tensor_2_reply = con.execute_command('AI.TENSORGET', 'tensor_blob_{0}'.format(datatype), reply_type) + env.assertEqual(tensor_1_reply, tensor_2_reply) + + +def test_common_tensorset_error_replies(env): + con = env.getConnection() + + # ERR invalid argument found in tensor shape (the unsupported data format is parsed as a shape argument) + try: + con.execute_command('AI.TENSORSET', 'z', 'INT32', 2, 'unsupported', 2, 3) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual(exception.__str__(), "invalid argument found in tensor shape") + + # ERR invalid value + try: + con.execute_command('AI.TENSORSET', 'z', 'FLOAT', 2, 'VALUES', 2, 'A') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual(exception.__str__(), "invalid value") + + # ERR invalid value + try: + con.execute_command('AI.TENSORSET', 'z', 'INT32', 2, 'VALUES', 2, 'A') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual(exception.__str__(), "invalid value") + + try: + con.execute_command('AI.TENSORSET', 1) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.TENSORSET', 'y', 'FLOAT') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.TENSORSET', 'y', 'FLOAT', '2') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.TENSORSET', 'y', 'FLOAT', 2, 'VALUES') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.TENSORSET', 'y', 'FLOAT', 2, 'VALUES', 1) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.TENSORSET', 'y', 'FLOAT', 2, 'VALUES', '1') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError)
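The try/except blocks above repeat throughout the new suites, and they pass silently when the command unexpectedly succeeds. A shared helper along these lines could tighten both points (a sketch only; check_error_reply is a hypothetical name, not part of this patch):

def check_error_reply(env, con, expected_msg, *cmd):
    # expected_msg=None asserts only that some ResponseError is raised
    try:
        con.execute_command(*cmd)
        env.assertTrue(False)  # command unexpectedly succeeded
    except redis.exceptions.ResponseError as e:
        if expected_msg is not None:
            env.assertEqual(str(e), expected_msg)

# usage, mirroring the first block above:
# check_error_reply(env, con, "invalid value", 'AI.TENSORSET', 'z', 'FLOAT', 2, 'VALUES', 2, 'A')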
+ + +def test_common_tensorget(env): + con = env.getConnection() + tested_datatypes = ["FLOAT", "DOUBLE", "INT8", "INT16", "INT32", "INT64", "UINT8", "UINT16"] + tested_datatypes_fp = ["FLOAT", "DOUBLE"] + tested_datatypes_int = ["INT8", "INT16", "INT32", "INT64", "UINT8", "UINT16"] + for datatype in tested_datatypes: + ret = con.execute_command('AI.TENSORSET', 'tensor_{0}'.format(datatype), datatype, 2, 'VALUES', 1, 1) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + # AI.TENSORGET in BLOB format and set in a new key + for datatype in tested_datatypes: + tensor_dtype, tensor_dim, tensor_blob = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), + 'BLOB') + ret = con.execute_command('AI.TENSORSET', 'tensor_blob_{0}'.format(datatype), datatype, 2, 'BLOB', tensor_blob) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + reply_types = ["META", "VALUES", "BLOB"] + # Confirm that tensor_{0} and tensor_blob_{0} are equal for META, VALUES and BLOB + for datatype in tested_datatypes: + for reply_type in reply_types: + tensor_1_reply = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), reply_type) + tensor_2_reply = con.execute_command('AI.TENSORGET', 'tensor_blob_{0}'.format(datatype), reply_type) + env.assertEqual(tensor_1_reply, tensor_2_reply) + + # Confirm that the output is as expected for META + for datatype in tested_datatypes: + tensor_dtype, tensor_dim = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), "META") + env.assertEqual(datatype.encode('utf-8'), tensor_dtype) + env.assertEqual([2], tensor_dim) + + # Confirm that the output is as expected for VALUES + for datatype in tested_datatypes: + tensor_dtype, tensor_dim, tensor_values = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), + "VALUES") + env.assertEqual(datatype.encode('utf-8'), tensor_dtype) + env.assertEqual([2], tensor_dim) + if datatype in tested_datatypes_fp: + env.assertEqual([b'1', b'1'], tensor_values) + if datatype in tested_datatypes_int: + env.assertEqual([1, 1], tensor_values) + + # Confirm that the output is as expected for BLOB + for datatype in tested_datatypes: + tensor_dtype, tensor_dim, tensor_blob = con.execute_command('AI.TENSORGET', 'tensor_{0}'.format(datatype), + "BLOB") + env.assertEqual(datatype.encode('utf-8'), tensor_dtype) + env.assertEqual([2], tensor_dim) + + +def test_common_tensorget_error_replies(env): + con = env.getConnection() + + # ERR unsupported data format + try: + ret = con.execute_command('AI.TENSORSET', "T_FLOAT", "FLOAT", 2, 'VALUES', 1, 1) + env.assertEqual(ret, b'OK') + con.execute_command('AI.TENSORGET', 'T_FLOAT', 'unsupported') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual(exception.__str__(), "unsupported data format") + + +def test_common_tensorset_multiproc(env): + run_test_multiproc(env, 10, + lambda con: con.execute_command('AI.TENSORSET', 'x', 'FLOAT', 2, 'VALUES', 2, 3)) + + con = env.getConnection() + ensureSlaveSynced(con, env) + tensor = con.execute_command('AI.TENSORGET', 'x', 'VALUES') + values = tensor[-1] + env.assertEqual(values, [b'2', b'3']) + + +def test_tensorset_disconnect(env): + red = env.getConnection() + ret = send_and_disconnect(('AI.TENSORSET', 't_FLOAT', 'FLOAT', 2, 'VALUES', 2, 3), red) + env.assertEqual(ret, None) + + +def test_tensorget_disconnect(env): + red = env.getConnection() + ret = red.execute_command('AI.TENSORSET', 't_FLOAT', 'FLOAT', 2, 'VALUES', 2, 3) + env.assertEqual(ret, b'OK') + ret = send_and_disconnect(('AI.TENSORGET', 't_FLOAT'), red) + env.assertEqual(ret, None) diff --git a/test/tests_onnx.py b/test/tests_onnx.py new file mode 100644 index 000000000..2996f7145 --- /dev/null +++ b/test/tests_onnx.py @@ -0,0 +1,254 @@ +import sys + +import redis +from includes import * + +''' +python -m RLTest --test tests_onnx.py --module path/to/redisai.so +''' + + +def test_onnx_modelrun_mnist(env): + if not TEST_ONNX: + env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'mnist.onnx') + wrong_model_filename = os.path.join(test_data_path, 'graph.pb') + sample_filename = os.path.join(test_data_path, 'one.raw') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(wrong_model_filename, 'rb') as f: + wrong_model_pb = f.read() + + with open(sample_filename, 'rb') as f:
+ sample_raw = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'ONNX', DEVICE, model_pb) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = con.execute_command('AI.MODELGET', 'm') + env.assertEqual(len(ret), 3) + # TODO: enable me + # env.assertEqual(ret[0], b'ONNX') + # env.assertEqual(ret[1], b'CPU') + + try: + con.execute_command('AI.MODELSET', 'm', 'ONNX', DEVICE, wrong_model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_1', 'ONNX', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_2', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'INPUTS', 'a', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_2', 'INPUTS', 'a', 'b', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_3', 'a', 'b', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'INPUTS', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'INPUTS', 'a', 'OUTPUTS', 'b') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'OUTPUTS', 'b') + + ensureSlaveSynced(con, env) + + tensor = con.execute_command('AI.TENSORGET', 'b', 'VALUES') + values = tensor[-1] + argmax = max(range(len(values)), key=lambda i: values[i]) + + env.assertEqual(argmax, 1) + + if env.useSlaves: + con2 = env.getSlaveConnection() + tensor2 = con2.execute_command('AI.TENSORGET', 'b', 'VALUES') + env.assertEqual(tensor2, tensor) + + +def test_onnx_modelrun_iris(env): + if not TEST_ONNX: + env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + linear_model_filename = os.path.join(test_data_path, 'linear_iris.onnx') + logreg_model_filename = os.path.join(test_data_path, 'logreg_iris.onnx') + + with open(linear_model_filename, 'rb') as f: + linear_model = f.read() + + with open(logreg_model_filename, 'rb') as f: + logreg_model = f.read() + + ret = con.execute_command('AI.MODELSET', 'linear', 'ONNX', DEVICE, linear_model) + env.assertEqual(ret, b'OK') + + ret 
= con.execute_command('AI.MODELSET', 'logreg', 'ONNX', DEVICE, logreg_model) + env.assertEqual(ret, b'OK') + + con.execute_command('AI.TENSORSET', 'features', 'FLOAT', 1, 4, 'VALUES', 5.1, 3.5, 1.4, 0.2) + + ensureSlaveSynced(con, env) + + con.execute_command('AI.MODELRUN', 'linear', 'INPUTS', 'features', 'OUTPUTS', 'linear_out') + con.execute_command('AI.MODELRUN', 'logreg', 'INPUTS', 'features', 'OUTPUTS', 'logreg_out', 'logreg_probs') + + ensureSlaveSynced(con, env) + + linear_out = con.execute_command('AI.TENSORGET', 'linear_out', 'VALUES') + logreg_out = con.execute_command('AI.TENSORGET', 'logreg_out', 'VALUES') + + env.assertEqual(float(linear_out[2][0]), -0.090524077415466309) + env.assertEqual(logreg_out[2][0], 0) + + if env.useSlaves: + con2 = env.getSlaveConnection() + linear_out2 = con2.execute_command('AI.TENSORGET', 'linear_out', 'VALUES') + logreg_out2 = con2.execute_command('AI.TENSORGET', 'logreg_out', 'VALUES') + env.assertEqual(linear_out, linear_out2) + env.assertEqual(logreg_out, logreg_out2) + + +def test_onnx_modelinfo(env): + if not TEST_ONNX: + env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + linear_model_filename = os.path.join(test_data_path, 'linear_iris.onnx') + + with open(linear_model_filename, 'rb') as f: + linear_model = f.read() + + ret = con.execute_command('AI.MODELSET', 'linear', 'ONNX', DEVICE, linear_model) + env.assertEqual(ret, b'OK') + + model_serialized_master = con.execute_command('AI.MODELGET', 'linear') + con.execute_command('AI.TENSORSET', 'features', 'FLOAT', 1, 4, 'VALUES', 5.1, 3.5, 1.4, 0.2) + + ensureSlaveSynced(con, env) + + if env.useSlaves: + con2 = env.getSlaveConnection() + model_serialized_slave = con2.execute_command('AI.MODELGET', 'linear') + env.assertEqual(len(model_serialized_master), len(model_serialized_slave)) + previous_duration = 0 + for call in range(1, 10): + res = con.execute_command('AI.MODELRUN', 'linear', 'INPUTS', 'features', 'OUTPUTS', 'linear_out') + env.assertEqual(res, b'OK') + ensureSlaveSynced(con, env) + + info = con.execute_command('AI.INFO', 'linear') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'linear') + env.assertEqual(info_dict_0['TYPE'], 'MODEL') + env.assertEqual(info_dict_0['BACKEND'], 'ONNX') + env.assertEqual(info_dict_0['DEVICE'], DEVICE) + env.assertTrue(info_dict_0['DURATION'] > previous_duration) + env.assertEqual(info_dict_0['SAMPLES'], call) + env.assertEqual(info_dict_0['CALLS'], call) + env.assertEqual(info_dict_0['ERRORS'], 0) + + previous_duration = info_dict_0['DURATION'] + + res = con.execute_command('AI.INFO', 'linear', 'RESETSTAT') + env.assertEqual(res, b'OK') + + info = con.execute_command('AI.INFO', 'linear') + info_dict_0 = info_to_dict(info) + env.assertEqual(info_dict_0['DURATION'], 0) + env.assertEqual(info_dict_0['SAMPLES'], 0) + env.assertEqual(info_dict_0['CALLS'], 0) + env.assertEqual(info_dict_0['ERRORS'], 0) + + +def test_onnx_modelrun_disconnect(env): + if not TEST_ONNX: + env.debugPrint("skipping {} since TEST_ONNX=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + linear_model_filename = os.path.join(test_data_path, 'linear_iris.onnx') + + with open(linear_model_filename, 'rb') as f: + linear_model = f.read() + + ret = con.execute_command('AI.MODELSET', 'linear', 
'ONNX', DEVICE, linear_model) + env.assertEqual(ret, b'OK') + + model_serialized_master = con.execute_command('AI.MODELGET', 'linear') + con.execute_command('AI.TENSORSET', 'features', 'FLOAT', 1, 4, 'VALUES', 5.1, 3.5, 1.4, 0.2) + + ensureSlaveSynced(con, env) + + if env.useSlaves: + con2 = env.getSlaveConnection() + model_serialized_slave = con2.execute_command('AI.MODELGET', 'linear') + env.assertEqual(len(model_serialized_master), len(model_serialized_slave)) + + ret = send_and_disconnect(('AI.MODELRUN', 'linear', 'INPUTS', 'features', 'OUTPUTS', 'linear_out'), con) + env.assertEqual(ret, None) diff --git a/test/tests_pytorch.py b/test/tests_pytorch.py new file mode 100644 index 000000000..262fb45ae --- /dev/null +++ b/test/tests_pytorch.py @@ -0,0 +1,519 @@ +import redis + +from includes import * + +''' +python -m RLTest --test tests_pytorch.py --module path/to/redisai.so +''' + + +def test_pytorch_modelrun(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'pt-minimal.pt') + wrong_model_filename = os.path.join(test_data_path, 'graph.pb') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(wrong_model_filename, 'rb') as f: + wrong_model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TORCH', DEVICE, model_pb) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = con.execute_command('AI.MODELGET', 'm') + # TODO: enable me + # env.assertEqual(ret[0], b'TORCH') + # env.assertEqual(ret[1], b'CPU') + + try: + con.execute_command('AI.MODELSET', 'm', 'TORCH', DEVICE, wrong_model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_1', 'TORCH', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_2', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + env.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'INPUTS', 'a', 'b', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_2', 'INPUTS', 'a', 'b', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_3', 'a', 'b', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 
'm_1', 'INPUTS', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_1', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c', 'd') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + + ensureSlaveSynced(con, env) + + tensor = con.execute_command('AI.TENSORGET', 'c', 'VALUES') + values = tensor[-1] + env.assertEqual(values, [b'4', b'6', b'4', b'6']) + + if env.useSlaves: + con2 = env.getSlaveConnection() + tensor2 = con2.execute_command('AI.TENSORGET', 'c', 'VALUES') + env.assertEqual(tensor2, tensor) + + +def test_pytorch_modelinfo(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'pt-minimal.pt') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TORCH', DEVICE, model_pb) + env.assertEqual(ret, b'OK') + + ret = env.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ret = env.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + previous_duration = 0 + for call in range(1, 10): + ret = con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + env.assertEqual(ret, b'OK') + ensureSlaveSynced(con, env) + + info = con.execute_command('AI.INFO', 'm') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'm') + env.assertEqual(info_dict_0['TYPE'], 'MODEL') + env.assertEqual(info_dict_0['BACKEND'], 'TORCH') + env.assertEqual(info_dict_0['DEVICE'], DEVICE) + env.assertTrue(info_dict_0['DURATION'] > previous_duration) + env.assertEqual(info_dict_0['SAMPLES'], 2 * call) + env.assertEqual(info_dict_0['CALLS'], call) + env.assertEqual(info_dict_0['ERRORS'], 0) + + previous_duration = info_dict_0['DURATION'] + + res = con.execute_command('AI.INFO', 'm', 'RESETSTAT') + env.assertEqual(res, b'OK') + info = con.execute_command('AI.INFO', 'm') + info_dict_0 = info_to_dict(info) + env.assertEqual(info_dict_0['DURATION'], 0) + env.assertEqual(info_dict_0['SAMPLES'], 0) + env.assertEqual(info_dict_0['CALLS'], 0) + env.assertEqual(info_dict_0['ERRORS'], 0) + + +def test_pytorch_scriptset(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + try: + con.execute_command('AI.SCRIPTSET', 'ket', DEVICE, 'return 1') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.SCRIPTSET', 'nope') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.SCRIPTSET', 'more', DEVICE) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + script_filename = os.path.join(test_data_path, 'script.txt') + + with open(script_filename, 'rb') as f: + script = f.read() + + ret = con.execute_command('AI.SCRIPTSET', 'ket', 
DEVICE, script) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + # TODO: Check why this COMMAND is hanging CI + # ret = con.execute_command('AI.SCRIPTGET', 'ket') + # env.assertEqual([b'CPU',script],ret) + # + # if env.useSlaves: + # con2 = env.getSlaveConnection() + # script_slave = con2.execute_command('AI.SCRIPTGET', 'ket') + # env.assertEqual(ret, script_slave) + + +def test_pytorch_scriptdel(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + script_filename = os.path.join(test_data_path, 'script.txt') + + with open(script_filename, 'rb') as f: + script = f.read() + + ret = con.execute_command('AI.SCRIPTSET', 'ket', DEVICE, script) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = con.execute_command('AI.SCRIPTDEL', 'ket') + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + env.assertFalse(con.execute_command('EXISTS', 'ket')) + + if env.useSlaves: + con2 = env.getSlaveConnection() + env.assertFalse(con2.execute_command('EXISTS', 'ket')) + + # ERR no script at key from SCRIPTDEL + try: + con.execute_command('DEL', 'EMPTY') + con.execute_command('AI.SCRIPTDEL', 'EMPTY') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("no script at key", exception.__str__()) + + # ERR wrong type from SCRIPTDEL + try: + con.execute_command('SET', 'NOT_SCRIPT', 'BAR') + con.execute_command('AI.SCRIPTDEL', 'NOT_SCRIPT') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("WRONGTYPE Operation against a key holding the wrong kind of value", exception.__str__()) + + +def test_pytorch_scriptrun(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + script_filename = os.path.join(test_data_path, 'script.txt') + + with open(script_filename, 'rb') as f: + script = f.read() + + ret = con.execute_command('AI.SCRIPTSET', 'ket', DEVICE, script) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + ret = con.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + # TODO: Check why this COMMAND is hanging CI + # master_scriptget_result = con.execute_command('AI.SCRIPTGET', 'ket') + # env.assertEqual([b'CPU',script],master_scriptget_result) + # + # if env.useSlaves: + # con2 = env.getSlaveConnection() + # slave_scriptget_result = con2.execute_command('AI.SCRIPTGET', 'ket') + # env.assertEqual(master_scriptget_result, slave_scriptget_result) + + # ERR no script at key from SCRIPTGET + try: + con.execute_command('DEL', 'EMPTY') + con.execute_command('AI.SCRIPTGET', 'EMPTY') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("cannot get script from empty key", exception.__str__()) + + # ERR wrong type from SCRIPTGET + try: + con.execute_command('SET', 'NOT_SCRIPT', 'BAR') + con.execute_command('AI.SCRIPTGET', 'NOT_SCRIPT') + except Exception as e: + exception = e + env.assertEqual(type(exception), 
redis.exceptions.ResponseError) + env.assertEqual("WRONGTYPE Operation against a key holding the wrong kind of value", exception.__str__()) + + # ERR no script at key from SCRIPTRUN + try: + con.execute_command('DEL', 'EMPTY') + con.execute_command('AI.SCRIPTRUN', 'EMPTY', 'bar', 'INPUTS', 'b', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("script key is empty", exception.__str__()) + + # ERR wrong type from SCRIPTRUN + try: + con.execute_command('SET', 'NOT_SCRIPT', 'BAR') + con.execute_command('AI.SCRIPTRUN', 'NOT_SCRIPT', 'bar', 'INPUTS', 'b', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("WRONGTYPE Operation against a key holding the wrong kind of value", exception.__str__()) + + # ERR Input key is empty + try: + con.execute_command('DEL', 'EMPTY') + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'EMPTY', 'b', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("Input key is empty", exception.__str__()) + + # ERR Input key not tensor + try: + con.execute_command('SET', 'NOT_TENSOR', 'BAR') + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'NOT_TENSOR', 'b', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("WRONGTYPE Operation against a key holding the wrong kind of value", exception.__str__()) + + try: + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'b', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.SCRIPTRUN', 'ket', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'b', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + con.execute_command('AI.SCRIPTRUN', 'ket', 'bar', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + + ensureSlaveSynced(con, env) + + info = con.execute_command('AI.INFO', 'ket') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'ket') + env.assertEqual(info_dict_0['TYPE'], 'SCRIPT') + env.assertEqual(info_dict_0['BACKEND'], 'TORCH') + env.assertTrue(info_dict_0['DURATION'] > 0) + env.assertEqual(info_dict_0['SAMPLES'], -1) + env.assertEqual(info_dict_0['CALLS'], 4) + env.assertEqual(info_dict_0['ERRORS'], 3) + + tensor = con.execute_command('AI.TENSORGET', 'c', 'VALUES') + values = tensor[-1] + env.assertEqual(values, [b'4', b'6', b'4', b'6']) + + if env.useSlaves: + con2 = env.getSlaveConnection() + tensor2 = con2.execute_command('AI.TENSORGET', 'c', 'VALUES') + env.assertEqual(tensor2, tensor) + + +def test_pytorch_scriptinfo(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + env.debugPrint("skipping this test for now", force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 
'test_data') + script_filename = os.path.join(test_data_path, 'script.txt') + + with open(script_filename, 'rb') as f: + script = f.read() + + ret = con.execute_command('AI.SCRIPTSET', 'ket_script', DEVICE, script) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + ret = con.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + previous_duration = 0 + for call in range(1, 10): + ret = con.execute_command('AI.SCRIPTRUN', 'ket_script', 'bar', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + env.assertEqual(ret, b'OK') + ensureSlaveSynced(con, env) + + info = con.execute_command('AI.INFO', 'ket_script') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'ket_script') + env.assertEqual(info_dict_0['TYPE'], 'SCRIPT') + env.assertEqual(info_dict_0['BACKEND'], 'TORCH') + env.assertEqual(info_dict_0['DEVICE'], DEVICE) + env.assertTrue(info_dict_0['DURATION'] > previous_duration) + env.assertEqual(info_dict_0['SAMPLES'], -1) + env.assertEqual(info_dict_0['CALLS'], call) + env.assertEqual(info_dict_0['ERRORS'], 0) + + previous_duration = info_dict_0['DURATION'] + + res = con.execute_command('AI.INFO', 'ket_script', 'RESETSTAT') + env.assertEqual(res, b'OK') + info = con.execute_command('AI.INFO', 'ket_script') + info_dict_0 = info_to_dict(info) + env.assertEqual(info_dict_0['DURATION'], 0) + env.assertEqual(info_dict_0['SAMPLES'], -1) + env.assertEqual(info_dict_0['CALLS'], 0) + env.assertEqual(info_dict_0['ERRORS'], 0) + + +def test_pytorch_scriptrun_disconnect(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + if DEVICE == "GPU": + env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + script_filename = os.path.join(test_data_path, 'script.txt') + + with open(script_filename, 'rb') as f: + script = f.read() + + ret = con.execute_command('AI.SCRIPTSET', 'ket_script', DEVICE, script) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + ret = con.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = send_and_disconnect(('AI.SCRIPTRUN', 'ket_script', 'bar', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c'), con) + env.assertEqual(ret, None) + + +def test_pytorch_modelrun_disconnect(env): + if not TEST_PT: + env.debugPrint("skipping {} since TEST_PT=0".format(sys._getframe().f_code.co_name), force=True) + return + + if DEVICE == "GPU": + env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'pt-minimal.pt') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TORCH', DEVICE, model_pb) + env.assertEqual(ret, b'OK') + + ret = env.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ret = env.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + 
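+ # send_and_disconnect (defined in includes.py) only writes the command to the socket and drops the connection without reading the reply, so the final assertion in this test expects None; this exercises the MODELRUN background-thread path after the client has gone away.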
env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = send_and_disconnect(('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c'), con) + env.assertEqual(ret, None) diff --git a/test/tests_tensorflow.py b/test/tests_tensorflow.py new file mode 100644 index 000000000..341adda69 --- /dev/null +++ b/test/tests_tensorflow.py @@ -0,0 +1,449 @@ +import redis + +from includes import * + +''' +python -m RLTest --test tests_tensorflow.py --module path/to/redisai.so +''' + + +def test_run_mobilenet(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + input_var = 'input' + output_var = 'MobilenetV2/Predictions/Reshape_1' + + model_pb, labels, img = load_mobilenet_test_data() + + con.execute_command('AI.MODELSET', 'mobilenet', 'TF', DEVICE, + 'INPUTS', input_var, 'OUTPUTS', output_var, model_pb) + + ensureSlaveSynced(con, env) + + mobilenet_model_serialized = con.execute_command('AI.MODELGET', 'mobilenet') + + ensureSlaveSynced(con, env) + if env.useSlaves: + con2 = env.getSlaveConnection() + slave_mobilenet_model_serialized = con2.execute_command('AI.MODELGET', 'mobilenet') + env.assertEqual(len(mobilenet_model_serialized), len(slave_mobilenet_model_serialized)) + + con.execute_command('AI.TENSORSET', 'input', + 'FLOAT', 1, img.shape[1], img.shape[0], img.shape[2], + 'BLOB', img.tobytes()) + + ensureSlaveSynced(con, env) + input_tensor_meta = con.execute_command('AI.TENSORGET', 'input', 'META') + env.assertEqual([b'FLOAT', [1, img.shape[1], img.shape[0], img.shape[2]]], input_tensor_meta) + + ensureSlaveSynced(con, env) + if env.useSlaves: + con2 = env.getSlaveConnection() + slave_tensor_meta = con2.execute_command('AI.TENSORGET', 'input', 'META') + env.assertEqual(input_tensor_meta, slave_tensor_meta) + + con.execute_command('AI.MODELRUN', 'mobilenet', + 'INPUTS', 'input', 'OUTPUTS', 'output') + + ensureSlaveSynced(con, env) + + dtype, shape, data = con.execute_command('AI.TENSORGET', 'output', 'BLOB') + + dtype_map = {b'FLOAT': np.float32} + tensor = np.frombuffer(data, dtype=dtype_map[dtype]).reshape(shape) + label_id = np.argmax(tensor) - 1 + + _, label = labels[str(label_id)] + + env.assertEqual(label, 'giant_panda') + + if env.useSlaves: + con2 = env.getSlaveConnection() + slave_dtype, slave_shape, slave_data = con2.execute_command('AI.TENSORGET', 'output', 'BLOB') + env.assertEqual(dtype, slave_dtype) + env.assertEqual(shape, slave_shape) + env.assertEqual(data, slave_data) + + +def test_run_mobilenet_multiproc(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), force=True) + return + + if VALGRIND: + env.debugPrint("skipping {} since VALGRIND=1".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + input_var = 'input' + output_var = 'MobilenetV2/Predictions/Reshape_1' + + model_pb, labels, img = load_mobilenet_test_data() + con.execute_command('AI.MODELSET', 'mobilenet', 'TF', DEVICE, + 'INPUTS', input_var, 'OUTPUTS', output_var, model_pb) + ensureSlaveSynced(con, env) + + run_test_multiproc(env, 30, run_mobilenet, (img, input_var, output_var)) + + ensureSlaveSynced(con, env) + + dtype, shape, data = con.execute_command('AI.TENSORGET', 'output', 'BLOB') + + dtype_map = {b'FLOAT': np.float32} + tensor = np.frombuffer(data, dtype=dtype_map[dtype]).reshape(shape) + label_id = np.argmax(tensor) - 1 + + _, label = labels[str(label_id)] + + env.assertEqual( + label, 
'giant_panda' + ) + + if env.useSlaves: + con2 = env.getSlaveConnection() + slave_dtype, slave_shape, slave_data = con2.execute_command('AI.TENSORGET', 'output', 'BLOB') + env.assertEqual(dtype, slave_dtype) + env.assertEqual(shape, slave_shape) + env.assertEqual(data, slave_data) + + +def test_del_tf_model(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'graph.pb') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + con.execute_command('AI.MODELDEL', 'm') + env.assertFalse(env.execute_command('EXISTS', 'm')) + + ensureSlaveSynced(con, env) + if env.useSlaves: + con2 = env.getSlaveConnection() + env.assertFalse(con2.execute_command('EXISTS', 'm')) + + # ERR no model at key + try: + con.execute_command('AI.MODELDEL', 'm') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("no model at key", exception.__str__()) + + # ERR wrong type + try: + con.execute_command('SET', 'NOT_MODEL', 'BAR') + con.execute_command('AI.MODELDEL', 'NOT_MODEL') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("WRONGTYPE Operation against a key holding the wrong kind of value", exception.__str__()) + + +def test_run_tf_model(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'graph.pb') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = con.execute_command('AI.MODELGET', 'm') + env.assertEqual(len(ret), 3) + # TODO: enable me + # env.assertEqual(ret[0], b'TF') + # env.assertEqual(ret[1], b'CPU') + + con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + con.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + + ensureSlaveSynced(con, env) + + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + + ensureSlaveSynced(con, env) + + tensor = con.execute_command('AI.TENSORGET', 'c', 'VALUES') + values = tensor[-1] + env.assertEqual(values, [b'4', b'9', b'4', b'9']) + + if env.useSlaves: + con2 = env.getSlaveConnection() + tensor2 = con2.execute_command('AI.TENSORGET', 'c', 'VALUES') + env.assertEqual(tensor2, tensor) + + for _ in env.reloadingIterator(): + env.assertExists('m') + env.assertExists('a') + env.assertExists('b') + env.assertExists('c') + + con.execute_command('AI.MODELDEL', 'm') + ensureSlaveSynced(con, env) + + env.assertFalse(env.execute_command('EXISTS', 'm')) + + ensureSlaveSynced(con, env) + if env.useSlaves: + con2 = env.getSlaveConnection() + env.assertFalse(con2.execute_command('EXISTS', 'm')) + + +def test_run_tf_model_errors(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), 
force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'graph.pb') + wrong_model_filename = os.path.join(test_data_path, 'pt-minimal.pt') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(wrong_model_filename, 'rb') as f: + wrong_model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + try: + con.execute_command('AI.MODELGET') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("wrong number of arguments for 'AI.MODELGET' command", exception.__str__()) + + # ERR WRONGTYPE + con.execute_command('SET', 'NOT_MODEL', 'BAR') + try: + con.execute_command('AI.MODELGET', 'NOT_MODEL') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("WRONGTYPE Operation against a key holding the wrong kind of value", exception.__str__()) + # cleanup + con.execute_command('DEL', 'NOT_MODEL') + + # ERR cannot get model from empty key + con.execute_command('DEL', 'DONT_EXIST') + try: + con.execute_command('AI.MODELGET', 'DONT_EXIST') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual("cannot get model from empty key", exception.__str__()) + + try: + ret = con.execute_command('AI.MODELSET', 'm', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', wrong_model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_1', 'TF', + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_2', 'PORCH', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_3', 'TORCH', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_4', 'TF', + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_5', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'c', 'OUTPUTS', 'mul', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_6', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mult', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_7', 'TF', DEVICE, model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_8', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + 
con.execute_command('AI.MODELSET', 'm_8', 'TF', DEVICE, + 'INPUTS', 'a_', 'b', 'OUTPUTS', 'mul') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELSET', 'm_8', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul_') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + # ERR Invalid GraphDef + try: + con.execute_command('AI.MODELSET', 'm_8', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + env.assertEqual(exception.__str__(), "Invalid GraphDef") + + try: + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + +def test_tensorflow_modelinfo(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'graph.pb') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + previous_duration = 0 + for call in range(1, 10): + ret = con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c') + env.assertEqual(ret, b'OK') + ensureSlaveSynced(con, env) + + info = con.execute_command('AI.INFO', 'm') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'm') + env.assertEqual(info_dict_0['TYPE'], 'MODEL') + env.assertEqual(info_dict_0['BACKEND'], 'TF') + env.assertEqual(info_dict_0['DEVICE'], DEVICE) + env.assertTrue(info_dict_0['DURATION'] > previous_duration) + env.assertEqual(info_dict_0['SAMPLES'], 2 * call) + env.assertEqual(info_dict_0['CALLS'], call) + env.assertEqual(info_dict_0['ERRORS'], 0) + + previous_duration = info_dict_0['DURATION'] + + res = con.execute_command('AI.INFO', 'm', 'RESETSTAT') + env.assertEqual(res, b'OK') + info = con.execute_command('AI.INFO', 'm') + info_dict_0 = info_to_dict(info) + env.assertEqual(info_dict_0['DURATION'], 0) + env.assertEqual(info_dict_0['SAMPLES'], 0) + env.assertEqual(info_dict_0['CALLS'], 0) + env.assertEqual(info_dict_0['ERRORS'], 0) + + +def test_tensorflow_modelrun_disconnect(env): + if not TEST_TF: + env.debugPrint("skipping {} since TEST_TF=0".format(sys._getframe().f_code.co_name), force=True) + return + + red = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'graph.pb') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + ret = red.execute_command('AI.MODELSET', 'm', 'TF', DEVICE, + 'INPUTS', 'a', 'b', 'OUTPUTS', 'mul', model_pb) + env.assertEqual(ret, b'OK') + + ret = red.execute_command('AI.TENSORSET', 'a', 'FLOAT', 2, 
2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ret = red.execute_command('AI.TENSORSET', 'b', 'FLOAT', 2, 2, 'VALUES', 2, 3, 2, 3) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(red, env) + + ret = send_and_disconnect(('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b', 'OUTPUTS', 'c'), red) + env.assertEqual(ret, None) diff --git a/test/tests_tflite.py b/test/tests_tflite.py new file mode 100644 index 000000000..22f6c899b --- /dev/null +++ b/test/tests_tflite.py @@ -0,0 +1,204 @@ +import redis + +from includes import * + +''' +python -m RLTest --test tests_tflite.py --module path/to/redisai.so +''' + + +def test_run_tflite_model(env): + if not TEST_TFLITE: + env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'mnist_model_quant.tflite') + wrong_model_filename = os.path.join(test_data_path, 'graph.pb') + sample_filename = os.path.join(test_data_path, 'one.raw') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(model_filename, 'rb') as f: + model_pb2 = f.read() + + with open(wrong_model_filename, 'rb') as f: + wrong_model_pb = f.read() + + with open(sample_filename, 'rb') as f: + sample_raw = f.read() + + ret = con.execute_command('AI.MODELSET', 'm', 'TFLITE', 'CPU', model_pb) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + ret = con.execute_command('AI.MODELGET', 'm') + env.assertEqual(len(ret), 3) + # TODO: enable me + # env.assertEqual(ret[0], b'TFLITE') + # env.assertEqual(ret[1], b'CPU') + + try: + con.execute_command('AI.MODELSET', 'm_1', 'TFLITE', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + ret = con.execute_command('AI.MODELSET', 'm_2', 'TFLITE', 'CPU', model_pb2) + ensureSlaveSynced(con, env) + + try: + con.execute_command('AI.MODELSET', 'm_2', model_pb) + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_2', 'INPUTS', 'a', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_2', 'INPUTS', 'a', 'b', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_2', 'a', 'b', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm_2', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'OUTPUTS', 'c') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'b') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'OUTPUTS') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + try: + 
con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'OUTPUTS', 'b') + except Exception as e: + exception = e + env.assertEqual(type(exception), redis.exceptions.ResponseError) + + con.execute_command('AI.MODELRUN', 'm', 'INPUTS', 'a', 'OUTPUTS', 'b', 'c') + + ensureSlaveSynced(con, env) + + tensor = con.execute_command('AI.TENSORGET', 'b', 'VALUES') + value = tensor[-1][0] + + env.assertEqual(value, 1) + + +def test_tflite_modelinfo(env): + if not TEST_TFLITE: + env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True) + return + + if DEVICE == "GPU": + env.debugPrint("skipping {} since it's hanging CI".format(sys._getframe().f_code.co_name), force=True) + return + + con = env.getConnection() + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'mnist_model_quant.tflite') + sample_filename = os.path.join(test_data_path, 'one.raw') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(sample_filename, 'rb') as f: + sample_raw = f.read() + + ret = con.execute_command('AI.MODELSET', 'mnist', 'TFLITE', 'CPU', model_pb) + env.assertEqual(ret, b'OK') + + ret = con.execute_command('AI.TENSORSET', 'a', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(con, env) + + previous_duration = 0 + for call in range(1, 10): + ret = con.execute_command('AI.MODELRUN', 'mnist', 'INPUTS', 'a', 'OUTPUTS', 'b', 'c') + env.assertEqual(ret, b'OK') + ensureSlaveSynced(con, env) + + info = con.execute_command('AI.INFO', 'mnist') + info_dict_0 = info_to_dict(info) + + env.assertEqual(info_dict_0['KEY'], 'mnist') + env.assertEqual(info_dict_0['TYPE'], 'MODEL') + env.assertEqual(info_dict_0['BACKEND'], 'TFLITE') + env.assertEqual(info_dict_0['DEVICE'], DEVICE) + env.assertTrue(info_dict_0['DURATION'] > previous_duration) + env.assertEqual(info_dict_0['SAMPLES'], call) + env.assertEqual(info_dict_0['CALLS'], call) + env.assertEqual(info_dict_0['ERRORS'], 0) + + previous_duration = info_dict_0['DURATION'] + + res = con.execute_command('AI.INFO', 'mnist', 'RESETSTAT') + env.assertEqual(res, b'OK') + info = con.execute_command('AI.INFO', 'mnist') + info_dict_0 = info_to_dict(info) + env.assertEqual(info_dict_0['DURATION'], 0) + env.assertEqual(info_dict_0['SAMPLES'], 0) + env.assertEqual(info_dict_0['CALLS'], 0) + env.assertEqual(info_dict_0['ERRORS'], 0) + + +def test_tflite_modelrun_disconnect(env): + if not TEST_TFLITE: + env.debugPrint("skipping {} since TEST_TFLITE=0".format(sys._getframe().f_code.co_name), force=True) + return + + red = env.getConnection() + test_data_path = os.path.join(os.path.dirname(__file__), 'test_data') + model_filename = os.path.join(test_data_path, 'mnist_model_quant.tflite') + sample_filename = os.path.join(test_data_path, 'one.raw') + + with open(model_filename, 'rb') as f: + model_pb = f.read() + + with open(sample_filename, 'rb') as f: + sample_raw = f.read() + + ret = red.execute_command('AI.MODELSET', 'mnist', 'TFLITE', 'CPU', model_pb) + env.assertEqual(ret, b'OK') + + ret = red.execute_command('AI.TENSORSET', 'a', 'FLOAT', 1, 1, 28, 28, 'BLOB', sample_raw) + env.assertEqual(ret, b'OK') + + ensureSlaveSynced(red, env) + + ret = send_and_disconnect(('AI.MODELRUN', 'mnist', 'INPUTS', 'a', 'OUTPUTS', 'b', 'c'), red) + env.assertEqual(ret, None)
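With the Makefile changes above, each suite can still be run on its own as its docstring notes, and the valgrind pass is opt-in. A sketch of the intended invocations (the module path is illustrative; it follows the $(ROOT)/bin/$(OS)-$(ARCH)-$(DEVICE)/install layout from opt/Makefile):

python3 -m RLTest --test tests_tflite.py --module bin/linux-x64-cpu/install/redisai.so
make -C opt test VALGRIND=1     # adds the RLTest-under-valgrind pass with the new suppressions
make -C opt test TEST=tests_common.py     # runs a single suite, with -s and PYDEBUG=1 enabled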