Skip to content

Commit b7f6d8a

Browse files
committed
feat(//examples/int8): Implement Makefile based execution for ptq and qat
Signed-off-by: Dheeraj Peri <[email protected]>
1 parent 436c239 commit b7f6d8a

File tree

5 files changed

+119
-12
lines changed

5 files changed

+119
-12
lines changed

examples/int8/ptq/Makefile

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Makefile for the INT8 post-training-quantization (ptq) example.
# Expects LibTorch, TRTorch and TensorRT unpacked under ./deps (see README).
CXX=g++
DEP_DIR=$(PWD)/deps
# Path to the TRTorch source tree; headers cifar10.h / benchmark.h are found
# relative to it. Override on the command line if your layout differs.
# NOTE: no quotes and no same-line comment here — make keeps both (plus the
# trailing spaces) as part of the value, which corrupts the -I flag.
ROOT_DIR?=../../..
# CUDA install root — override (e.g. make CUDA_DIR=/usr/local/cuda) if CUDA
# is installed somewhere else.
CUDA_DIR?=/usr/local/cuda-11.1
INCLUDE_DIRS=-I$(DEP_DIR)/libtorch/include -I$(DEP_DIR)/libtorch/include/torch/csrc/api/include/ -I$(DEP_DIR)/TensorRT-8.0.1.6/include/ -I$(ROOT_DIR) -I$(CUDA_DIR)/include -I$(DEP_DIR)/trtorch/include
LIB_DIRS=-L$(DEP_DIR)/trtorch/lib -L$(DEP_DIR)/libtorch/lib -L$(CUDA_DIR)/lib64
# --no-as-needed around -ltrtorch: the library registers converters via static
# initializers, so it must stay linked even if no symbol is referenced directly.
LIBS=-Wl,--no-as-needed -ltrtorch -Wl,--as-needed -ltorch -ltorch_cuda -ltorch_cpu -ltorch_global_deps -lbackend_with_compiler -lc10 -lc10_cuda -lpthread -lcudart

SRCS=../datasets/cifar10.cpp ../benchmark/benchmark.cpp main.cpp

TARGET=ptq

# Depend on the sources so the binary is relinked when they change (with no
# prerequisites, an existing stale binary would be reported as up to date).
$(TARGET): $(SRCS)
	$(CXX) $^ $(INCLUDE_DIRS) $(LIB_DIRS) $(LIBS) -o $@

# clean is a command, not a file — declare it phony so a file named "clean"
# cannot shadow it.
.PHONY: clean
clean:
	$(RM) $(TARGET)

examples/int8/ptq/README.md

Lines changed: 44 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -120,25 +120,62 @@ This is a short example application that shows how to use TRTorch to perform pos
120120
## Prerequisites
121121
122122
1. Download CIFAR10 Dataset Binary version ([https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz](https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz))
123-
2. Train a network on CIFAR10 (see `training/` for a VGG16 recipie)
123+
2. Train a network on CIFAR10 (see `training/` for a VGG16 recipe)
124124
3. Export model to torchscript
125125
126-
## Compilation
126+
## Compilation using bazel
127127
128128
``` shell
129-
bazel build //cpp/ptq --compilation_mode=opt
129+
bazel run //cpp/ptq --compilation_mode=opt -- <path-to-module> <path-to-cifar10>
130130
```
131131

132132
If you want insight into what is going under the hood or need debug symbols
133133

134134
``` shell
135-
bazel build //cpp/ptq --compilation_mode=dbg
135+
bazel run //cpp/ptq --compilation_mode=dbg -- <path-to-module> <path-to-cifar10>
136136
```
137137

138-
## Usage
138+
This will build a binary named `ptq` in `bazel-out/k8-<opt|dbg>/bin/cpp/ptq/` directory. Optionally you can add this to `$PATH` environment variable to run `ptq` from anywhere on your system.
139139

140-
``` shell
141-
ptq <path-to-module> <path-to-cifar10>
140+
## Compilation using Makefile
141+
142+
1) Download releases of <a href="https://pytorch.org">LibTorch</a>, <a href="https://github.com/NVIDIA/TRTorch/releases">TRTorch </a>and <a href="https://developer.nvidia.com/nvidia-tensorrt-download">TensorRT</a> and unpack them in the deps directory. Ensure CUDA is installed at `/usr/local/cuda` , if not you need to modify the CUDA include and lib paths in the Makefile.
143+
144+
```sh
145+
cd examples/int8/ptq/deps
146+
# Download latest TRTorch release tar file (libtrtorch.tar.gz) from https://github.com/NVIDIA/TRTorch/releases
147+
tar -xvzf libtrtorch.tar.gz
148+
# unzip libtorch downloaded from pytorch.org
149+
unzip libtorch-cxx11-abi-shared-with-deps-1.9.0+cu111.zip
150+
# Extract TensorRT downloaded from developer.nvidia.com
151+
tar -xvzf TensorRT-8.0.1.6.Linux.x86_64-gnu.cuda-11.3.cudnn8.2.tar.gz
152+
```
153+
154+
> If cuDNN is not installed on your system / in your LD_LIBRARY_PATH then do the following as well
155+
156+
```sh
157+
cd deps
158+
mkdir cudnn && tar -xvzf <cuDNN TARBALL> --directory cudnn --strip-components=1
159+
cd ..
160+
```
161+
162+
Set your LD_LIBRARY_PATH accordingly
163+
164+
```sh
165+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/trtorch/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/TensorRT-8.0.1.6/lib:$(pwd)/deps/cudnn/lib64:/usr/local/cuda/lib64/
166+
```
167+
168+
2) Build and run `ptq`
169+
170+
We import header files `cifar10.h` and `benchmark.h` from `ROOT_DIR`. `ROOT_DIR` should point to the path where TRTorch is located `<path_to_TRTORCH>`.
171+
172+
By default it is set to `../../../`. If your TRTorch directory structure is different, please set `ROOT_DIR` accordingly.
173+
174+
```sh
175+
cd examples/int8/ptq
176+
# This will generate a ptq binary
177+
make
178+
./ptq <path-to-module> <path-to-cifar10>
142179
```
143180

144181
## Example Output

examples/int8/qat/Makefile

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,15 @@
1+
# Makefile for the INT8 quantization-aware-training (qat) example.
# Expects LibTorch, TRTorch and TensorRT unpacked under ./deps (see README).
CXX=g++
DEP_DIR=$(PWD)/deps
# Path to the TRTorch source tree; headers cifar10.h / benchmark.h are found
# relative to it. Override on the command line if your layout differs.
# NOTE: no quotes and no same-line comment here — make keeps both (plus the
# trailing spaces) as part of the value, which corrupts the -I flag.
ROOT_DIR?=../../..
# CUDA install root — override (e.g. make CUDA_DIR=/usr/local/cuda) if CUDA
# is installed somewhere else.
CUDA_DIR?=/usr/local/cuda-11.1
INCLUDE_DIRS=-I$(DEP_DIR)/libtorch/include -I$(DEP_DIR)/libtorch/include/torch/csrc/api/include/ -I$(DEP_DIR)/TensorRT-8.0.1.6/include/ -I$(ROOT_DIR) -I$(CUDA_DIR)/include -I$(DEP_DIR)/trtorch/include
LIB_DIRS=-L$(DEP_DIR)/trtorch/lib -L$(DEP_DIR)/libtorch/lib -L$(CUDA_DIR)/lib64
# --no-as-needed around -ltrtorch: the library registers converters via static
# initializers, so it must stay linked even if no symbol is referenced directly.
LIBS=-Wl,--no-as-needed -ltrtorch -Wl,--as-needed -ltorch -ltorch_cuda -ltorch_cpu -ltorch_global_deps -lbackend_with_compiler -lc10 -lc10_cuda -lpthread -lcudart

SRCS=../datasets/cifar10.cpp ../benchmark/benchmark.cpp main.cpp

TARGET=qat

# Depend on the sources so the binary is relinked when they change (with no
# prerequisites, an existing stale binary would be reported as up to date).
$(TARGET): $(SRCS)
	$(CXX) $^ $(INCLUDE_DIRS) $(LIB_DIRS) $(LIBS) -o $@

# clean is a command, not a file — declare it phony so a file named "clean"
# cannot shadow it.
.PHONY: clean
clean:
	$(RM) $(TARGET)

examples/int8/qat/README.md

Lines changed: 45 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,23 +14,64 @@ This is a short example application that shows how to use TRTorch to perform inf
1414
1. Download CIFAR10 Dataset Binary version ([https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz](https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz))
1515
2. Train a network on CIFAR10 and perform quantization aware training on it. Refer to `cpp/int8/training/vgg16/README.md` for detailed instructions.
1616
Export the QAT model to Torchscript.
17-
3. Install NVIDIA's <a href="https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization">pytorch quantization toolkit</a>
17+
3. Install NVIDIA's <a href="https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization">pytorch quantization toolkit</a>
1818
4. TensorRT 8.0.1.6 or above
1919

20-
## Compilation
20+
## Compilation using bazel
2121

2222
``` shell
23-
bazel build //cpp/qat --compilation_mode=opt
23+
bazel run //cpp/qat --compilation_mode=opt -- <path-to-module> <path-to-cifar10>
2424
```
2525

2626
If you want insight into what is going under the hood or need debug symbols
2727

2828
``` shell
29-
bazel build //cpp/qat --compilation_mode=dbg
29+
bazel run //cpp/qat --compilation_mode=dbg -- <path-to-module> <path-to-cifar10>
3030
```
3131

3232
This will build a binary named `qat` in `bazel-out/k8-<opt|dbg>/bin/cpp/qat/` directory. Optionally you can add this to `$PATH` environment variable to run `qat` from anywhere on your system.
3333

34+
## Compilation using Makefile
35+
36+
1) Download releases of <a href="https://pytorch.org">LibTorch</a>, <a href="https://github.com/NVIDIA/TRTorch/releases">TRTorch </a>and <a href="https://developer.nvidia.com/nvidia-tensorrt-download">TensorRT</a> and unpack them in the deps directory. Ensure CUDA is installed at `/usr/local/cuda` , if not you need to modify the CUDA include and lib paths in the Makefile.
37+
38+
```sh
39+
cd examples/int8/qat/deps
40+
# Download latest TRTorch release tar file (libtrtorch.tar.gz) from https://github.com/NVIDIA/TRTorch/releases
41+
tar -xvzf libtrtorch.tar.gz
42+
# unzip libtorch downloaded from pytorch.org
43+
unzip libtorch-cxx11-abi-shared-with-deps-1.9.0+cu111.zip
44+
# Extract TensorRT downloaded from developer.nvidia.com
45+
tar -xvzf TensorRT-8.0.1.6.Linux.x86_64-gnu.cuda-11.3.cudnn8.2.tar.gz
46+
```
47+
48+
> If cuDNN is not installed on your system / in your LD_LIBRARY_PATH then do the following as well
49+
50+
```sh
51+
cd deps
52+
mkdir cudnn && tar -xvzf <cuDNN TARBALL> --directory cudnn --strip-components=1
53+
cd ..
54+
```
55+
56+
Set your LD_LIBRARY_PATH accordingly
57+
58+
```sh
59+
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$(pwd)/deps/trtorch/lib:$(pwd)/deps/libtorch/lib:$(pwd)/deps/TensorRT-8.0.1.6/lib:$(pwd)/deps/cudnn/lib64:/usr/local/cuda/lib64/
60+
```
61+
62+
2) Build and run `qat`
63+
64+
We import header files `cifar10.h` and `benchmark.h` from `ROOT_DIR`. `ROOT_DIR` should point to the path where TRTorch is located `<path_to_TRTORCH>`.
65+
66+
By default it is set to `../../../`. If your TRTorch directory structure is different, please set `ROOT_DIR` accordingly.
67+
68+
```sh
69+
cd examples/int8/qat
70+
# This will generate a qat binary
71+
make
72+
./qat <path-to-module> <path-to-cifar10>
73+
```
74+
3475
## Usage
3576

3677
``` shell

examples/int8/qat/main.cpp

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,6 @@ torch::jit::Module compile_int8_qat_model(const std::string& data_dir, torch::ji
3434
/// Configure settings for compilation
3535
auto compile_spec = trtorch::CompileSpec(inputs);
3636
/// Set operating precision to INT8
37-
// compile_spec.enabled_precisions.insert(torch::kF16);
3837
compile_spec.enabled_precisions.insert(torch::kI8);
3938
/// Set max batch size for the engine
4039
compile_spec.max_batch_size = 32;

0 commit comments

Comments
 (0)