
Commit 8041d96

Author: Wei Wei
Merge branch 'main'
2 parents f81aed1 + bc9f4e4

File tree

144 files changed: +26166 -0 lines

fx2trt/.gitignore (new file, +6)

build/
dist/
*__pycache__*
.DS_Store
fx2trt_oss.egg-info/

fx2trt/LICENSE (new file, +28)

Copyright (c) Meta Platforms, Inc. and affiliates.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.

3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

fx2trt/README.md (new file, +29)

# FX2TRT
This package provides pure eager-mode tooling to convert a PyTorch nn.Module to a TensorRT engine.

## Installation
First, install PyTorch.
```
conda install -y pytorch cudatoolkit=11.3 -c pytorch-nightly
```
Then, install TensorRT and its Python binding:
```
tar -xzvf TensorRT-8.2.1.8.Linux.x86_64-gnu.cuda-11.4.cudnn8.2.tar.gz
export LD_LIBRARY_PATH=$HOME/TensorRT-8.2.1.8/lib:$HOME/TensorRT-8.2.1.8/targets/x86_64-linux-gnu/lib${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
cd TensorRT-8.2.1.8/python
python3 -m pip install tensorrt-8.2.1.8-cp36-none-linux_x86_64.whl
```
Finally, installing this package is as simple as:
```
cd fx2trt
python setup.py install
```
## Test
Follow the instructions in [pytorch/benchmark](https://github.com/pytorch/benchmark) to set up some benchmarks.

Then try passing `--fx2trt` for individual cases, e.g.
```
cd benchmark
python run.py resnet50 -d cuda -t eval -m eager --fx2trt
```
You should see TensorRT logs printed and the case run through without error.
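
Before running the benchmark, it can help to confirm that the TensorRT wheel installed above is importable from the interpreter you plan to use. This check is not part of the committed README, just a suggested sanity step; `tensorrt.__version__` is the same attribute the converters package probes further below.

```
# Sanity check (not part of the committed README): confirm the TensorRT
# Python binding and a CUDA-enabled PyTorch are importable.
import tensorrt
import torch

print("TensorRT:", tensorrt.__version__)
print("CUDA available:", torch.cuda.is_available())
```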

fx2trt/fx/__init__.py (new file, +10)

from .converters import *  # noqa: F403 F401
from .converter_registry import (  # noqa
    CONVERTERS,
    NO_EXPLICIT_BATCH_DIM_SUPPORT,
    NO_IMPLICIT_BATCH_DIM_SUPPORT,
    tensorrt_converter,
)
from .fx2trt import TRTInterpreter, TRTInterpreterResult  # noqa
from .input_tensor_spec import InputTensorSpec  # noqa
from .trt_module import TRTModule  # noqa
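
The re-exports above outline the intended workflow: describe the inputs with `InputTensorSpec`, lower a traced module to an engine with `TRTInterpreter`, and wrap the resulting `TRTInterpreterResult` in a `TRTModule` for eager execution. A rough sketch of that flow follows; the `fx2trt_oss` import path is inferred from the egg-info name in `.gitignore`, and the constructor, `run()`, and `from_tensors` signatures are assumptions, since `fx2trt.py`, `input_tensor_spec.py`, and `trt_module.py` are not reproduced in this excerpt.

```
# Sketch only: the import path and signatures below are assumptions; the
# modules that define them are not part of the files shown above.
import torch
import torch.fx

from fx2trt_oss.fx import InputTensorSpec, TRTInterpreter, TRTModule  # assumed install name

model = torch.nn.Sequential(torch.nn.Linear(16, 4), torch.nn.ReLU()).cuda().eval()
sample = torch.randn(2, 16, device="cuda")

traced = torch.fx.symbolic_trace(model)
input_specs = InputTensorSpec.from_tensors([sample])  # assumed helper

interp = TRTInterpreter(traced, input_specs)  # assumed constructor
result = interp.run()  # assumed to return a TRTInterpreterResult

trt_mod = TRTModule(result.engine, result.input_names, result.output_names)  # assumed fields
print(trt_mod(sample).shape)  # should match model(sample).shape
```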

fx2trt/fx/converter_registry.py (new file, +31)

from typing import Any, Callable, Dict

from torch.fx.node import Target


CONVERTERS: Dict[Target, Any] = {}
NO_IMPLICIT_BATCH_DIM_SUPPORT = {}
NO_EXPLICIT_BATCH_DIM_SUPPORT = {}


def tensorrt_converter(
    key: Target,
    no_implicit_batch_dim: bool = False,
    no_explicit_batch_dim: bool = False,
    enabled: bool = True,
) -> Callable[[Any], Any]:
    # Decorator factory: registers the wrapped function as the TensorRT
    # converter for `key`, and tracks targets that only support one batch-dim mode.
    def register_converter(converter):
        CONVERTERS[key] = converter
        if no_implicit_batch_dim:
            NO_IMPLICIT_BATCH_DIM_SUPPORT[key] = converter
        if no_explicit_batch_dim:
            NO_EXPLICIT_BATCH_DIM_SUPPORT[key] = converter
        return converter

    def disable_converter(converter):
        # No-op path: the function is returned unchanged and never registered.
        return converter

    if enabled:
        return register_converter
    else:
        return disable_converter
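
For context, each module under `fx2trt/fx/converters/` registers its handlers through this decorator at import time. Below is a minimal sketch of what such a registration looks like; the converter call signature `(network, target, args, kwargs, name)` and the ReLU example are assumptions, since the individual converter files are not reproduced in this excerpt.

```
# Sketch of a converter module registering itself (signature assumed; the real
# converters live in fx2trt/fx/converters/ and are not shown above). Assumes
# this file sits inside the fx2trt/fx package, hence the relative import.
import tensorrt as trt
import torch.nn.functional as F

from .converter_registry import tensorrt_converter


@tensorrt_converter(F.relu)  # key: the torch.fx call target to handle
def relu_converter(network, target, args, kwargs, name):
    # `network` is the TensorRT INetworkDefinition being built; the converter
    # appends a layer and returns its output tensor for downstream nodes.
    input_val = args[0]
    layer = network.add_activation(input_val, trt.ActivationType.RELU)
    layer.name = name
    return layer.get_output(0)


# Registering with enabled=False is a no-op, so a converter can be switched
# off without touching its call sites.
@tensorrt_converter(F.hardswish, enabled=False)
def hardswish_converter(network, target, args, kwargs, name):
    raise NotImplementedError("kept out of CONVERTERS because enabled=False")
```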

fx2trt/fx/converters/__init__.py (new file, +18)

# @manual=//deeplearning/trt/python:py_tensorrt
import tensorrt as trt

if hasattr(trt, "__version__"):
    from .activation import *  # noqa: F401 F403
    from .adaptive_avgpool import *  # noqa: F401 F403
    from .add import *  # noqa: F401 F403
    from .batchnorm import *  # noqa: F401 F403
    from .convolution import *  # noqa: F401 F403
    from .linear import *  # noqa: F401 F403
    from .maxpool import *  # noqa: F401 F403
    from .mul import *  # noqa: F401 F403
    from .transformation import *  # noqa: F401 F403
    from .quantization import *  # noqa: F401 F403
    from .acc_ops_converters import *  # noqa: F401 F403

    TRT_LOGGER = trt.Logger()
    trt.init_libnvinfer_plugins(TRT_LOGGER, "")
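
Because the star-imports above execute each converter module, merely importing this package populates `CONVERTERS` (and the two batch-dim exception tables). A small sketch for inspecting what got registered; the `fx2trt_oss` import path is again an assumption inferred from the `.gitignore` entry.

```
# Sketch: list the targets that gained a TensorRT converter after import.
# The import path is assumed, and a working `tensorrt` installation is
# required, since the registrations are guarded by hasattr(trt, "__version__").
from fx2trt_oss.fx import CONVERTERS, NO_IMPLICIT_BATCH_DIM_SUPPORT

for target, converter in CONVERTERS.items():
    flag = " (explicit batch dim only)" if target in NO_IMPLICIT_BATCH_DIM_SUPPORT else ""
    print(f"{target} -> {converter.__name__}{flag}")
```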
