We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 6ceaed8 commit bd9c29a — Copy full SHA for bd9c29a
py/torch_tensorrt/dynamo/backend/backends.py
@@ -121,6 +121,9 @@ def _compile_module(
121
torch_executed_ops=settings.torch_executed_ops,
122
)
123
124
+ # Store TRT replicas of Torch subgraphs
125
+ trt_modules = {}
126
+
127
# Iterate over all components that can be accelerated
128
# Generate the corresponding TRT Module for those
129
for name, _ in partitioned_module.named_children():
@@ -138,7 +141,10 @@ def _compile_module(
138
141
settings=settings,
139
142
140
143
- # Replace FX Module with TRT Module
144
+ trt_modules[name] = trt_mod
145
146
+ # Replace all FX Modules with TRT Modules
147
+ for name, trt_mod in trt_modules.items():
148
setattr(partitioned_module, name, trt_mod)
149
150
return partitioned_module
0 commit comments