Skip to content

Commit 3b4ddc1

Browse files
TensorRT Release 10.14 (#4633)
Signed-off-by: Asfiya Baig <asfiyab@nvidia.com>
1 parent a833f79 commit 3b4ddc1

308 files changed

Lines changed: 9554 additions & 3644 deletions

File tree

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

CHANGELOG.md

Lines changed: 17 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,27 @@
11
# TensorRT OSS Release Changelog
22

3+
## 10.14 GA - 2025-11-7
4+
- Sample changes
5+
- Replace all pycuda usages with cuda-python APIs
6+
- Removed the efficientnet samples
7+
- Deprecated tensorflow_object_detection and efficientdet samples
8+
- Samples will no longer be released with the packages. The TensorRT GitHub repository will be the single source.
9+
10+
11+
- Parsers:
12+
- Added support for the `Attention` operator
13+
- Improved refit for `ConstantOfShape` nodes
14+
15+
- Demos
16+
- demoDiffusion:
17+
- Added support for the Cosmos-Predict2 text2image and video2world pipelines
18+
19+
320
## 10.13.3 GA - 2025-9-8
421
- Added support for TensorRT API Capture and Replay feature, see the [developer guide](https://docs.nvidia.com/deeplearning/tensorrt/latest/inference-library/advanced.html) for more information.
522
- Demo changes
623
- Added support for Flux Kontext pipeline.
724

8-
925
## 10.13.2 GA - 2025-8-18
1026
- Added support for CUDA 13.0, dropped support for CUDA 11.X
1127
- Dropped support for Ubuntu 20.04
@@ -24,7 +40,6 @@
2440
- Added `loadModelProto`, `loadInitializer` and `refitModelProto` APIs for IParserRefitter. These APIs are meant to be used to load user initializers when refitting ONNX models.
2541
- Deprecated `IParser::parseWithWeightDescriptors`.
2642

27-
2843
## 10.12.0 GA - 2025-6-10
2944
- Plugin changes
3045
- Migrated `IPluginV2`-descendent version 1 of `cropAndResizeDynamic`, to version 2, which implements `IPluginV3`.

CMakeLists.txt

Lines changed: 16 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,9 @@ endif()
6767
set(CMAKE_SKIP_BUILD_RPATH True)
6868

6969
# CUDA targets
70+
set(DEFAULT_CUDA_VERSION 13.0.0)
71+
set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
72+
message(STATUS "CUDA version set to ${CUDA_VERSION}")
7073

7174
if (DEFINED GPU_ARCHS)
7275
message(STATUS "GPU_ARCHS defined as ${GPU_ARCHS}. Generating CUDA code for SM ${GPU_ARCHS}")
@@ -81,6 +84,9 @@ else()
8184
list(APPEND CMAKE_CUDA_ARCHITECTURES 100 120)
8285
endif()
8386

87+
if(CUDA_VERSION VERSION_GREATER_EQUAL 13.0)
88+
list(APPEND CMAKE_CUDA_ARCHITECTURES 110)
89+
endif()
8490
message(STATUS "GPU_ARCHS is not defined. Generating CUDA code for default SMs: ${CMAKE_CUDA_ARCHITECTURES}")
8591
endif()
8692
set(BERT_GENCODES)
@@ -105,6 +111,10 @@ project(TensorRT
105111
DESCRIPTION "TensorRT is a C++ library that facilitates high-performance inference on NVIDIA GPUs and deep learning accelerators."
106112
HOMEPAGE_URL "https://github.com/NVIDIA/TensorRT")
107113

114+
if (WIN32)
115+
enable_language(C)
116+
endif()
117+
108118
if(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
109119
set(CMAKE_INSTALL_PREFIX ${TRT_LIB_DIR}/../ CACHE PATH "TensorRT installation" FORCE)
110120
endif(CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
@@ -113,7 +123,7 @@ option(BUILD_PLUGINS "Build TensorRT plugin" ON)
113123
option(BUILD_PARSERS "Build TensorRT parsers" ON)
114124
option(BUILD_SAMPLES "Build TensorRT samples" ON)
115125

116-
# C++14
126+
# C++17
117127
set(CMAKE_CXX_STANDARD 17)
118128
set(CMAKE_CXX_STANDARD_REQUIRED ON)
119129
set(CMAKE_CXX_EXTENSIONS OFF)
@@ -141,14 +151,10 @@ endif()
141151

142152
############################################################################################
143153
# Dependencies
144-
145-
set(DEFAULT_CUDA_VERSION 12.2.0)
146154
set(DEFAULT_CUDNN_VERSION 8.9)
147-
set(DEFAULT_PROTOBUF_VERSION 3.20.1)
155+
set(DEFAULT_PROTOBUF_VERSION 3.20.3)
148156

149157
# Dependency Version Resolution
150-
set_ifndef(CUDA_VERSION ${DEFAULT_CUDA_VERSION})
151-
message(STATUS "CUDA version set to ${CUDA_VERSION}")
152158
set_ifndef(CUDNN_VERSION ${DEFAULT_CUDNN_VERSION})
153159
message(STATUS "cuDNN version set to ${CUDNN_VERSION}")
154160
set_ifndef(PROTOBUF_VERSION ${DEFAULT_PROTOBUF_VERSION})
@@ -221,16 +227,18 @@ endif()
221227
############################################################################################
222228
# TensorRT
223229

230+
set(HINT_PATHS "${TRT_OUT_DIR}" "${TRT_LIB_DIR}")
231+
224232
if(BUILD_PLUGINS)
225233
add_subdirectory(plugin)
226234
else()
227-
find_library_create_target(nvinfer_plugin ${nvinfer_plugin_lib_name} SHARED "${TRT_OUT_DIR}" "${TRT_LIB_DIR}")
235+
find_library_create_target(${nvinfer_plugin_lib_name} ${nvinfer_plugin_lib_name} SHARED "${HINT_PATHS}")
228236
endif()
229237

230238
if(BUILD_PARSERS)
231239
add_subdirectory(parsers)
232240
else()
233-
find_library_create_target(nvonnxparser ${nvonnxparser_lib_name} SHARED "${TRT_OUT_DIR}" "${TRT_LIB_DIR}")
241+
find_library_create_target(${nvonnxparser_lib_name} ${nvonnxparser_lib_name} SHARED "${HINT_PATHS}")
234242
endif()
235243

236244
if(BUILD_SAMPLES)

README.md

Lines changed: 27 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ To build the TensorRT-OSS components, you will first need the following software
3232

3333
**TensorRT GA build**
3434

35-
- TensorRT v10.13.3.9
35+
- TensorRT v10.14.1.48
3636
- Available from direct download links listed below
3737

3838
**System Packages**
@@ -86,24 +86,24 @@ To build the TensorRT-OSS components, you will first need the following software
8686

8787
Else download and extract the TensorRT GA build from [NVIDIA Developer Zone](https://developer.nvidia.com) with the direct links below:
8888

89-
- [TensorRT 10.13.3.9 for CUDA 13.0, Linux x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/tars/TensorRT-10.13.3.9.Linux.x86_64-gnu.cuda-13.0.tar.gz)
90-
- [TensorRT 10.13.3.9 for CUDA 12.9, Linux x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/tars/TensorRT-10.13.3.9.Linux.x86_64-gnu.cuda-12.9.tar.gz)
91-
- [TensorRT 10.13.3.9 for CUDA 13.0, Windows x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/zip/TensorRT-10.13.3.9.Windows.win10.cuda-13.0.zip)
92-
- [TensorRT 10.13.3.9 for CUDA 12.9, Windows x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.13.3/zip/TensorRT-10.13.3.9.Windows.win10.cuda-12.9.zip)
89+
- [TensorRT 10.14.1.48 for CUDA 13.0, Linux x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.x86_64-gnu.cuda-13.0.tar.gz)
90+
- [TensorRT 10.14.1.48 for CUDA 12.9, Linux x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/tars/TensorRT-10.14.1.48.Linux.x86_64-gnu.cuda-12.9.tar.gz)
91+
- [TensorRT 10.14.1.48 for CUDA 13.0, Windows x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/zip/TensorRT-10.14.1.48.Windows.win10.cuda-13.0.zip)
92+
- [TensorRT 10.14.1.48 for CUDA 12.9, Windows x86_64](https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.14.1/zip/TensorRT-10.14.1.48.Windows.win10.cuda-12.9.zip)
9393

9494
**Example: Ubuntu 22.04 on x86-64 with cuda-13.0**
9595

9696
```bash
9797
cd ~/Downloads
98-
tar -xvzf TensorRT-10.13.3.9.Linux.x86_64-gnu.cuda-13.0.tar.gz
99-
export TRT_LIBPATH=`pwd`/TensorRT-10.13.3.9
98+
tar -xvzf TensorRT-10.14.1.48.Linux.x86_64-gnu.cuda-13.0.tar.gz
99+
export TRT_LIBPATH=`pwd`/TensorRT-10.14.1.48
100100
```
101101

102102
**Example: Windows on x86-64 with cuda-12.9**
103103

104104
```powershell
105-
Expand-Archive -Path TensorRT-10.13.3.9.Windows.win10.cuda-12.9.zip
106-
$env:TRT_LIBPATH="$pwd\TensorRT-10.13.3.9\lib"
105+
Expand-Archive -Path TensorRT-10.14.1.48.Windows.win10.cuda-12.9.zip
106+
$env:TRT_LIBPATH="$pwd\TensorRT-10.14.1.48\lib"
107107
```
108108

109109
## Setting Up The Build Environment
@@ -112,16 +112,16 @@ For Linux platforms, we recommend that you generate a docker container for build
112112

113113
1. #### Generate the TensorRT-OSS build container.
114114

115-
**Example: Ubuntu 22.04 on x86-64 with cuda-13.0 (default)**
115+
**Example: Ubuntu 24.04 on x86-64 with cuda-13.0 (default)**
116116

117117
```bash
118-
./docker/build.sh --file docker/ubuntu-22.04.Dockerfile --tag tensorrt-ubuntu22.04-cuda13.0
118+
./docker/build.sh --file docker/ubuntu-24.04.Dockerfile --tag tensorrt-ubuntu24.04-cuda13.0
119119
```
120120

121-
**Example: Rockylinux8 on x86-64 with cuda-12.9**
121+
**Example: Rockylinux8 on x86-64 with cuda-13.0**
122122

123123
```bash
124-
./docker/build.sh --file docker/rockylinux8.Dockerfile --tag tensorrt-rockylinux8-cuda12.9
124+
./docker/build.sh --file docker/rockylinux8.Dockerfile --tag tensorrt-rockylinux8-cuda13.0
125125
```
126126

127127
**Example: Ubuntu 24.04 cross-compile for Jetson (aarch64) with cuda-13.0 (JetPack SDK)**
@@ -137,9 +137,9 @@ For Linux platforms, we recommend that you generate a docker container for build
137137
```
138138

139139
2. #### Launch the TensorRT-OSS build container.
140-
**Example: Ubuntu 22.04 build container**
140+
**Example: Ubuntu 24.04 build container**
141141
```bash
142-
./docker/launch.sh --tag tensorrt-ubuntu22.04-cuda13.0 --gpus all
142+
./docker/launch.sh --tag tensorrt-ubuntu24.04-cuda13.0 --gpus all
143143
```
144144
> NOTE:
145145
> <br> 1. Use the `--tag` corresponding to build container generated in Step 1.
@@ -175,7 +175,7 @@ For Linux platforms, we recommend that you generate a docker container for build
175175
```bash
176176
cd $TRT_OSSPATH
177177
mkdir -p build && cd build
178-
cmake .. -DTRT_LIB_DIR=$TRT_LIBPATH -DTRT_OUT_DIR=`pwd`/out -DTRT_PLATFORM_ID=aarch64 -DGPU_ARCHS=110
178+
cmake .. -DTRT_LIB_DIR=$TRT_LIBPATH -DTRT_OUT_DIR=`pwd`/out -DTRT_PLATFORM_ID=aarch64
179179
CC=/usr/bin/gcc make -j$(nproc)
180180
```
181181

@@ -186,7 +186,16 @@ For Linux platforms, we recommend that you generate a docker container for build
186186
```bash
187187
cd $TRT_OSSPATH
188188
mkdir -p build && cd build
189-
cmake .. -DTRT_LIB_DIR=$TRT_LIBPATH -DCMAKE_TOOLCHAIN_FILE=$TRT_OSSPATH/cmake/toolchains/cmake_aarch64_cross.toolchain -DGPU_ARCHS=110
189+
cmake .. -DTRT_LIB_DIR=$TRT_LIBPATH -DCMAKE_TOOLCHAIN_FILE=$TRT_OSSPATH/cmake/toolchains/cmake_aarch64_cross.toolchain
190+
make -j$(nproc)
191+
```
192+
193+
**Example: Ubuntu 24.04 Cross-Compile for DriveOS (aarch64) with cuda-13.0**
194+
195+
```bash
196+
cd $TRT_OSSPATH
197+
mkdir -p build && cd build
198+
cmake .. -DTRT_LIB_DIR=$TRT_LIBPATH -DCMAKE_TOOLCHAIN_FILE=$TRT_OSSPATH/cmake/toolchains/cmake_aarch64_dos_cross.toolchain
190199
make -j$(nproc)
191200
```
192201

@@ -196,7 +205,7 @@ For Linux platforms, we recommend that you generate a docker container for build
196205
cd $TRT_OSSPATH
197206
mkdir -p build
198207
cd build
199-
cmake .. -DTRT_LIB_DIR="$env:TRT_LIBPATH" -DCUDNN_ROOT_DIR="$env:CUDNN_PATH" -DTRT_OUT_DIR="$pwd\\out"
208+
cmake .. -DTRT_LIB_DIR="$env:TRT_LIBPATH" -DTRT_OUT_DIR="$pwd\\out"
200209
msbuild TensorRT.sln /property:Configuration=Release -m:$env:NUMBER_OF_PROCESSORS
201210
```
202211

VERSION

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1 +1 @@
1-
10.13.3.9
1+
10.14.1.48

0 commit comments

Comments (0)