-
Notifications
You must be signed in to change notification settings - Fork 390
Expand file tree
/
Copy pathMODULE.bazel
More file actions
183 lines (156 loc) · 6.64 KB
/
MODULE.bazel
File metadata and controls
183 lines (156 loc) · 6.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
# Bzlmod module declaration for Torch-TensorRT.
# `repo_name` lets dependent modules refer to this repo as @org_pytorch_tensorrt.
module(
    name = "torch_tensorrt",
    version = "2.12.0a0",
    repo_name = "org_pytorch_tensorrt",
)
# Direct dependencies resolved from the Bazel Central Registry.
bazel_dep(name = "googletest", version = "1.16.0")  # C++ unit-test framework
bazel_dep(name = "platforms", version = "0.0.11")  # platform/constraint definitions
bazel_dep(name = "rules_cc", version = "0.1.1")  # C/C++ build rules
bazel_dep(name = "rules_python", version = "1.3.0")  # Python rules and hermetic toolchains
# Register a hermetic Python toolchain via the rules_python module extension.
python = use_extension("@rules_python//python/extensions:python.bzl", "python")
python.toolchain(
    # Allow the toolchain to run as root (common in CI/Docker containers).
    ignore_root_user_error = True,
    python_version = "3.11",
)
bazel_dep(name = "rules_pkg", version = "1.0.1")

# Replace the registry version of rules_pkg with a patched fork.
# NOTE(review): `commit` is an abbreviated hash; Bazel's git_override expects a
# specific commit and a short hash is not guaranteed unique over time — prefer
# the full 40-character SHA for reproducibility. TODO confirm full hash.
git_override(
    module_name = "rules_pkg",
    commit = "17c57f4",
    remote = "https://github.com/narendasan/rules_pkg",
)
# Repo rules for wrapping locally installed (non-Bazel) dependencies.
new_local_repository = use_repo_rule("@bazel_tools//tools/build_defs/repo:local.bzl", "new_local_repository")
local_torch = use_repo_rule("//toolchains:local_torch.bzl", "local_torch")

# External dependency for torch_tensorrt if you already have precompiled binaries.
# NOTE(review): this path points at a Python 3.12 site-packages directory while
# the toolchain above pins Python 3.11 — confirm which interpreter is intended.
new_local_repository(
    name = "torch_tensorrt",
    build_file = "@//third_party/torch_tensorrt:BUILD",
    path = "/usr/local/lib/python3.12/site-packages/torch_tensorrt/",
)
# CUDA should be installed on the system locally.
# Default toolkit location for linux x86_64 and aarch64 (CUDA 13.0).
new_local_repository(
    name = "cuda",
    build_file = "@//third_party/cuda:BUILD",
    path = "/usr/local/cuda-13.0/",
)

# For Jetson (L4T), which ships CUDA 12.6 via JetPack.
# These versions can be selected using the flag `--//toolchains/dep_collection:compute_libs="jetpack"`
new_local_repository(
    name = "cuda_l4t",
    build_file = "@//third_party/cuda:BUILD",
    path = "/usr/local/cuda-12.6",
)

# For Windows: default CUDA Toolkit v13.0 install location.
new_local_repository(
    name = "cuda_win",
    build_file = "@//third_party/cuda:BUILD",
    path = "C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v13.0/",
)
# Repo rule for fetching archives over HTTP.
http_archive = use_repo_rule("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

####################################################################################
# Locally installed dependencies (use in cases of custom dependencies or aarch64)
####################################################################################

# Detect the locally installed PyTorch at build time so the headers always match
# the runtime libtorch_cuda.so, avoiding ABI mismatches (e.g. CUDAGraph layout).
# Works with uv (.venv/), virtualenv (VIRTUAL_ENV), and conda (CONDA_PREFIX).
# Override with TORCH_PATH env var if auto-detection fails.
local_torch(name = "libtorch")

# NOTE: If you are using a local archive build of torch, just point the libtorch
# dep to that path by uncommenting this block (and removing local_torch above).
#new_local_repository(
#    name = "libtorch",
#    # please modify this path according to your local python wheel path
#    path = "/usr/local/lib/python3.11/dist-packages/torch",
#    build_file = "third_party/libtorch/BUILD"
#)

# Optional: use a locally installed TensorRT instead of the fetched tarball.
#new_local_repository(
#    name = "tensorrt",
#    path = "/usr/",
#    build_file = "@//third_party/tensorrt/local:BUILD"
#)
#############################################################################################################
# Tarballs and fetched dependencies (default - use in cases when building from precompiled bin and tarballs)
#############################################################################################################

# Alternatively, pin to a specific nightly for reproducible/frozen builds.
# Uncomment this block and comment out local_torch above.
# NOTE: The pinned version must match the PyTorch installed in your Python
# environment — a mismatch causes ABI breakage (see installation docs).
#http_archive(
#    name = "libtorch",
#    build_file = "@//third_party/libtorch:BUILD",
#    strip_prefix = "libtorch",
#    urls = ["https://download.pytorch.org/libtorch/nightly/cu130/libtorch-shared-with-deps-latest.zip"],
#)

# On the aarch64 platform you can get libtorch via either a local install or a wheel file.
# It is possible to specify a wheel file to use as the libtorch source by providing the URL below and
# using the build flag `--//toolchains/dep_src:torch="whl"`
#http_archive(
#    name = "torch_whl",
#    build_file = "@//third_party/libtorch:BUILD",
#    strip_prefix = "torch",
#    type = "zip",
#    urls = ["https://download.pytorch.org/whl/nightly/cu128/torch-2.8.0.dev20250415%2Bcu128-cp310-cp310-manylinux_2_28_aarch64.whl"],
#)

# Windows libtorch nightly build (CUDA 13.0).
http_archive(
    name = "libtorch_win",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "libtorch",
    urls = ["https://download.pytorch.org/libtorch/nightly/cu130/libtorch-win-shared-with-deps-latest.zip"],
)

# Jetson (L4T) torch wheel from the jetson-ai-lab index (JetPack 6 / CUDA 12.6).
# The .whl is a zip archive, hence type = "zip".
http_archive(
    name = "torch_l4t",
    build_file = "@//third_party/libtorch:BUILD",
    strip_prefix = "torch",
    type = "zip",
    urls = ["https://pypi.jetson-ai-lab.io/jp6/cu126/+f/62a/1beee9f2f1470/torch-2.8.0-cp310-cp310-linux_aarch64.whl"],
)
# Download these tarballs manually from the NVIDIA website.
# Either place them in the distdir directory in third_party and use the --distdir flag
# or modify the urls to "file:///<PATH TO TARBALL>/<TARBALL NAME>.tar.gz"

# TensorRT 10.16 for linux x86_64 (CUDA 13.x).
http_archive(
    name = "tensorrt",
    build_file = "@//third_party/tensorrt/archive:BUILD",
    strip_prefix = "TensorRT-10.16.0.72",
    urls = [
        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.16.0/tars/TensorRT-10.16.0.72.Linux.x86_64-gnu.cuda-13.2.tar.gz",
    ],
)

# TensorRT-RTX 1.4 for linux x86_64 (CUDA 13.x).
http_archive(
    name = "tensorrt_rtx",
    build_file = "@//third_party/tensorrt_rtx/archive:BUILD",
    strip_prefix = "TensorRT-RTX-1.4.0.76",
    urls = [
        "https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.4/tensorrt-rtx-1.4.0.76-linux-x86_64-cuda-13.2-release-external.tar.gz",
    ],
)

# TensorRT 10.16 for linux aarch64 / SBSA servers (CUDA 13.x).
http_archive(
    name = "tensorrt_sbsa",
    build_file = "@//third_party/tensorrt/archive:BUILD",
    strip_prefix = "TensorRT-10.16.0.72",
    urls = [
        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.16.0/tars/TensorRT-10.16.0.72.Linux.aarch64-gnu.cuda-13.2.tar.gz",
    ],
)

# TensorRT 10.3 for Jetson (L4T, CUDA 12.6) — older release than the
# x86_64/SBSA archives above; tracks what JetPack supports.
http_archive(
    name = "tensorrt_l4t",
    build_file = "@//third_party/tensorrt/archive:BUILD",
    strip_prefix = "TensorRT-10.3.0.26",
    urls = [
        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.3.0/tars/TensorRT-10.3.0.26.l4t.aarch64-gnu.cuda-12.6.tar.gz",
    ],
)

# TensorRT 10.16 for Windows amd64 (CUDA 13.x).
http_archive(
    name = "tensorrt_win",
    build_file = "@//third_party/tensorrt/archive:BUILD",
    strip_prefix = "TensorRT-10.16.0.72",
    urls = [
        "https://developer.nvidia.com/downloads/compute/machine-learning/tensorrt/10.16.0/zip/TensorRT-10.16.0.72.Windows.amd64.cuda-13.2.zip",
    ],
)

# TensorRT-RTX 1.4 for Windows amd64 (CUDA 13.x).
http_archive(
    name = "tensorrt_rtx_win",
    build_file = "@//third_party/tensorrt_rtx/archive:BUILD",
    strip_prefix = "TensorRT-RTX-1.4.0.76",
    urls = [
        "https://developer.nvidia.com/downloads/trt/rtx_sdk/secure/1.4/tensorrt-rtx-1.4.0.76-win10-amd64-cuda-13.2-release-external.zip",
    ],
)