Skip to content

Commit 3c47ee1

Browse files
committed
Initial working version of torch-runtime; WIP workflows
1 parent 7383ec9 commit 3c47ee1

21 files changed

+1304
-0
lines changed

.flake8

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
[flake8]
2+
max-line-length = 120
3+
extend-ignore = E203, E402, E722, W391
4+
per-file-ignores = __init__.py:F401
Lines changed: 134 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,134 @@
1+
name: Update PCI Database
2+
3+
on:
4+
# Manual trigger
5+
workflow_dispatch:
6+
# Run weekly on Thursday at 3:14 AM UTC
7+
schedule:
8+
- cron: '14 3 * * 4'
9+
10+
jobs:
11+
update-db:
12+
runs-on: ubuntu-latest
13+
permissions:
14+
contents: write
15+
steps:
16+
- uses: actions/checkout@v4
17+
with:
18+
fetch-depth: 0 # Required for getting tags
19+
20+
- name: Set up Python
21+
uses: actions/setup-python@v4
22+
with:
23+
python-version: '3.x'
24+
cache: "pip"
25+
26+
- name: Install dependencies
27+
run: |
28+
python -m pip install --upgrade pip
29+
pip install toml pytest
30+
31+
- name: Create raw_data directory if it doesn't exist
32+
run: mkdir -p raw_data
33+
34+
- name: Download latest pci.ids
35+
run: |
36+
curl -o pci.ids.new https://raw.githubusercontent.com/pciutils/pciids/master/pci.ids
37+
38+
- name: Check for changes
39+
id: check_changes
40+
run: |
41+
# Calculate new checksum
42+
NEW_CHECKSUM=$(sha256sum pci.ids.new | cut -d' ' -f1)
43+
44+
# Check if checksum file exists and compare
45+
if [ ! -f pci.ids.sha256 ]; then
46+
echo "No existing checksum file found. Will proceed with update."
47+
echo "has_changes=true" >> "$GITHUB_OUTPUT"
48+
echo "$NEW_CHECKSUM" > pci.ids.sha256
49+
else
50+
OLD_CHECKSUM=$(cat pci.ids.sha256)
51+
if [ "$NEW_CHECKSUM" != "$OLD_CHECKSUM" ]; then
52+
echo "Changes detected in pci.ids file"
53+
echo "$NEW_CHECKSUM" > pci.ids.sha256
54+
echo "has_changes=true" >> "$GITHUB_OUTPUT"
55+
else
56+
echo "No changes detected in pci.ids file"
57+
echo "has_changes=false" >> "$GITHUB_OUTPUT"
58+
rm pci.ids.new
59+
exit 0
60+
fi
61+
fi
62+
63+
- name: Update files and generate database
64+
if: steps.check_changes.outputs.has_changes == 'true'
65+
run: |
66+
python scripts/txt_to_db.py pci.ids.new torch_runtime/gpu_pci_ids.db
67+
68+
- name: Run tests
69+
if: steps.check_changes.outputs.has_changes == 'true'
70+
run: |
71+
python -m pytest
72+
73+
- name: Update version in pyproject.toml
74+
if: steps.check_changes.outputs.has_changes == 'true'
75+
id: update_version
76+
run: |
77+
python - <<EOF
78+
import toml
79+
import os
80+
81+
# Read the current pyproject.toml
82+
with open('pyproject.toml', 'r') as f:
83+
config = toml.load(f)
84+
85+
# Get current version and increment minor version
86+
current_version = config['project']['version']
87+
major, minor, patch = current_version.split('.')
88+
new_version = f"{major}.{int(minor) + 1}.{patch}"
89+
90+
# Update version in config
91+
config['project']['version'] = new_version
92+
93+
# Write back to pyproject.toml
94+
with open('pyproject.toml', 'w') as f:
95+
toml.dump(config, f)
96+
97+
# Set output for later steps
98+
print(f"new_version={new_version}", file=open(os.environ['GITHUB_OUTPUT'], 'a'))
99+
EOF
100+
101+
cat pyproject.toml
102+
cat pci.ids.sha256
103+
ls -l torch_runtime/gpu_pci_ids.db
104+
105+
- name: Configure Git
106+
if: steps.check_changes.outputs.has_changes == 'true'
107+
run: |
108+
git config --local user.email "github-actions[bot]@users.noreply.github.com"
109+
git config --local user.name "github-actions[bot]"
110+
111+
# - name: Commit changes
112+
# if: steps.check_changes.outputs.has_changes == 'true'
113+
# run: |
114+
# git add pci.ids.sha256 torch_runtime/gpu_pci_ids.db pyproject.toml
115+
# git commit -m "Update PCI database, raw data file and version"
116+
117+
# - name: Create Release
118+
# if: steps.check_changes.outputs.has_changes == 'true'
119+
# uses: softprops/action-gh-release@v1
120+
# with:
121+
# tag_name: v${{ steps.update_version.outputs.new_version }}
122+
# name: Release ${{ steps.update_version.outputs.new_version }}
123+
# body: |
124+
# Automatic update of PCI database. Install using `pip install --upgrade torch-runtime==${{ steps.update_version.outputs.new_version }}`
125+
# draft: false
126+
# prerelease: false
127+
128+
# - name: Push changes
129+
# if: steps.check_changes.outputs.has_changes == 'true'
130+
# run: git push && git push --tags
131+
132+
# - name: Cleanup
133+
# if: always()
134+
# run: rm -f pci.ids.new
Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,52 @@
1+
name: Publish to PyPI
2+
3+
on:
4+
release:
5+
types: [created]
6+
# Manual trigger
7+
workflow_dispatch:
8+
9+
jobs:
10+
build-and-publish:
11+
runs-on: ubuntu-latest
12+
environment:
13+
name: pypi-beta
14+
url: https://test.pypi.org/p/torch-runtime
15+
permissions:
16+
contents: read
17+
id-token: write
18+
steps:
19+
- uses: actions/checkout@v4
20+
21+
- name: Set up Python
22+
uses: actions/setup-python@v4
23+
with:
24+
python-version: '3.x'
25+
cache: "pip"
26+
27+
- name: Install build dependencies
28+
run: |
29+
python -m pip install --upgrade pip
30+
pip install build wheel pytest
31+
32+
- name: Copy tests
33+
run: |
34+
cp -R tests torch_runtime/
35+
36+
- name: Build package
37+
run: python -m build
38+
39+
- name: Install and test the package
40+
run: |
41+
mkdir /tmp/install-test
42+
cp dist/*.whl /tmp/install-test
43+
cd /tmp/install-test
44+
pip install *.whl
45+
46+
python -m torch_runtime --help # test invocation
47+
pytest --pyargs torch_runtime # run tests
48+
49+
# - name: Publish to PyPI
50+
# uses: pypa/gh-action-pypi-publish@release/v1
51+
# with:
52+
# repository-url: https://test.pypi.org/legacy/

.github/workflows/run_tests.yml

Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
name: Run Tests
2+
3+
on:
4+
push:
5+
branches: main
6+
pull_request:
7+
branches: main
8+
# Manual trigger
9+
workflow_dispatch:
10+
11+
jobs:
12+
test:
13+
runs-on: ubuntu-latest
14+
strategy:
15+
matrix:
16+
python-version: ["3.8", "3.11"]
17+
fail-fast: false
18+
19+
steps:
20+
- uses: actions/checkout@v4
21+
22+
- name: Set up Python ${{ matrix.python-version }}
23+
uses: actions/setup-python@v4
24+
with:
25+
python-version: ${{ matrix.python-version }}
26+
cache: "pip"
27+
28+
- name: Install dependencies
29+
run: |
30+
python -m pip install --upgrade pip
31+
pip install pytest
32+
33+
- name: Run tests
34+
run: |
35+
python -m pytest

MANIFEST.in

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
include torch_runtime/gpu_pci_ids.db

README.md

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
# torch-runtime
2+
[![Discord Server](https://img.shields.io/discord/1014774730907209781?label=Discord)](https://discord.com/invite/u9yhsFmEkB)
3+
4+
**torch-runtime** is a lightweight package for automatically installing the appropriate variant of PyTorch on a user's computer, based on their OS, and GPU manufacturer and GPU model.
5+
6+
This package is used by [Easy Diffusion](https://github.com/easydiffusion/easydiffusion), but you're welcome to use it as well. It's useful for developers who make PyTorch-based apps that target users with NVIDIA, AMD and Intel graphics cards (as well as CPU-only usage), on Windows, Mac and Linux.
7+
8+
### Why?
9+
It lets you treat PyTorch as a single dependency (like it should be), and lets you assume that each user will get the most-performant variant of PyTorch suitable for their computer's OS and hardware.
10+
11+
It deals with the complexity of the variety of torch builds and configurations required for CUDA, AMD (ROCm, DirectML), Intel (xpu/DirectML/ipex), and CPU-only.
12+
13+
**Compatibility table**: [Click here](#compatibility-table) to see the supported graphics cards and operating systems.
14+
15+
# Installation
16+
Supports Windows, Linux, and Mac.
17+
18+
`pip install torch-runtime`
19+
20+
## Usage
21+
### Step 1. Install the appropriate variant of PyTorch
22+
*This command should be run on the user's computer, or while creating platform-specific builds:*
23+
24+
`python -m torch_runtime install` (*note the underscore in `torch_runtime`*)
25+
26+
This will install `torch`, `torchvision`, and `torchaudio`, and will decide the variant based on the user's OS, GPU manufacturer and GPU model number. See [customizing packages](#customizing-packages) for more options.
27+
28+
### Step 2. Initialize torch
29+
This should be run inside your program, to initialize the required environment variables (if any) for the variant of torch being used.
30+
31+
```py
32+
import torch_runtime
33+
34+
torch_runtime.init_torch()
35+
```
36+
37+
## Customizing packages
38+
By default, `python -m torch_runtime install` will install the latest available `torch`, `torchvision` and `torchaudio` suitable on the user's platform.
39+
40+
You can customize the packages to install by including their names:
41+
* For example, to install only `torch` and `torchvision`, you can run `python -m torch_runtime install torch torchvision`
42+
* To install specific versions (in pip format), you can run `python -m torch_runtime install "torch>2.0" "torchvision==0.20"`
43+
44+
**Note:** If you specify package versions, please keep in mind that the version may not be available to *all* the users on *all* the torch platforms. For example, a user with Python 3.8 would not be able to install torch 2.5 (or higher), because torch 2.5 dropped support for Python 3.8.
45+
46+
So in general, it's better to avoid specifying a version unless it really matters to you (or you know what you're doing). Instead, please allow `torch-runtime` to pick the latest-possible version for the user.
47+
48+
# Compatibility table
49+
The list of platforms on which `torch-runtime` can install a working variant of PyTorch.
50+
51+
**Note:** *This list is based on user feedback (since I don't have all the cards). Please let me know if your card is supported (or not) by opening a pull request or issue or messaging on [Discord](https://discord.com/invite/u9yhsFmEkB) (with supporting logs).*
52+
53+
**CPU-only:**
54+
55+
| OS | Supported?| Notes |
56+
|---|---|---|
57+
| Windows | ✅ Yes | x86_64 |
58+
| Linux | ✅ Yes | x86_64 and aarch64 |
59+
| Mac (M1/M2/M3/M4) | ✅ Yes | arm64. `mps` backend |
60+
| Mac (Intel) | ✅ Yes | x86_64. Stopped after `torch 2.2.2` |
61+
62+
**NVIDIA:**
63+
64+
| Series | Supported? | OS | Notes |
65+
|---|---|---|---|
66+
| 40xx | ✅ Yes | Win/Linux | Uses CUDA 124 |
67+
| 30xx | ✅ Yes | Win/Linux | Uses CUDA 124 |
68+
| 20xx | ✅ Yes | Win/Linux | Uses CUDA 124 |
69+
| 10xx/16xx | ✅ Yes | Win/Linux | Uses CUDA 124. Full-precision required on 16xx series |
70+
71+
**AMD:**
72+
73+
| Series | Supported? | OS | Notes |
74+
|---|---|---|---|
75+
| 7xxx | ✅ Yes | Win/Linux | Navi3/RDNA3 (gfx110x). ROCm 6.2 on Linux. DirectML on Windows |
76+
| 6xxx | ✅ Yes | Win/Linux | Navi2/RDNA2 (gfx103x). ROCm 6.2 on Linux. DirectML on Windows |
77+
| 5xxx | ✅ Yes | Win/Linux | Navi1/RDNA1 (gfx101x). Full-precision required. DirectML on Windows. Linux only supports up to ROCm 5.2. Waiting for [this](https://github.com/pytorch/pytorch/issues/132570#issuecomment-2313071756) for ROCm 6.2 support. |
78+
| 5xxx on Intel Mac | ❓ Untested (WIP) | Intel Mac | gfx101x. Implemented but need testers, please message on [Discord](https://discord.com/invite/u9yhsFmEkB) |
79+
| 4xxx/Radeon VII | ❓ Untested (WIP) | N/A | gfx90x. Implemented but need testers, please message on [Discord](https://discord.com/invite/u9yhsFmEkB) |
80+
| 5xx/Polaris | ❓ Untested (WIP) | N/A | gfx80x. Implemented but need testers, please message on [Discord](https://discord.com/invite/u9yhsFmEkB) |
81+
82+
**Apple:**
83+
84+
| Series | Supported? |Notes |
85+
|---|---|---|
86+
| M1/M2/M3/M4 | ✅ Yes | 'mps' backend |
87+
| AMD on Intel Mac | ❓ Untested (WIP) | Implemented but need testers, please message on [Discord](https://discord.com/invite/u9yhsFmEkB) |
88+
89+
**Intel:**
90+
91+
| Series | Supported? | OS | Notes |
92+
|---|---|---|---|
93+
| Arc | ❓ Untested (WIP) | Win/Linux | Implemented but need testers, please message on [Discord](https://discord.com/invite/u9yhsFmEkB). Backends: 'xpu' or DirectML or [ipex](https://github.com/intel/intel-extension-for-pytorch) |
94+
95+
96+
# FAQ
97+
## Why can't I just run 'pip install torch'?
98+
`pip install torch` installs the CPU-only version of torch, so it won't utilize your GPU's capabilities.
99+
100+
## Why can't I just install torch-for-ROCm directly to support AMD?
101+
Different models of AMD cards require different LLVM targets, and sometimes different ROCm versions. And ROCm currently doesn't work on Windows, so AMD on Windows is best served (currently) with DirectML.
102+
103+
And plenty of AMD cards work with ROCm (even when they aren't in the official list of supported cards). Information about these cards (for e.g. the LLVM target to use) is pretty scattered.
104+
105+
`torch-runtime` deals with this complexity for your convenience.
106+
107+
# Contributing
108+
📢 I'm looking for contributions in these specific areas:
109+
- More testing on consumer AMD GPUs.
110+
- More support for older AMD GPUs. Explore: Compile and host PyTorch wheels and rocm (on GitHub) for older AMD gpus (e.g. 580/590/Polaris) with the required patches.
111+
- Intel GPUs.
112+
- Testing on professional AMD GPUs (e.g. the Instinct series).
113+
- An easy-to-run benchmark script (that people can run to check the level of compatibility on their platform).
114+
115+
Please message on the [Discord community](https://discord.com/invite/u9yhsFmEkB) if you have AMD or Intel GPUs, and would like to help with testing or adding support for them! Thanks!
116+
117+
# Credits
118+
* Code contributors on [Easy Diffusion](https://github.com/easydiffusion/easydiffusion).
119+
* Users on [Easy Diffusion's Discord](https://discord.com/invite/u9yhsFmEkB) who've helped with testing on various GPUs.
120+
121+
# More resources
122+
* [AMD GPU LLVM Architectures](https://web.archive.org/web/20241228163540/https://llvm.org/docs/AMDGPUUsage.html#processors)
123+
* [Status of ROCm support for AMD Navi 1](https://github.com/ROCm/ROCm/issues/2527)
124+
* [Torch support for ROCm 6.2 on AMD Navi 1](https://github.com/pytorch/pytorch/issues/132570#issuecomment-2313071756)
125+
* [ROCmLibs-for-gfx1103-AMD780M-APU](https://github.com/likelovewant/ROCmLibs-for-gfx1103-AMD780M-APU)

pyproject.toml

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,33 @@
1+
[build-system]
2+
requires = [ "setuptools",]
3+
build-backend = "setuptools.build_meta"
4+
5+
[project]
6+
name = "torch-runtime"
7+
version = "0.0.0"
8+
description = "Meant for app developers. A convenient way to install and configure the appropriate version of PyTorch on the user's computer, based on the OS and GPU manufacturer and model number."
9+
readme = "README.md"
10+
requires-python = ">=3.0"
11+
classifiers = [ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: Microsoft :: Windows :: Windows 10", "Operating System :: Microsoft :: Windows :: Windows 11", "Operating System :: POSIX :: Linux", "Operating System :: MacOS",]
12+
keywords = [ "torch", "ai", "ml", "llm", "installer", "runtime",]
13+
dynamic = [ "dependencies",]
14+
[[project.authors]]
15+
name = "cmdr2"
16+
17+
18+
[project.urls]
19+
Homepage = "https://github.com/easydiffusion/torch-runtime"
20+
"Bug Tracker" = "https://github.com/easydiffusion/torch-runtime/issues"
21+
22+
[tool.isort]
23+
profile = "black"
24+
25+
[tool.black]
26+
line-length = 120
27+
include = "\\.pyi?$"
28+
exclude = "/(\n \\.git\n | \\.hg\n | \\.mypy_cache\n | \\.tox\n | \\.venv\n | _build\n | buck-out\n | build\n | dist\n)/\n"
29+
30+
[tool.pytest.ini_options]
31+
minversion = "6.0"
32+
addopts = "-vs"
33+
testpaths = [ "tests", "integration",]

0 commit comments

Comments
 (0)