Kernels
kernels-bot committed on
Commit
cb60d1e
·
verified ·
1 Parent(s): e158f98

Uploaded using `kernel-builder`.

Browse files
Files changed (34) hide show
  1. .gitattributes +9 -0
  2. build/torch210-cxx11-cu126-x86_64-linux/{_mra_cuda_86f75d9.abi3.so → _mra_cuda_10efe7e.abi3.so} +1 -1
  3. build/torch210-cxx11-cu126-x86_64-linux/_ops.py +3 -3
  4. build/torch210-cxx11-cu126-x86_64-linux/metadata.json +1 -1
  5. build/{torch211-cxx11-cu128-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch210-cxx11-cu128-x86_64-linux/_mra_cuda_10efe7e.abi3.so} +1 -1
  6. build/torch210-cxx11-cu128-x86_64-linux/_ops.py +3 -3
  7. build/torch210-cxx11-cu128-x86_64-linux/metadata.json +1 -1
  8. build/{torch211-cxx11-cu130-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch210-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so} +1 -1
  9. build/torch210-cxx11-cu130-x86_64-linux/_ops.py +3 -3
  10. build/torch210-cxx11-cu130-x86_64-linux/metadata.json +1 -1
  11. build/torch211-cxx11-cu126-x86_64-linux/{_mra_cuda_86f75d9.abi3.so → _mra_cuda_10efe7e.abi3.so} +1 -1
  12. build/torch211-cxx11-cu126-x86_64-linux/_ops.py +3 -3
  13. build/torch211-cxx11-cu126-x86_64-linux/metadata.json +1 -1
  14. build/{torch210-cxx11-cu128-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch211-cxx11-cu128-x86_64-linux/_mra_cuda_10efe7e.abi3.so} +1 -1
  15. build/torch211-cxx11-cu128-x86_64-linux/_ops.py +3 -3
  16. build/torch211-cxx11-cu128-x86_64-linux/metadata.json +1 -1
  17. build/{torch210-cxx11-cu130-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch211-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so} +1 -1
  18. build/torch211-cxx11-cu130-x86_64-linux/_ops.py +3 -3
  19. build/torch211-cxx11-cu130-x86_64-linux/metadata.json +1 -1
  20. build/torch212-cxx11-cu126-x86_64-linux/__init__.py +25 -0
  21. build/torch212-cxx11-cu126-x86_64-linux/_mra_cuda_10efe7e.abi3.so +3 -0
  22. build/torch212-cxx11-cu126-x86_64-linux/_ops.py +9 -0
  23. build/torch212-cxx11-cu126-x86_64-linux/metadata.json +20 -0
  24. build/torch212-cxx11-cu126-x86_64-linux/mra/__init__.py +26 -0
  25. build/torch212-cxx11-cu130-x86_64-linux/__init__.py +25 -0
  26. build/torch212-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so +3 -0
  27. build/torch212-cxx11-cu130-x86_64-linux/_ops.py +9 -0
  28. build/torch212-cxx11-cu130-x86_64-linux/metadata.json +21 -0
  29. build/torch212-cxx11-cu130-x86_64-linux/mra/__init__.py +26 -0
  30. build/torch212-cxx11-cu132-x86_64-linux/__init__.py +25 -0
  31. build/torch212-cxx11-cu132-x86_64-linux/_mra_cuda_10efe7e.abi3.so +3 -0
  32. build/torch212-cxx11-cu132-x86_64-linux/_ops.py +9 -0
  33. build/torch212-cxx11-cu132-x86_64-linux/metadata.json +21 -0
  34. build/torch212-cxx11-cu132-x86_64-linux/mra/__init__.py +26 -0
.gitattributes CHANGED
@@ -165,3 +165,12 @@ build/torch211-cxx11-cu130-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs di
165
  build/torch212-cxx11-cu126-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
166
  build/torch212-cxx11-cu130-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
167
  build/torch212-cxx11-cu132-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
165
  build/torch212-cxx11-cu126-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
166
  build/torch212-cxx11-cu130-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
167
  build/torch212-cxx11-cu132-aarch64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
168
+ build/torch210-cxx11-cu126-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
169
+ build/torch210-cxx11-cu128-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
170
+ build/torch210-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
171
+ build/torch211-cxx11-cu126-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
172
+ build/torch211-cxx11-cu128-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
173
+ build/torch211-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
174
+ build/torch212-cxx11-cu126-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
175
+ build/torch212-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
176
+ build/torch212-cxx11-cu132-x86_64-linux/_mra_cuda_10efe7e.abi3.so filter=lfs diff=lfs merge=lfs -text
build/torch210-cxx11-cu126-x86_64-linux/{_mra_cuda_86f75d9.abi3.so → _mra_cuda_10efe7e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:6aafad2a759af095aac2f203e003a04b8a6f835c6841debb8b38d838fbce7e28
3
  size 2451480
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e54b57c1aefae767870d42cb2a9e64b66918cd74bf1919bbef453181c2c95ae
3
  size 2451480
build/torch210-cxx11-cu126-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mra_cuda_86f75d9
3
- ops = torch.ops._mra_cuda_86f75d9
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mra_cuda_86f75d9::{op_name}"
 
1
  import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch210-cxx11-cu126-x86_64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "mra",
3
- "id": "_mra_cuda_86f75d9",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
 
1
  {
2
  "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
build/{torch211-cxx11-cu128-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch210-cxx11-cu128-x86_64-linux/_mra_cuda_10efe7e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:b4c976bee5579f8b6d8b7081a166bcf587c3113a098e6080c9a5124172c652bc
3
  size 2719848
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a12921f8eb17dc9c737c4e3b4f7623d5401cd4eb9e4b6d33cbf796ad6672e520
3
  size 2719848
build/torch210-cxx11-cu128-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mra_cuda_86f75d9
3
- ops = torch.ops._mra_cuda_86f75d9
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mra_cuda_86f75d9::{op_name}"
 
1
  import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "mra",
3
- "id": "_mra_cuda_86f75d9",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
 
1
  {
2
  "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
build/{torch211-cxx11-cu130-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch210-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:e640ffb1150c10b48c60f5cccc969409f0ac5c93307344b7b0c429e2a3871de5
3
  size 2641648
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cd2210ca0390e2eb965daeb12abdd438aa2a824a180f70bf5d51460bbcae000
3
  size 2641648
build/torch210-cxx11-cu130-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mra_cuda_86f75d9
3
- ops = torch.ops._mra_cuda_86f75d9
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mra_cuda_86f75d9::{op_name}"
 
1
  import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch210-cxx11-cu130-x86_64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "mra",
3
- "id": "_mra_cuda_86f75d9",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
 
1
  {
2
  "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
build/torch211-cxx11-cu126-x86_64-linux/{_mra_cuda_86f75d9.abi3.so → _mra_cuda_10efe7e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:bcf60c6e5f025c2e8d9074962eb93ac46372d0972994c4dd2218ab8348ddf9d8
3
  size 2451480
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95d8423cb866fcdd909f1613c4b5d25143e0ca9016d31e1a2d64443a6e1d91d4
3
  size 2451480
build/torch211-cxx11-cu126-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mra_cuda_86f75d9
3
- ops = torch.ops._mra_cuda_86f75d9
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mra_cuda_86f75d9::{op_name}"
 
1
  import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "mra",
3
- "id": "_mra_cuda_86f75d9",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
 
1
  {
2
  "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
build/{torch210-cxx11-cu128-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch211-cxx11-cu128-x86_64-linux/_mra_cuda_10efe7e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:a899fc473f4ed1b0a39c4c211015adcf9cb299cfb76b9e6a31dbf7b3fcfd58d7
3
  size 2719848
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9f12669f16aae0189daddd00ee8a9bd9aaf502f709d2722c9529eec3dbcd4fe4
3
  size 2719848
build/torch211-cxx11-cu128-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mra_cuda_86f75d9
3
- ops = torch.ops._mra_cuda_86f75d9
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mra_cuda_86f75d9::{op_name}"
 
1
  import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch211-cxx11-cu128-x86_64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "mra",
3
- "id": "_mra_cuda_86f75d9",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
 
1
  {
2
  "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
build/{torch210-cxx11-cu130-x86_64-linux/_mra_cuda_86f75d9.abi3.so → torch211-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:383257eec6a36495c0fea2f70e60b24ccfb63bea4aad48a2ccefd272ec068132
3
  size 2641648
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d1e1e36287a248de28cff36994820aca925879526e0484a6a3adc877c4398e6
3
  size 2641648
build/torch211-cxx11-cu130-x86_64-linux/_ops.py CHANGED
@@ -1,9 +1,9 @@
1
  import torch
2
- from . import _mra_cuda_86f75d9
3
- ops = torch.ops._mra_cuda_86f75d9
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
- return f"_mra_cuda_86f75d9::{op_name}"
 
1
  import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
 
5
  def add_op_namespace_prefix(op_name: str):
6
  """
7
  Prefix op by namespace.
8
  """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch211-cxx11-cu130-x86_64-linux/metadata.json CHANGED
@@ -1,6 +1,6 @@
1
  {
2
  "name": "mra",
3
- "id": "_mra_cuda_86f75d9",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
 
1
  {
2
  "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
  "version": 1,
5
  "license": "Apache-2.0",
6
  "python-depends": [],
build/torch212-cxx11-cu126-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._ops import ops
2
+ import torch
3
+
4
+ def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
5
+ return ops.index_max(index_vals, indices, A_num_block, B_num_block)
6
+
7
+ def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
8
+ return ops.mm_to_sparse(dense_A, dense_B, indices)
9
+
10
+ def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
11
+ return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
12
+
13
+ def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
14
+ return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
15
+
16
+ def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
17
+ return ops.scatter(dense_A, indices, B_num_block)
18
+
19
+ __all__ = [
20
+ "index_max",
21
+ "mm_to_sparse",
22
+ "sparse_dense_mm",
23
+ "reduce_sum",
24
+ "scatter",
25
+ ]
build/torch212-cxx11-cu126-x86_64-linux/_mra_cuda_10efe7e.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc867220c038e06c40a75c2861706a4c88f8818e4be8ea058cca4bcfb7781071
3
+ size 2446448
build/torch212-cxx11-cu126-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
+
5
+ def add_op_namespace_prefix(op_name: str):
6
+ """
7
+ Prefix op by namespace.
8
+ """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch212-cxx11-cu126-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
+ "version": 1,
5
+ "license": "Apache-2.0",
6
+ "python-depends": [],
7
+ "backend": {
8
+ "type": "cuda",
9
+ "archs": [
10
+ "7.0",
11
+ "7.2",
12
+ "7.5",
13
+ "8.0",
14
+ "8.6",
15
+ "8.7",
16
+ "8.9",
17
+ "9.0+PTX"
18
+ ]
19
+ }
20
+ }
build/torch212-cxx11-cu126-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ctypes
2
+ import importlib.util
3
+ import sys
4
+ from pathlib import Path
5
+ from types import ModuleType
6
+
7
+
8
+ def _import_from_path(file_path: Path) -> ModuleType:
9
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
10
+ # it would also be used for other imports. So, we make a module name that
11
+ # depends on the path for it to be unique using the hex-encoded hash of
12
+ # the path.
13
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
14
+ module_name = path_hash
15
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
16
+ if spec is None:
17
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
18
+ module = importlib.util.module_from_spec(spec)
19
+ if module is None:
20
+ raise ImportError(f"Cannot load module {module_name} from spec")
21
+ sys.modules[module_name] = module
22
+ spec.loader.exec_module(module) # type: ignore
23
+ return module
24
+
25
+
26
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch212-cxx11-cu130-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._ops import ops
2
+ import torch
3
+
4
+ def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
5
+ return ops.index_max(index_vals, indices, A_num_block, B_num_block)
6
+
7
+ def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
8
+ return ops.mm_to_sparse(dense_A, dense_B, indices)
9
+
10
+ def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
11
+ return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
12
+
13
+ def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
14
+ return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
15
+
16
+ def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
17
+ return ops.scatter(dense_A, indices, B_num_block)
18
+
19
+ __all__ = [
20
+ "index_max",
21
+ "mm_to_sparse",
22
+ "sparse_dense_mm",
23
+ "reduce_sum",
24
+ "scatter",
25
+ ]
build/torch212-cxx11-cu130-x86_64-linux/_mra_cuda_10efe7e.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b93821b4672177cfcd2dd5467c92312831ee94e494208d276466f8f5f847cf3f
3
+ size 2632472
build/torch212-cxx11-cu130-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
+
5
+ def add_op_namespace_prefix(op_name: str):
6
+ """
7
+ Prefix op by namespace.
8
+ """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch212-cxx11-cu130-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
+ "version": 1,
5
+ "license": "Apache-2.0",
6
+ "python-depends": [],
7
+ "backend": {
8
+ "type": "cuda",
9
+ "archs": [
10
+ "10.0",
11
+ "11.0",
12
+ "12.0+PTX",
13
+ "7.5",
14
+ "8.0",
15
+ "8.6",
16
+ "8.7",
17
+ "8.9",
18
+ "9.0"
19
+ ]
20
+ }
21
+ }
build/torch212-cxx11-cu130-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ctypes
2
+ import importlib.util
3
+ import sys
4
+ from pathlib import Path
5
+ from types import ModuleType
6
+
7
+
8
+ def _import_from_path(file_path: Path) -> ModuleType:
9
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
10
+ # it would also be used for other imports. So, we make a module name that
11
+ # depends on the path for it to be unique using the hex-encoded hash of
12
+ # the path.
13
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
14
+ module_name = path_hash
15
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
16
+ if spec is None:
17
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
18
+ module = importlib.util.module_from_spec(spec)
19
+ if module is None:
20
+ raise ImportError(f"Cannot load module {module_name} from spec")
21
+ sys.modules[module_name] = module
22
+ spec.loader.exec_module(module) # type: ignore
23
+ return module
24
+
25
+
26
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch212-cxx11-cu132-x86_64-linux/__init__.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ._ops import ops
2
+ import torch
3
+
4
+ def index_max(index_vals: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
5
+ return ops.index_max(index_vals, indices, A_num_block, B_num_block)
6
+
7
+ def mm_to_sparse(dense_A: torch.Tensor, dense_B: torch.Tensor, indices: torch.Tensor):
8
+ return ops.mm_to_sparse(dense_A, dense_B, indices)
9
+
10
+ def sparse_dense_mm(sparse_A: torch.Tensor, indices: torch.Tensor, dense_B: torch.Tensor, A_num_block: int):
11
+ return ops.sparse_dense_mm(sparse_A, indices, dense_B, A_num_block)
12
+
13
+ def reduce_sum(sparse_A: torch.Tensor, indices: torch.Tensor, A_num_block: int, B_num_block: int):
14
+ return ops.reduce_sum(sparse_A, indices, A_num_block, B_num_block)
15
+
16
+ def scatter(dense_A: torch.Tensor, indices: torch.Tensor, B_num_block: int):
17
+ return ops.scatter(dense_A, indices, B_num_block)
18
+
19
+ __all__ = [
20
+ "index_max",
21
+ "mm_to_sparse",
22
+ "sparse_dense_mm",
23
+ "reduce_sum",
24
+ "scatter",
25
+ ]
build/torch212-cxx11-cu132-x86_64-linux/_mra_cuda_10efe7e.abi3.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb3bf0488c644df07c7cf65068bd4ab6f5e83798201665f3a82656dc0014363b
3
+ size 2700264
build/torch212-cxx11-cu132-x86_64-linux/_ops.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from . import _mra_cuda_10efe7e
3
+ ops = torch.ops._mra_cuda_10efe7e
4
+
5
+ def add_op_namespace_prefix(op_name: str):
6
+ """
7
+ Prefix op by namespace.
8
+ """
9
+ return f"_mra_cuda_10efe7e::{op_name}"
build/torch212-cxx11-cu132-x86_64-linux/metadata.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "name": "mra",
3
+ "id": "_mra_cuda_10efe7e",
4
+ "version": 1,
5
+ "license": "Apache-2.0",
6
+ "python-depends": [],
7
+ "backend": {
8
+ "type": "cuda",
9
+ "archs": [
10
+ "10.0",
11
+ "11.0",
12
+ "12.0+PTX",
13
+ "7.5",
14
+ "8.0",
15
+ "8.6",
16
+ "8.7",
17
+ "8.9",
18
+ "9.0"
19
+ ]
20
+ }
21
+ }
build/torch212-cxx11-cu132-x86_64-linux/mra/__init__.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ctypes
2
+ import importlib.util
3
+ import sys
4
+ from pathlib import Path
5
+ from types import ModuleType
6
+
7
+
8
+ def _import_from_path(file_path: Path) -> ModuleType:
9
+ # We cannot use the module name as-is, after adding it to `sys.modules`,
10
+ # it would also be used for other imports. So, we make a module name that
11
+ # depends on the path for it to be unique using the hex-encoded hash of
12
+ # the path.
13
+ path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
14
+ module_name = path_hash
15
+ spec = importlib.util.spec_from_file_location(module_name, file_path)
16
+ if spec is None:
17
+ raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
18
+ module = importlib.util.module_from_spec(spec)
19
+ if module is None:
20
+ raise ImportError(f"Cannot load module {module_name} from spec")
21
+ sys.modules[module_name] = module
22
+ spec.loader.exec_module(module) # type: ignore
23
+ return module
24
+
25
+
26
+ globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))