drbh committed
Commit · aeb4e54 (unverified)
Parent(s): Migrated from kernels-community/quantization-eetq
Browse files. This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +36 -0
- README.md +14 -0
- build/torch210-cxx11-cu126-aarch64-linux/__init__.py +3 -0
- build/torch210-cxx11-cu126-aarch64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu126-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch210-cxx11-cu126-aarch64-linux/custom_ops.py +36 -0
- build/torch210-cxx11-cu126-aarch64-linux/metadata.json +18 -0
- build/torch210-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py +26 -0
- build/torch210-cxx11-cu126-x86_64-linux/__init__.py +3 -0
- build/torch210-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu126-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch210-cxx11-cu126-x86_64-linux/custom_ops.py +36 -0
- build/torch210-cxx11-cu126-x86_64-linux/metadata.json +18 -0
- build/torch210-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch210-cxx11-cu128-aarch64-linux/__init__.py +3 -0
- build/torch210-cxx11-cu128-aarch64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu128-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch210-cxx11-cu128-aarch64-linux/custom_ops.py +36 -0
- build/torch210-cxx11-cu128-aarch64-linux/metadata.json +21 -0
- build/torch210-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py +26 -0
- build/torch210-cxx11-cu128-x86_64-linux/__init__.py +3 -0
- build/torch210-cxx11-cu128-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu128-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch210-cxx11-cu128-x86_64-linux/custom_ops.py +36 -0
- build/torch210-cxx11-cu128-x86_64-linux/metadata.json +21 -0
- build/torch210-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch211-cxx11-cu126-aarch64-linux/__init__.py +3 -0
- build/torch211-cxx11-cu126-aarch64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu126-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch211-cxx11-cu126-aarch64-linux/custom_ops.py +36 -0
- build/torch211-cxx11-cu126-aarch64-linux/metadata.json +18 -0
- build/torch211-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py +26 -0
- build/torch211-cxx11-cu126-x86_64-linux/__init__.py +3 -0
- build/torch211-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu126-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch211-cxx11-cu126-x86_64-linux/custom_ops.py +36 -0
- build/torch211-cxx11-cu126-x86_64-linux/metadata.json +18 -0
- build/torch211-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch211-cxx11-cu128-aarch64-linux/__init__.py +3 -0
- build/torch211-cxx11-cu128-aarch64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu128-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch211-cxx11-cu128-aarch64-linux/custom_ops.py +36 -0
- build/torch211-cxx11-cu128-aarch64-linux/metadata.json +21 -0
- build/torch211-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py +26 -0
- build/torch211-cxx11-cu128-x86_64-linux/__init__.py +3 -0
- build/torch211-cxx11-cu128-x86_64-linux/_ops.py +9 -0
- build/torch211-cxx11-cu128-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so +3 -0
- build/torch211-cxx11-cu128-x86_64-linux/custom_ops.py +36 -0
- build/torch211-cxx11-cu128-x86_64-linux/metadata.json +21 -0
- build/torch211-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +26 -0
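Each `build/<torch>-<cxx-abi>-<cuda>-<arch>-linux` directory below ships the same Python shim files plus one precompiled `.abi3.so` extension, so a loader can pick the directory matching the local environment. A hedged sketch of how such a variant name could be reconstructed at runtime; the real selection logic lives in the Hugging Face `kernels` library, and the `cxx98` fallback tag is an assumption:

import platform

import torch


def build_variant() -> str:
    # "2.10.0" -> "torch210", mirroring the directory names above.
    major, minor = torch.__version__.split("+")[0].split(".")[:2]
    # C++ ABI the local torch build was compiled with ("cxx98" tag assumed).
    cxx = "cxx11" if torch.compiled_with_cxx11_abi() else "cxx98"
    cuda = "cu" + torch.version.cuda.replace(".", "")  # requires a CUDA build
    return f"torch{major}{minor}-{cxx}-{cuda}-{platform.machine()}-linux"


print(build_variant())  # e.g. "torch210-cxx11-cu126-x86_64-linux"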
.gitattributes
ADDED
@@ -0,0 +1,36 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+*.so filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,14 @@
+---
+license: apache-2.0
+tags:
+- kernels
+---
+
+
+
+## eetq
+
+EETQ kernels from [NetEase-FuXi/EETQ](https://github.com/NetEase-FuXi/EETQ).
+
+Kernel source: https://github.com/huggingface/kernels-community/tree/main/quantization-eetq
+
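Because the README tags this repository with `kernels`, the kernel can presumably be fetched with the Hugging Face `kernels` library, which downloads the build variant matching the local torch/CUDA/platform and exposes the functions from `__init__.py`. A minimal, hedged usage sketch; the repo id and the weight layout expected by the EETQ ops are assumptions, not documented here:

import torch

from kernels import get_kernel

eetq = get_kernel("kernels-community/quantization-eetq")  # repo id assumed

# Quantize an fp16 weight to int8 with per-channel scales (layout assumed).
w = torch.randn(4096, 4096, dtype=torch.float16)
qweight, scales = eetq.quant_weights(w, torch.int8, False)

x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")
y = eetq.w8_a16_gemm(x, qweight.cuda(), scales.cuda())  # fp16 (8, 4096) output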
build/torch210-cxx11-cu126-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch210-cxx11-cu126-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
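The `add_op_namespace_prefix` helper lets callers build the fully qualified op name (e.g. `_quantization_eetq_cuda_4bc0051::w8_a16_gemm`) without hard-coding the per-build hash. One plausible use, sketched here under the assumption that it runs from inside this package, is registering a fake (shape-only) kernel so `torch.compile` can trace the op; this registration is illustrative, not part of the repo:

import torch

from ._ops import add_op_namespace_prefix


# Shape propagation for tracing; assumes a (k, n) packed-weight layout.
@torch.library.register_fake(add_op_namespace_prefix("w8_a16_gemm"))
def _w8_a16_gemm_fake(input, weight, scale):
    return input.new_empty(*input.shape[:-1], weight.shape[-1])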
build/torch210-cxx11-cu126-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d7c4c1ddd8d31cee535f23012aa490749a38b28dc3592473eea939e3611b3b1
+size 39010048
build/torch210-cxx11-cu126-aarch64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
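These four wrappers cover the whole weight-only path: `quant_weights` quantizes an fp16 weight and returns the packed int tensor plus per-channel scales, `preprocess_weights` repacks an externally quantized tensor into the kernel's layout, and `w8_a16_gemm` / `w8_a16_gemm_` run the fp16-activation × int8-weight GEMM, the latter writing into a caller-allocated `output`. A hedged end-to-end sketch; quantizing on CPU and the (k, n) weight layout follow common EETQ usage rather than a documented contract:

import torch

from quantization_eetq import quant_weights, w8_a16_gemm

m, k, n = 8, 4096, 4096
weight = torch.randn(k, n, dtype=torch.float16)  # CPU-side quantization assumed

qweight, scales = quant_weights(weight, torch.int8, False)
qweight, scales = qweight.cuda(), scales.cuda()

x = torch.randn(m, k, dtype=torch.float16, device="cuda")
y = w8_a16_gemm(x, qweight, scales)  # fp16 output of shape (m, n)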
build/torch210-cxx11-cu126-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
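The `archs` list records the CUDA compute capabilities this binary was compiled for, with `+PTX` marking the entry that also embeds forward-compatible PTX. A hedged sketch of a runtime check against this metadata (the helper and its strictness are illustrative; a `+PTX` entry would also let newer GPUs JIT-compile, which this check ignores):

import json
from pathlib import Path

import torch


def supports_current_gpu(build_dir: Path) -> bool:
    meta = json.loads((build_dir / "metadata.json").read_text())
    archs = {a.removesuffix("+PTX") for a in meta["backend"]["archs"]}
    major, minor = torch.cuda.get_device_capability()
    return f"{major}.{minor}" in archs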
build/torch210-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
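This shim makes `quantization_eetq` a stable import name: it executes the parent build directory's `__init__.py` under a module name derived from the file path's hash, so loading several build variants in one process cannot collide in `sys.modules`, and then re-exports the resulting symbols. An illustrative sketch, with a hypothetical path:

import sys

sys.path.insert(0, "build/torch210-cxx11-cu126-aarch64-linux")  # hypothetical

import quantization_eetq  # triggers the shim above

print(quantization_eetq.w8_a16_gemm)  # re-exported from the parent __init__.py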
build/torch210-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch210-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch210-cxx11-cu126-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d60d2faac38ccbfeca46a6f9b025cdce2590bb5291da8ae42f0e2235a29ec066
+size 39060416
build/torch210-cxx11-cu126-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch210-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch210-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu128-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch210-cxx11-cu128-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch210-cxx11-cu128-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0aa8d32b1d6f07f1b095c7228dbe14325aeeba864aefcdc614654261babce162
+size 45366048
build/torch210-cxx11-cu128-aarch64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch210-cxx11-cu128-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch210-cxx11-cu128-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch210-cxx11-cu128-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch210-cxx11-cu128-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4735c25762548fcf265184a9c06da1bddc95f663f183b15e53b09f98c35ad321
+size 45394000
build/torch210-cxx11-cu128-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch210-cxx11-cu128-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch210-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu126-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch211-cxx11-cu126-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch211-cxx11-cu126-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b1d6d36599980e86a95f70c00d05c9aacef4a4d24188a84c3b258a2a06133fd
+size 39006256
build/torch211-cxx11-cu126-aarch64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch211-cxx11-cu126-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch211-cxx11-cu126-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch211-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch211-cxx11-cu126-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1264b126ed4d9c9d80758671f9965e830bfa916631934b81784e194c74b293a0
+size 39053336
build/torch211-cxx11-cu126-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch211-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,18 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0+PTX"
+    ]
+  }
+}
build/torch211-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-aarch64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch211-cxx11-cu128-aarch64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch211-cxx11-cu128-aarch64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:894bebb496c1327c2855c6b1c4b362ebde395fbdc2a5050127b20ebd2faa943d
+size 45296712
build/torch211-cxx11-cu128-aarch64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch211-cxx11-cu128-aarch64-linux/metadata.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu128-aarch64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))
build/torch211-cxx11-cu128-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]
build/torch211-cxx11-cu128-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_cuda_4bc0051
+ops = torch.ops._quantization_eetq_cuda_4bc0051
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_cuda_4bc0051::{op_name}"
build/torch211-cxx11-cu128-x86_64-linux/_quantization_eetq_cuda_4bc0051.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5782cd3f5577334deb176bd5e7502850d2302ade7b7df9ff9491c011ccba469
+size 45382832
build/torch211-cxx11-cu128-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )
build/torch211-cxx11-cu128-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "version": 1,
+  "license": "Apache-2.0",
+  "python-depends": [],
+  "backend": {
+    "type": "cuda",
+    "archs": [
+      "10.0",
+      "10.1",
+      "12.0+PTX",
+      "7.0",
+      "7.2",
+      "7.5",
+      "8.0",
+      "8.6",
+      "8.7",
+      "8.9",
+      "9.0"
+    ]
+  }
+}
build/torch211-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import importlib.util
+import sys
+from pathlib import Path
+from types import ModuleType
+
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))