Revert "Build uploaded using `kernels`."
This reverts commit b1abf654c7b0c085643388ebae3fb5629c167df1.
- build/torch210-cxx11-cu126-x86_64-linux/__init__.py +3 -0
- build/torch210-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch210-cxx11-cu126-x86_64-linux/custom_ops.py +36 -0
- build/torch210-cxx11-cu126-x86_64-linux/metadata.json +1 -0
- build/torch210-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch210-cxx11-cu128-x86_64-linux/__init__.py +3 -0
- build/torch210-cxx11-cu128-x86_64-linux/_ops.py +9 -0
- build/torch210-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch210-cxx11-cu128-x86_64-linux/custom_ops.py +36 -0
- build/torch210-cxx11-cu128-x86_64-linux/metadata.json +1 -0
- build/torch210-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch28-cxx11-cu126-x86_64-linux/__init__.py +3 -0
- build/torch28-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch28-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch28-cxx11-cu126-x86_64-linux/custom_ops.py +36 -0
- build/torch28-cxx11-cu126-x86_64-linux/metadata.json +1 -0
- build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch28-cxx11-cu128-x86_64-linux/__init__.py +3 -0
- build/torch28-cxx11-cu128-x86_64-linux/_ops.py +9 -0
- build/torch28-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch28-cxx11-cu128-x86_64-linux/custom_ops.py +36 -0
- build/torch28-cxx11-cu128-x86_64-linux/metadata.json +1 -0
- build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch28-cxx11-cu129-x86_64-linux/__init__.py +3 -0
- build/torch28-cxx11-cu129-x86_64-linux/_ops.py +9 -0
- build/torch28-cxx11-cu129-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch28-cxx11-cu129-x86_64-linux/custom_ops.py +36 -0
- build/torch28-cxx11-cu129-x86_64-linux/metadata.json +1 -0
- build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch29-cxx11-cu126-x86_64-linux/__init__.py +3 -0
- build/torch29-cxx11-cu126-x86_64-linux/_ops.py +9 -0
- build/torch29-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch29-cxx11-cu126-x86_64-linux/custom_ops.py +36 -0
- build/torch29-cxx11-cu126-x86_64-linux/metadata.json +1 -0
- build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py +26 -0
- build/torch29-cxx11-cu128-x86_64-linux/__init__.py +3 -0
- build/torch29-cxx11-cu128-x86_64-linux/_ops.py +9 -0
- build/torch29-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so +3 -0
- build/torch29-cxx11-cu128-x86_64-linux/custom_ops.py +36 -0
- build/torch29-cxx11-cu128-x86_64-linux/metadata.json +1 -0
- build/torch29-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py +26 -0
build/torch210-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch210-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch210-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ddd95d6bd5fa005afb8ca1cca98f8e666888437a7b21faa044d7e3180aa0902
+size 39060392

build/torch210-cxx11-cu126-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

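For context, these wrappers expose EETQ's weight-only int8 path: quantize an fp16 weight matrix once with quant_weights, then run fp16-activation GEMMs against it with w8_a16_gemm. A minimal usage sketch follows; the two-tensor return layout of quant_weights, the tensor shapes, and the assumption that the built variant is importable as quantization_eetq (via the shim below) are illustrative guesses, not something this commit specifies.

import torch
from quantization_eetq import quant_weights, w8_a16_gemm

# Offline, one-time quantization of an fp16 weight matrix to int8.
weight = torch.randn(4096, 4096, dtype=torch.float16)
qweight, scales = quant_weights(weight, torch.int8, False)  # assumed (weight, scales) layout

# Weight-only int8 GEMM with fp16 activations at inference time.
x = torch.randn(8, 4096, dtype=torch.float16, device="cuda")
y = w8_a16_gemm(x, qweight.cuda(), scales.cuda())
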
build/torch210-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch210-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

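For reference, this shim loads the variant's parent __init__.py under a unique, path-derived module name and copies its globals, so importing quantization_eetq exposes the same four wrappers regardless of which torch/CUDA build directory is in use. A minimal standalone sketch of the same importlib pattern, with a made-up path and module name:

import importlib.util
import sys
from pathlib import Path

def load_module_from(path: Path, name: str):
    # Build a spec from an arbitrary file path and execute it as a fresh module.
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    sys.modules[name] = module
    spec.loader.exec_module(module)
    return module

# e.g. mod = load_module_from(Path("build/some-variant/__init__.py"), "variant_abc123")
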
build/torch210-cxx11-cu128-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch210-cxx11-cu128-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch210-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6026ada25ff3b5a4eb532b0e8a6825ef353d4c507f36817020557c68b22db2f8
+size 45393976

build/torch210-cxx11-cu128-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

build/torch210-cxx11-cu128-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch210-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch28-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch28-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch28-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a97b47d38cf622b58973de106d9b36fa9f41400c0de74a98c67b39d916d10410
+size 39045544

build/torch28-cxx11-cu126-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

build/torch28-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch28-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch28-cxx11-cu128-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch28-cxx11-cu128-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch28-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdfe703aa8a570bb3589ec89aeca00af0a983b968700933127f2762f50c47686
+size 45378576

build/torch28-cxx11-cu128-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

build/torch28-cxx11-cu128-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch28-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch28-cxx11-cu129-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch28-cxx11-cu129-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch28-cxx11-cu129-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea1e527ad52b776e108df3ad077a84ec8db233a67630fa8389fa03a641b4a8c8
+size 46492944

build/torch28-cxx11-cu129-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

build/torch28-cxx11-cu129-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch28-cxx11-cu129-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch29-cxx11-cu126-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch29-cxx11-cu126-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch29-cxx11-cu126-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:232a741853f62f20c6d4e418644918b2f8c2b11c8ee97bf396d0c9e382656edf
+size 39049616

build/torch29-cxx11-cu126-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

build/torch29-cxx11-cu126-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch29-cxx11-cu126-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))

build/torch29-cxx11-cu128-x86_64-linux/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from .custom_ops import w8_a16_gemm, w8_a16_gemm_, preprocess_weights, quant_weights
+
+__all__ = ["w8_a16_gemm", "w8_a16_gemm_", "preprocess_weights", "quant_weights"]

build/torch29-cxx11-cu128-x86_64-linux/_ops.py
ADDED
@@ -0,0 +1,9 @@
+import torch
+from . import _quantization_eetq_2019ec2
+ops = torch.ops._quantization_eetq_2019ec2
+
+def add_op_namespace_prefix(op_name: str):
+    """
+    Prefix op by namespace.
+    """
+    return f"_quantization_eetq_2019ec2::{op_name}"

build/torch29-cxx11-cu128-x86_64-linux/_quantization_eetq_2019ec2.abi3.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:46ea0f0690cda2b0143842ccfdf0f3beff510ce8666c7c3b802856a4d28197cf
+size 45378736

build/torch29-cxx11-cu128-x86_64-linux/custom_ops.py
ADDED
@@ -0,0 +1,36 @@
+from typing import List
+import torch
+
+from ._ops import ops
+
+
+def w8_a16_gemm(
+    input: torch.Tensor, weight: torch.Tensor, scale: torch.Tensor
+) -> torch.Tensor:
+    return ops.w8_a16_gemm(input, weight, scale)
+
+
+def w8_a16_gemm_(
+    input: torch.Tensor,
+    weight: torch.Tensor,
+    scale: torch.Tensor,
+    output: torch.Tensor,
+    m: int,
+    n: int,
+    k: int,
+) -> torch.Tensor:
+    return ops.w8_a16_gemm_(input, weight, scale, output, m, n, k)
+
+
+def preprocess_weights(origin_weight: torch.Tensor, is_int4: bool) -> torch.Tensor:
+    return ops.preprocess_weights(origin_weight, is_int4)
+
+
+def quant_weights(
+    origin_weight: torch.Tensor,
+    quant_type: torch.dtype,
+    return_unprocessed_quantized_tensor: bool,
+) -> List[torch.Tensor]:
+    return ops.quant_weights(
+        origin_weight, quant_type, return_unprocessed_quantized_tensor
+    )

build/torch29-cxx11-cu128-x86_64-linux/metadata.json
ADDED
@@ -0,0 +1 @@
+{"python-depends":[]}

build/torch29-cxx11-cu128-x86_64-linux/quantization_eetq/__init__.py
ADDED
@@ -0,0 +1,26 @@
+import ctypes
+import sys
+
+import importlib
+from pathlib import Path
+from types import ModuleType
+
+def _import_from_path(file_path: Path) -> ModuleType:
+    # We cannot use the module name as-is, after adding it to `sys.modules`,
+    # it would also be used for other imports. So, we make a module name that
+    # depends on the path for it to be unique using the hex-encoded hash of
+    # the path.
+    path_hash = "{:x}".format(ctypes.c_size_t(hash(file_path.absolute())).value)
+    module_name = path_hash
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    if spec is None:
+        raise ImportError(f"Cannot load spec for {module_name} from {file_path}")
+    module = importlib.util.module_from_spec(spec)
+    if module is None:
+        raise ImportError(f"Cannot load module {module_name} from spec")
+    sys.modules[module_name] = module
+    spec.loader.exec_module(module)  # type: ignore
+    return module
+
+
+globals().update(vars(_import_from_path(Path(__file__).parent.parent / "__init__.py")))