feat: update to include rev in kernel for reproducible symbols

#2 opened by drbh (HF Staff)

This view is limited to 50 files because the full change set is larger; see the raw diff for the remaining files.

Files changed (50)
  1. README.md +0 -3
  2. build.toml +1 -2
  3. build/torch25-cxx11-cu118-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_o63kkyjirmkf4.abi3.so} +2 -2
  4. build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -3
  5. build/torch25-cxx11-cu121-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_vrl36m2ejer54.abi3.so} +2 -2
  6. build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py +3 -3
  7. build/torch25-cxx11-cu124-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_va3moa75vw7c2.abi3.so} +2 -2
  8. build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -3
  9. build/torch25-cxx98-cu118-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_qr3gs3eckeig4.abi3.so} +2 -2
  10. build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -3
  11. build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  12. build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so +3 -0
  13. build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py +3 -3
  14. build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  15. build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so +3 -0
  16. build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -3
  17. build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  18. build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so +3 -0
  19. build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py +3 -3
  20. build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  21. build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so +3 -0
  22. build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py +3 -3
  23. build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py +0 -52
  24. build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +0 -3
  25. build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py +0 -9
  26. build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py +0 -65
  27. build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  28. build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so +3 -0
  29. build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py +3 -3
  30. build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so +3 -0
  31. build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  32. build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py +3 -3
  33. build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  34. build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so +3 -0
  35. build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py +3 -3
  36. build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py +0 -52
  37. build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +0 -3
  38. build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py +0 -9
  39. build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py +0 -65
  40. build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  41. build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so +3 -0
  42. build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py +3 -3
  43. build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py +0 -52
  44. build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so +0 -3
  45. build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py +0 -9
  46. build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py +0 -65
  47. build/torch27-cxx11-cu126-aarch64-linux/activation/__init__.py +0 -52
  48. build/torch27-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so +0 -3
  49. build/torch27-cxx11-cu126-aarch64-linux/activation/_ops.py +0 -9
  50. build/torch27-cxx11-cu126-aarch64-linux/activation/layers.py +0 -65
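The pattern in the renames above: the old suffix _78448fa is the bare short git revision (with _dirty variants from an unclean aarch64 tree), identical across all build variants, while the new suffixes such as o63kkyjirmkf4 are per-variant identifiers derived from the revision plus the build configuration. That keeps each shared object's name, and therefore its torch.ops namespace, reproducible for a given revision and variant, and distinct across variants. A rough illustration of such a derivation (hypothetical; not the actual kernel-builder implementation):

import base64
import hashlib

def variant_suffix(rev: str, build_variant: str, length: int = 13) -> str:
    # Hypothetical sketch: hash the revision together with the build
    # variant; lowercase base32 matches the [a-z2-7] alphabet seen in
    # the new module names.
    digest = hashlib.sha256(f"{rev}:{build_variant}".encode()).digest()
    return base64.b32encode(digest).decode().lower().rstrip("=")[:length]

# Same revision, different variants -> different but stable suffixes.
print(variant_suffix("78448fa", "torch25-cxx11-cu118-x86_64-linux"))
print(variant_suffix("78448fa", "torch25-cxx11-cu121-x86_64-linux"))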
README.md CHANGED
@@ -2,9 +2,6 @@
 tags:
 - kernel
 ---
-
-![Status](https://hubwebhook.dholtz.com/shield?repo=kernels-community/activation)
-
 ## Activation
 
 Activation kernels from [vLLM](https://github.com/vllm-project/vllm/blob/main/csrc/activation_kernels.cu).
build.toml CHANGED
@@ -8,8 +8,7 @@ src = [
 ]
 
 [kernel.activation]
-#language = "cuda-hipify"
-#rocm-archs = [ "gfx906", "gfx908", "gfx90a", "gfx940", "gfx941", "gfx942", "gfx1030", "gfx1100", "gfx1101" ]
+cuda-capabilities = [ "7.0", "7.2", "7.5", "8.0", "8.6", "8.7", "8.9", "9.0" ]
 src = [
   "activation/activation_kernels.cu",
   "activation/cuda_compat.h",
build/torch25-cxx11-cu118-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_o63kkyjirmkf4.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:323dbf69b89390fd46b207abc1314a4cbe27491e1bb9f026c840bc3bff43b7d3
-size 2447952
+oid sha256:d50cdabfbed1df74e921ac34ff00bca0555977b14ef8082ddae7b1f30985a494
+size 2370160
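Each .abi3.so entry in this diff is a Git LFS pointer, not the binary itself: the rename plus rebuild changes the oid and size because the pointer records the SHA-256 and byte count of the new artifact. For reference, a pointer file is exactly three lines (sketch of how one is produced):

import hashlib
from pathlib import Path

def lfs_pointer(path: str) -> str:
    # Build the three-line Git LFS pointer committed in place of the
    # binary: spec version, content hash, content size in bytes.
    data = Path(path).read_bytes()
    return (
        "version https://git-lfs.github.com/spec/v1\n"
        f"oid sha256:{hashlib.sha256(data).hexdigest()}\n"
        f"size {len(data)}\n"
    )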
build/torch25-cxx11-cu118-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_o63kkyjirmkf4
+ops = torch.ops._activation_o63kkyjirmkf4
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_o63kkyjirmkf4::{op_name}"
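Every _ops.py in this PR gets the same three-line substitution; the module exists so that nothing else hardcodes the hashed name. A sketch of how callers consume the indirection (assuming this variant's build directory is importable as the activation package and a CUDA device is present):

import torch
from activation._ops import ops, add_op_namespace_prefix

# The hashed module name stays an implementation detail behind `ops`.
x = torch.randn(8, 2048, device="cuda", dtype=torch.float16)
out = torch.empty(8, 1024, device="cuda", dtype=torch.float16)
ops.silu_and_mul(out, x)  # dispatches to torch.ops._activation_o63kkyjirmkf4.silu_and_mul

# Code that needs the fully qualified op name builds it the same way:
assert add_op_namespace_prefix("silu_and_mul") == "_activation_o63kkyjirmkf4::silu_and_mul"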
build/torch25-cxx11-cu121-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_vrl36m2ejer54.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6146ac6e77cbd458560bf67c46d93217833f2caf08260cc80a4aa62ba5645ee9
-size 2471056
+oid sha256:2bd0709ef09c8f0c18d1dc4a36c8096c59459bece61f5f5dbea95d1e73f54d44
+size 2393264
build/torch25-cxx11-cu121-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_vrl36m2ejer54
+ops = torch.ops._activation_vrl36m2ejer54
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_vrl36m2ejer54::{op_name}"
build/torch25-cxx11-cu124-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_va3moa75vw7c2.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:28eea3907055742f99bc9d7d4260add848adc2f6464e97029f37cd42a5c6bd0a
-size 2509832
+oid sha256:8353447f64e7d2df1a6a341d9c53bced53abef267f079923ae774170d0d57c53
+size 2427936
build/torch25-cxx11-cu124-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_va3moa75vw7c2
+ops = torch.ops._activation_va3moa75vw7c2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_va3moa75vw7c2::{op_name}"
build/torch25-cxx98-cu118-x86_64-linux/activation/{_activation_78448fa.abi3.so → _activation_qr3gs3eckeig4.abi3.so} RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5609ad07903b98c83c297bfb64f0d944df5edfe1c611fee23ec6c8fbd952604
-size 2440392
+oid sha256:df184a6315118d787a1bd6b435cb45f1ca7828445a1f1c0e55c57645cfbba43a
+size 2362600
build/torch25-cxx98-cu118-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_qr3gs3eckeig4
+ops = torch.ops._activation_qr3gs3eckeig4
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_qr3gs3eckeig4::{op_name}"
build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7e6475ed603ad2cb565bd19ad2554484bd6c00d0d3f02decff60f2285df2546f
-size 2463232
build/torch25-cxx98-cu121-x86_64-linux/activation/_activation_p7gbzt25w3zg2.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ccb13cfc2e45cf483e8b9f77f1760f28b48bcf185508d51b32d45bc759c4e8bb
+size 2385440
build/torch25-cxx98-cu121-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_p7gbzt25w3zg2
+ops = torch.ops._activation_p7gbzt25w3zg2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_p7gbzt25w3zg2::{op_name}"
build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a0767f6dba00c543d3cb77e2044bccd32ef569abc55b921231112c8a1ddfb187
-size 2502088
build/torch25-cxx98-cu124-x86_64-linux/activation/_activation_jg7yaigtn7wco.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f8048853e8cb06e8574a9a9497800d2be438f7989d79f44dcf2e0ced38a75a9
+size 2420192
build/torch25-cxx98-cu124-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_jg7yaigtn7wco
+ops = torch.ops._activation_jg7yaigtn7wco
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_jg7yaigtn7wco::{op_name}"
build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e0c04d860454cc565113a3c93ff755fe9cbba0578c4604b89ad89e47c2503932
-size 2448056
build/torch26-cxx11-cu118-x86_64-linux/activation/_activation_ncisyrun7guwk.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cde5439e78ba0e1aaa1937d798b214b46d38cbab8e4384b93a22239fed1a4dd4
+size 2370264
build/torch26-cxx11-cu118-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_ncisyrun7guwk
+ops = torch.ops._activation_ncisyrun7guwk
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_ncisyrun7guwk::{op_name}"
build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:48d7b0d190af1dd0366dbaeb0690b9c7cd1dfdc9aeda9b0b23bce56c70f5cbae
-size 2509928
build/torch26-cxx11-cu124-x86_64-linux/activation/_activation_ochhfvlnc3vyc.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6bd20d411c51fc8729b15cab6a60c5c9185222474aa035489e1bff299d76682
+size 2428040
build/torch26-cxx11-cu124-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_ochhfvlnc3vyc
+ops = torch.ops._activation_ochhfvlnc3vyc
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_ochhfvlnc3vyc::{op_name}"
build/torch26-cxx11-cu126-aarch64-linux/activation/__init__.py DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_and_mul(out, x)
-    return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_tanh_and_mul(out, x)
-    return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
-    ops.fatrelu_and_mul(out, x, threshold)
-    return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_fast(out, x)
-    return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_new(out, x)
-    return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_quick(out, x)
-    return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
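The aarch64 variants are deleted outright rather than renamed: they were built from an unclean tree (note the bbdc1b4_dirty suffix below), so they cannot be reproduced from a clean revision. For context, the functional API in the deleted __init__.py matches the surviving x86_64 builds: each *_and_mul op reads the last dimension as [input | gate], so out is half the width of x. A brief usage sketch (assuming any remaining variant is importable as activation):

import torch
import activation

x = torch.randn(4, 4096, device="cuda")    # last dim = 2 * d
out = torch.empty(4, 2048, device="cuda")  # last dim = d
activation.silu_and_mul(out, x)            # out = silu(x[..., :d]) * x[..., d:]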
build/torch26-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:558e4499ad3c09d02633488cfdc802a228b78a8cd51d963c92239d44744298c7
-size 2631936
build/torch26-cxx11-cu126-aarch64-linux/activation/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_bbdc1b4_dirty
-ops = torch.ops._activation_bbdc1b4_dirty
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_bbdc1b4_dirty::{op_name}"
build/torch26-cxx11-cu126-aarch64-linux/activation/layers.py DELETED
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
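layers.py wraps the same ops as nn.Module classes that allocate their own output, which is the shape contract models rely on. A matching sketch (same importability assumption as above):

import torch
from activation.layers import SiluAndMul

act = SiluAndMul()
gate_up = torch.randn(2, 16, 8192, device="cuda")  # (..., 2 * d)
hidden = act(gate_up)
assert hidden.shape == (2, 16, 4096)               # (..., d)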
build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:11a11d0f4119edc5c637bab04ebd5669750a0e4f4000f58ab1bf5be2d8d9ab0b
-size 2518568
build/torch26-cxx11-cu126-x86_64-linux/activation/_activation_u6vnqubnicksq.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:41c18b20c2bf8c49d2d3088a9bc1aad4293df0b57eafc9b141a9e8e595fe551a
+size 2436672
build/torch26-cxx11-cu126-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_u6vnqubnicksq
+ops = torch.ops._activation_u6vnqubnicksq
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_u6vnqubnicksq::{op_name}"
build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_2vn6ty3gfqfb6.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfbcd5da358cd5cb7982d19c8880cf4db6f08b46622a7a953f755ad59e4e1492
+size 2362752
build/torch26-cxx98-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:56dcc985761e309cbef3fc2a201f26e800583128d6e5a3fc1b23800fb0b8b48c
-size 2440544
build/torch26-cxx98-cu118-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_2vn6ty3gfqfb6
+ops = torch.ops._activation_2vn6ty3gfqfb6
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_2vn6ty3gfqfb6::{op_name}"
build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:03c5f08322796d0736024412babe5d7f13bb1126387976ae12a80485a40d3883
-size 2502240
build/torch26-cxx98-cu124-x86_64-linux/activation/_activation_myvteedxdpqc6.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b1bc928823117c800904bcd3492bf1a0c65a32f6d8a842dc039f55e29831ab49
+size 2420344
build/torch26-cxx98-cu124-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_myvteedxdpqc6
+ops = torch.ops._activation_myvteedxdpqc6
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_myvteedxdpqc6::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/activation/__init__.py DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_and_mul(out, x)
-    return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_tanh_and_mul(out, x)
-    return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
-    ops.fatrelu_and_mul(out, x, threshold)
-    return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_fast(out, x)
-    return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_new(out, x)
-    return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_quick(out, x)
-    return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch26-cxx98-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f6afd50526ff4221cddd52cb947900cdf6bb95ad0a6bffcd1a86bda4d3f52349
-size 2628128
build/torch26-cxx98-cu126-aarch64-linux/activation/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_bbdc1b4_dirty
-ops = torch.ops._activation_bbdc1b4_dirty
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_bbdc1b4_dirty::{op_name}"
build/torch26-cxx98-cu126-aarch64-linux/activation/layers.py DELETED
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f6eae5c895c564fbd2524ce488f4e91e65dc63402cd41a8bc74474b7437b2e62
-size 2506784
build/torch26-cxx98-cu126-x86_64-linux/activation/_activation_rbswus6emrhm2.abi3.so ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:474727e434a9cd4ec984a6da7124992ead4ca0fefce9581d0fd503e36c065aed
+size 2424888
build/torch26-cxx98-cu126-x86_64-linux/activation/_ops.py CHANGED
@@ -1,9 +1,9 @@
 import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
+from . import _activation_rbswus6emrhm2
+ops = torch.ops._activation_rbswus6emrhm2
 
 def add_op_namespace_prefix(op_name: str):
     """
     Prefix op by namespace.
     """
-    return f"_activation_78448fa::{op_name}"
+    return f"_activation_rbswus6emrhm2::{op_name}"
build/torch27-cxx11-cu118-x86_64-linux/activation/__init__.py DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_and_mul(out, x)
-    return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_tanh_and_mul(out, x)
-    return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
-    ops.fatrelu_and_mul(out, x, threshold)
-    return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_fast(out, x)
-    return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_new(out, x)
-    return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_quick(out, x)
-    return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch27-cxx11-cu118-x86_64-linux/activation/_activation_78448fa.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f8086b2d9e0f2db80385b83e0bc28f8d158725d002e1613e1a46a87732197e9f
-size 2448152
build/torch27-cxx11-cu118-x86_64-linux/activation/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_78448fa
-ops = torch.ops._activation_78448fa
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_78448fa::{op_name}"
build/torch27-cxx11-cu118-x86_64-linux/activation/layers.py DELETED
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out
build/torch27-cxx11-cu126-aarch64-linux/activation/__init__.py DELETED
@@ -1,52 +0,0 @@
-import torch
-
-from ._ops import ops
-
-from . import layers
-
-
-def silu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.silu_and_mul(out, x)
-    return out
-
-
-def gelu_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_and_mul(out, x)
-    return out
-
-
-def gelu_tanh_and_mul(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_tanh_and_mul(out, x)
-    return out
-
-
-def fatrelu_and_mul(out: torch.Tensor, x: torch.Tensor, threshold: float = 0.0) -> None:
-    ops.fatrelu_and_mul(out, x, threshold)
-    return out
-
-
-def gelu_fast(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_fast(out, x)
-    return out
-
-
-def gelu_new(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_new(out, x)
-    return out
-
-
-def gelu_quick(out: torch.Tensor, x: torch.Tensor) -> None:
-    ops.gelu_quick(out, x)
-    return out
-
-
-__all__ = [
-    "silu_and_mul",
-    "gelu_and_mul",
-    "gelu_tanh_and_mul",
-    "fatrelu_and_mul",
-    "gelu_fast",
-    "gelu_new",
-    "gelu_quick",
-    "layers",
-]
build/torch27-cxx11-cu126-aarch64-linux/activation/_activation_bbdc1b4_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4210a6598f00b8921ecba1a0e24603eb05437a876ca1f473d2641e11d9a67ece
-size 2632160
build/torch27-cxx11-cu126-aarch64-linux/activation/_ops.py DELETED
@@ -1,9 +0,0 @@
-import torch
-from . import _activation_bbdc1b4_dirty
-ops = torch.ops._activation_bbdc1b4_dirty
-
-def add_op_namespace_prefix(op_name: str):
-    """
-    Prefix op by namespace.
-    """
-    return f"_activation_bbdc1b4_dirty::{op_name}"
build/torch27-cxx11-cu126-aarch64-linux/activation/layers.py DELETED
@@ -1,65 +0,0 @@
-import torch
-import torch.nn as nn
-
-from ._ops import ops
-
-
-class SiluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.silu_and_mul(out, x)
-        return out
-
-
-class GeluAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_and_mul(out, x)
-        return out
-
-
-class GeluTanhAndMul(nn.Module):
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.gelu_tanh_and_mul(out, x)
-        return out
-
-
-class FatreluAndMul(nn.Module):
-    def __init__(self, threshold: float = 0.0):
-        super().__init__()
-        self.threshold = threshold
-
-    def forward(self, x: torch.Tensor):
-        d = x.shape[-1] // 2
-        output_shape = x.shape[:-1] + (d,)
-        out = torch.empty(output_shape, dtype=x.dtype, device=x.device)
-        ops.fatrelu_and_mul(out, x, self.threshold)
-        return out
-
-
-class FastGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_fast(out, x)
-        return out
-
-
-class NewGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_new(out, x)
-        return out
-
-
-class QuickGELU(nn.Module):
-    def forward(self, x: torch.Tensor) -> torch.Tensor:
-        out = torch.empty_like(x)
-        ops.gelu_quick(out, x)
-        return out