Omaratef3221 committed on
Commit
b396537
1 Parent(s): fbb0afe

Upload folder using huggingface_hub

CustomRBFQwen.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ import torch.nn as nn
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ from RBFLayer import RBFLayer
+
+ def l_norm(x, p=2):
+     return torch.norm(x, p=p, dim=-1)
+
+
+ # Gaussian RBF
+ def rbf_gaussian(x):
+     return (-x.pow(2)).exp()
+
+ class CustomRBFFeedForward(nn.Module):
+     def __init__(self, in_features, out_features, num_kernels):
+         super(CustomRBFFeedForward, self).__init__()
+         # RBFLayer from the given implementation
+         self.rbf_layer = RBFLayer(
+             in_features_dim=in_features,    # Input size (e.g., 896)
+             num_kernels=num_kernels,        # Number of kernels in the RBF layer (can be tuned)
+             out_features_dim=out_features,  # Output size (e.g., 4864)
+             radial_function=rbf_gaussian,   # Use the Gaussian RBF
+             norm_function=l_norm            # Use Euclidean norm
+         )
+
+     def forward(self, x):
+         # Apply the RBF layer to the input x
+         return self.rbf_layer(x)
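As a quick sanity check (not part of this commit), the feed-forward block above can be exercised with the Qwen2-0.5B dimensions that appear in config.json below, hidden_size 896 and intermediate_size 4864; num_kernels=64 here is an arbitrary illustrative value, not one taken from this repo.

import torch
from CustomRBFQwen import CustomRBFFeedForward

ffn = CustomRBFFeedForward(in_features=896, out_features=4864, num_kernels=64)  # 64 kernels: arbitrary choice
x = torch.randn(2, 16, 896)   # [batch_size, sequence_length, hidden_size]
y = ffn(x)
print(y.shape)                # torch.Size([2, 16, 4864])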
RBFLayer.py ADDED
@@ -0,0 +1,101 @@
+ import torch
+ import torch.nn as nn
+ from typing import Callable
+
+
+ class RBFLayer(nn.Module):
+     def __init__(self,
+                  in_features_dim: int,
+                  num_kernels: int,
+                  out_features_dim: int,
+                  radial_function: Callable[[torch.Tensor], torch.Tensor],
+                  norm_function: Callable[[torch.Tensor], torch.Tensor],
+                  normalization: bool = True,
+                  initial_shape_parameter: torch.Tensor = None,
+                  initial_centers_parameter: torch.Tensor = None,
+                  initial_weights_parameters: torch.Tensor = None,
+                  constant_shape_parameter: bool = False,
+                  constant_centers_parameter: bool = False,
+                  constant_weights_parameters: bool = False):
+         super(RBFLayer, self).__init__()
+
+         self.in_features_dim = in_features_dim
+         self.num_kernels = num_kernels
+         self.out_features_dim = out_features_dim
+         self.radial_function = radial_function
+         self.norm_function = norm_function
+         self.normalization = normalization
+
+         self.initial_shape_parameter = initial_shape_parameter
+         self.constant_shape_parameter = constant_shape_parameter
+
+         self.initial_centers_parameter = initial_centers_parameter
+         self.constant_centers_parameter = constant_centers_parameter
+
+         self.initial_weights_parameters = initial_weights_parameters
+         self.constant_weights_parameters = constant_weights_parameters
+
+         self._make_parameters()
+
+     def _make_parameters(self) -> None:
+         # Initialize linear combination weights
+         if self.constant_weights_parameters:
+             self.weights = nn.Parameter(self.initial_weights_parameters, requires_grad=False)
+         else:
+             self.weights = nn.Parameter(torch.zeros(self.out_features_dim, self.num_kernels, dtype=torch.float32))
+
+         # Initialize kernels' centers
+         if self.constant_centers_parameter:
+             self.kernels_centers = nn.Parameter(self.initial_centers_parameter, requires_grad=False)
+         else:
+             self.kernels_centers = nn.Parameter(torch.zeros(self.num_kernels, self.in_features_dim, dtype=torch.float32))
+
+         # Initialize shape parameter
+         if self.constant_shape_parameter:
+             self.log_shapes = nn.Parameter(self.initial_shape_parameter, requires_grad=False)
+         else:
+             self.log_shapes = nn.Parameter(torch.zeros(self.num_kernels, dtype=torch.float32))
+
+         self.reset()
+
+     def reset(self, upper_bound_kernels: float = 1.0, std_shapes: float = 0.1, gain_weights: float = 1.0) -> None:
+         if self.initial_centers_parameter is None:
+             nn.init.uniform_(self.kernels_centers, a=-upper_bound_kernels, b=upper_bound_kernels)
+
+         if self.initial_shape_parameter is None:
+             nn.init.normal_(self.log_shapes, mean=0.0, std=std_shapes)
+
+         if self.initial_weights_parameters is None:
+             nn.init.xavier_uniform_(self.weights, gain=gain_weights)
+
+     def forward(self, input: torch.Tensor) -> torch.Tensor:
+         """
+         Computes the output of the RBF layer given an input tensor.
+         Input has size [batch_size, sequence_length, in_features].
+         """
+
+         batch_size = input.size(0)
+         sequence_length = input.size(1)
+
+         # Expand centers to match the batch and sequence length
+         c = self.kernels_centers.expand(batch_size, sequence_length, self.num_kernels, self.in_features_dim)
+
+         # Compute differences between input and centers
+         diff = input.unsqueeze(2) - c  # Shape: [batch_size, sequence_length, num_kernels, in_features_dim]
+
+         # Apply norm function to get distances
+         r = self.norm_function(diff)  # Shape: [batch_size, sequence_length, num_kernels]
+
+         # Apply shape parameters (log_shapes) to the distances
+         eps_r = self.log_shapes.exp().unsqueeze(0).unsqueeze(0) * r
+
+         # Apply radial basis function (e.g., Gaussian)
+         rbfs = self.radial_function(eps_r)
+
+         if self.normalization:
+             rbfs = rbfs / (1e-9 + rbfs.sum(dim=-1, keepdim=True))
+
+         # Combine RBF outputs using the weights
+         out = (self.weights.unsqueeze(0).unsqueeze(0) * rbfs.unsqueeze(2)).sum(dim=-1)
+
+         return out
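How this RBF block is wired into the Qwen2 model is not shown in this commit. One dimensionally consistent possibility, sketched here purely as an assumption, is to replace each decoder layer's up-projection (a Linear(896, 4864) in the stock transformers Qwen2 implementation) with CustomRBFFeedForward; the attribute path model.model.layers[i].mlp.up_proj and num_kernels=64 below are assumptions, not values taken from the repo.

import torch
from transformers import AutoModelForCausalLM
from CustomRBFQwen import CustomRBFFeedForward

# Hypothetical wiring sketch, not part of this commit.
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
for layer in model.model.layers:
    in_dim = layer.mlp.up_proj.in_features    # 896
    out_dim = layer.mlp.up_proj.out_features  # 4864
    layer.mlp.up_proj = CustomRBFFeedForward(in_dim, out_dim, num_kernels=64)  # num_kernels is a guess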
added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "Qwen/Qwen2-0.5B-Instruct",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 896,
+   "initializer_range": 0.02,
+   "intermediate_size": 4864,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 24,
+   "model_type": "qwen2",
+   "num_attention_heads": 14,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 2,
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 1000000.0,
+   "sliding_window": 32768,
+   "tie_word_embeddings": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.41.1",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
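The config matches the base Qwen/Qwen2-0.5B-Instruct architecture (24 layers, hidden_size 896, intermediate_size 4864, grouped-query attention with 14 query and 2 key-value heads). A minimal way to confirm those dimensions after downloading this folder locally (the local path is an assumption):

from transformers import AutoConfig

cfg = AutoConfig.from_pretrained(".")  # "." = path to the downloaded folder (assumption)
print(cfg.hidden_size, cfg.intermediate_size, cfg.num_hidden_layers)  # 896 4864 24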
generation_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.1,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.8,
+   "transformers_version": "4.41.1"
+ }
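These sampling defaults (temperature 0.7, top_p 0.8, top_k 20, repetition_penalty 1.1) are picked up automatically by generate() when the model is loaded from this folder. A hedged usage sketch, assuming the uploaded checkpoint loads into the stock Qwen2ForCausalLM class named in config.json and that the folder has been downloaded to model_dir:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_dir = "."  # path to the downloaded folder (assumption)
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir, torch_dtype=torch.float32)

messages = [{"role": "user", "content": "What is a radial basis function?"}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output = model.generate(input_ids, max_new_tokens=64)  # uses the generation_config.json defaults
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))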
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:460b1ce33a4a54d698e65d82f5bc3a4def3941de5b5775b0f96182a54ae2a359
+ size 729331576
special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null
+ }
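The chat_template above is the standard Qwen2 ChatML format: it prepends a default system message when none is supplied and wraps each turn in <|im_start|>/<|im_end|>. A small illustration, again assuming the folder has been downloaded locally:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # "." = path to the downloaded folder (assumption)
messages = [{"role": "user", "content": "Hello!"}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant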
vocab.json ADDED
The diff for this file is too large to render. See raw diff