vrashad committed on
Commit 5ced29b
1 Parent(s): d6df744

Upload folder using huggingface_hub

.ipynb_checkpoints/README-checkpoint.md ADDED
1_Pooling/config.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "word_embedding_dimension": 768,
+     "pooling_mode_cls_token": false,
+     "pooling_mode_mean_tokens": true,
+     "pooling_mode_max_tokens": false,
+     "pooling_mode_mean_sqrt_len_tokens": false
+ }
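These flags describe plain mean pooling over token embeddings (no CLS or max pooling), which is exactly what the README's `mean_pooling` function reproduces by hand. A minimal sketch of how sentence-transformers would build the same module from these settings (the expected output in the final comment is an assumption):

```python
from sentence_transformers.models import Pooling

# Mirror 1_Pooling/config.json: 768-dim word embeddings, mean pooling only
pooling = Pooling(
    word_embedding_dimension=768,
    pooling_mode_cls_token=False,
    pooling_mode_mean_tokens=True,
    pooling_mode_max_tokens=False,
    pooling_mode_mean_sqrt_len_tokens=False,
)
print(pooling.get_pooling_mode_str())  # expected to report "mean"
```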
README.md CHANGED
@@ -1,3 +1,111 @@
- ---
- license: cc-by-4.0
- ---
+ ---
+ license: cc-by-4.0
+ language:
+ - az
+ metrics:
+ - pearsonr
+ base_model:
+ - sentence-transformers/LaBSE
+ pipeline_tag: sentence-similarity
+ widget:
+ - source_sentence: Bu xoşbəxt bir insandır
+   sentences:
+   - Bu xoşbəxt bir itdir
+   - Bu çox xoşbəxt bir insandır
+   - Bu gün günəşli bir gündür
+   example_title: Sentence Similarity
+ tags:
+ - labse
+ ---
+
+ # TEmA-small
+
+ This model is a fine-tuned version of [LaBSE](https://huggingface.co/sentence-transformers/LaBSE), specialized for sentence similarity tasks on Azerbaijani texts.
+ It maps sentences and paragraphs to a 768-dimensional dense vector space, useful for tasks such as clustering and semantic search.
+
+
+
+
+ ## Benchmark Results
+
+ | STSBenchmark | biosses-sts | sickr-sts | sts12-sts | sts13-sts | sts15-sts | sts16-sts | Average Pearson | Model |
+ |--------------|-------------|-----------|-----------|-----------|-----------|-----------|-----------------|------------------------------------|
+ | 0.8253 | 0.7859 | 0.7924 | 0.8444 | 0.7490 | 0.8141 | 0.7600 | 0.7959 | TEmA-small |
+ | 0.7872 | 0.8303 | 0.7801 | 0.7978 | 0.6963 | 0.8052 | 0.7794 | 0.7823 | Cohere/embed-multilingual-v3.0 |
+ | 0.7927 | 0.6672 | 0.7758 | 0.8122 | 0.7312 | 0.7831 | 0.7416 | 0.7577 | BAAI/bge-m3 |
+ | 0.7572 | 0.8139 | 0.7328 | 0.7646 | 0.6318 | 0.7542 | 0.7092 | 0.7377 | intfloat/multilingual-e5-large-instruct |
+ | 0.7400 | 0.8216 | 0.6946 | 0.7098 | 0.6781 | 0.7637 | 0.7222 | 0.7329 | labse_stripped |
+ | 0.7485 | 0.7714 | 0.7271 | 0.7170 | 0.6496 | 0.7570 | 0.7255 | 0.7280 | intfloat/multilingual-e5-large |
+ | 0.7245 | 0.8237 | 0.6839 | 0.6570 | 0.7125 | 0.7612 | 0.7386 | 0.7288 | OpenAI/text-embedding-3-large |
+ | 0.7363 | 0.8148 | 0.7067 | 0.7050 | 0.6535 | 0.7514 | 0.7070 | 0.7250 | sentence-transformers/LaBSE |
+ | 0.7376 | 0.7917 | 0.7190 | 0.7441 | 0.6286 | 0.7461 | 0.7026 | 0.7242 | intfloat/multilingual-e5-small |
+ | 0.7192 | 0.8198 | 0.7160 | 0.7338 | 0.5815 | 0.7318 | 0.6973 | 0.7142 | Cohere/embed-multilingual-light-v3.0 |
+ | 0.6960 | 0.8185 | 0.6950 | 0.6752 | 0.5899 | 0.7186 | 0.6790 | 0.6960 | intfloat/multilingual-e5-base |
+ | 0.5830 | 0.2486 | 0.5921 | 0.5593 | 0.5559 | 0.5404 | 0.5289 | 0.5155 | antoinelouis/colbert-xm |
+
+
+ [STS-Benchmark](https://github.com/LocalDoc-Azerbaijan/STS-Benchmark)
+
+
+
+
+ ## Accuracy Results
+ - **Cosine Distance:** 96.63
+ - **Manhattan Distance:** 96.52
+ - **Euclidean Distance:** 96.57
+
+
+
+
+ ## Usage
+
+ ```python
+ from transformers import AutoTokenizer, AutoModel
+ import torch
+
+ # Mean Pooling - Take attention mask into account for correct averaging
+ def mean_pooling(model_output, attention_mask):
+     token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
+     input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
+     return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
+
+ # Function to normalize embeddings
+ def normalize_embeddings(embeddings):
+     return embeddings / embeddings.norm(dim=1, keepdim=True)
+
+ # Sentences we want embeddings for
+ sentences = [
+     "Bu xoşbəxt bir insandır",
+     "Bu çox xoşbəxt bir insandır",
+     "Bu gün günəşli bir gündür"
+ ]
+
+ # Load model from HuggingFace Hub
+ tokenizer = AutoTokenizer.from_pretrained('LocalDoc/TEmA-small')
+ model = AutoModel.from_pretrained('LocalDoc/TEmA-small')
+
+ # Tokenize sentences
+ encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt')
+
+ # Compute token embeddings
+ with torch.no_grad():
+     model_output = model(**encoded_input)
+
+ # Perform pooling
+ sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+ # Normalize embeddings
+ sentence_embeddings = normalize_embeddings(sentence_embeddings)
+
+ # Calculate cosine similarities
+ cosine_similarities = torch.nn.functional.cosine_similarity(
+     sentence_embeddings[0].unsqueeze(0),
+     sentence_embeddings[1:],
+     dim=1
+ )
+
+ print("Cosine Similarities:")
+ for i, score in enumerate(cosine_similarities):
+     print(f"Sentence 1 <-> Sentence {i+2}: {score:.4f}")
+ ```
+
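Since the commit also ships sentence-transformers module configs (1_Pooling/config.json above, modules.json below), the same embeddings should be obtainable through the sentence-transformers API as well. A minimal sketch, assuming the repo loads directly with `SentenceTransformer`:

```python
from sentence_transformers import SentenceTransformer, util

# Load the repository as a sentence-transformers pipeline (Transformer + mean Pooling)
model = SentenceTransformer("LocalDoc/TEmA-small")

sentences = [
    "Bu xoşbəxt bir insandır",
    "Bu çox xoşbəxt bir insandır",
    "Bu gün günəşli bir gündür",
]

# Normalized embeddings, so dot product equals cosine similarity
embeddings = model.encode(sentences, normalize_embeddings=True)

# Cosine similarity of the first sentence against the other two
print(util.cos_sim(embeddings[0], embeddings[1:]))
```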
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+     "_name_or_path": "/root/.cache/torch/sentence_transformers/LocalDoc_LaBSE-small-AZ",
+     "architectures": [
+         "BertModel"
+     ],
+     "attention_probs_dropout_prob": 0.1,
+     "classifier_dropout": null,
+     "directionality": "bidi",
+     "gradient_checkpointing": false,
+     "hidden_act": "gelu",
+     "hidden_dropout_prob": 0.1,
+     "hidden_size": 768,
+     "initializer_range": 0.02,
+     "intermediate_size": 3072,
+     "layer_norm_eps": 1e-12,
+     "max_position_embeddings": 512,
+     "model_type": "bert",
+     "num_attention_heads": 12,
+     "num_hidden_layers": 12,
+     "pad_token_id": 0,
+     "pooler_fc_size": 768,
+     "pooler_num_attention_heads": 12,
+     "pooler_num_fc_layers": 3,
+     "pooler_size_per_head": 128,
+     "pooler_type": "first_token_transform",
+     "position_embedding_type": "absolute",
+     "torch_dtype": "float32",
+     "transformers_version": "4.30.2",
+     "type_vocab_size": 2,
+     "use_cache": true,
+     "vocab_size": 72164
+ }
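For a quick architecture check without downloading the weights, the values above can be read back with transformers' `AutoConfig`; a small sketch (expected values taken from the config shown here):

```python
from transformers import AutoConfig

# Only fetches config.json, not the ~566 MB pytorch_model.bin
config = AutoConfig.from_pretrained("LocalDoc/TEmA-small")
print(config.model_type)               # "bert"
print(config.hidden_size)              # 768 -> sentence embedding dimension
print(config.num_hidden_layers)        # 12
print(config.max_position_embeddings)  # 512 (the model card truncates inputs at 128)
print(config.vocab_size)               # 72164
```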
config_sentence_transformers.json ADDED
@@ -0,0 +1,7 @@
+ {
+     "__version__": {
+         "sentence_transformers": "2.2.2",
+         "transformers": "4.30.2",
+         "pytorch": "2.5.1+cu124"
+     }
+ }
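These are the library versions the checkpoint was exported with; reasonably recent installs should load it, but when debugging load issues it can help to compare them against the local environment. A trivial sketch:

```python
import sentence_transformers
import torch
import transformers

# Exported with sentence-transformers 2.2.2, transformers 4.30.2, torch 2.5.1+cu124
# (per config_sentence_transformers.json); print local versions for comparison.
print(sentence_transformers.__version__)
print(transformers.__version__)
print(torch.__version__)
```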
modules.json ADDED
@@ -0,0 +1,14 @@
+ [
+     {
+         "idx": 0,
+         "name": "0",
+         "path": "",
+         "type": "sentence_transformers.models.Transformer"
+     },
+     {
+         "idx": 1,
+         "name": "1",
+         "path": "1_Pooling",
+         "type": "sentence_transformers.models.Pooling"
+     }
+ ]
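modules.json declares the two-stage sentence-transformers pipeline: module 0 is the Transformer encoder stored at the repo root, module 1 is the mean Pooling module in 1_Pooling/. A minimal sketch of assembling the same pipeline by hand, under the assumption that it matches the exported configs:

```python
from sentence_transformers import SentenceTransformer, models

# Module 0: the BERT encoder stored at the repository root
word_embedding_model = models.Transformer("LocalDoc/TEmA-small", max_seq_length=128)

# Module 1: mean pooling, as configured in 1_Pooling/config.json
pooling_model = models.Pooling(
    word_embedding_model.get_word_embedding_dimension(),
    pooling_mode_mean_tokens=True,
)

model = SentenceTransformer(modules=[word_embedding_model, pooling_model])
print(model.encode(["Bu xoşbəxt bir insandır"]).shape)  # (1, 768)
```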
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e5a2034a34ddddc9b978c4d32a8b5d422f944f3486c9aa4c03711074cfc3ea2
+ size 565924842
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+     "max_seq_length": 128,
+     "do_lower_case": false
+ }
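`max_seq_length: 128` is the effective window of the sentence-transformers pipeline, which is why the README's usage example also truncates at `max_length=128` (the underlying BERT allows up to 512 positions). A small sketch of the effect, assuming the tokenizer loads from this repo:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("LocalDoc/TEmA-small")

# Anything longer than the window is cut down to 128 tokens
long_text = "Bu xoşbəxt bir insandır. " * 100
encoded = tokenizer(long_text, truncation=True, max_length=128, return_tensors="pt")
print(encoded["input_ids"].shape)  # torch.Size([1, 128])
```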
special_tokens_map.json ADDED
@@ -0,0 +1,37 @@
+ {
+     "cls_token": {
+         "content": "[CLS]",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     },
+     "mask_token": {
+         "content": "[MASK]",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     },
+     "pad_token": {
+         "content": "[PAD]",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     },
+     "sep_token": {
+         "content": "[SEP]",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     },
+     "unk_token": {
+         "content": "[UNK]",
+         "lstrip": false,
+         "normalized": false,
+         "rstrip": false,
+         "single_word": false
+     }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,58 @@
+ {
+     "added_tokens_decoder": {
+         "0": {
+             "content": "[PAD]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "100": {
+             "content": "[UNK]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "101": {
+             "content": "[CLS]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "102": {
+             "content": "[SEP]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         },
+         "103": {
+             "content": "[MASK]",
+             "lstrip": false,
+             "normalized": false,
+             "rstrip": false,
+             "single_word": false,
+             "special": true
+         }
+     },
+     "clean_up_tokenization_spaces": false,
+     "cls_token": "[CLS]",
+     "do_basic_tokenize": true,
+     "do_lower_case": false,
+     "full_tokenizer_file": null,
+     "mask_token": "[MASK]",
+     "model_max_length": 512,
+     "never_split": null,
+     "pad_token": "[PAD]",
+     "sep_token": "[SEP]",
+     "strip_accents": null,
+     "tokenize_chinese_chars": true,
+     "tokenizer_class": "BertTokenizer",
+     "unk_token": "[UNK]"
+ }
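The tokenizer is a cased WordPiece `BertTokenizer` (`do_lower_case: false`, which matters for Azerbaijani casing), with the special-token IDs listed in `added_tokens_decoder` above. A short sketch that checks the mapping, assuming the tokenizer loads from this repo:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("LocalDoc/TEmA-small")

# Expected IDs per added_tokens_decoder: [PAD]=0, [UNK]=100, [CLS]=101, [SEP]=102, [MASK]=103
for token in ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]:
    print(token, tokenizer.convert_tokens_to_ids(token))

# A plain encode wraps the input in [CLS] ... [SEP]
print(tokenizer("Bu xoşbəxt bir insandır")["input_ids"])
```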
vocab.txt ADDED
The diff for this file is too large to render. See raw diff