sinhns2 committed
Commit aed13ab · Parent: a79b004

change(): model

README.md DELETED
@@ -1,61 +0,0 @@
- from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
- import random
-
- # Load the fine-tuned model and tokenizer
- model_for_generation = AutoModelForSeq2SeqLM.from_pretrained("./fine_tuned_similar_question_generation_model")
- tokenizer_for_generation = AutoTokenizer.from_pretrained("./fine_tuned_similar_question_generation_model")
-
- # Define a function to generate similar questions
- def generate_similar_questions(question):
-     # Tokenize the input question
-     input_ids = tokenizer_for_generation.encode(question, return_tensors="pt")
-
-     # Generate similar questions using the model
-     output_ids = model_for_generation.generate(
-         input_ids=input_ids,
-         num_beams=4,
-         max_length=64,
-         early_stopping=True,
-         num_return_sequences=2,
-         no_repeat_ngram_size=3,
-         temperature=1.0,
-     )
-
-     # Decode the output questions
-     output_questions = []
-     for output_id in output_ids:
-         output_question = tokenizer_for_generation.decode(output_id, skip_special_tokens=True)
-         if output_question.endswith("?"):
-             output_questions.append(output_question)
-         else:
-             continue
-
-     # If no valid question was generated, try again
-     if len(output_questions) == 0:
-         output_questions = generate_similar_questions(question)
-
-     # Return the output questions
-     return output_questions
-
-
- # Example usage:
- questions = [
-     "Bạn có thích đọc sách không?",
-     "Bạn thường xem phim ở đâu?",
-     "Bạn có thích ăn kem không?",
-     "Bạn đã từng đi du lịch ở nước ngoài chưa?",
-     "Bạn thường đọc sách ở thư viện hay mua sách về đọc?",
-     "Bạn thích chơi thể thao gì?",
-     "Bạn thích nghe nhạc thể loại gì?",
-     "Bạn có thường xem truyền hình không?",
-     "Bạn thường uống gì khi đi ăn ngoài?",
-     "Bạn đã từng đến Hà Nội chưa?"
- ]
-
- for question in questions:
-     print(f"Input question: {question}")
-     similar_questions = generate_similar_questions(question)
-     print("Similar questions:")
-     for similar_question in similar_questions:
-         print(f"- {similar_question}")
-     print("\n")
added_tokens.json DELETED
@@ -1,3 +0,0 @@
- {
-   "<mask>": 64000
- }

bpe.codes DELETED
The diff for this file is too large to render. See raw diff
 
config.json CHANGED
@@ -1,54 +1,60 @@
  {
-   "_name_or_path": "vinai/phobert-base",
-   "activation_dropout": 0.0,
-   "activation_function": "gelu",
+   "_name_or_path": "t5-small",
    "architectures": [
-     "BartForConditionalGeneration"
+     "T5ForConditionalGeneration"
    ],
-   "attention_dropout": 0.0,
-   "attention_probs_dropout_prob": 0.1,
-   "bos_token_id": 0,
-   "classifier_dropout": 0.0,
-   "d_model": 768,
-   "decoder_attention_heads": 16,
-   "decoder_ffn_dim": 4096,
-   "decoder_layerdrop": 0.0,
-   "decoder_layers": 12,
-   "decoder_start_token_id": 2,
-   "dropout": 0.1,
-   "encoder_attention_heads": 12,
-   "encoder_ffn_dim": 4096,
-   "encoder_layerdrop": 0.0,
-   "encoder_layers": 12,
-   "eos_token_id": 2,
-   "forced_eos_token_id": 2,
-   "gradient_checkpointing": false,
-   "hidden_act": "gelu",
-   "hidden_dropout_prob": 0.1,
-   "id2label": {
-     "0": "LABEL_0",
-     "1": "LABEL_1",
-     "2": "LABEL_2"
-   },
-   "init_std": 0.02,
-   "initializer_range": 0.02,
-   "intermediate_size": 3072,
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 512,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "relu",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "relu",
+   "initializer_factor": 1.0,
    "is_encoder_decoder": true,
-   "label2id": {
-     "LABEL_0": 0,
-     "LABEL_1": 1,
-     "LABEL_2": 2
+   "is_gated_act": false,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 6,
+   "num_heads": 8,
+   "num_layers": 6,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
    },
    "torch_dtype": "float32",
-   "transformers_version": "4.25.1",
-   "type_vocab_size": 1,
+   "transformers_version": "4.26.1",
    "use_cache": true,
-   "vocab_size": 64001
+   "vocab_size": 32128
  }
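
In practice the config change means the checkpoint now loads as a 6-layer, 512-dimensional T5 model instead of the previous 12+12-layer, 768-dimensional BART-style model built on the PhoBERT vocabulary. A quick sanity check, with the local checkpoint directory assumed as in the removed README:

# Inspect the new config; the directory name is an assumption.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("./fine_tuned_similar_question_generation_model")
print(config.model_type)        # "t5"
print(config.architectures)     # ["T5ForConditionalGeneration"]
print(config.d_model, config.num_layers, config.num_heads)   # 512 6 8
print(config.vocab_size)        # 32128
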
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.26.1"
+ }
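
Recent transformers releases store default decoding parameters in generation_config.json rather than in config.json; "_from_model_config": true marks this file as having been derived automatically when the model was saved, and model.generate() picks these values up as defaults. A small check, again assuming the same local directory:

# Load the generation defaults shipped with the checkpoint; directory name assumed.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("./fine_tuned_similar_question_generation_model")
print(gen_config.decoder_start_token_id, gen_config.eos_token_id, gen_config.pad_token_id)   # 0 1 0
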
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:572c85b9da8b92c11317385c16e8e26b29af1fc13ed2644aaeddd88957520b77
- size 1143647693
+ oid sha256:b394894a2c51326b7ebfdbbd516a7a5a941ac19d7a555a9b539308053415acf4
+ size 242071641
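
The weight file shrinks from about 1.14 GB to about 242 MB, which lines up with the architecture swap: 242,071,641 bytes / 4 bytes per float32 parameter ≈ 60.5M parameters, roughly t5-small's size, while the previous 1,143,647,693-byte file corresponds to roughly 286M parameters for the 768-dimensional, 12+12-layer model with its 64,001-token vocabulary.
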
special_tokens_map.json CHANGED
@@ -1,9 +1,107 @@
  {
-   "bos_token": "<s>",
-   "cls_token": "<s>",
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
    "eos_token": "</s>",
-   "mask_token": "<mask>",
    "pad_token": "<pad>",
-   "sep_token": "</s>",
    "unk_token": "<unk>"
  }
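
The 100 <extra_id_N> entries are T5's sentinel tokens, the placeholders used during span-corruption pretraining; registering them as additional special tokens keeps the SentencePiece model from splitting them. A quick look, with the checkpoint directory again assumed:

# Check that the sentinel tokens are registered; directory name assumed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_similar_question_generation_model")
print(len(tokenizer.additional_special_tokens))           # 100
print(tokenizer.convert_tokens_to_ids("<extra_id_0>"))    # 32099 in the standard t5-small vocabulary
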
spiece.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
+ size 791656

tokenizer_config.json CHANGED
@@ -1,13 +1,114 @@
  {
-   "bos_token": "<s>",
-   "cls_token": "<s>",
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "clean_up_tokenization_spaces": true,
    "eos_token": "</s>",
-   "mask_token": "<mask>",
-   "model_max_length": 256,
-   "name_or_path": "vinai/phobert-base",
+   "extra_ids": 100,
+   "model_max_length": 512,
+   "name_or_path": "t5-small",
    "pad_token": "<pad>",
-   "sep_token": "</s>",
+   "sp_model_kwargs": {},
    "special_tokens_map_file": null,
-   "tokenizer_class": "PhobertTokenizer",
+   "tokenizer_class": "T5Tokenizer",
    "unk_token": "<unk>"
  }
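
Together with the deleted bpe.codes and vocab.txt and the added spiece.model, this replaces the fastBPE-based PhobertTokenizer with the SentencePiece-based T5Tokenizer and raises model_max_length from 256 to 512. A minimal check under the same directory assumption:

# Confirm which tokenizer class the checkpoint now resolves to; directory name assumed.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./fine_tuned_similar_question_generation_model")
print(type(tokenizer).__name__)     # T5Tokenizer (or T5TokenizerFast if the fast class is available)
print(tokenizer.model_max_length)   # 512
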
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b98ab1d86ee27aba4003bd8d30f85a11d73488b1051a6e8ad066bc52c7124e50
+ oid sha256:70027d7721d8d3f9865755baa3a5fdc396eacbf2c687be95832f22f56a43f62e
  size 3579

vocab.txt DELETED
The diff for this file is too large to render. See raw diff