next4biz ahmet1338 committed on
Commit
08322df
0 Parent(s):

Duplicate from ahmet1338/gpt-2-experimental

Co-authored-by: Ahmet Can GÜNAY <ahmet1338@users.noreply.huggingface.co>

.gitattributes ADDED
@@ -0,0 +1,9 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
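These patterns route the repository's binary artifacts (model weights, serialized tokenizer blobs) through Git LFS instead of storing them in plain Git. As a rough illustration only, here is a small Python sketch of which of this repo's own files the patterns catch; `fnmatch` merely approximates gitattributes glob rules:

```python
# Rough illustration: which of this repo's files the .gitattributes patterns
# above would route through Git LFS. fnmatch only approximates gitattributes
# glob semantics, but it matches correctly for these simple basename patterns.
from fnmatch import fnmatch

lfs_patterns = ["*.bin.*", "*.lfs.*", "*.bin", "*.h5", "*.tflite",
                "*.tar.gz", "*.ot", "*.onnx", "*.msgpack"]
for name in ["pytorch_model.bin", "tf_model.h5", "flax_model.msgpack",
             "training_args.bin", "vocab.json", "merges.txt"]:
    tracked = any(fnmatch(name, p) for p in lfs_patterns)
    print(f"{name:22} -> {'LFS' if tracked else 'plain git'}")
```

Consistent with this, the large checkpoint files further down in this commit appear only as LFS pointer stubs, while `vocab.json` and `merges.txt` are stored as ordinary text.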
.gitignore ADDED
@@ -0,0 +1,2 @@
+ Pipfile
+ Pipfile.lock
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ language: tr
+ tags:
+ - turkish
+ - tr
+ - gpt2-tr
+ - gpt2-turkish
+ license: mit
+ metrics:
+ - accuracy
+ ---
+ # Turkish GPT-2 Model (Experimental)
+
+ I've made a GPT-2 model for Turkish available, trained on a variety of texts.
+
+ The model is intended to serve as a starting point for fine-tuning on task-specific texts.
+
+
+ ## Training Source
+
+ I used a Turkish corpus drawn from a variety of written and spoken sources.
+
+
+ Using these training resources, I built a 50k-token vocabulary with the Tokenizers library (a sketch of this step follows this file's diff).
+
+ After building the vocabulary, I trained the GPT-2 model for Turkish on the entire training corpus (ten epochs).
+
+
+
+ ## Using the model
+
+ The model itself can be used this way:
+
+ ``` python
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ tokenizer = AutoTokenizer.from_pretrained("ahmet1338/gpt-2-experimental")
+ model = AutoModelForCausalLM.from_pretrained("ahmet1338/gpt-2-experimental")
+ ```
+
+
+ To generate text, we can use the Transformers pipeline API:
+
+ ``` python
+ from transformers import pipeline
+ pipe = pipeline('text-generation', model="ahmet1338/gpt-2-experimental",
+                 tokenizer="ahmet1338/gpt-2-experimental")
+ text = pipe("Akşamüstü yolda ilerlerken, ", max_length=800)[0]["generated_text"]
+ print(text)
+ ```
+
+ ### How to clone the model repo?
+ ```
+ git lfs install
+ git clone https://huggingface.co/ahmet1338/gpt-2-experimental
+ ```
+
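The vocabulary-building step the README mentions is not part of this commit. A minimal sketch of how a 50k byte-level BPE vocabulary could be produced with the Hugging Face `tokenizers` library, assuming a single corpus file (the filename is hypothetical) and the `tokenized_data/` output directory that `tokenizer_config.json` later references:

```python
# Hypothetical sketch of the vocabulary step: a byte-level BPE tokenizer
# trained to the 50,257-entry vocab that config.json declares.
import os
from tokenizers import ByteLevelBPETokenizer

tokenizer = ByteLevelBPETokenizer()
tokenizer.train(
    files=["turkish_corpus.txt"],        # assumed corpus file, not in this repo
    vocab_size=50257,                    # matches "vocab_size" in config.json
    min_frequency=2,
    special_tokens=["<|endoftext|>"],    # GPT-2's single special token
)
os.makedirs("tokenized_data", exist_ok=True)
tokenizer.save_model("tokenized_data")   # writes vocab.json and merges.txt
```

The `vocab.json` and `merges.txt` files added in this commit are exactly the two artifacts such a training run emits.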
config.json ADDED
@@ -0,0 +1,33 @@
+ {
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "gradient_checkpointing": false,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "resid_pdrop": 0.1,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "vocab_size": 50257
+ }
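This is the standard GPT-2 "small" configuration: 12 layers, 12 attention heads, 768-dimensional embeddings, a 1024-token context window, and a 50,257-token vocabulary. A quick sketch of instantiating the architecture from the file above; the trained weights themselves live in the checkpoint files below:

```python
# Sketch: build a randomly initialized model from config.json to inspect
# the architecture; trained weights come from pytorch_model.bin et al.
from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config.from_json_file("config.json")
model = GPT2LMHeadModel(config)
print(config.n_layer, config.n_head, config.n_embd)  # 12 12 768
print(f"{model.num_parameters():,} parameters")      # roughly 124M at this size
```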
flax_model.msgpack ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe376584de6c19ca333b41c2cf0fdd688737edb81dfe42f57a66db2232aa957b
+ size 497764120
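This three-line file (like the `pytorch_model.bin`, `tf_model.h5`, and `training_args.bin` entries below) is a Git LFS pointer stub, not the weights themselves: cloning without `git lfs install` checks out exactly this text. A small sketch of parsing one, following the pointer format at the `version` URL above:

```python
# Sketch: read a Git LFS pointer stub into a dict (keys: version, oid, size).
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("flax_model.msgpack")   # the stub, before `git lfs pull`
print(ptr["oid"])                              # sha256:fe376584de6c19ca...
print(int(ptr["size"]) / 1e6, "MB expected")   # ~497.8 MB actual payload
```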
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
metadata.json ADDED
@@ -0,0 +1,12 @@
+ {
+ "tags":[
+ "tr",
+ "turkish",
+ "gpt2-tr",
+ "gpt2-turkish",
+ "Türkçe"
+ ],
+ "languages":[
+ "tr"
+ ]
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c11264d48e8078c36f9decc41eff62fd905111237e2cd7d60651d9f94bb946f
+ size 510406550
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}}
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a4723564f63bbf7b892a5f305a7da2f615d61664796d8429bb82938eeea441f
+ size 497933648
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "model_max_length": 1024, "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "tokenized_data/"}
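Both `special_tokens_map.json` above and this config map GPT-2's single `<|endoftext|>` token to the bos/eos/unk roles, and cap inputs at the model's 1024-token context. A quick sketch of how these settings surface once the tokenizer is loaded (network access to the Hub assumed):

```python
# Sketch: the JSON settings above, as exposed by a loaded tokenizer.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ahmet1338/gpt-2-experimental")
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)  # all <|endoftext|>
print(tokenizer.model_max_length)   # 1024, per "model_max_length" above
```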
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f98dfdf111b52ba132eacb3565fd61dc1529f62b170c6ff6ed7fe5fee33eb501
+ size 1775
vocab.json ADDED
The diff for this file is too large to render. See raw diff