Commit a867fb5
Unity AI committed
1 parent: 5e0d97e

First model version

.gitattributes CHANGED
@@ -25,7 +25,6 @@
  *.safetensors filter=lfs diff=lfs merge=lfs -text
  saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
  *.tflite filter=lfs diff=lfs merge=lfs -text
  *.tgz filter=lfs diff=lfs merge=lfs -text
  *.wasm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: agpl-3.0
+ language:
+ - en
+ thumbnail:
+ tags:
+ - text generation
+ - conversational
+ inference: false
+
+ ---
+
+ # Pygmalion 1.3B
+
+ ## Model description
+
+ Pygmalion 1.3B is a proof-of-concept dialogue model based on EleutherAI's [pythia-1.3b-deduped](https://huggingface.co/EleutherAI/pythia-1.3b-deduped).
+
+ **Warning:** This model is **NOT** suitable for use by minors. It **will** output X-rated content under certain circumstances.
+
+ ## Training data
+
+ The fine-tuning dataset consisted of 56MB of dialogue data gathered from multiple sources, which includes both real _and_ partially machine-generated conversations.
+
+ ## Training procedure
+
+ Fine-tuning was done using [ColossalAI](https://github.com/hpcaitech/ColossalAI) (specifically, with a slightly modified version of their [OPT fine-tune example](https://github.com/hpcaitech/ColossalAI/blob/78509124d32b63b7fc36f6508e0576a326d51422/examples/language/opt/run_clm.py)) for around 11.4 million tokens over 5440 steps on a single 24GB GPU. The run took just under 21 hours.
+
+ ## Intended use
+
+ ### The easy way
+
+ We provide a notebook with a Gradio UI for playing around with the model without having to manually format inputs. This notebook can be found [here](https://github.com/PygmalionAI/gradio-ui/blob/master/notebooks/GPU.ipynb).
+
+ ### The manual way
+
+ The model can be used as a regular text generation model, but it'll perform best if the input prompt adheres to the following format:
+
+ ```
+ [CHARACTER]'s Persona: [A few sentences about the character you want the model to play]
+
+ [DIALOGUE HISTORY]
+ You: [Your input message here]
+ [CHARACTER]:
+ ```
+
+ Where `[CHARACTER]` is, as you can probably guess, the name of the character you want the model to portray, and `[DIALOGUE HISTORY]` is chat history that gives the model some conversational context to draw from. Ideally it'll be pairs of messages like:
+
+ ```
+ [CHARACTER]: [some dialogue here]
+ You: [your response to the dialogue above]
+ ```
+
+ Apart from chat history, you can also just add example conversations in `[DIALOGUE HISTORY]` to show how the character should speak - ideally at the beginning, so it doesn't get confused as to what's conversation history vs. character definition.
+
+ ## Known issues
+
+ - The model can get stuck repeating certain phrases, or sometimes even entire sentences.
+   - We believe this is due to that behavior being present in the training data itself, and plan to investigate and adjust accordingly for future versions.
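The manual prompt format described in this README maps directly onto a plain `transformers` text-generation call. Below is a minimal sketch of that flow, not part of the commit itself; it assumes the `transformers` and `torch` packages are installed, assumes the repo id `PygmalionAI/pygmalion-1.3b` (the id is not stated in this commit), and uses placeholder persona text and sampling settings rather than anything the model card prescribes:

```python
# Sketch: query the model with the persona/dialogue-history format
# recommended in the README. Repo id, persona text, and sampling
# settings below are illustrative assumptions, not from the commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL = "PygmalionAI/pygmalion-1.3b"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL)
model = AutoModelForCausalLM.from_pretrained(MODEL)

# Persona block, dialogue history, then the character tag to complete.
prompt = (
    "Alice's Persona: Alice is a cheerful botanist who loves puns.\n"
    "\n"
    "Alice: Welcome to my greenhouse!\n"
    "You: What are you growing this year?\n"
    "Alice:"
)

inputs = tokenizer(prompt, return_tensors="pt")
output_ids = model.generate(
    **inputs,
    max_new_tokens=80,   # illustrative cap on reply length
    do_sample=True,
    temperature=0.8,     # placeholder sampling setting
    pad_token_id=tokenizer.eos_token_id,
)

# Print only the newly generated reply, not the echoed prompt.
new_tokens = output_ids[0][inputs["input_ids"].shape[-1]:]
print(tokenizer.decode(new_tokens, skip_special_tokens=True))
```

The repetition noted under "Known issues" can often be softened at inference time by passing `repetition_penalty` to `generate()`, though the card does not prescribe a value for it.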
config.json ADDED
@@ -0,0 +1,96 @@
+ {
+   "_name_or_path": "EleutherAI/pythia-1.3b-deduped",
+   "architectures": [
+     "GPTNeoXForCausalLM"
+   ],
+   "bad_words_ids": [
+     [
+       434,
+       15694,
+       66,
+       27,
+       209
+     ],
+     [
+       15362
+     ],
+     [
+       1713
+     ],
+     [
+       1713,
+       64
+     ],
+     [
+       1713,
+       876
+     ],
+     [
+       2016,
+       251,
+       857,
+       75,
+       9194,
+       35478
+     ],
+     [
+       2391
+     ],
+     [
+       20340
+     ],
+     [
+       33021
+     ],
+     [
+       2391,
+       1051
+     ],
+     [
+       5638
+     ],
+     [
+       2391,
+       20340
+     ],
+     [
+       5638,
+       537
+     ],
+     [
+       1559,
+       2345
+     ],
+     [
+       1559,
+       7849
+     ],
+     [
+       1559,
+       17379
+     ],
+     [
+       25321,
+       4611
+     ]
+   ],
+   "bos_token_id": 0,
+   "eos_token_id": 0,
+   "hidden_act": "gelu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 2048,
+   "model_type": "gpt_neox",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "rotary_emb_base": 10000,
+   "rotary_pct": 0.25,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.25.1",
+   "use_cache": true,
+   "use_parallel_residual": true,
+   "vocab_size": 50304
+ }
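The `bad_words_ids` entries in this config are token-id sequences that `transformers` suppresses during generation when the config is loaded (they are picked up by `generate()` unless overridden). To see which strings those ids spell out, they can be decoded with the repo's own tokenizer; a small inspection sketch, again assuming the `PygmalionAI/pygmalion-1.3b` repo id:

```python
# Decode each banned token-id sequence from config.json to see what
# text the model is prevented from emitting. Repo id is an assumption.
from transformers import AutoConfig, AutoTokenizer

MODEL = "PygmalionAI/pygmalion-1.3b"  # assumed repo id

config = AutoConfig.from_pretrained(MODEL)
tokenizer = AutoTokenizer.from_pretrained(MODEL)

for ids in config.bad_words_ids:
    # repr() keeps leading spaces visible, which matter for BPE tokens.
    print(ids, "->", repr(tokenizer.decode(ids)))
```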
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c09c32c69cfc9430deeebc8acd6c18a3f8420c90d267737ce217e442f9673576
+ size 2930002184
special_tokens_map.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "unk_token": "<|endoftext|>"
+ }
tensorboard_runs/2022-12-26T_19-55-05/events.out.tfevents.1672095305.lavidP6000.20829.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75a0d7b08dd9db7fe5cfa86f2160b5907ed98f27187a68e80982c648075bbc5d
+ size 14890
tensorboard_runs/2022-12-26T_20-36-16/events.out.tfevents.1672097776.lavidP6000.1415.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d7b5cc42203fb0878cb3ef4c5b10ee26a4c2bf1ebd520f8d1003c221c268784
+ size 23464
tensorboard_runs/2022-12-26T_21-04-54/events.out.tfevents.1672099494.lavidP6000.9498.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce0315c65928850fb05ff521feacd66e4548e1805e72b4a7b3c2ba93bad9e34c
+ size 11752
tensorboard_runs/2022-12-26T_21-17-43/events.out.tfevents.1672100263.lavidP6000.13066.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:839e9d6814b341244d9559c8d8da790924cddfde52ec7553839e0680cdb24775
+ size 35176
tensorboard_runs/2022-12-26T_22-05-19/events.out.tfevents.1672103119.lavidP6000.27877.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2a86a31c7adaf2d7e19fb08bbf934a9a325becc02fbe28bf01f1972b04275277
+ size 597718
tensorboard_runs/2022-12-27T_12-19-34/events.out.tfevents.1672154374.lavidP6000.9711.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e98e45100f5830efc847fe0900b5eba223eacd4b1f26990245f0621252faedcb
+ size 82024
tensorboard_runs/2022-12-27T_14-17-28/events.out.tfevents.1672161448.lavidP6000.18901.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49a383522d051485967feefab91e1e6499f2a25a099c611fdd4bfe400a20aa87
+ size 234280
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "add_prefix_space": false,
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "name_or_path": "EleutherAI/gpt-neox-20b",
+   "special_tokens_map_file": "/fsx/home-hailey/.cache/huggingface/hub/models--EleutherAI--gpt-neox-20b/snapshots/3523781c8df75f7741687a4284f6f70e1afa12f4/special_tokens_map.json",
+   "tokenizer_class": "GPTNeoXTokenizer",
+   "unk_token": "<|endoftext|>"
+ }