Julius ter Pelkwijk committed on
Commit
179f84c
1 Parent(s): 15e02af

Initial commit

Browse files
README.md CHANGED
@@ -1,3 +1,25 @@
1
  ---
 
2
  license: mit
3
  ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ language: en
3
  license: mit
4
  ---
5
+ # Fairseq-dense 6.7B - Janeway
6
+ ## Model Description
7
+ Fairseq-dense 6.7B-Janeway is a finetune created using Fairseq's dense model.
8
+ ## Training data
9
+ The training data contains around 2210 ebooks, mostly in the sci-fi and fantasy genres. The dataset is identical to the dataset used by GPT-Neo-2.7B-Janeway.
10
+ Some parts of the dataset have been prepended using the following text: `[Genre: <genre1>,<genre2>]`
11
+ ### How to use
12
+ You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run:
13
+ ```py
14
+ >>> from transformers import pipeline
15
+ >>> generator = pipeline('text-generation', model='KoboldAI/fairseq-dense-6.7B-Janeway')
16
+ >>> generator("Welcome Captain Janeway, I apologize for the delay.", do_sample=True, min_length=50)
17
+ [{'generated_text': 'Welcome Captain Janeway, I apologize for the delay."\nIt\'s all right," Janeway said. "I\'m certain that you\'re doing your best to keep me informed of what\'s going on."'}]
18
+ ```
19
+ ### Limitations and Biases
20
+ Based on known problems with NLP technology, potential relevant factors include bias (gender, profession, race and religion).
21
+
22
+ ### BibTeX entry and citation info
23
+ ```
24
+ Artetxe et al. (2021): Efficient Large Scale Language Modeling with Mixtures of Experts
25
+ ```
config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "KoboldAI/fairseq-dense-6.7B",
3
+ "activation_dropout": 0.0,
4
+ "activation_function": "gelu",
5
+ "architectures": [
6
+ "XGLMForCausalLM"
7
+ ],
8
+ "attention_dropout": 0.1,
9
+ "attention_heads": 32,
10
+ "bos_token_id": 50257,
11
+ "d_model": 4096,
12
+ "decoder_start_token_id": 2,
13
+ "dropout": 0.1,
14
+ "eos_token_id": 50259,
15
+ "ffn_dim": 16384,
16
+ "init_std": 0.02,
17
+ "layerdrop": 0.0,
18
+ "max_position_embeddings": 2048,
19
+ "model_type": "xglm",
20
+ "newlinemode": "s",
21
+ "num_layers": 32,
22
+ "pad_token_id": 1,
23
+ "scale_embedding": true,
24
+ "tokenizer_class": "GPT2Tokenizer",
25
+ "torch_dtype": "float16",
26
+ "transformers_version": "4.17.0",
27
+ "use_cache": false,
28
+ "vocab_size": 50261,
29
+ "welcome": "You are currently running novel-writing model `Janeway, version 3.`\n\n This model is made by [Mr. Seeker](https://www.patreon.com/mrseeker)\n\n### How to use this model\n\nJaneway is designed to generate stories and novels. Use the authors note to give it a certain genre to follow, use memory to give an overview of the story and use World Information to give it specific details about the characters. To start off, give the AI an idea of what you are writing about by setting the scene. Give the AI around 10 sentences that make your story really interesting to read. Introduce your character, describe the world, blow something up, or let the AI use its creative mind.",
30
+ "antemplate": "[Genre: <|>]"
31
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:180de78383dcd5727c8af62c2d1f26ffbdb45d7468d3ad71238af3caf8ad89b0
3
+ size 13317022701
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "<|endoftext|>", "pad_token": "<pad>"}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"errors": "replace", "unk_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "bos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "<|endoftext|>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": false, "special_tokens_map_file": "/root/.cache/huggingface/transformers/19ddc3fbe8b06ec9333509d113756a7ba623051f50fc2703eee6bd7083aa66a5.f4b0476f9d35aab16d5dd877dd9e5d547702eff96a3d808497c0d3fc36a32c99", "name_or_path": "KoboldAI/fairseq-dense-6.7B", "tokenizer_class": "GPT2Tokenizer"}
vocab.json ADDED
The diff for this file is too large to render. See raw diff