Update README.md
README.md
CHANGED
@@ -1,6 +1,13 @@
 ---
 inference: false
 license: other
+language:
+- en
+thumbnail: null
+tags:
+- text generation
+- conversational
+pipeline_tag: text-generation
 ---
 
 <!-- header start -->
@@ -156,7 +163,7 @@ I trained the LoRA with the following configuration:
 - Trained on 4-bit base model
 - Cutoff length: 4096
 
-# Original model card:
+# Original model card: Pygmalion 7B
 
 <h1 style="text-align: center">Pygmalion 7B</h1>
 <h2 style="text-align: center">A conversational LLaMA fine-tune.</h2>
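
The added metadata is ordinary YAML front matter delimited by the two `---` lines, so the new fields can be sanity-checked locally before pushing. A minimal sketch of reading them back, assuming PyYAML is installed and the file is saved as `README.md`:

```python
import yaml

# Read the model card and pull out the YAML front matter
# between the opening and closing "---" markers.
with open("README.md", encoding="utf-8") as f:
    text = f.read()

_, front_matter, _ = text.split("---\n", 2)
meta = yaml.safe_load(front_matter)

# Fields introduced by this commit.
print(meta["language"])      # ['en']
print(meta["tags"])          # ['text generation', 'conversational']
print(meta["pipeline_tag"])  # text-generation
```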
|