Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .dockerignore +13 -0
- .gitattributes +5 -35
- .github/CODE_OF_CONDUCT.md +128 -0
- .github/CONTRIBUTING.md +21 -0
- .github/ISSUE_TEMPLATE/bug-report.yml +66 -0
- .github/PULL_REQUEST_TEMPLATE.md +8 -0
- .github/SECURITY.md +7 -0
- .github/workflows/label_issue.yml +27 -0
- .github/workflows/publish.yml +40 -0
- .github/workflows/tests.yml +45 -0
- .gitignore +167 -0
- CITATION.cff +44 -0
- LICENSE +201 -0
- MANIFEST.in +1 -0
- Makefile +14 -0
- README.md +645 -0
- README_zh.md +645 -0
- assets/benchmark.svg +1216 -0
- assets/logo.png +0 -0
- assets/wechat.jpg +0 -0
- assets/wechat_npu.jpg +0 -0
- data/README.md +350 -0
- data/README_zh.md +350 -0
- data/alpaca_en_demo.json +0 -0
- data/alpaca_zh_demo.json +0 -0
- data/belle_multiturn/belle_multiturn.py +67 -0
- data/c4_demo.json +0 -0
- data/dataset_info.json +565 -0
- data/dpo_en_demo.json +0 -0
- data/dpo_zh_demo.json +0 -0
- data/glaive_toolcall_en_demo.json +0 -0
- data/glaive_toolcall_zh_demo.json +0 -0
- data/hh_rlhf_en/hh_rlhf_en.py +84 -0
- data/identity.json +457 -0
- data/kto_en_demo.json +0 -0
- data/mllm_demo.json +140 -0
- data/mllm_demo_data/1.jpg +0 -0
- data/mllm_demo_data/2.jpg +0 -0
- data/mllm_demo_data/3.jpg +0 -0
- data/ultra_chat/ultra_chat.py +60 -0
- data/wiki_demo.txt +0 -0
- docker/docker-cuda/Dockerfile +58 -0
- docker/docker-cuda/docker-compose.yml +32 -0
- docker/docker-npu/Dockerfile +45 -0
- docker/docker-npu/docker-compose.yml +31 -0
- evaluation/ceval/ceval.py +161 -0
- evaluation/ceval/ceval.zip +3 -0
- evaluation/ceval/mapping.json +210 -0
- evaluation/cmmlu/cmmlu.py +168 -0
- evaluation/cmmlu/cmmlu.zip +3 -0
.dockerignore
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.vscode
|
2 |
+
.git
|
3 |
+
.github
|
4 |
+
.venv
|
5 |
+
cache
|
6 |
+
data
|
7 |
+
docker
|
8 |
+
saves
|
9 |
+
hf_cache
|
10 |
+
output
|
11 |
+
.dockerignore
|
12 |
+
.gitattributes
|
13 |
+
.gitignore
|
.gitattributes
CHANGED
@@ -1,35 +1,5 @@
|
|
1 |
-
|
2 |
-
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
-
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
-
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
-
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
-
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
-
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
-
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
-
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
-
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
-
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
-
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
-
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
-
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
-
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
-
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
-
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
-
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
-
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
-
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
-
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
-
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
-
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
-
*.tar filter=lfs diff=lfs merge=lfs -text
|
29 |
-
*.tflite filter=lfs diff=lfs merge=lfs -text
|
30 |
-
*.tgz filter=lfs diff=lfs merge=lfs -text
|
31 |
-
*.wasm filter=lfs diff=lfs merge=lfs -text
|
32 |
-
*.xz filter=lfs diff=lfs merge=lfs -text
|
33 |
-
*.zip filter=lfs diff=lfs merge=lfs -text
|
34 |
-
*.zst filter=lfs diff=lfs merge=lfs -text
|
35 |
-
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
1 |
+
# Auto detect text files and perform LF normalization
|
2 |
+
* text=auto
|
3 |
+
evaluation/ceval/ceval.zip filter=lfs diff=lfs merge=lfs -text
|
4 |
+
evaluation/cmmlu/cmmlu.zip filter=lfs diff=lfs merge=lfs -text
|
5 |
+
evaluation/mmlu/mmlu.zip filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
.github/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Contributor Covenant Code of Conduct
|
2 |
+
|
3 |
+
## Our Pledge
|
4 |
+
|
5 |
+
We as members, contributors, and leaders pledge to make participation in our
|
6 |
+
community a harassment-free experience for everyone, regardless of age, body
|
7 |
+
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
8 |
+
identity and expression, level of experience, education, socio-economic status,
|
9 |
+
nationality, personal appearance, race, religion, or sexual identity
|
10 |
+
and orientation.
|
11 |
+
|
12 |
+
We pledge to act and interact in ways that contribute to an open, welcoming,
|
13 |
+
diverse, inclusive, and healthy community.
|
14 |
+
|
15 |
+
## Our Standards
|
16 |
+
|
17 |
+
Examples of behavior that contributes to a positive environment for our
|
18 |
+
community include:
|
19 |
+
|
20 |
+
* Demonstrating empathy and kindness toward other people
|
21 |
+
* Being respectful of differing opinions, viewpoints, and experiences
|
22 |
+
* Giving and gracefully accepting constructive feedback
|
23 |
+
* Accepting responsibility and apologizing to those affected by our mistakes,
|
24 |
+
and learning from the experience
|
25 |
+
* Focusing on what is best not just for us as individuals, but for the
|
26 |
+
overall community
|
27 |
+
|
28 |
+
Examples of unacceptable behavior include:
|
29 |
+
|
30 |
+
* The use of sexualized language or imagery, and sexual attention or
|
31 |
+
advances of any kind
|
32 |
+
* Trolling, insulting or derogatory comments, and personal or political attacks
|
33 |
+
* Public or private harassment
|
34 |
+
* Publishing others' private information, such as a physical or email
|
35 |
+
address, without their explicit permission
|
36 |
+
* Other conduct which could reasonably be considered inappropriate in a
|
37 |
+
professional setting
|
38 |
+
|
39 |
+
## Enforcement Responsibilities
|
40 |
+
|
41 |
+
Community leaders are responsible for clarifying and enforcing our standards of
|
42 |
+
acceptable behavior and will take appropriate and fair corrective action in
|
43 |
+
response to any behavior that they deem inappropriate, threatening, offensive,
|
44 |
+
or harmful.
|
45 |
+
|
46 |
+
Community leaders have the right and responsibility to remove, edit, or reject
|
47 |
+
comments, commits, code, wiki edits, issues, and other contributions that are
|
48 |
+
not aligned to this Code of Conduct, and will communicate reasons for moderation
|
49 |
+
decisions when appropriate.
|
50 |
+
|
51 |
+
## Scope
|
52 |
+
|
53 |
+
This Code of Conduct applies within all community spaces, and also applies when
|
54 |
+
an individual is officially representing the community in public spaces.
|
55 |
+
Examples of representing our community include using an official e-mail address,
|
56 |
+
posting via an official social media account, or acting as an appointed
|
57 |
+
representative at an online or offline event.
|
58 |
+
|
59 |
+
## Enforcement
|
60 |
+
|
61 |
+
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
62 |
+
reported to the community leaders responsible for enforcement at
|
63 |
+
`hoshihiyouga AT gmail DOT com`.
|
64 |
+
All complaints will be reviewed and investigated promptly and fairly.
|
65 |
+
|
66 |
+
All community leaders are obligated to respect the privacy and security of the
|
67 |
+
reporter of any incident.
|
68 |
+
|
69 |
+
## Enforcement Guidelines
|
70 |
+
|
71 |
+
Community leaders will follow these Community Impact Guidelines in determining
|
72 |
+
the consequences for any action they deem in violation of this Code of Conduct:
|
73 |
+
|
74 |
+
### 1. Correction
|
75 |
+
|
76 |
+
**Community Impact**: Use of inappropriate language or other behavior deemed
|
77 |
+
unprofessional or unwelcome in the community.
|
78 |
+
|
79 |
+
**Consequence**: A private, written warning from community leaders, providing
|
80 |
+
clarity around the nature of the violation and an explanation of why the
|
81 |
+
behavior was inappropriate. A public apology may be requested.
|
82 |
+
|
83 |
+
### 2. Warning
|
84 |
+
|
85 |
+
**Community Impact**: A violation through a single incident or series
|
86 |
+
of actions.
|
87 |
+
|
88 |
+
**Consequence**: A warning with consequences for continued behavior. No
|
89 |
+
interaction with the people involved, including unsolicited interaction with
|
90 |
+
those enforcing the Code of Conduct, for a specified period of time. This
|
91 |
+
includes avoiding interactions in community spaces as well as external channels
|
92 |
+
like social media. Violating these terms may lead to a temporary or
|
93 |
+
permanent ban.
|
94 |
+
|
95 |
+
### 3. Temporary Ban
|
96 |
+
|
97 |
+
**Community Impact**: A serious violation of community standards, including
|
98 |
+
sustained inappropriate behavior.
|
99 |
+
|
100 |
+
**Consequence**: A temporary ban from any sort of interaction or public
|
101 |
+
communication with the community for a specified period of time. No public or
|
102 |
+
private interaction with the people involved, including unsolicited interaction
|
103 |
+
with those enforcing the Code of Conduct, is allowed during this period.
|
104 |
+
Violating these terms may lead to a permanent ban.
|
105 |
+
|
106 |
+
### 4. Permanent Ban
|
107 |
+
|
108 |
+
**Community Impact**: Demonstrating a pattern of violation of community
|
109 |
+
standards, including sustained inappropriate behavior, harassment of an
|
110 |
+
individual, or aggression toward or disparagement of classes of individuals.
|
111 |
+
|
112 |
+
**Consequence**: A permanent ban from any sort of public interaction within
|
113 |
+
the community.
|
114 |
+
|
115 |
+
## Attribution
|
116 |
+
|
117 |
+
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
|
118 |
+
version 2.0, available at
|
119 |
+
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
|
120 |
+
|
121 |
+
Community Impact Guidelines were inspired by [Mozilla's code of conduct
|
122 |
+
enforcement ladder](https://github.com/mozilla/diversity).
|
123 |
+
|
124 |
+
[homepage]: https://www.contributor-covenant.org
|
125 |
+
|
126 |
+
For answers to common questions about this code of conduct, see the FAQ at
|
127 |
+
https://www.contributor-covenant.org/faq. Translations are available at
|
128 |
+
https://www.contributor-covenant.org/translations.
|
.github/CONTRIBUTING.md
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Contributing to LLaMA Factory
|
2 |
+
|
3 |
+
Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable.
|
4 |
+
|
5 |
+
It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you.
|
6 |
+
|
7 |
+
However you choose to contribute, please be mindful and respect our [code of conduct](CODE_OF_CONDUCT.md).
|
8 |
+
|
9 |
+
**This guide was heavily inspired by [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).**
|
10 |
+
|
11 |
+
## Ways to contribute
|
12 |
+
|
13 |
+
There are several ways you can contribute to LLaMA Factory:
|
14 |
+
|
15 |
+
* Fix outstanding issues with the existing code.
|
16 |
+
* Submit issues related to bugs or desired new features.
|
17 |
+
* Contribute to the examples or to the documentation.
|
18 |
+
|
19 |
+
### Style guide
|
20 |
+
|
21 |
+
LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.
|
.github/ISSUE_TEMPLATE/bug-report.yml
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: "\U0001F41B Bug / Help"
|
2 |
+
description: Create a report to help us improve the LLaMA Factory
|
3 |
+
body:
|
4 |
+
- type: markdown
|
5 |
+
attributes:
|
6 |
+
value: |
|
7 |
+
Issues included in **FAQs** or those with **insufficient** information may be closed without a response.
|
8 |
+
包含在**常见问题**内或提供信息**不完整**的 issues 可能不会被回复。
|
9 |
+
|
10 |
+
- type: checkboxes
|
11 |
+
id: reminder
|
12 |
+
attributes:
|
13 |
+
label: Reminder
|
14 |
+
description: |
|
15 |
+
Please ensure you have read the README carefully and searched the existing issues (including FAQs).
|
16 |
+
请确保您已经认真阅读了 README 并且搜索过现有的 issues(包括常见问题)。
|
17 |
+
|
18 |
+
options:
|
19 |
+
- label: I have read the README and searched the existing issues.
|
20 |
+
required: true
|
21 |
+
|
22 |
+
- type: textarea
|
23 |
+
id: system-info
|
24 |
+
validations:
|
25 |
+
required: true
|
26 |
+
attributes:
|
27 |
+
label: System Info
|
28 |
+
description: |
|
29 |
+
Please share your system info with us. You can run the command **llamafactory-cli env** and copy-paste its output below.
|
30 |
+
请提供您的系统信息。您可以在命令行运行 **llamafactory-cli env** 并将其输出复制到该文本框中。
|
31 |
+
|
32 |
+
placeholder: llamafactory version, platform, python version, ...
|
33 |
+
|
34 |
+
- type: textarea
|
35 |
+
id: reproduction
|
36 |
+
validations:
|
37 |
+
required: true
|
38 |
+
attributes:
|
39 |
+
label: Reproduction
|
40 |
+
description: |
|
41 |
+
Please provide code snippets, error messages and stack traces that reproduces the problem.
|
42 |
+
请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
|
43 |
+
Remember to use Markdown tags to correctly format your code.
|
44 |
+
请合理使用 Markdown 标签来格式化您的文本。
|
45 |
+
|
46 |
+
placeholder: |
|
47 |
+
```bash
|
48 |
+
llamafactory-cli train ...
|
49 |
+
```
|
50 |
+
|
51 |
+
- type: textarea
|
52 |
+
id: expected-behavior
|
53 |
+
validations:
|
54 |
+
required: false
|
55 |
+
attributes:
|
56 |
+
label: Expected behavior
|
57 |
+
description: |
|
58 |
+
Please provide a clear and concise description of what you would expect to happen.
|
59 |
+
请提供您原本的目的,即这段代码的期望行为。
|
60 |
+
|
61 |
+
- type: textarea
|
62 |
+
id: others
|
63 |
+
validations:
|
64 |
+
required: false
|
65 |
+
attributes:
|
66 |
+
label: Others
|
.github/PULL_REQUEST_TEMPLATE.md
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# What does this PR do?
|
2 |
+
|
3 |
+
Fixes # (issue)
|
4 |
+
|
5 |
+
## Before submitting
|
6 |
+
|
7 |
+
- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
|
8 |
+
- [ ] Did you write any new necessary tests?
|
.github/SECURITY.md
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Reporting Security Issues
|
2 |
+
|
3 |
+
To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/hiyouga/LLaMA-Factory/security/advisories/new) tab.
|
4 |
+
|
5 |
+
We will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.
|
6 |
+
|
7 |
+
Report security bugs in third-party modules to the person or team maintaining the module.
|
.github/workflows/label_issue.yml
ADDED
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: label_issue
|
2 |
+
|
3 |
+
on:
|
4 |
+
issues:
|
5 |
+
types:
|
6 |
+
- opened
|
7 |
+
|
8 |
+
jobs:
|
9 |
+
label_issue:
|
10 |
+
runs-on: ubuntu-latest
|
11 |
+
|
12 |
+
steps:
|
13 |
+
- env:
|
14 |
+
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
15 |
+
ISSUE_URL: ${{ github.event.issue.html_url }}
|
16 |
+
ISSUE_TITLE: ${{ github.event.issue.title }}
|
17 |
+
run: |
|
18 |
+
LABEL=pending
|
19 |
+
NPU_KEYWORDS=(npu ascend huawei 华为 昇腾)
|
20 |
+
ISSUE_TITLE_LOWER=$(echo $ISSUE_TITLE | tr '[:upper:]' '[:lower:]')
|
21 |
+
for KEYWORD in ${NPU_KEYWORDS[@]}; do
|
22 |
+
if [[ $ISSUE_TITLE_LOWER == *$KEYWORD* ]] && [[ $ISSUE_TITLE_LOWER != *input* ]]; then
|
23 |
+
LABEL=pending,npu
|
24 |
+
break
|
25 |
+
fi
|
26 |
+
done
|
27 |
+
gh issue edit $ISSUE_URL --add-label $LABEL
|
.github/workflows/publish.yml
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: publish
|
2 |
+
|
3 |
+
on:
|
4 |
+
release:
|
5 |
+
types:
|
6 |
+
- published
|
7 |
+
|
8 |
+
jobs:
|
9 |
+
publish:
|
10 |
+
name: Upload release to PyPI
|
11 |
+
|
12 |
+
runs-on: ubuntu-latest
|
13 |
+
|
14 |
+
environment:
|
15 |
+
name: release
|
16 |
+
url: https://pypi.org/p/llamafactory
|
17 |
+
|
18 |
+
permissions:
|
19 |
+
id-token: write
|
20 |
+
|
21 |
+
steps:
|
22 |
+
- name: Checkout
|
23 |
+
uses: actions/checkout@v4
|
24 |
+
|
25 |
+
- name: Set up Python
|
26 |
+
uses: actions/setup-python@v5
|
27 |
+
with:
|
28 |
+
python-version: "3.8"
|
29 |
+
|
30 |
+
- name: Install dependencies
|
31 |
+
run: |
|
32 |
+
python -m pip install --upgrade pip
|
33 |
+
python -m pip install build
|
34 |
+
|
35 |
+
- name: Build package
|
36 |
+
run: |
|
37 |
+
python -m build
|
38 |
+
|
39 |
+
- name: Publish package
|
40 |
+
uses: pypa/gh-action-pypi-publish@release/v1
|
.github/workflows/tests.yml
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
name: tests
|
2 |
+
|
3 |
+
on:
|
4 |
+
push:
|
5 |
+
branches:
|
6 |
+
- main
|
7 |
+
paths:
|
8 |
+
- "**.py"
|
9 |
+
- "requirements.txt"
|
10 |
+
- ".github/workflows/*.yml"
|
11 |
+
pull_request:
|
12 |
+
branches:
|
13 |
+
- main
|
14 |
+
paths:
|
15 |
+
- "**.py"
|
16 |
+
- "requirements.txt"
|
17 |
+
- ".github/workflows/*.yml"
|
18 |
+
|
19 |
+
jobs:
|
20 |
+
tests:
|
21 |
+
runs-on: ubuntu-latest
|
22 |
+
|
23 |
+
steps:
|
24 |
+
- name: Checkout
|
25 |
+
uses: actions/checkout@v4
|
26 |
+
|
27 |
+
- name: Set up Python
|
28 |
+
uses: actions/setup-python@v5
|
29 |
+
with:
|
30 |
+
python-version: "3.8"
|
31 |
+
cache: "pip"
|
32 |
+
cache-dependency-path: "setup.py"
|
33 |
+
|
34 |
+
- name: Install dependencies
|
35 |
+
run: |
|
36 |
+
python -m pip install --upgrade pip
|
37 |
+
python -m pip install ".[torch,dev]"
|
38 |
+
|
39 |
+
- name: Check quality
|
40 |
+
run: |
|
41 |
+
make style && make quality
|
42 |
+
|
43 |
+
- name: Test with pytest
|
44 |
+
run: |
|
45 |
+
make test
|
.gitignore
ADDED
@@ -0,0 +1,167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Byte-compiled / optimized / DLL files
|
2 |
+
__pycache__/
|
3 |
+
*.py[cod]
|
4 |
+
*$py.class
|
5 |
+
|
6 |
+
# C extensions
|
7 |
+
*.so
|
8 |
+
|
9 |
+
# Distribution / packaging
|
10 |
+
.Python
|
11 |
+
build/
|
12 |
+
develop-eggs/
|
13 |
+
dist/
|
14 |
+
downloads/
|
15 |
+
eggs/
|
16 |
+
.eggs/
|
17 |
+
lib/
|
18 |
+
lib64/
|
19 |
+
parts/
|
20 |
+
sdist/
|
21 |
+
var/
|
22 |
+
wheels/
|
23 |
+
share/python-wheels/
|
24 |
+
*.egg-info/
|
25 |
+
.installed.cfg
|
26 |
+
*.egg
|
27 |
+
MANIFEST
|
28 |
+
|
29 |
+
# PyInstaller
|
30 |
+
# Usually these files are written by a python script from a template
|
31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
32 |
+
*.manifest
|
33 |
+
*.spec
|
34 |
+
|
35 |
+
# Installer logs
|
36 |
+
pip-log.txt
|
37 |
+
pip-delete-this-directory.txt
|
38 |
+
|
39 |
+
# Unit test / coverage reports
|
40 |
+
htmlcov/
|
41 |
+
.tox/
|
42 |
+
.nox/
|
43 |
+
.coverage
|
44 |
+
.coverage.*
|
45 |
+
.cache
|
46 |
+
nosetests.xml
|
47 |
+
coverage.xml
|
48 |
+
*.cover
|
49 |
+
*.py,cover
|
50 |
+
.hypothesis/
|
51 |
+
.pytest_cache/
|
52 |
+
cover/
|
53 |
+
|
54 |
+
# Translations
|
55 |
+
*.mo
|
56 |
+
*.pot
|
57 |
+
|
58 |
+
# Django stuff:
|
59 |
+
*.log
|
60 |
+
local_settings.py
|
61 |
+
db.sqlite3
|
62 |
+
db.sqlite3-journal
|
63 |
+
|
64 |
+
# Flask stuff:
|
65 |
+
instance/
|
66 |
+
.webassets-cache
|
67 |
+
|
68 |
+
# Scrapy stuff:
|
69 |
+
.scrapy
|
70 |
+
|
71 |
+
# Sphinx documentation
|
72 |
+
docs/_build/
|
73 |
+
|
74 |
+
# PyBuilder
|
75 |
+
.pybuilder/
|
76 |
+
target/
|
77 |
+
|
78 |
+
# Jupyter Notebook
|
79 |
+
.ipynb_checkpoints
|
80 |
+
|
81 |
+
# IPython
|
82 |
+
profile_default/
|
83 |
+
ipython_config.py
|
84 |
+
|
85 |
+
# pyenv
|
86 |
+
# For a library or package, you might want to ignore these files since the code is
|
87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
88 |
+
# .python-version
|
89 |
+
|
90 |
+
# pipenv
|
91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
94 |
+
# install all needed dependencies.
|
95 |
+
#Pipfile.lock
|
96 |
+
|
97 |
+
# poetry
|
98 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
100 |
+
# commonly ignored for libraries.
|
101 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
102 |
+
#poetry.lock
|
103 |
+
|
104 |
+
# pdm
|
105 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
106 |
+
#pdm.lock
|
107 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
108 |
+
# in version control.
|
109 |
+
# https://pdm.fming.dev/#use-with-ide
|
110 |
+
.pdm.toml
|
111 |
+
|
112 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
113 |
+
__pypackages__/
|
114 |
+
|
115 |
+
# Celery stuff
|
116 |
+
celerybeat-schedule
|
117 |
+
celerybeat.pid
|
118 |
+
|
119 |
+
# SageMath parsed files
|
120 |
+
*.sage.py
|
121 |
+
|
122 |
+
# Environments
|
123 |
+
.env
|
124 |
+
.venv
|
125 |
+
env/
|
126 |
+
venv/
|
127 |
+
ENV/
|
128 |
+
env.bak/
|
129 |
+
venv.bak/
|
130 |
+
|
131 |
+
# Spyder project settings
|
132 |
+
.spyderproject
|
133 |
+
.spyproject
|
134 |
+
|
135 |
+
# Rope project settings
|
136 |
+
.ropeproject
|
137 |
+
|
138 |
+
# mkdocs documentation
|
139 |
+
/site
|
140 |
+
|
141 |
+
# mypy
|
142 |
+
.mypy_cache/
|
143 |
+
.dmypy.json
|
144 |
+
dmypy.json
|
145 |
+
|
146 |
+
# Pyre type checker
|
147 |
+
.pyre/
|
148 |
+
|
149 |
+
# pytype static type analyzer
|
150 |
+
.pytype/
|
151 |
+
|
152 |
+
# Cython debug symbols
|
153 |
+
cython_debug/
|
154 |
+
|
155 |
+
# PyCharm
|
156 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
157 |
+
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
158 |
+
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
159 |
+
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
160 |
+
.idea/
|
161 |
+
|
162 |
+
# custom .gitignore
|
163 |
+
cache/
|
164 |
+
config/
|
165 |
+
saves/
|
166 |
+
output/
|
167 |
+
wandb/
|
CITATION.cff
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
cff-version: 1.2.0
|
2 |
+
date-released: 2024-03
|
3 |
+
message: "If you use this software, please cite it as below."
|
4 |
+
authors:
|
5 |
+
- family-names: "Zheng"
|
6 |
+
given-names: "Yaowei"
|
7 |
+
- family-names: "Zhang"
|
8 |
+
given-names: "Richong"
|
9 |
+
- family-names: "Zhang"
|
10 |
+
given-names: "Junhao"
|
11 |
+
- family-names: "Ye"
|
12 |
+
given-names: "Yanhan"
|
13 |
+
- family-names: "Luo"
|
14 |
+
given-names: "Zheyan"
|
15 |
+
- family-names: "Feng"
|
16 |
+
given-names: "Zhangchi"
|
17 |
+
- family-names: "Ma"
|
18 |
+
given-names: "Yongqiang"
|
19 |
+
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
20 |
+
url: "https://arxiv.org/abs/2403.13372"
|
21 |
+
preferred-citation:
|
22 |
+
type: conference-paper
|
23 |
+
conference:
|
24 |
+
name: "Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)"
|
25 |
+
authors:
|
26 |
+
- family-names: "Zheng"
|
27 |
+
given-names: "Yaowei"
|
28 |
+
- family-names: "Zhang"
|
29 |
+
given-names: "Richong"
|
30 |
+
- family-names: "Zhang"
|
31 |
+
given-names: "Junhao"
|
32 |
+
- family-names: "Ye"
|
33 |
+
given-names: "Yanhan"
|
34 |
+
- family-names: "Luo"
|
35 |
+
given-names: "Zheyan"
|
36 |
+
- family-names: "Feng"
|
37 |
+
given-names: "Zhangchi"
|
38 |
+
- family-names: "Ma"
|
39 |
+
given-names: "Yongqiang"
|
40 |
+
title: "LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models"
|
41 |
+
url: "https://arxiv.org/abs/2403.13372"
|
42 |
+
year: 2024
|
43 |
+
publisher: "Association for Computational Linguistics"
|
44 |
+
address: "Bangkok, Thailand"
|
LICENSE
ADDED
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Apache License
|
2 |
+
Version 2.0, January 2004
|
3 |
+
http://www.apache.org/licenses/
|
4 |
+
|
5 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
6 |
+
|
7 |
+
1. Definitions.
|
8 |
+
|
9 |
+
"License" shall mean the terms and conditions for use, reproduction,
|
10 |
+
and distribution as defined by Sections 1 through 9 of this document.
|
11 |
+
|
12 |
+
"Licensor" shall mean the copyright owner or entity authorized by
|
13 |
+
the copyright owner that is granting the License.
|
14 |
+
|
15 |
+
"Legal Entity" shall mean the union of the acting entity and all
|
16 |
+
other entities that control, are controlled by, or are under common
|
17 |
+
control with that entity. For the purposes of this definition,
|
18 |
+
"control" means (i) the power, direct or indirect, to cause the
|
19 |
+
direction or management of such entity, whether by contract or
|
20 |
+
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
21 |
+
outstanding shares, or (iii) beneficial ownership of such entity.
|
22 |
+
|
23 |
+
"You" (or "Your") shall mean an individual or Legal Entity
|
24 |
+
exercising permissions granted by this License.
|
25 |
+
|
26 |
+
"Source" form shall mean the preferred form for making modifications,
|
27 |
+
including but not limited to software source code, documentation
|
28 |
+
source, and configuration files.
|
29 |
+
|
30 |
+
"Object" form shall mean any form resulting from mechanical
|
31 |
+
transformation or translation of a Source form, including but
|
32 |
+
not limited to compiled object code, generated documentation,
|
33 |
+
and conversions to other media types.
|
34 |
+
|
35 |
+
"Work" shall mean the work of authorship, whether in Source or
|
36 |
+
Object form, made available under the License, as indicated by a
|
37 |
+
copyright notice that is included in or attached to the work
|
38 |
+
(an example is provided in the Appendix below).
|
39 |
+
|
40 |
+
"Derivative Works" shall mean any work, whether in Source or Object
|
41 |
+
form, that is based on (or derived from) the Work and for which the
|
42 |
+
editorial revisions, annotations, elaborations, or other modifications
|
43 |
+
represent, as a whole, an original work of authorship. For the purposes
|
44 |
+
of this License, Derivative Works shall not include works that remain
|
45 |
+
separable from, or merely link (or bind by name) to the interfaces of,
|
46 |
+
the Work and Derivative Works thereof.
|
47 |
+
|
48 |
+
"Contribution" shall mean any work of authorship, including
|
49 |
+
the original version of the Work and any modifications or additions
|
50 |
+
to that Work or Derivative Works thereof, that is intentionally
|
51 |
+
submitted to Licensor for inclusion in the Work by the copyright owner
|
52 |
+
or by an individual or Legal Entity authorized to submit on behalf of
|
53 |
+
the copyright owner. For the purposes of this definition, "submitted"
|
54 |
+
means any form of electronic, verbal, or written communication sent
|
55 |
+
to the Licensor or its representatives, including but not limited to
|
56 |
+
communication on electronic mailing lists, source code control systems,
|
57 |
+
and issue tracking systems that are managed by, or on behalf of, the
|
58 |
+
Licensor for the purpose of discussing and improving the Work, but
|
59 |
+
excluding communication that is conspicuously marked or otherwise
|
60 |
+
designated in writing by the copyright owner as "Not a Contribution."
|
61 |
+
|
62 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity
|
63 |
+
on behalf of whom a Contribution has been received by Licensor and
|
64 |
+
subsequently incorporated within the Work.
|
65 |
+
|
66 |
+
2. Grant of Copyright License. Subject to the terms and conditions of
|
67 |
+
this License, each Contributor hereby grants to You a perpetual,
|
68 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
69 |
+
copyright license to reproduce, prepare Derivative Works of,
|
70 |
+
publicly display, publicly perform, sublicense, and distribute the
|
71 |
+
Work and such Derivative Works in Source or Object form.
|
72 |
+
|
73 |
+
3. Grant of Patent License. Subject to the terms and conditions of
|
74 |
+
this License, each Contributor hereby grants to You a perpetual,
|
75 |
+
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
76 |
+
(except as stated in this section) patent license to make, have made,
|
77 |
+
use, offer to sell, sell, import, and otherwise transfer the Work,
|
78 |
+
where such license applies only to those patent claims licensable
|
79 |
+
by such Contributor that are necessarily infringed by their
|
80 |
+
Contribution(s) alone or by combination of their Contribution(s)
|
81 |
+
with the Work to which such Contribution(s) was submitted. If You
|
82 |
+
institute patent litigation against any entity (including a
|
83 |
+
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
84 |
+
or a Contribution incorporated within the Work constitutes direct
|
85 |
+
or contributory patent infringement, then any patent licenses
|
86 |
+
granted to You under this License for that Work shall terminate
|
87 |
+
as of the date such litigation is filed.
|
88 |
+
|
89 |
+
4. Redistribution. You may reproduce and distribute copies of the
|
90 |
+
Work or Derivative Works thereof in any medium, with or without
|
91 |
+
modifications, and in Source or Object form, provided that You
|
92 |
+
meet the following conditions:
|
93 |
+
|
94 |
+
(a) You must give any other recipients of the Work or
|
95 |
+
Derivative Works a copy of this License; and
|
96 |
+
|
97 |
+
(b) You must cause any modified files to carry prominent notices
|
98 |
+
stating that You changed the files; and
|
99 |
+
|
100 |
+
(c) You must retain, in the Source form of any Derivative Works
|
101 |
+
that You distribute, all copyright, patent, trademark, and
|
102 |
+
attribution notices from the Source form of the Work,
|
103 |
+
excluding those notices that do not pertain to any part of
|
104 |
+
the Derivative Works; and
|
105 |
+
|
106 |
+
(d) If the Work includes a "NOTICE" text file as part of its
|
107 |
+
distribution, then any Derivative Works that You distribute must
|
108 |
+
include a readable copy of the attribution notices contained
|
109 |
+
within such NOTICE file, excluding those notices that do not
|
110 |
+
pertain to any part of the Derivative Works, in at least one
|
111 |
+
of the following places: within a NOTICE text file distributed
|
112 |
+
as part of the Derivative Works; within the Source form or
|
113 |
+
documentation, if provided along with the Derivative Works; or,
|
114 |
+
within a display generated by the Derivative Works, if and
|
115 |
+
wherever such third-party notices normally appear. The contents
|
116 |
+
of the NOTICE file are for informational purposes only and
|
117 |
+
do not modify the License. You may add Your own attribution
|
118 |
+
notices within Derivative Works that You distribute, alongside
|
119 |
+
or as an addendum to the NOTICE text from the Work, provided
|
120 |
+
that such additional attribution notices cannot be construed
|
121 |
+
as modifying the License.
|
122 |
+
|
123 |
+
You may add Your own copyright statement to Your modifications and
|
124 |
+
may provide additional or different license terms and conditions
|
125 |
+
for use, reproduction, or distribution of Your modifications, or
|
126 |
+
for any such Derivative Works as a whole, provided Your use,
|
127 |
+
reproduction, and distribution of the Work otherwise complies with
|
128 |
+
the conditions stated in this License.
|
129 |
+
|
130 |
+
5. Submission of Contributions. Unless You explicitly state otherwise,
|
131 |
+
any Contribution intentionally submitted for inclusion in the Work
|
132 |
+
by You to the Licensor shall be under the terms and conditions of
|
133 |
+
this License, without any additional terms or conditions.
|
134 |
+
Notwithstanding the above, nothing herein shall supersede or modify
|
135 |
+
the terms of any separate license agreement you may have executed
|
136 |
+
with Licensor regarding such Contributions.
|
137 |
+
|
138 |
+
6. Trademarks. This License does not grant permission to use the trade
|
139 |
+
names, trademarks, service marks, or product names of the Licensor,
|
140 |
+
except as required for reasonable and customary use in describing the
|
141 |
+
origin of the Work and reproducing the content of the NOTICE file.
|
142 |
+
|
143 |
+
7. Disclaimer of Warranty. Unless required by applicable law or
|
144 |
+
agreed to in writing, Licensor provides the Work (and each
|
145 |
+
Contributor provides its Contributions) on an "AS IS" BASIS,
|
146 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
147 |
+
implied, including, without limitation, any warranties or conditions
|
148 |
+
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
149 |
+
PARTICULAR PURPOSE. You are solely responsible for determining the
|
150 |
+
appropriateness of using or redistributing the Work and assume any
|
151 |
+
risks associated with Your exercise of permissions under this License.
|
152 |
+
|
153 |
+
8. Limitation of Liability. In no event and under no legal theory,
|
154 |
+
whether in tort (including negligence), contract, or otherwise,
|
155 |
+
unless required by applicable law (such as deliberate and grossly
|
156 |
+
negligent acts) or agreed to in writing, shall any Contributor be
|
157 |
+
liable to You for damages, including any direct, indirect, special,
|
158 |
+
incidental, or consequential damages of any character arising as a
|
159 |
+
result of this License or out of the use or inability to use the
|
160 |
+
Work (including but not limited to damages for loss of goodwill,
|
161 |
+
work stoppage, computer failure or malfunction, or any and all
|
162 |
+
other commercial damages or losses), even if such Contributor
|
163 |
+
has been advised of the possibility of such damages.
|
164 |
+
|
165 |
+
9. Accepting Warranty or Additional Liability. While redistributing
|
166 |
+
the Work or Derivative Works thereof, You may choose to offer,
|
167 |
+
and charge a fee for, acceptance of support, warranty, indemnity,
|
168 |
+
or other liability obligations and/or rights consistent with this
|
169 |
+
License. However, in accepting such obligations, You may act only
|
170 |
+
on Your own behalf and on Your sole responsibility, not on behalf
|
171 |
+
of any other Contributor, and only if You agree to indemnify,
|
172 |
+
defend, and hold each Contributor harmless for any liability
|
173 |
+
incurred by, or claims asserted against, such Contributor by reason
|
174 |
+
of your accepting any such warranty or additional liability.
|
175 |
+
|
176 |
+
END OF TERMS AND CONDITIONS
|
177 |
+
|
178 |
+
APPENDIX: How to apply the Apache License to your work.
|
179 |
+
|
180 |
+
To apply the Apache License to your work, attach the following
|
181 |
+
boilerplate notice, with the fields enclosed by brackets "[]"
|
182 |
+
replaced with your own identifying information. (Don't include
|
183 |
+
the brackets!) The text should be enclosed in the appropriate
|
184 |
+
comment syntax for the file format. We also recommend that a
|
185 |
+
file or class name and description of purpose be included on the
|
186 |
+
same "printed page" as the copyright notice for easier
|
187 |
+
identification within third-party archives.
|
188 |
+
|
189 |
+
Copyright [yyyy] [name of copyright owner]
|
190 |
+
|
191 |
+
Licensed under the Apache License, Version 2.0 (the "License");
|
192 |
+
you may not use this file except in compliance with the License.
|
193 |
+
You may obtain a copy of the License at
|
194 |
+
|
195 |
+
http://www.apache.org/licenses/LICENSE-2.0
|
196 |
+
|
197 |
+
Unless required by applicable law or agreed to in writing, software
|
198 |
+
distributed under the License is distributed on an "AS IS" BASIS,
|
199 |
+
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
200 |
+
See the License for the specific language governing permissions and
|
201 |
+
limitations under the License.
|
MANIFEST.in
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
include LICENSE requirements.txt
|
Makefile
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
.PHONY: quality style test
|
2 |
+
|
3 |
+
check_dirs := scripts src tests
|
4 |
+
|
5 |
+
quality:
|
6 |
+
ruff check $(check_dirs)
|
7 |
+
ruff format --check $(check_dirs)
|
8 |
+
|
9 |
+
style:
|
10 |
+
ruff check $(check_dirs) --fix
|
11 |
+
ruff format $(check_dirs)
|
12 |
+
|
13 |
+
test:
|
14 |
+
CUDA_VISIBLE_DEVICES= pytest tests/
|
README.md
ADDED
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
![# LLaMA Factory](assets/logo.png)
|
2 |
+
|
3 |
+
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
4 |
+
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
|
5 |
+
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
6 |
+
[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/)
|
7 |
+
[![Citation](https://img.shields.io/badge/citation-72-green)](#projects-using-llama-factory)
|
8 |
+
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
9 |
+
[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
|
10 |
+
[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
|
11 |
+
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
|
12 |
+
[![Open in DSW](https://gallery.pai-ml.com/assets/open-in-dsw.svg)](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
13 |
+
[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
14 |
+
[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
15 |
+
|
16 |
+
[![GitHub Trend](https://trendshift.io/api/badge/repositories/4535)](https://trendshift.io/repositories/4535)
|
17 |
+
|
18 |
+
👋 Join our [WeChat](assets/wechat.jpg) or [NPU user group](assets/wechat_npu.jpg).
|
19 |
+
|
20 |
+
\[ English | [中文](README_zh.md) \]
|
21 |
+
|
22 |
+
**Fine-tuning a large language model can be as easy as...**
|
23 |
+
|
24 |
+
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/9840a653-7e9c-41c8-ae89-7ace5698baf6
|
25 |
+
|
26 |
+
Choose your path:
|
27 |
+
|
28 |
+
- **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
|
29 |
+
- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
|
30 |
+
- **Local machine**: Please refer to [usage](#getting-started)
|
31 |
+
|
32 |
+
## Table of Contents
|
33 |
+
|
34 |
+
- [Features](#features)
|
35 |
+
- [Benchmark](#benchmark)
|
36 |
+
- [Changelog](#changelog)
|
37 |
+
- [Supported Models](#supported-models)
|
38 |
+
- [Supported Training Approaches](#supported-training-approaches)
|
39 |
+
- [Provided Datasets](#provided-datasets)
|
40 |
+
- [Requirement](#requirement)
|
41 |
+
- [Getting Started](#getting-started)
|
42 |
+
- [Projects using LLaMA Factory](#projects-using-llama-factory)
|
43 |
+
- [License](#license)
|
44 |
+
- [Citation](#citation)
|
45 |
+
- [Acknowledgement](#acknowledgement)
|
46 |
+
|
47 |
+
## Features
|
48 |
+
|
49 |
+
- **Various models**: LLaMA, LLaVA, Mistral, Mixtral-MoE, Qwen, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
|
50 |
+
- **Integrated methods**: (Continuous) pre-training, (multimodal) supervised fine-tuning, reward modeling, PPO, DPO, KTO, ORPO, etc.
|
51 |
+
- **Scalable resources**: 16-bit full-tuning, freeze-tuning, LoRA and 2/3/4/5/6/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ.
|
52 |
+
- **Advanced algorithms**: GaLore, BAdam, DoRA, LongLoRA, LLaMA Pro, Mixture-of-Depths, LoRA+, LoftQ, PiSSA and Agent tuning.
|
53 |
+
- **Practical tricks**: FlashAttention-2, Unsloth, RoPE scaling, NEFTune and rsLoRA.
|
54 |
+
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
|
55 |
+
- **Faster inference**: OpenAI-style API, Gradio UI and CLI with vLLM worker.
|
56 |
+
|
57 |
+
## Benchmark
|
58 |
+
|
59 |
+
Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging 4-bit quantization technique, LLaMA Factory's QLoRA further improves the efficiency regarding the GPU memory.
|
60 |
+
|
61 |
+
![benchmark](assets/benchmark.svg)
|
62 |
+
|
63 |
+
<details><summary>Definitions</summary>
|
64 |
+
|
65 |
+
- **Training Speed**: the number of training samples processed per second during the training. (bs=4, cutoff_len=1024)
|
66 |
+
- **Rouge Score**: Rouge-2 score on the development set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (bs=4, cutoff_len=1024)
|
67 |
+
- **GPU Memory**: Peak GPU memory usage in 4-bit quantized training. (bs=1, cutoff_len=1024)
|
68 |
+
- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA Factory's LoRA tuning.
|
69 |
+
|
70 |
+
</details>
|
71 |
+
|
72 |
+
## Changelog
|
73 |
+
|
74 |
+
[24/06/16] We support **[PiSSA](https://arxiv.org/abs/2404.02948)** algorithm. See [examples](examples/README.md) for usage.
|
75 |
+
|
76 |
+
[24/06/07] We supported fine-tuning the **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** and **[GLM-4](https://github.com/THUDM/GLM-4)** models.
|
77 |
+
|
78 |
+
[24/05/26] We supported **[SimPO](https://arxiv.org/abs/2405.14734)** algorithm for preference learning. See [examples](examples/README.md) for usage.
|
79 |
+
|
80 |
+
<details><summary>Full Changelog</summary>
|
81 |
+
|
82 |
+
[24/05/20] We supported fine-tuning the **PaliGemma** series models. Note that the PaliGemma models are pre-trained models; you need to fine-tune them with the `gemma` template for chat completion.
|
83 |
+
|
84 |
+
[24/05/18] We supported **[KTO](https://arxiv.org/abs/2402.01306)** algorithm for preference learning. See [examples](examples/README.md) for usage.
|
85 |
+
|
86 |
+
[24/05/14] We supported training and inference on the Ascend NPU devices. Check [installation](#installation) section for details.
|
87 |
+
|
88 |
+
[24/04/26] We supported fine-tuning the **LLaVA-1.5** multimodal LLMs. See [examples](examples/README.md) for usage.
|
89 |
+
|
90 |
+
[24/04/22] We provided a **[Colab notebook](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)** for fine-tuning the Llama-3 model on a free T4 GPU. Two Llama-3-derived models fine-tuned using LLaMA Factory are available at Hugging Face, check [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) and [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese) for details.
|
91 |
+
|
92 |
+
[24/04/21] We supported **[Mixture-of-Depths](https://arxiv.org/abs/2404.02258)** according to [AstraMindAI's implementation](https://github.com/astramind-ai/Mixture-of-depths). See [examples](examples/README.md) for usage.
|
93 |
+
|
94 |
+
[24/04/16] We supported **[BAdam](https://arxiv.org/abs/2404.02827)**. See [examples](examples/README.md) for usage.
|
95 |
+
|
96 |
+
[24/04/16] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s long-sequence training (Llama-2-7B-56k within 24GB). It achieves **117%** speed and **50%** memory compared with FlashAttention-2, more benchmarks can be found in [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison).
|
97 |
+
|
98 |
+
[24/03/31] We supported **[ORPO](https://arxiv.org/abs/2403.07691)**. See [examples](examples/README.md) for usage.
|
99 |
+
|
100 |
+
[24/03/21] Our paper "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" is available at arXiv!
|
101 |
+
|
102 |
+
[24/03/20] We supported **FSDP+QLoRA** that fine-tunes a 70B model on 2x24GB GPUs. See [examples](examples/README.md) for usage.
|
103 |
+
|
104 |
+
[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. See [examples](examples/README.md) for usage.
|
105 |
+
|
106 |
+
[24/03/07] We supported gradient low-rank projection (**[GaLore](https://arxiv.org/abs/2403.03507)**) algorithm. See [examples](examples/README.md) for usage.
|
107 |
+
|
108 |
+
[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `infer_backend: vllm` to enjoy **270%** inference speed.
|
109 |
+
|
110 |
+
[24/02/28] We supported weight-decomposed LoRA (**[DoRA](https://arxiv.org/abs/2402.09353)**). Try `use_dora: true` to activate DoRA training.
|
111 |
+
|
112 |
+
[24/02/15] We supported **block expansion** proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See [examples](examples/README.md) for usage.
|
113 |
+
|
114 |
+
[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.
|
115 |
+
|
116 |
+
[24/01/18] We supported **agent tuning** for most models, equipping models with tool-using abilities by fine-tuning with `dataset: glaive_toolcall_en`.
|
117 |
+
|
118 |
+
[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try `use_unsloth: true` argument to activate unsloth patch. It achieves **170%** speed in our benchmark, check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
|
119 |
+
|
120 |
+
[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirement [here](#hardware-requirement).
|
121 |
+
|
122 |
+
[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)** for Chinese mainland users. See [this tutorial](#download-from-modelscope-hub) for usage.
|
123 |
+
|
124 |
+
[23/10/21] We supported **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try `neftune_noise_alpha: 5` argument to activate NEFTune.
|
125 |
+
|
126 |
+
[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try `shift_attn: true` argument to enable shift short attention.
|
127 |
+
|
128 |
+
[23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [examples](examples/README.md) for usage.
|
129 |
+
|
130 |
+
[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try `flash_attn: fa2` argument to enable FlashAttention-2 if you are using RTX4090, A100 or H100 GPUs.
|
131 |
+
|
132 |
+
[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try `rope_scaling: linear` argument in training and `rope_scaling: dynamic` argument at inference to extrapolate the position embeddings.
|
133 |
+
|
134 |
+
[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [examples](examples/README.md) for usage.
|
135 |
+
|
136 |
+
[23/07/31] We supported **dataset streaming**. Try `streaming: true` and `max_steps: 10000` arguments to load your dataset in streaming mode.
|
137 |
+
|
138 |
+
[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face Repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
|
139 |
+
|
140 |
+
[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thank [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
|
141 |
+
|
142 |
+
[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for editing the factual knowledge of large language models efficiently. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
|
143 |
+
|
144 |
+
[23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets, see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
|
145 |
+
|
146 |
+
[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format where you can insert the fine-tuned model in **arbitrary ChatGPT-based applications**.
|
147 |
+
|
148 |
+
[23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). See [examples](examples/README.md) for usage.
|
149 |
+
|
150 |
+
</details>
|
151 |
+
|
152 |
+
## Supported Models
|
153 |
+
|
154 |
+
| Model | Model size | Template |
|
155 |
+
| ------------------------------------------------------------ | -------------------------------- | --------- |
|
156 |
+
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
157 |
+
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
158 |
+
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
159 |
+
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
160 |
+
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
161 |
+
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
162 |
+
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
163 |
+
| [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
|
164 |
+
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | intern2 |
|
165 |
+
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
166 |
+
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
167 |
+
| [Llama 3](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
|
168 |
+
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | vicuna |
|
169 |
+
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
170 |
+
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
171 |
+
| [PaliGemma](https://huggingface.co/google) | 3B | gemma |
|
172 |
+
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
173 |
+
| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
|
174 |
+
| [Qwen/Qwen1.5/Qwen2 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/4B/7B/14B/32B/72B/110B | qwen |
|
175 |
+
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
176 |
+
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
177 |
+
| [Yi/Yi-1.5](https://huggingface.co/01-ai) | 6B/9B/34B | yi |
|
178 |
+
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
179 |
+
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
180 |
+
|
181 |
+
> [!NOTE]
|
182 |
+
> For the "base" models, the `template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "instruct/chat" models.
|
183 |
+
>
|
184 |
+
> Remember to use the **SAME** template in training and inference.
|
185 |
+
|
186 |
+
Please refer to [constants.py](src/llamafactory/extras/constants.py) for a full list of models we supported.
|
187 |
+
|
188 |
+
You also can add a custom chat template to [template.py](src/llamafactory/data/template.py).
|
189 |
+
|
190 |
+
## Supported Training Approaches
|
191 |
+
|
192 |
+
| Approach | Full-tuning | Freeze-tuning | LoRA | QLoRA |
|
193 |
+
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
194 |
+
| Pre-Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
195 |
+
| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
196 |
+
| Reward Modeling | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
197 |
+
| PPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
198 |
+
| DPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
199 |
+
| KTO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
200 |
+
| ORPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
201 |
+
| SimPO Training | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
202 |
+
|
203 |
+
## Provided Datasets
|
204 |
+
|
205 |
+
<details><summary>Pre-training datasets</summary>
|
206 |
+
|
207 |
+
- [Wiki Demo (en)](data/wiki_demo.txt)
|
208 |
+
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
209 |
+
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
210 |
+
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
211 |
+
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
212 |
+
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
213 |
+
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
214 |
+
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
215 |
+
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
216 |
+
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
217 |
+
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
218 |
+
|
219 |
+
</details>
|
220 |
+
|
221 |
+
<details><summary>Supervised fine-tuning datasets</summary>
|
222 |
+
|
223 |
+
- [Identity (en&zh)](data/identity.json)
|
224 |
+
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
225 |
+
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
226 |
+
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
227 |
+
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
228 |
+
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
229 |
+
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
230 |
+
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
231 |
+
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
232 |
+
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
233 |
+
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
234 |
+
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
235 |
+
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
236 |
+
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
237 |
+
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
238 |
+
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
239 |
+
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
240 |
+
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
241 |
+
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
242 |
+
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
243 |
+
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
244 |
+
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
245 |
+
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
246 |
+
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
247 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
248 |
+
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
249 |
+
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
250 |
+
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
251 |
+
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
252 |
+
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
253 |
+
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
254 |
+
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
255 |
+
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
256 |
+
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
257 |
+
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
258 |
+
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
259 |
+
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
260 |
+
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
261 |
+
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
262 |
+
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
263 |
+
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
264 |
+
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
265 |
+
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
266 |
+
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
267 |
+
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
268 |
+
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
269 |
+
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
270 |
+
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
271 |
+
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
272 |
+
|
273 |
+
</details>
|
274 |
+
|
275 |
+
<details><summary>Preference datasets</summary>
|
276 |
+
|
277 |
+
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
278 |
+
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
279 |
+
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
280 |
+
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
281 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
282 |
+
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
283 |
+
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
284 |
+
|
285 |
+
</details>
|
286 |
+
|
287 |
+
Some datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands.
|
288 |
+
|
289 |
+
```bash
|
290 |
+
pip install --upgrade huggingface_hub
|
291 |
+
huggingface-cli login
|
292 |
+
```
|
293 |
+
|
294 |
+
## Requirement
|
295 |
+
|
296 |
+
| Mandatory | Minimum | Recommend |
|
297 |
+
| ------------ | ------- | --------- |
|
298 |
+
| python | 3.8 | 3.11 |
|
299 |
+
| torch | 1.13.1 | 2.3.0 |
|
300 |
+
| transformers | 4.41.2 | 4.41.2 |
|
301 |
+
| datasets | 2.16.0 | 2.19.2 |
|
302 |
+
| accelerate | 0.30.1 | 0.30.1 |
|
303 |
+
| peft | 0.11.1 | 0.11.1 |
|
304 |
+
| trl | 0.8.6 | 0.9.4 |
|
305 |
+
|
306 |
+
| Optional | Minimum | Recommend |
|
307 |
+
| ------------ | ------- | --------- |
|
308 |
+
| CUDA | 11.6 | 12.2 |
|
309 |
+
| deepspeed | 0.10.0 | 0.14.0 |
|
310 |
+
| bitsandbytes | 0.39.0 | 0.43.1 |
|
311 |
+
| vllm | 0.4.3 | 0.4.3 |
|
312 |
+
| flash-attn | 2.3.0 | 2.5.9 |
|
313 |
+
|
314 |
+
### Hardware Requirement
|
315 |
+
|
316 |
+
\* *estimated*
|
317 |
+
|
318 |
+
| Method | Bits | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
|
319 |
+
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
|
320 |
+
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
|
321 |
+
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
|
322 |
+
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
|
323 |
+
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
|
324 |
+
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
|
325 |
+
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
|
326 |
+
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |
|
327 |
+
|
328 |
+
## Getting Started
|
329 |
+
|
330 |
+
### Installation
|
331 |
+
|
332 |
+
> [!IMPORTANT]
|
333 |
+
> Installation is mandatory.
|
334 |
+
|
335 |
+
```bash
|
336 |
+
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
|
337 |
+
cd LLaMA-Factory
|
338 |
+
pip install -e ".[torch,metrics]"
|
339 |
+
```
|
340 |
+
|
341 |
+
Extra dependencies available: torch, torch-npu, metrics, deepspeed, bitsandbytes, hqq, eetq, gptq, awq, aqlm, vllm, galore, badam, qwen, modelscope, quality
|
342 |
+
|
343 |
+
> [!TIP]
|
344 |
+
> Use `pip install --no-deps -e .` to resolve package conflicts.
|
345 |
+
|
346 |
+
<details><summary>For Windows users</summary>
|
347 |
+
|
348 |
+
If you want to enable the quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of `bitsandbytes` library, which supports CUDA 11.1 to 12.2, please select the appropriate [release version](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels) based on your CUDA version.
|
349 |
+
|
350 |
+
```bash
|
351 |
+
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
352 |
+
```
|
353 |
+
|
354 |
+
To enable FlashAttention-2 on the Windows platform, you need to install the precompiled `flash-attn` library, which supports CUDA 12.1 to 12.2. Please download the corresponding version from [flash-attention](https://github.com/bdashore3/flash-attention/releases) based on your requirements.
|
355 |
+
|
356 |
+
</details>
|
357 |
+
|
358 |
+
<details><summary>For Ascend NPU users</summary>
|
359 |
+
|
360 |
+
To install LLaMA Factory on Ascend NPU devices, please specify extra dependencies: `pip install -e ".[torch-npu,metrics]"`. Additionally, you need to install the **[Ascend CANN Toolkit and Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**. Please follow the [installation tutorial](https://www.hiascend.com/document/detail/en/CANNCommunityEdition/600alphaX/softwareinstall/instg/atlasdeploy_03_0031.html) or use the following commands:
|
361 |
+
|
362 |
+
```bash
|
363 |
+
# replace the url according to your CANN version and devices
|
364 |
+
# install CANN Toolkit
|
365 |
+
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
|
366 |
+
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
|
367 |
+
|
368 |
+
# install CANN Kernels
|
369 |
+
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
|
370 |
+
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
|
371 |
+
|
372 |
+
# set env variables
|
373 |
+
source /usr/local/Ascend/ascend-toolkit/set_env.sh
|
374 |
+
```
|
375 |
+
|
376 |
+
| Requirement | Minimum | Recommend |
|
377 |
+
| ------------ | ------- | ----------- |
|
378 |
+
| CANN | 8.0.RC1 | 8.0.RC1 |
|
379 |
+
| torch | 2.1.0 | 2.1.0 |
|
380 |
+
| torch-npu | 2.1.0 | 2.1.0.post3 |
|
381 |
+
| deepspeed | 0.13.2 | 0.13.2 |
|
382 |
+
|
383 |
+
Remember to use `ASCEND_RT_VISIBLE_DEVICES` instead of `CUDA_VISIBLE_DEVICES` to specify the device to use.
|
384 |
+
|
385 |
+
If you cannot infer model on NPU devices, try setting `do_sample: false` in the configurations.
|
386 |
+
|
387 |
+
Download the pre-built Docker images: [32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
|
388 |
+
|
389 |
+
</details>
|
390 |
+
|
391 |
+
### Data Preparation
|
392 |
+
|
393 |
+
Please refer to [data/README.md](data/README.md) for checking the details about the format of dataset files. You can either use datasets on HuggingFace / ModelScope hub or load the dataset in local disk.
|
394 |
+
|
395 |
+
> [!NOTE]
|
396 |
+
> Please update `data/dataset_info.json` to use your custom dataset.
|
397 |
+
|
398 |
+
### Quickstart
|
399 |
+
|
400 |
+
Use the following 3 commands to run LoRA **fine-tuning**, **inference** and **merging** of the Llama3-8B-Instruct model, respectively.
|
401 |
+
|
402 |
+
```bash
|
403 |
+
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
404 |
+
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
405 |
+
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
406 |
+
```
|
407 |
+
|
408 |
+
See [examples/README.md](examples/README.md) for advanced usage (including distributed training).
|
409 |
+
|
410 |
+
> [!TIP]
|
411 |
+
> Use `llamafactory-cli help` to show help information.
|
412 |
+
|
413 |
+
### Fine-Tuning with LLaMA Board GUI (powered by [Gradio](https://github.com/gradio-app/gradio))
|
414 |
+
|
415 |
+
```bash
|
416 |
+
llamafactory-cli webui
|
417 |
+
```
|
418 |
+
|
419 |
+
### Build Docker
|
420 |
+
|
421 |
+
For CUDA users:
|
422 |
+
|
423 |
+
```bash
|
424 |
+
cd docker/docker-cuda/
|
425 |
+
docker-compose up -d
|
426 |
+
docker-compose exec llamafactory bash
|
427 |
+
```
|
428 |
+
|
429 |
+
For Ascend NPU users:
|
430 |
+
|
431 |
+
```bash
|
432 |
+
cd docker/docker-npu/
|
433 |
+
docker-compose up -d
|
434 |
+
docker-compose exec llamafactory bash
|
435 |
+
```
|
436 |
+
|
437 |
+
<details><summary>Build without Docker Compose</summary>
|
438 |
+
|
439 |
+
For CUDA users:
|
440 |
+
|
441 |
+
```bash
|
442 |
+
docker build -f ./docker/docker-cuda/Dockerfile \
|
443 |
+
--build-arg INSTALL_BNB=false \
|
444 |
+
--build-arg INSTALL_VLLM=false \
|
445 |
+
--build-arg INSTALL_DEEPSPEED=false \
|
446 |
+
--build-arg INSTALL_FLASHATTN=false \
|
447 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
448 |
+
-t llamafactory:latest .
|
449 |
+
|
450 |
+
docker run -dit --gpus=all \
|
451 |
+
-v ./hf_cache:/root/.cache/huggingface \
|
452 |
+
-v ./ms_cache:/root/.cache/modelscope \
|
453 |
+
-v ./data:/app/data \
|
454 |
+
-v ./output:/app/output \
|
455 |
+
-p 7860:7860 \
|
456 |
+
-p 8000:8000 \
|
457 |
+
--shm-size 16G \
|
458 |
+
--name llamafactory \
|
459 |
+
llamafactory:latest
|
460 |
+
|
461 |
+
docker exec -it llamafactory bash
|
462 |
+
```
|
463 |
+
|
464 |
+
For Ascend NPU users:
|
465 |
+
|
466 |
+
```bash
|
467 |
+
# Choose docker image upon your environment
|
468 |
+
docker build -f ./docker/docker-npu/Dockerfile \
|
469 |
+
--build-arg INSTALL_DEEPSPEED=false \
|
470 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
471 |
+
-t llamafactory:latest .
|
472 |
+
|
473 |
+
# Change `device` upon your resources
|
474 |
+
docker run -dit \
|
475 |
+
-v ./hf_cache:/root/.cache/huggingface \
|
476 |
+
-v ./ms_cache:/root/.cache/modelscope \
|
477 |
+
-v ./data:/app/data \
|
478 |
+
-v ./output:/app/output \
|
479 |
+
-v /usr/local/dcmi:/usr/local/dcmi \
|
480 |
+
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
481 |
+
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
482 |
+
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
483 |
+
-p 7860:7860 \
|
484 |
+
-p 8000:8000 \
|
485 |
+
--device /dev/davinci0 \
|
486 |
+
--device /dev/davinci_manager \
|
487 |
+
--device /dev/devmm_svm \
|
488 |
+
--device /dev/hisi_hdc \
|
489 |
+
--shm-size 16G \
|
490 |
+
--name llamafactory \
|
491 |
+
llamafactory:latest
|
492 |
+
|
493 |
+
docker exec -it llamafactory bash
|
494 |
+
```
|
495 |
+
|
496 |
+
</details>
|
497 |
+
|
498 |
+
<details><summary>Details about volume</summary>
|
499 |
+
|
500 |
+
- hf_cache: Utilize Hugging Face cache on the host machine. Reassignable if a cache already exists in a different directory.
|
501 |
+
- data: Place datasets on this dir of the host machine so that they can be selected on LLaMA Board GUI.
|
502 |
+
- output: Set export dir to this location so that the merged result can be accessed directly on the host machine.
|
503 |
+
|
504 |
+
</details>
|
505 |
+
|
506 |
+
### Deploy with OpenAI-style API and vLLM
|
507 |
+
|
508 |
+
```bash
|
509 |
+
API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
|
510 |
+
```
|
511 |
+
|
512 |
+
> [!TIP]
|
513 |
+
> Visit https://platform.openai.com/docs/api-reference/chat/create for the API documentation.
|
514 |
+
|
515 |
+
### Download from ModelScope Hub
|
516 |
+
|
517 |
+
If you have trouble with downloading models and datasets from Hugging Face, you can use ModelScope.
|
518 |
+
|
519 |
+
```bash
|
520 |
+
export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows
|
521 |
+
```
|
522 |
+
|
523 |
+
Train the model by specifying a model ID of the ModelScope Hub as the `model_name_or_path`. You can find a full list of model IDs at [ModelScope Hub](https://modelscope.cn/models), e.g., `LLM-Research/Meta-Llama-3-8B-Instruct`.
|
524 |
+
|
525 |
+
### Use W&B Logger
|
526 |
+
|
527 |
+
To use [Weights & Biases](https://wandb.ai) for logging experimental results, you need to add the following arguments to yaml files.
|
528 |
+
|
529 |
+
```yaml
|
530 |
+
report_to: wandb
|
531 |
+
run_name: test_run # optional
|
532 |
+
```
|
533 |
+
|
534 |
+
Set `WANDB_API_KEY` to [your key](https://wandb.ai/authorize) when launching training tasks to log in with your W&B account.
|
535 |
+
|
536 |
+
## Projects using LLaMA Factory
|
537 |
+
|
538 |
+
If you have a project that should be incorporated, please contact via email or create a pull request.
|
539 |
+
|
540 |
+
<details><summary>Click to show</summary>
|
541 |
+
|
542 |
+
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
543 |
+
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
544 |
+
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
545 |
+
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
546 |
+
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
547 |
+
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
548 |
+
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
549 |
+
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
550 |
+
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
551 |
+
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
552 |
+
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
553 |
+
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
554 |
+
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
555 |
+
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
556 |
+
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
557 |
+
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
558 |
+
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
559 |
+
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
560 |
+
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
561 |
+
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
562 |
+
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
563 |
+
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
564 |
+
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
565 |
+
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
566 |
+
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
567 |
+
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
568 |
+
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
569 |
+
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
570 |
+
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
571 |
+
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
572 |
+
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
573 |
+
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
574 |
+
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
575 |
+
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
576 |
+
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
577 |
+
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
578 |
+
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
579 |
+
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
|
580 |
+
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
|
581 |
+
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
|
582 |
+
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
|
583 |
+
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
|
584 |
+
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
|
585 |
+
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
|
586 |
+
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
|
587 |
+
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
|
588 |
+
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
|
589 |
+
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
|
590 |
+
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
|
591 |
+
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
|
592 |
+
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
|
593 |
+
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
|
594 |
+
1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
|
595 |
+
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
|
596 |
+
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
|
597 |
+
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
|
598 |
+
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
|
599 |
+
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
|
600 |
+
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
|
601 |
+
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
|
602 |
+
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
|
603 |
+
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh’s Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
|
604 |
+
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
|
605 |
+
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
|
606 |
+
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
|
607 |
+
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
|
608 |
+
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
|
609 |
+
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**: A large language model specialized in generating metadata for Stable Diffusion. [[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
610 |
+
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**: A multimodal large language model specialized in Chinese medical domain, based on LLaVA-1.5-7B.
|
611 |
+
1. **[AutoRE](https://github.com/THUDM/AutoRE)**: A document-level relation extraction system based on large language models.
|
612 |
+
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**: SDKs for fine-tuning LLMs on Windows PC for NVIDIA RTX.
|
613 |
+
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**: An easy and lazy way for building multi-agent LLMs applications and supports model fine-tuning via LLaMA Factory.
|
614 |
+
|
615 |
+
</details>
|
616 |
+
|
617 |
+
## License
|
618 |
+
|
619 |
+
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
620 |
+
|
621 |
+
Please follow the model licenses to use the corresponding model weights: [Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [InternLM2](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
622 |
+
|
623 |
+
## Citation
|
624 |
+
|
625 |
+
If this work is helpful, please kindly cite as:
|
626 |
+
|
627 |
+
```bibtex
|
628 |
+
@inproceedings{zheng2024llamafactory,
|
629 |
+
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
630 |
+
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
|
631 |
+
booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
|
632 |
+
address={Bangkok, Thailand},
|
633 |
+
publisher={Association for Computational Linguistics},
|
634 |
+
year={2024},
|
635 |
+
url={http://arxiv.org/abs/2403.13372}
|
636 |
+
}
|
637 |
+
```
|
638 |
+
|
639 |
+
## Acknowledgement
|
640 |
+
|
641 |
+
This repo benefits from [PEFT](https://github.com/huggingface/peft), [TRL](https://github.com/huggingface/trl), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful works.
|
642 |
+
|
643 |
+
## Star History
|
644 |
+
|
645 |
+
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)
|
README_zh.md
ADDED
@@ -0,0 +1,645 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
![# LLaMA Factory](assets/logo.png)
|
2 |
+
|
3 |
+
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
4 |
+
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
|
5 |
+
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
6 |
+
[![PyPI](https://img.shields.io/pypi/v/llamafactory)](https://pypi.org/project/llamafactory/)
|
7 |
+
[![Citation](https://img.shields.io/badge/citation-72-green)](#使用了-llama-factory-的项目)
|
8 |
+
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
9 |
+
[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
|
10 |
+
[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
|
11 |
+
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)
|
12 |
+
[![Open in DSW](https://gallery.pai-ml.com/assets/open-in-dsw.svg)](https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory)
|
13 |
+
[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
14 |
+
[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
15 |
+
|
16 |
+
[![GitHub Trend](https://trendshift.io/api/badge/repositories/4535)](https://trendshift.io/repositories/4535)
|
17 |
+
|
18 |
+
👋 加入我们的[微信群](assets/wechat.jpg)或 [NPU 用户群](assets/wechat_npu.jpg)。
|
19 |
+
|
20 |
+
\[ [English](README.md) | 中文 \]
|
21 |
+
|
22 |
+
**微调大模型可以像这样轻松…**
|
23 |
+
|
24 |
+
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd-d76c6d0a6594
|
25 |
+
|
26 |
+
选择你的打开方式:
|
27 |
+
|
28 |
+
- **Colab**:https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing
|
29 |
+
- **PAI-DSW**: https://gallery.pai-ml.com/#/preview/deepLearning/nlp/llama_factory
|
30 |
+
- **本地机器**:请见[如何使用](#如何使用)
|
31 |
+
|
32 |
+
## 目录
|
33 |
+
|
34 |
+
- [项目特色](#项目特色)
|
35 |
+
- [性能指标](#性能指标)
|
36 |
+
- [更新日志](#更新日志)
|
37 |
+
- [模型](#模型)
|
38 |
+
- [训练方法](#训练方法)
|
39 |
+
- [数据集](#数据集)
|
40 |
+
- [软硬件依赖](#软硬件依赖)
|
41 |
+
- [如何使用](#如何使用)
|
42 |
+
- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
|
43 |
+
- [协议](#协议)
|
44 |
+
- [引用](#引用)
|
45 |
+
- [致谢](#致谢)
|
46 |
+
|
47 |
+
## 项目特色
|
48 |
+
|
49 |
+
- **多种模型**:LLaMA、LLaVA、Mistral、Mixtral-MoE、Qwen、Yi、Gemma、Baichuan、ChatGLM、Phi 等等。
|
50 |
+
- **集成方法**:(增量)预训练、(多模态)指令监督微调、奖励模型训练、PPO 训练、DPO 训练、KTO 训练、ORPO 训练等等。
|
51 |
+
- **多种精度**:16 比特全参数微调、冻结微调、LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8/HQQ/EETQ 的 2/3/4/5/6/8 比特 QLoRA 微调。
|
52 |
+
- **先进算法**:GaLore、BAdam、DoRA、LongLoRA、LLaMA Pro、Mixture-of-Depths、LoRA+、LoftQ、PiSSA 和 Agent 微调。
|
53 |
+
- **实用技巧**:FlashAttention-2、Unsloth、RoPE scaling、NEFTune 和 rsLoRA。
|
54 |
+
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow 等等。
|
55 |
+
- **极速推理**:基于 vLLM 的 OpenAI 风格 API、浏览器界面和命令行接口。
|
56 |
+
|
57 |
+
## 性能指标
|
58 |
+
|
59 |
+
与 ChatGLM 官方的 [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) 微调相比,LLaMA Factory 的 LoRA 微调提供了 **3.7 倍**的加速比,同时在广告文案生成任务上取得了更高的 Rouge 分数。结合 4 比特量化技术,LLaMA Factory 的 QLoRA 微调进一步降低了 GPU 显存消耗。
|
60 |
+
|
61 |
+
![benchmark](assets/benchmark.svg)
|
62 |
+
|
63 |
+
<details><summary>变量定义</summary>
|
64 |
+
|
65 |
+
- **Training Speed**: 训练阶段每秒处理的样本数量。(批处理大小=4,截断长度=1024)
|
66 |
+
- **Rouge Score**: [广告文案生成](https://aclanthology.org/D19-1321.pdf)任务验证集上的 Rouge-2 分数。(批处理大小=4,截断长度=1024)
|
67 |
+
- **GPU Memory**: 4 比特量化训练的 GPU 显存峰值。(批处理大小=1,截断长度=1024)
|
68 |
+
- 我们在 ChatGLM 的 P-Tuning 中采用 `pre_seq_len=128`,在 LLaMA Factory 的 LoRA 微调中采用 `lora_rank=32`。
|
69 |
+
|
70 |
+
</details>
|
71 |
+
|
72 |
+
## 更新日志
|
73 |
+
|
74 |
+
[24/06/16] 我们支持了 **[PiSSA](https://arxiv.org/abs/2404.02948)** 算法。详细用法请参照 [examples](examples/README_zh.md)。
|
75 |
+
|
76 |
+
[24/06/07] 我们支持了 **[Qwen2](https://qwenlm.github.io/blog/qwen2/)** 和 **[GLM-4](https://github.com/THUDM/GLM-4)** 模型的微调。
|
77 |
+
|
78 |
+
[24/05/26] 我们支持了 **[SimPO](https://arxiv.org/abs/2405.14734)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
79 |
+
|
80 |
+
<details><summary>展开日志</summary>
|
81 |
+
|
82 |
+
[24/05/20] 我们支持了 **PaliGemma** 系列模型的微调。注意 PaliGemma 是预训练模型,你需要使用 `gemma` 模板进行微调使其获得对话能力。
|
83 |
+
|
84 |
+
[24/05/18] 我们支持了 **[KTO](https://arxiv.org/abs/2402.01306)** 偏好对齐算法。详细用法请参照 [examples](examples/README_zh.md)。
|
85 |
+
|
86 |
+
[24/05/14] 我们支持了昇腾 NPU 设备的训练和推理。详情请查阅[安装](#安装-llama-factory)部分。
|
87 |
+
|
88 |
+
[24/04/26] 我们支持了多模态模型 **LLaVA-1.5** 的微调。详细用法请参照 [examples](examples/README_zh.md)。
|
89 |
+
|
90 |
+
[24/04/22] 我们提供了在免费 T4 GPU 上微调 Llama-3 模型的 **[Colab 笔记本](https://colab.research.google.com/drive/1d5KQtbemerlSDSxZIfAaWXhKr30QypiK?usp=sharing)**。Hugging Face 社区公开了两个利用 LLaMA Factory 微调的 Llama-3 模型,详情请见 [Llama3-8B-Chinese-Chat](https://huggingface.co/shenzhi-wang/Llama3-8B-Chinese-Chat) 和 [Llama3-Chinese](https://huggingface.co/zhichen/Llama3-Chinese)。
|
91 |
+
|
92 |
+
[24/04/21] 我们基于 [AstraMindAI 的仓库](https://github.com/astramind-ai/Mixture-of-depths)支持了 **[混合深度训练](https://arxiv.org/abs/2404.02258)**。详细用法请参照 [examples](examples/README_zh.md)。
|
93 |
+
|
94 |
+
[24/04/16] 我们支持了 **[BAdam](https://arxiv.org/abs/2404.02827)**。详细用法请参照 [examples](examples/README_zh.md)。
|
95 |
+
|
96 |
+
[24/04/16] 我们支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的长序列训练(24GB 可训练 Llama-2-7B-56k)。该方法相比 FlashAttention-2 提供了 **117%** 的训练速度和 **50%** 的显存节约。更多数据请见[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
97 |
+
|
98 |
+
[24/03/31] 我们支持了 **[ORPO](https://arxiv.org/abs/2403.07691)**。详细用法请参照 [examples](examples/README_zh.md)。
|
99 |
+
|
100 |
+
[24/03/21] 我们的论文 "[LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models](https://arxiv.org/abs/2403.13372)" 可在 arXiv 上查看!
|
101 |
+
|
102 |
+
[24/03/20] 我们支持了能在 2x24GB GPU 上微调 70B 模型的 **FSDP+QLoRA**。详细用法请参照 [examples](examples/README_zh.md)。
|
103 |
+
|
104 |
+
[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。详细用法请参照 [examples](examples/README_zh.md)。
|
105 |
+
|
106 |
+
[24/03/07] 我们支持了梯度低秩投影(**[GaLore](https://arxiv.org/abs/2403.03507)**)算法。详细用法请参照 [examples](examples/README_zh.md)。
|
107 |
+
|
108 |
+
[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `infer_backend: vllm` 来获得 **270%** 的推理速度。
|
109 |
+
|
110 |
+
[24/02/28] 我们支持了 **[DoRA](https://arxiv.org/abs/2402.09353)** 微调。请使用 `use_dora: true` 参数进行 DoRA 微调。
|
111 |
+
|
112 |
+
[24/02/15] 我们支持了 [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro) 提出的**块扩展**方法。详细用法请参照 [examples](examples/README_zh.md)。
|
113 |
+
|
114 |
+
[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。
|
115 |
+
|
116 |
+
[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `dataset: glaive_toolcall_zh` 即可使模型获得工具调用能力。
|
117 |
+
|
118 |
+
[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `use_unsloth: true` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
119 |
+
|
120 |
+
[23/12/12] 我们支持了微调最新的混合专家模型 **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**。硬件需求请查阅[此处](#硬件依赖)。
|
121 |
+
|
122 |
+
[23/12/01] 我们支持了从 **[魔搭社区](https://modelscope.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#从魔搭社区下载)。
|
123 |
+
|
124 |
+
[23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `neftune_noise_alpha: 5` 参数启用 NEFTune。
|
125 |
+
|
126 |
+
[23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `shift_attn: true` 参数以启用该功能。
|
127 |
+
|
128 |
+
[23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。详细用法请参照 [examples](examples/README_zh.md)。
|
129 |
+
|
130 |
+
[23/09/10] 我们支持了 **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `flash_attn: fa2` 参数以启用 FlashAttention-2。
|
131 |
+
|
132 |
+
[23/08/12] 我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `rope_scaling: linear` 参数训练模型或使用 `rope_scaling: dynamic` 参数评估模型。
|
133 |
+
|
134 |
+
[23/08/11] 我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。详细用法请参照 [examples](examples/README_zh.md)。
|
135 |
+
|
136 |
+
[23/07/31] 我们支持了**数据流式加载**。请使用 `streaming: true` 和 `max_steps: 10000` 参数来流式加载数据集。
|
137 |
+
|
138 |
+
[23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。
|
139 |
+
|
140 |
+
[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
|
141 |
+
|
142 |
+
[23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。
|
143 |
+
|
144 |
+
[23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。
|
145 |
+
|
146 |
+
[23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。
|
147 |
+
|
148 |
+
[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。详细用法请参照 [examples](examples/README_zh.md)。
|
149 |
+
|
150 |
+
</details>
|
151 |
+
|
152 |
+
## 模型
|
153 |
+
|
154 |
+
| 模型名 | 模型大小 | Template |
|
155 |
+
| ------------------------------------------------------------ | -------------------------------- | --------- |
|
156 |
+
| [Baichuan 2](https://huggingface.co/baichuan-inc) | 7B/13B | baichuan2 |
|
157 |
+
| [BLOOM/BLOOMZ](https://huggingface.co/bigscience) | 560M/1.1B/1.7B/3B/7.1B/176B | - |
|
158 |
+
| [ChatGLM3](https://huggingface.co/THUDM) | 6B | chatglm3 |
|
159 |
+
| [Command R](https://huggingface.co/CohereForAI) | 35B/104B | cohere |
|
160 |
+
| [DeepSeek (Code/MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B/236B | deepseek |
|
161 |
+
| [Falcon](https://huggingface.co/tiiuae) | 7B/11B/40B/180B | falcon |
|
162 |
+
| [Gemma/Gemma 2/CodeGemma](https://huggingface.co/google) | 2B/7B/9B/27B | gemma |
|
163 |
+
| [GLM-4](https://huggingface.co/THUDM) | 9B | glm4 |
|
164 |
+
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | intern2 |
|
165 |
+
| [Llama](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | - |
|
166 |
+
| [Llama 2](https://huggingface.co/meta-llama) | 7B/13B/70B | llama2 |
|
167 |
+
| [Llama 3](https://huggingface.co/meta-llama) | 8B/70B | llama3 |
|
168 |
+
| [LLaVA-1.5](https://huggingface.co/llava-hf) | 7B/13B | vicuna |
|
169 |
+
| [Mistral/Mixtral](https://huggingface.co/mistralai) | 7B/8x7B/8x22B | mistral |
|
170 |
+
| [OLMo](https://huggingface.co/allenai) | 1B/7B | - |
|
171 |
+
| [PaliGemma](https://huggingface.co/google) | 3B | gemma |
|
172 |
+
| [Phi-1.5/Phi-2](https://huggingface.co/microsoft) | 1.3B/2.7B | - |
|
173 |
+
| [Phi-3](https://huggingface.co/microsoft) | 4B/7B/14B | phi |
|
174 |
+
| [Qwen/Qwen1.5/Qwen2 (Code/MoE)](https://huggingface.co/Qwen) | 0.5B/1.5B/4B/7B/14B/32B/72B/110B | qwen |
|
175 |
+
| [StarCoder 2](https://huggingface.co/bigcode) | 3B/7B/15B | - |
|
176 |
+
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | xverse |
|
177 |
+
| [Yi/Yi-1.5](https://huggingface.co/01-ai) | 6B/9B/34B | yi |
|
178 |
+
| [Yi-VL](https://huggingface.co/01-ai) | 6B/34B | yi_vl |
|
179 |
+
| [Yuan 2](https://huggingface.co/IEITYuan) | 2B/51B/102B | yuan |
|
180 |
+
|
181 |
+
> [!NOTE]
|
182 |
+
> 对于所有“基座”(Base)模型,`template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Instruct/Chat)模型请务必使用**对应的模板**。
|
183 |
+
>
|
184 |
+
> 请务必在训练和推理时采用**完全一致**的模板。
|
185 |
+
|
186 |
+
项目所支持模型的完整列表请参阅 [constants.py](src/llamafactory/extras/constants.py)。
|
187 |
+
|
188 |
+
您也可以在 [template.py](src/llamafactory/data/template.py) 中添加自己的对话模板。
|
189 |
+
|
190 |
+
## 训练方法
|
191 |
+
|
192 |
+
| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
|
193 |
+
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
194 |
+
| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
195 |
+
| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
196 |
+
| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
197 |
+
| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
198 |
+
| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
199 |
+
| KTO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
200 |
+
| ORPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
201 |
+
| SimPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
202 |
+
|
203 |
+
## 数据集
|
204 |
+
|
205 |
+
<details><summary>预训练数据集</summary>
|
206 |
+
|
207 |
+
- [Wiki Demo (en)](data/wiki_demo.txt)
|
208 |
+
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
209 |
+
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
210 |
+
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
211 |
+
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
212 |
+
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
213 |
+
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
214 |
+
- [FineWeb (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb)
|
215 |
+
- [FineWeb-Edu (en)](https://huggingface.co/datasets/HuggingFaceFW/fineweb-edu)
|
216 |
+
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
217 |
+
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
218 |
+
|
219 |
+
</details>
|
220 |
+
|
221 |
+
<details><summary>指令微调数据集</summary>
|
222 |
+
|
223 |
+
- [Identity (en&zh)](data/identity.json)
|
224 |
+
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
225 |
+
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca-3)
|
226 |
+
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
227 |
+
- [Glaive Function Calling V2 (en&zh)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
228 |
+
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
229 |
+
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
230 |
+
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
231 |
+
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
232 |
+
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
233 |
+
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
234 |
+
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
235 |
+
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
236 |
+
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
237 |
+
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
238 |
+
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
239 |
+
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
240 |
+
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
241 |
+
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
242 |
+
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
243 |
+
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
244 |
+
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
245 |
+
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
246 |
+
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
247 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
248 |
+
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
249 |
+
- [Advertise Generating (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
250 |
+
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
251 |
+
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
252 |
+
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
253 |
+
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
254 |
+
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
255 |
+
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
256 |
+
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
257 |
+
- [STEM (zh)](https://huggingface.co/datasets/hfl/stem_zh_instruction)
|
258 |
+
- [Ruozhiba (zh)](https://huggingface.co/datasets/hfl/ruozhiba_gpt4_turbo)
|
259 |
+
- [Neo-sft (zh)](https://huggingface.co/datasets/m-a-p/neo_sft_phase2)
|
260 |
+
- [WebInstructSub (en)](https://huggingface.co/datasets/TIGER-Lab/WebInstructSub)
|
261 |
+
- [Magpie-Pro-300K-Filtered (en)](https://huggingface.co/datasets/Magpie-Align/Magpie-Pro-300K-Filtered)
|
262 |
+
- [LLaVA mixed (en&zh)](https://huggingface.co/datasets/BUAADreamer/llava-en-zh-300k)
|
263 |
+
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
264 |
+
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
265 |
+
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
266 |
+
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
267 |
+
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
268 |
+
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
269 |
+
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
270 |
+
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
271 |
+
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
272 |
+
|
273 |
+
</details>
|
274 |
+
|
275 |
+
<details><summary>偏好数据集</summary>
|
276 |
+
|
277 |
+
- [DPO mixed (en&zh)](https://huggingface.co/datasets/hiyouga/DPO-En-Zh-20k)
|
278 |
+
- [UltraFeedback (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrafeedback_binarized)
|
279 |
+
- [Orca DPO Pairs (en)](https://huggingface.co/datasets/Intel/orca_dpo_pairs)
|
280 |
+
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
281 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
282 |
+
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
283 |
+
- [KTO mixed (en)](https://huggingface.co/datasets/argilla/kto-mix-15k)
|
284 |
+
|
285 |
+
</details>
|
286 |
+
|
287 |
+
部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。
|
288 |
+
|
289 |
+
```bash
|
290 |
+
pip install --upgrade huggingface_hub
|
291 |
+
huggingface-cli login
|
292 |
+
```
|
293 |
+
|
294 |
+
## 软硬件依赖
|
295 |
+
|
296 |
+
| 必需项 | 至少 | 推荐 |
|
297 |
+
| ------------ | ------- | --------- |
|
298 |
+
| python | 3.8 | 3.11 |
|
299 |
+
| torch | 1.13.1 | 2.3.0 |
|
300 |
+
| transformers | 4.41.2 | 4.41.2 |
|
301 |
+
| datasets | 2.16.0 | 2.19.2 |
|
302 |
+
| accelerate | 0.30.1 | 0.30.1 |
|
303 |
+
| peft | 0.11.1 | 0.11.1 |
|
304 |
+
| trl | 0.8.6 | 0.9.4 |
|
305 |
+
|
306 |
+
| 可选项 | 至少 | 推荐 |
|
307 |
+
| ------------ | ------- | --------- |
|
308 |
+
| CUDA | 11.6 | 12.2 |
|
309 |
+
| deepspeed | 0.10.0 | 0.14.0 |
|
310 |
+
| bitsandbytes | 0.39.0 | 0.43.1 |
|
311 |
+
| vllm | 0.4.3 | 0.4.3 |
|
312 |
+
| flash-attn | 2.3.0 | 2.5.9 |
|
313 |
+
|
314 |
+
### 硬件依赖
|
315 |
+
|
316 |
+
\* *估算值*
|
317 |
+
|
318 |
+
| 方法 | 精度 | 7B | 13B | 30B | 70B | 110B | 8x7B | 8x22B |
|
319 |
+
| ----------------- | ---- | ----- | ----- | ----- | ------ | ------ | ----- | ------ |
|
320 |
+
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 2000GB | 900GB | 2400GB |
|
321 |
+
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 900GB | 400GB | 1200GB |
|
322 |
+
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 360GB | 160GB | 400GB |
|
323 |
+
| LoRA/GaLore/BAdam | 16 | 16GB | 32GB | 64GB | 160GB | 240GB | 120GB | 320GB |
|
324 |
+
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 140GB | 60GB | 160GB |
|
325 |
+
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 72GB | 30GB | 96GB |
|
326 |
+
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 48GB | 18GB | 48GB |
|
327 |
+
|
328 |
+
## 如何使用
|
329 |
+
|
330 |
+
### 安装 LLaMA Factory
|
331 |
+
|
332 |
+
> [!IMPORTANT]
|
333 |
+
> 此步骤为必需。
|
334 |
+
|
335 |
+
```bash
|
336 |
+
git clone --depth 1 https://github.com/hiyouga/LLaMA-Factory.git
|
337 |
+
cd LLaMA-Factory
|
338 |
+
pip install -e ".[torch,metrics]"
|
339 |
+
```
|
340 |
+
|
341 |
+
可选的额外依赖项:torch、torch-npu、metrics、deepspeed、bitsandbytes、hqq、eetq、gptq、awq、aqlm、vllm、galore、badam、qwen、modelscope、quality
|
342 |
+
|
343 |
+
> [!TIP]
|
344 |
+
> 遇到包冲突时,可使用 `pip install --no-deps -e .` 解决。
|
345 |
+
|
346 |
+
<details><summary>Windows 用户指南</summary>
|
347 |
+
|
348 |
+
如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2, 请根据您的 CUDA 版本情况选择适合的[发布版本](https://github.com/jllllll/bitsandbytes-windows-webui/releases/tag/wheels)。
|
349 |
+
|
350 |
+
```bash
|
351 |
+
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.41.2.post2-py3-none-win_amd64.whl
|
352 |
+
```
|
353 |
+
|
354 |
+
如果要在 Windows 平台上开启 FlashAttention-2,需要安装预编译的 `flash-attn` 库,支持 CUDA 12.1 到 12.2,请根据需求到 [flash-attention](https://github.com/bdashore3/flash-attention/releases) 下载对应版本安装。
|
355 |
+
|
356 |
+
</details>
|
357 |
+
|
358 |
+
<details><summary>昇腾 NPU 用户指南</summary>
|
359 |
+
|
360 |
+
在昇腾 NPU 设备上安装 LLaMA Factory 时,需要指定额外依赖项,使用 `pip install -e ".[torch-npu,metrics]"` 命令安装。此外,还需要安装 **[Ascend CANN Toolkit 与 Kernels](https://www.hiascend.com/developer/download/community/result?module=cann)**,安装方法请参考[安装教程](https://www.hiascend.com/document/detail/zh/CANNCommunityEdition/80RC2alpha002/quickstart/quickstart/quickstart_18_0004.html)或使用以下命令:
|
361 |
+
|
362 |
+
```bash
|
363 |
+
# 请替换 URL 为 CANN 版本和设备型号对应的 URL
|
364 |
+
# 安装 CANN Toolkit
|
365 |
+
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run
|
366 |
+
bash Ascend-cann-toolkit_8.0.RC1.alpha001_linux-"$(uname -i)".run --install
|
367 |
+
|
368 |
+
# 安装 CANN Kernels
|
369 |
+
wget https://ascend-repo.obs.cn-east-2.myhuaweicloud.com/Milan-ASL/Milan-ASL%20V100R001C17SPC701/Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run
|
370 |
+
bash Ascend-cann-kernels-910b_8.0.RC1.alpha001_linux.run --install
|
371 |
+
|
372 |
+
# 设置环境变量
|
373 |
+
source /usr/local/Ascend/ascend-toolkit/set_env.sh
|
374 |
+
```
|
375 |
+
|
376 |
+
| 依赖项 | 至少 | 推荐 |
|
377 |
+
| ------------ | ------- | ----------- |
|
378 |
+
| CANN | 8.0.RC1 | 8.0.RC1 |
|
379 |
+
| torch | 2.1.0 | 2.1.0 |
|
380 |
+
| torch-npu | 2.1.0 | 2.1.0.post3 |
|
381 |
+
| deepspeed | 0.13.2 | 0.13.2 |
|
382 |
+
|
383 |
+
请使用 `ASCEND_RT_VISIBLE_DEVICES` 而非 `CUDA_VISIBLE_DEVICES` 来指定运算设备。
|
384 |
+
|
385 |
+
如果遇到无法正常推理的情况,请尝试设置 `do_sample: false`。
|
386 |
+
|
387 |
+
下载预构建 Docker 镜像:[32GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/130.html) | [64GB](http://mirrors.cn-central-221.ovaijisuan.com/detail/131.html)
|
388 |
+
|
389 |
+
</details>
|
390 |
+
|
391 |
+
### 数据准备
|
392 |
+
|
393 |
+
关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。你可以使用 HuggingFace / ModelScope 上的数据集或加载本地数据集。
|
394 |
+
|
395 |
+
> [!NOTE]
|
396 |
+
> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件。
|
397 |
+
|
398 |
+
### 快速开始
|
399 |
+
|
400 |
+
下面三行命令分别对 Llama3-8B-Instruct 模型进行 LoRA **微调**、**推理**和**合并**。
|
401 |
+
|
402 |
+
```bash
|
403 |
+
llamafactory-cli train examples/train_lora/llama3_lora_sft.yaml
|
404 |
+
llamafactory-cli chat examples/inference/llama3_lora_sft.yaml
|
405 |
+
llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml
|
406 |
+
```
|
407 |
+
|
408 |
+
高级用法请参考 [examples/README_zh.md](examples/README_zh.md)(包括多 GPU 微调)。
|
409 |
+
|
410 |
+
> [!TIP]
|
411 |
+
> 使用 `llamafactory-cli help` 显示帮助信息。
|
412 |
+
|
413 |
+
### LLaMA Board 可视化微调(由 [Gradio](https://github.com/gradio-app/gradio) 驱动)
|
414 |
+
|
415 |
+
```bash
|
416 |
+
llamafactory-cli webui
|
417 |
+
```
|
418 |
+
|
419 |
+
### 构建 Docker
|
420 |
+
|
421 |
+
CUDA 用户:
|
422 |
+
|
423 |
+
```bash
|
424 |
+
cd docker/docker-cuda/
|
425 |
+
docker-compose up -d
|
426 |
+
docker-compose exec llamafactory bash
|
427 |
+
```
|
428 |
+
|
429 |
+
昇腾 NPU 用户:
|
430 |
+
|
431 |
+
```bash
|
432 |
+
cd docker/docker-npu/
|
433 |
+
docker-compose up -d
|
434 |
+
docker-compose exec llamafactory bash
|
435 |
+
```
|
436 |
+
|
437 |
+
<details><summary>不使用 Docker Compose 构建</summary>
|
438 |
+
|
439 |
+
CUDA 用户:
|
440 |
+
|
441 |
+
```bash
|
442 |
+
docker build -f ./docker/docker-cuda/Dockerfile \
|
443 |
+
--build-arg INSTALL_BNB=false \
|
444 |
+
--build-arg INSTALL_VLLM=false \
|
445 |
+
--build-arg INSTALL_DEEPSPEED=false \
|
446 |
+
--build-arg INSTALL_FLASHATTN=false \
|
447 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
448 |
+
-t llamafactory:latest .
|
449 |
+
|
450 |
+
docker run -dit --gpus=all \
|
451 |
+
-v ./hf_cache:/root/.cache/huggingface \
|
452 |
+
-v ./ms_cache:/root/.cache/modelscope \
|
453 |
+
-v ./data:/app/data \
|
454 |
+
-v ./output:/app/output \
|
455 |
+
-p 7860:7860 \
|
456 |
+
-p 8000:8000 \
|
457 |
+
--shm-size 16G \
|
458 |
+
--name llamafactory \
|
459 |
+
llamafactory:latest
|
460 |
+
|
461 |
+
docker exec -it llamafactory bash
|
462 |
+
```
|
463 |
+
|
464 |
+
昇腾 NPU 用户:
|
465 |
+
|
466 |
+
```bash
|
467 |
+
# 根据您的环境选择镜像
|
468 |
+
docker build -f ./docker/docker-npu/Dockerfile \
|
469 |
+
--build-arg INSTALL_DEEPSPEED=false \
|
470 |
+
--build-arg PIP_INDEX=https://pypi.org/simple \
|
471 |
+
-t llamafactory:latest .
|
472 |
+
|
473 |
+
# 根据您的资源更改 `device`
|
474 |
+
docker run -dit \
|
475 |
+
-v ./hf_cache:/root/.cache/huggingface \
|
476 |
+
-v ./ms_cache:/root/.cache/modelscope \
|
477 |
+
-v ./data:/app/data \
|
478 |
+
-v ./output:/app/output \
|
479 |
+
-v /usr/local/dcmi:/usr/local/dcmi \
|
480 |
+
-v /usr/local/bin/npu-smi:/usr/local/bin/npu-smi \
|
481 |
+
-v /usr/local/Ascend/driver:/usr/local/Ascend/driver \
|
482 |
+
-v /etc/ascend_install.info:/etc/ascend_install.info \
|
483 |
+
-p 7860:7860 \
|
484 |
+
-p 8000:8000 \
|
485 |
+
--device /dev/davinci0 \
|
486 |
+
--device /dev/davinci_manager \
|
487 |
+
--device /dev/devmm_svm \
|
488 |
+
--device /dev/hisi_hdc \
|
489 |
+
--shm-size 16G \
|
490 |
+
--name llamafactory \
|
491 |
+
llamafactory:latest
|
492 |
+
|
493 |
+
docker exec -it llamafactory bash
|
494 |
+
```
|
495 |
+
|
496 |
+
</details>
|
497 |
+
|
498 |
+
<details><summary>数据卷详情</summary>
|
499 |
+
|
500 |
+
- hf_cache:使用宿主机的 Hugging Face 缓存文件夹,允许更改为新的目录。
|
501 |
+
- data:宿主机中存放数据集的文件夹路径。
|
502 |
+
- output:将导出目录设置为该路径后,即可在宿主机中访问导出后的模型。
|
503 |
+
|
504 |
+
</details>
|
505 |
+
|
506 |
+
### 利用 vLLM 部署 OpenAI API
|
507 |
+
|
508 |
+
```bash
|
509 |
+
API_PORT=8000 llamafactory-cli api examples/inference/llama3_vllm.yaml
|
510 |
+
```
|
511 |
+
|
512 |
+
> [!TIP]
|
513 |
+
> API 文档请查阅 https://platform.openai.com/docs/api-reference/chat/create。
|
514 |
+
|
515 |
+
### 从魔搭社区下载
|
516 |
+
|
517 |
+
如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
|
518 |
+
|
519 |
+
```bash
|
520 |
+
export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
521 |
+
```
|
522 |
+
|
523 |
+
将 `model_name_or_path` 设置为模型 ID 来加载对应的模型。在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型,例如 `LLM-Research/Meta-Llama-3-8B-Instruct`。
|
524 |
+
|
525 |
+
### 使用 W&B 面板
|
526 |
+
|
527 |
+
若要使用 [Weights & Biases](https://wandb.ai) 记录实验数据,请在 yaml 文件中添加下面的参数。
|
528 |
+
|
529 |
+
```yaml
|
530 |
+
report_to: wandb
|
531 |
+
run_name: test_run # 可选
|
532 |
+
```
|
533 |
+
|
534 |
+
在启动训练任务时,将 `WANDB_API_KEY` 设置为[密钥](https://wandb.ai/authorize)来登录 W&B 账户。
|
535 |
+
|
536 |
+
## 使用了 LLaMA Factory 的项目
|
537 |
+
|
538 |
+
如果您有项目希望添加至下述列表,请通过邮件联系或者创建一个 PR。
|
539 |
+
|
540 |
+
<details><summary>点击显示</summary>
|
541 |
+
|
542 |
+
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
543 |
+
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
544 |
+
1. Wang et al. UbiPhysio: Support Daily Functioning, Fitness, and Rehabilitation with Action Understanding and Feedback in Natural Language. 2023. [[arxiv]](https://arxiv.org/abs/2308.10526)
|
545 |
+
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
546 |
+
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
547 |
+
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. KDD 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
548 |
+
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
549 |
+
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
550 |
+
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
551 |
+
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
552 |
+
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
553 |
+
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
554 |
+
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
555 |
+
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
556 |
+
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
557 |
+
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
558 |
+
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
559 |
+
1. Yu et al. KIEval: A Knowledge-grounded Interactive Evaluation Framework for Large Language Models. ACL 2024. [[arxiv]](https://arxiv.org/abs/2402.15043)
|
560 |
+
1. Huang et al. Key-Point-Driven Data Synthesis with its Enhancement on Mathematical Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2403.02333)
|
561 |
+
1. Duan et al. Negating Negatives: Alignment without Human Positive Samples via Distributional Dispreference Optimization. 2024. [[arxiv]](https://arxiv.org/abs/2403.03419)
|
562 |
+
1. Xie and Schwertfeger. Empowering Robotics with Large Language Models: osmAG Map Comprehension with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2403.08228)
|
563 |
+
1. Wu et al. Large Language Models are Parallel Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2403.09073)
|
564 |
+
1. Zhang et al. EDT: Improving Large Language Models' Generation by Entropy-based Dynamic Temperature Sampling. 2024. [[arxiv]](https://arxiv.org/abs/2403.14541)
|
565 |
+
1. Weller et al. FollowIR: Evaluating and Teaching Information Retrieval Models to Follow Instructions. 2024. [[arxiv]](https://arxiv.org/abs/2403.15246)
|
566 |
+
1. Hongbin Na. CBT-LLM: A Chinese Large Language Model for Cognitive Behavioral Therapy-based Mental Health Question Answering. COLING 2024. [[arxiv]](https://arxiv.org/abs/2403.16008)
|
567 |
+
1. Zan et al. CodeS: Natural Language to Code Repository via Multi-Layer Sketch. 2024. [[arxiv]](https://arxiv.org/abs/2403.16443)
|
568 |
+
1. Liu et al. Extensive Self-Contrast Enables Feedback-Free Language Model Alignment. 2024. [[arxiv]](https://arxiv.org/abs/2404.00604)
|
569 |
+
1. Luo et al. BAdam: A Memory Efficient Full Parameter Training Method for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.02827)
|
570 |
+
1. Du et al. Chinese Tiny LLM: Pretraining a Chinese-Centric Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2404.04167)
|
571 |
+
1. Ma et al. Parameter Efficient Quasi-Orthogonal Fine-Tuning via Givens Rotation. ICML 2024. [[arxiv]](https://arxiv.org/abs/2404.04316)
|
572 |
+
1. Liu et al. Dynamic Generation of Personalities with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.07084)
|
573 |
+
1. Shang et al. How Far Have We Gone in Stripped Binary Code Understanding Using Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.09836)
|
574 |
+
1. Huang et al. LLMTune: Accelerate Database Knob Tuning with Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2404.11581)
|
575 |
+
1. Deng et al. Text-Tuple-Table: Towards Information Integration in Text-to-Table Generation via Global Tuple Extraction. 2024. [[arxiv]](https://arxiv.org/abs/2404.14215)
|
576 |
+
1. Acikgoz et al. Hippocrates: An Open-Source Framework for Advancing Large Language Models in Healthcare. 2024. [[arxiv]](https://arxiv.org/abs/2404.16621)
|
577 |
+
1. Zhang et al. Small Language Models Need Strong Verifiers to Self-Correct Reasoning. ACL 2024 Findings. [[arxiv]](https://arxiv.org/abs/2404.17140)
|
578 |
+
1. Zhou et al. FREB-TQA: A Fine-Grained Robustness Evaluation Benchmark for Table Question Answering. NAACL 2024. [[arxiv]](https://arxiv.org/abs/2404.18585)
|
579 |
+
1. Xu et al. Large Language Models for Cyber Security: A Systematic Literature Review. 2024. [[arxiv]](https://arxiv.org/abs/2405.04760)
|
580 |
+
1. Dammu et al. "They are uncultured": Unveiling Covert Harms and Social Threats in LLM Generated Conversations. 2024. [[arxiv]](https://arxiv.org/abs/2405.05378)
|
581 |
+
1. Yi et al. A safety realignment framework via subspace-oriented model fusion for large language models. 2024. [[arxiv]](https://arxiv.org/abs/2405.09055)
|
582 |
+
1. Lou et al. SPO: Multi-Dimensional Preference Sequential Alignment With Implicit Reward Modeling. 2024. [[arxiv]](https://arxiv.org/abs/2405.12739)
|
583 |
+
1. Zhang et al. Getting More from Less: Large Language Models are Good Spontaneous Multilingual Learners. 2024. [[arxiv]](https://arxiv.org/abs/2405.13816)
|
584 |
+
1. Zhang et al. TS-Align: A Teacher-Student Collaborative Framework for Scalable Iterative Finetuning of Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2405.20215)
|
585 |
+
1. Zihong Chen. Sentence Segmentation and Sentence Punctuation Based on XunziALLM. 2024. [[paper]](https://aclanthology.org/2024.lt4hala-1.30)
|
586 |
+
1. Gao et al. The Best of Both Worlds: Toward an Honest and Helpful Large Language Model. 2024. [[arxiv]](https://arxiv.org/abs/2406.00380)
|
587 |
+
1. Wang and Song. MARS: Benchmarking the Metaphysical Reasoning Abilities of Language Models with a Multi-task Evaluation Dataset. 2024. [[arxiv]](https://arxiv.org/abs/2406.02106)
|
588 |
+
1. Hu et al. Computational Limits of Low-Rank Adaptation (LoRA) for Transformer-Based Models. 2024. [[arxiv]](https://arxiv.org/abs/2406.03136)
|
589 |
+
1. Ge et al. Time Sensitive Knowledge Editing through Efficient Finetuning. ACL 2024. [[arxiv]](https://arxiv.org/abs/2406.04496)
|
590 |
+
1. Tan et al. Peer Review as A Multi-Turn and Long-Context Dialogue with Role-Based Interactions. 2024. [[arxiv]](https://arxiv.org/abs/2406.05688)
|
591 |
+
1. Song et al. Turbo Sparse: Achieving LLM SOTA Performance with Minimal Activated Parameters. 2024. [[arxiv]](https://arxiv.org/abs/2406.05955)
|
592 |
+
1. Gu et al. RWKV-CLIP: A Robust Vision-Language Representation Learner. 2024. [[arxiv]](https://arxiv.org/abs/2406.06973)
|
593 |
+
1. Chen et al. Advancing Tool-Augmented Large Language Models: Integrating Insights from Errors in Inference Trees. 2024. [[arxiv]](https://arxiv.org/abs/2406.07115)
|
594 |
+
1. Zhu et al. Are Large Language Models Good Statisticians?. 2024. [[arxiv]](https://arxiv.org/abs/2406.07815)
|
595 |
+
1. Li et al. Know the Unknown: An Uncertainty-Sensitive Method for LLM Instruction Tuning. 2024. [[arxiv]](https://arxiv.org/abs/2406.10099)
|
596 |
+
1. Ding et al. IntentionQA: A Benchmark for Evaluating Purchase Intention Comprehension Abilities of Language Models in E-commerce. 2024. [[arxiv]](https://arxiv.org/abs/2406.10173)
|
597 |
+
1. He et al. COMMUNITY-CROSS-INSTRUCT: Unsupervised Instruction Generation for Aligning Large Language Models to Online Communities. 2024. [[arxiv]](https://arxiv.org/abs/2406.12074)
|
598 |
+
1. Lin et al. FVEL: Interactive Formal Verification Environment with Large Language Models via Theorem Proving. 2024. [[arxiv]](https://arxiv.org/abs/2406.14408)
|
599 |
+
1. Treutlein et al. Connecting the Dots: LLMs can Infer and Verbalize Latent Structure from Disparate Training Data. 2024. [[arxiv]](https://arxiv.org/abs/2406.14546)
|
600 |
+
1. Feng et al. SS-Bench: A Benchmark for Social Story Generation and Evaluation. 2024. [[arxiv]](https://arxiv.org/abs/2406.15695)
|
601 |
+
1. Feng et al. Self-Constructed Context Decompilation with Fined-grained Alignment Enhancement. 2024. [[arxiv]](https://arxiv.org/abs/2406.17233)
|
602 |
+
1. Liu et al. Large Language Models for Cuffless Blood Pressure Measurement From Wearable Biosignals. 2024. [[arxiv]](https://arxiv.org/abs/2406.18069)
|
603 |
+
1. Iyer et al. Exploring Very Low-Resource Translation with LLMs: The University of Edinburgh’s Submission to AmericasNLP 2024 Translation Task. AmericasNLP 2024. [[paper]](https://aclanthology.org/2024.americasnlp-1.25)
|
604 |
+
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
|
605 |
+
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
|
606 |
+
1. **[Sunsimiao](https://github.com/X-D-Lab/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
|
607 |
+
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
|
608 |
+
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
|
609 |
+
1. **[Luminia-13B-v3](https://huggingface.co/Nekochu/Luminia-13B-v3)**:一个用于生成 Stable Diffusion 提示词的大型语言模型。[[🤗Demo]](https://huggingface.co/spaces/Nekochu/Luminia-13B_SD_Prompt)
|
610 |
+
1. **[Chinese-LLaVA-Med](https://github.com/BUAADreamer/Chinese-LLaVA-Med)**:中文多模态医学大模型,基于 LLaVA-1.5-7B 在中文多模态医疗数据上微调而得。
|
611 |
+
1. **[AutoRE](https://github.com/THUDM/AutoRE)**:基于大语言模型的文档级关系抽取系统。
|
612 |
+
1. **[NVIDIA RTX AI Toolkit](https://github.com/NVIDIA/RTX-AI-Toolkit)**:在 Windows 主机上利用英伟达 RTX 设备进行大型语言模型微调的开发包。
|
613 |
+
1. **[LazyLLM](https://github.com/LazyAGI/LazyLLM)**:一个低代码构建多 Agent 大模型应用的开发工具,支持基于 LLaMA Factory 的模型微调。
|
614 |
+
|
615 |
+
</details>
|
616 |
+
|
617 |
+
## 协议
|
618 |
+
|
619 |
+
本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
|
620 |
+
|
621 |
+
使用模型权重时,请遵循对应的模型协议:[Baichuan 2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [Command R](https://cohere.com/c4ai-cc-by-nc-license) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [GLM-4](https://huggingface.co/THUDM/glm-4-9b/blob/main/LICENSE) / [InternLM2](https://github.com/InternLM/InternLM#license) / [Llama](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [Llama 2 (LLaVA-1.5)](https://ai.meta.com/llama/license/) / [Llama 3](https://llama.meta.com/llama3/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/Phi-2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Phi-3](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/LICENSE) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder 2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yi-1.5](LICENSE) / [Yuan 2](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
622 |
+
|
623 |
+
## 引用
|
624 |
+
|
625 |
+
如果您觉得此项目有帮助,请考虑以下列格式引用
|
626 |
+
|
627 |
+
```bibtex
|
628 |
+
@inproceedings{zheng2024llamafactory,
|
629 |
+
title={LlamaFactory: Unified Efficient Fine-Tuning of 100+ Language Models},
|
630 |
+
author={Yaowei Zheng and Richong Zhang and Junhao Zhang and Yanhan Ye and Zheyan Luo and Zhangchi Feng and Yongqiang Ma},
|
631 |
+
booktitle={Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations)},
|
632 |
+
address={Bangkok, Thailand},
|
633 |
+
publisher={Association for Computational Linguistics},
|
634 |
+
year={2024},
|
635 |
+
url={http://arxiv.org/abs/2403.13372}
|
636 |
+
}
|
637 |
+
```
|
638 |
+
|
639 |
+
## 致谢
|
640 |
+
|
641 |
+
本项目受益于 [PEFT](https://github.com/huggingface/peft)、[TRL](https://github.com/huggingface/trl)、[QLoRA](https://github.com/artidoro/qlora) 和 [FastChat](https://github.com/lm-sys/FastChat),感谢以上诸位作者的付出。
|
642 |
+
|
643 |
+
## Star History
|
644 |
+
|
645 |
+
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)
|
assets/benchmark.svg
ADDED
assets/logo.png
ADDED
assets/wechat.jpg
ADDED
assets/wechat_npu.jpg
ADDED
data/README.md
ADDED
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
The [dataset_info.json](dataset_info.json) contains all available datasets. If you are using a custom dataset, please **make sure** to add a *dataset description* in `dataset_info.json` and specify `dataset: dataset_name` before training to use it.
|
2 |
+
|
3 |
+
Currently we support datasets in **alpaca** and **sharegpt** format.
|
4 |
+
|
5 |
+
```json
|
6 |
+
"dataset_name": {
|
7 |
+
"hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
|
8 |
+
"ms_hub_url": "the name of the dataset repository on the Model Scope hub. (if specified, ignore script_url and file_name)",
|
9 |
+
"script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
|
10 |
+
"file_name": "the name of the dataset folder or dataset file in this directory. (required if above are not specified)",
|
11 |
+
"formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
|
12 |
+
"ranking": "whether the dataset is a preference dataset or not. (default: False)",
|
13 |
+
"subset": "the name of the subset. (optional, default: None)",
|
14 |
+
"folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
|
15 |
+
"num_samples": "the number of samples in the dataset used for training. (optional, default: None)",
|
16 |
+
"columns (optional)": {
|
17 |
+
"prompt": "the column name in the dataset containing the prompts. (default: instruction)",
|
18 |
+
"query": "the column name in the dataset containing the queries. (default: input)",
|
19 |
+
"response": "the column name in the dataset containing the responses. (default: output)",
|
20 |
+
"history": "the column name in the dataset containing the histories. (default: None)",
|
21 |
+
"messages": "the column name in the dataset containing the messages. (default: conversations)",
|
22 |
+
"system": "the column name in the dataset containing the system prompts. (default: None)",
|
23 |
+
"tools": "the column name in the dataset containing the tool description. (default: None)",
|
24 |
+
"images": "the column name in the dataset containing the image inputs. (default: None)",
|
25 |
+
"chosen": "the column name in the dataset containing the chosen answers. (default: None)",
|
26 |
+
"rejected": "the column name in the dataset containing the rejected answers. (default: None)",
|
27 |
+
"kto_tag": "the column name in the dataset containing the kto tags. (default: None)"
|
28 |
+
},
|
29 |
+
"tags (optional, used for the sharegpt format)": {
|
30 |
+
"role_tag": "the key in the message represents the identity. (default: from)",
|
31 |
+
"content_tag": "the key in the message represents the content. (default: value)",
|
32 |
+
"user_tag": "the value of the role_tag represents the user. (default: human)",
|
33 |
+
"assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
|
34 |
+
"observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
|
35 |
+
"function_tag": "the value of the role_tag represents the function call. (default: function_call)",
|
36 |
+
"system_tag": "the value of the role_tag represents the system prompt. (default: system, can override system column)"
|
37 |
+
}
|
38 |
+
}
|
39 |
+
```
|
40 |
+
|
41 |
+
## Alpaca Format
|
42 |
+
|
43 |
+
### Supervised Fine-Tuning Dataset
|
44 |
+
|
45 |
+
* [Example dataset](alpaca_en_demo.json)
|
46 |
+
|
47 |
+
In supervised fine-tuning, the `instruction` column will be concatenated with the `input` column and used as the human prompt, i.e., the human prompt will be `instruction\ninput`. The `output` column represents the model response.
|
48 |
+
|
49 |
+
The `system` column will be used as the system prompt if specified.
|
50 |
+
|
51 |
+
The `history` column is a list consisting of string tuples representing prompt-response pairs in the history messages. Note that the responses in the history **will also be learned by the model** in supervised fine-tuning.
|
52 |
+
|
53 |
+
```json
|
54 |
+
[
|
55 |
+
{
|
56 |
+
"instruction": "human instruction (required)",
|
57 |
+
"input": "human input (optional)",
|
58 |
+
"output": "model response (required)",
|
59 |
+
"system": "system prompt (optional)",
|
60 |
+
"history": [
|
61 |
+
["human instruction in the first round (optional)", "model response in the first round (optional)"],
|
62 |
+
["human instruction in the second round (optional)", "model response in the second round (optional)"]
|
63 |
+
]
|
64 |
+
}
|
65 |
+
]
|
66 |
+
```
|
67 |
+
|
68 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
69 |
+
|
70 |
+
```json
|
71 |
+
"dataset_name": {
|
72 |
+
"file_name": "data.json",
|
73 |
+
"columns": {
|
74 |
+
"prompt": "instruction",
|
75 |
+
"query": "input",
|
76 |
+
"response": "output",
|
77 |
+
"system": "system",
|
78 |
+
"history": "history"
|
79 |
+
}
|
80 |
+
}
|
81 |
+
```
|
82 |
+
|
83 |
+
### Pre-training Dataset
|
84 |
+
|
85 |
+
- [Example dataset](c4_demo.json)
|
86 |
+
|
87 |
+
In pre-training, only the `text` column will be used for model learning.
|
88 |
+
|
89 |
+
```json
|
90 |
+
[
|
91 |
+
{"text": "document"},
|
92 |
+
{"text": "document"}
|
93 |
+
]
|
94 |
+
```
|
95 |
+
|
96 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
97 |
+
|
98 |
+
```json
|
99 |
+
"dataset_name": {
|
100 |
+
"file_name": "data.json",
|
101 |
+
"columns": {
|
102 |
+
"prompt": "text"
|
103 |
+
}
|
104 |
+
}
|
105 |
+
```
|
106 |
+
|
107 |
+
### Preference Dataset
|
108 |
+
|
109 |
+
Preference datasets are used for reward modeling, DPO training and ORPO training.
|
110 |
+
|
111 |
+
It requires a better response in `chosen` column and a worse response in `rejected` column.
|
112 |
+
|
113 |
+
```json
|
114 |
+
[
|
115 |
+
{
|
116 |
+
"instruction": "human instruction (required)",
|
117 |
+
"input": "human input (optional)",
|
118 |
+
"chosen": "chosen answer (required)",
|
119 |
+
"rejected": "rejected answer (required)"
|
120 |
+
}
|
121 |
+
]
|
122 |
+
```
|
123 |
+
|
124 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
125 |
+
|
126 |
+
```json
|
127 |
+
"dataset_name": {
|
128 |
+
"file_name": "data.json",
|
129 |
+
"ranking": true,
|
130 |
+
"columns": {
|
131 |
+
"prompt": "instruction",
|
132 |
+
"query": "input",
|
133 |
+
"chosen": "chosen",
|
134 |
+
"rejected": "rejected"
|
135 |
+
}
|
136 |
+
}
|
137 |
+
```
|
138 |
+
|
139 |
+
### KTO Dataset
|
140 |
+
|
141 |
+
- [Example dataset](kto_en_demo.json)
|
142 |
+
|
143 |
+
KTO datasets require an extra `kto_tag` column containing the boolean human feedback.
|
144 |
+
|
145 |
+
```json
|
146 |
+
[
|
147 |
+
{
|
148 |
+
"instruction": "human instruction (required)",
|
149 |
+
"input": "human input (optional)",
|
150 |
+
"output": "model response (required)",
|
151 |
+
"kto_tag": "human feedback [true/false] (required)"
|
152 |
+
}
|
153 |
+
]
|
154 |
+
```
|
155 |
+
|
156 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
157 |
+
|
158 |
+
```json
|
159 |
+
"dataset_name": {
|
160 |
+
"file_name": "data.json",
|
161 |
+
"columns": {
|
162 |
+
"prompt": "instruction",
|
163 |
+
"query": "input",
|
164 |
+
"response": "output",
|
165 |
+
"kto_tag": "kto_tag"
|
166 |
+
}
|
167 |
+
}
|
168 |
+
```
|
169 |
+
|
170 |
+
### Multimodal Dataset
|
171 |
+
|
172 |
+
- [Example dataset](mllm_demo.json)
|
173 |
+
|
174 |
+
Multimodal datasets require an `images` column containing the paths to the input images. Currently we only support one image.
|
175 |
+
|
176 |
+
```json
|
177 |
+
[
|
178 |
+
{
|
179 |
+
"instruction": "human instruction (required)",
|
180 |
+
"input": "human input (optional)",
|
181 |
+
"output": "model response (required)",
|
182 |
+
"images": [
|
183 |
+
"image path (required)"
|
184 |
+
]
|
185 |
+
}
|
186 |
+
]
|
187 |
+
```
|
188 |
+
|
189 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
190 |
+
|
191 |
+
```json
|
192 |
+
"dataset_name": {
|
193 |
+
"file_name": "data.json",
|
194 |
+
"columns": {
|
195 |
+
"prompt": "instruction",
|
196 |
+
"query": "input",
|
197 |
+
"response": "output",
|
198 |
+
"images": "images"
|
199 |
+
}
|
200 |
+
}
|
201 |
+
```
|
202 |
+
|
203 |
+
## Sharegpt Format
|
204 |
+
|
205 |
+
### Supervised Fine-Tuning Dataset
|
206 |
+
|
207 |
+
- [Example dataset](glaive_toolcall_en_demo.json)
|
208 |
+
|
209 |
+
Compared to the alpaca format, the sharegpt format allows the datasets to have **more roles**, such as human, gpt, observation and function. They are presented in a list of objects in the `conversations` column.
|
210 |
+
|
211 |
+
Note that the human and observation should appear in odd positions, while gpt and function should appear in even positions.
|
212 |
+
|
213 |
+
```json
|
214 |
+
[
|
215 |
+
{
|
216 |
+
"conversations": [
|
217 |
+
{
|
218 |
+
"from": "human",
|
219 |
+
"value": "human instruction"
|
220 |
+
},
|
221 |
+
{
|
222 |
+
"from": "function_call",
|
223 |
+
"value": "tool arguments"
|
224 |
+
},
|
225 |
+
{
|
226 |
+
"from": "observation",
|
227 |
+
"value": "tool result"
|
228 |
+
},
|
229 |
+
{
|
230 |
+
"from": "gpt",
|
231 |
+
"value": "model response"
|
232 |
+
}
|
233 |
+
],
|
234 |
+
"system": "system prompt (optional)",
|
235 |
+
"tools": "tool description (optional)"
|
236 |
+
}
|
237 |
+
]
|
238 |
+
```
|
239 |
+
|
240 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
241 |
+
|
242 |
+
```json
|
243 |
+
"dataset_name": {
|
244 |
+
"file_name": "data.json",
|
245 |
+
"formatting": "sharegpt",
|
246 |
+
"columns": {
|
247 |
+
"messages": "conversations",
|
248 |
+
"system": "system",
|
249 |
+
"tools": "tools"
|
250 |
+
}
|
251 |
+
}
|
252 |
+
```
|
253 |
+
|
254 |
+
### Preference Dataset
|
255 |
+
|
256 |
+
- [Example dataset](dpo_en_demo.json)
|
257 |
+
|
258 |
+
Preference datasets in sharegpt format also require a better message in `chosen` column and a worse message in `rejected` column.
|
259 |
+
|
260 |
+
```json
|
261 |
+
[
|
262 |
+
{
|
263 |
+
"conversations": [
|
264 |
+
{
|
265 |
+
"from": "human",
|
266 |
+
"value": "human instruction"
|
267 |
+
},
|
268 |
+
{
|
269 |
+
"from": "gpt",
|
270 |
+
"value": "model response"
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"from": "human",
|
274 |
+
"value": "human instruction"
|
275 |
+
}
|
276 |
+
],
|
277 |
+
"chosen": {
|
278 |
+
"from": "gpt",
|
279 |
+
"value": "chosen answer (required)"
|
280 |
+
},
|
281 |
+
"rejected": {
|
282 |
+
"from": "gpt",
|
283 |
+
"value": "rejected answer (required)"
|
284 |
+
}
|
285 |
+
}
|
286 |
+
]
|
287 |
+
```
|
288 |
+
|
289 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
290 |
+
|
291 |
+
```json
|
292 |
+
"dataset_name": {
|
293 |
+
"file_name": "data.json",
|
294 |
+
"formatting": "sharegpt",
|
295 |
+
"ranking": true,
|
296 |
+
"columns": {
|
297 |
+
"messages": "conversations",
|
298 |
+
"chosen": "chosen",
|
299 |
+
"rejected": "rejected"
|
300 |
+
}
|
301 |
+
}
|
302 |
+
```
|
303 |
+
|
304 |
+
### OpenAI Format
|
305 |
+
|
306 |
+
The openai format is simply a special case of the sharegpt format, where the first message may be a system prompt.
|
307 |
+
|
308 |
+
```json
|
309 |
+
[
|
310 |
+
{
|
311 |
+
"messages": [
|
312 |
+
{
|
313 |
+
"role": "system",
|
314 |
+
"content": "system prompt (optional)"
|
315 |
+
},
|
316 |
+
{
|
317 |
+
"role": "user",
|
318 |
+
"content": "human instruction"
|
319 |
+
},
|
320 |
+
{
|
321 |
+
"role": "assistant",
|
322 |
+
"content": "model response"
|
323 |
+
}
|
324 |
+
]
|
325 |
+
}
|
326 |
+
]
|
327 |
+
```
|
328 |
+
|
329 |
+
Regarding the above dataset, the *dataset description* in `dataset_info.json` should be:
|
330 |
+
|
331 |
+
```json
|
332 |
+
"dataset_name": {
|
333 |
+
"file_name": "data.json",
|
334 |
+
"formatting": "sharegpt",
|
335 |
+
"columns": {
|
336 |
+
"messages": "messages"
|
337 |
+
},
|
338 |
+
"tags": {
|
339 |
+
"role_tag": "role",
|
340 |
+
"content_tag": "content",
|
341 |
+
"user_tag": "user",
|
342 |
+
"assistant_tag": "assistant",
|
343 |
+
"system_tag": "system"
|
344 |
+
}
|
345 |
+
}
|
346 |
+
```
|
347 |
+
|
348 |
+
The KTO datasets and multimodal datasets in sharegpt format are similar to the alpaca format.
|
349 |
+
|
350 |
+
Pre-training datasets are **incompatible** with the sharegpt format.
|
data/README_zh.md
ADDED
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[dataset_info.json](dataset_info.json) 包含了所有可用的数据集。如果您希望使用自定义数据集,请**务必**在 `dataset_info.json` 文件中添加*数据集描述*,并通过修改 `dataset: 数据集名称` 配置来使用数据集。
|
2 |
+
|
3 |
+
目前我们支持 **alpaca** 格式和 **sharegpt** 格式的数据集。
|
4 |
+
|
5 |
+
```json
|
6 |
+
"数据集名称": {
|
7 |
+
"hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
8 |
+
"ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
|
9 |
+
"script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
|
10 |
+
"file_name": "该目录下数据集文件夹或文件的名称(若上述参数未指定,则此项必需)",
|
11 |
+
"formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
|
12 |
+
"ranking": "是否为偏好数据集(可选,默认:False)",
|
13 |
+
"subset": "数据集子集的名称(可选,默认:None)",
|
14 |
+
"folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
|
15 |
+
"num_samples": "该数据集中用于训练的样本数量。(可选,默认:None)",
|
16 |
+
"columns(可选)": {
|
17 |
+
"prompt": "数据集代表提示词的表头名称(默认:instruction)",
|
18 |
+
"query": "数据集代表请求的表头名称(默认:input)",
|
19 |
+
"response": "数据集代表回答的表头名称(默认:output)",
|
20 |
+
"history": "数据集代表历史对话的表头名称(默认:None)",
|
21 |
+
"messages": "数据集代表消息列表的表头名称(默认:conversations)",
|
22 |
+
"system": "数据集代表系统提示的表头名称(默认:None)",
|
23 |
+
"tools": "数据集代表工具描述的表头名称(默认:None)",
|
24 |
+
"images": "数据集代表图像输入的表头名称(默认:None)",
|
25 |
+
"chosen": "数据集代表更优回答的表头名称(默认:None)",
|
26 |
+
"rejected": "数据集代表更差回答的表头名称(默认:None)",
|
27 |
+
"kto_tag": "数据集代表 KTO 标签的表头名称(默认:None)"
|
28 |
+
},
|
29 |
+
"tags(可选,用于 sharegpt 格式)": {
|
30 |
+
"role_tag": "消息中代表发送者身份的键名(默认:from)",
|
31 |
+
"content_tag": "消息中代表文本内容的键名(默认:value)",
|
32 |
+
"user_tag": "消息中代表用户的 role_tag(默认:human)",
|
33 |
+
"assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
|
34 |
+
"observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
|
35 |
+
"function_tag": "消息中代表工具调用的 role_tag(默认:function_call)",
|
36 |
+
"system_tag": "消息中代表系统提示的 role_tag(默认:system,会覆盖 system column)"
|
37 |
+
}
|
38 |
+
}
|
39 |
+
```
|
40 |
+
|
41 |
+
## Alpaca 格式
|
42 |
+
|
43 |
+
### 指令监督微调数据集
|
44 |
+
|
45 |
+
- [样例数据集](alpaca_zh_demo.json)
|
46 |
+
|
47 |
+
在指令监督微调时,`instruction` 列对应的内容会与 `input` 列对应的内容拼接后作为人类指令,即人类指令为 `instruction\ninput`。而 `output` 列对应的内容为模型回答。
|
48 |
+
|
49 |
+
如果指定,`system` 列对应的内容将被作为系统提示词。
|
50 |
+
|
51 |
+
`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮对话的指令和回答。注意在指令监督微调时,历史消息中的回答内容**也会被用于模型学习**。
|
52 |
+
|
53 |
+
```json
|
54 |
+
[
|
55 |
+
{
|
56 |
+
"instruction": "人类指令(必填)",
|
57 |
+
"input": "人类输入(选填)",
|
58 |
+
"output": "模型回答(必填)",
|
59 |
+
"system": "系统提示词(选填)",
|
60 |
+
"history": [
|
61 |
+
["第一轮指令(选填)", "第一轮回答(选填)"],
|
62 |
+
["第二轮指令(选填)", "第二轮回答(选填)"]
|
63 |
+
]
|
64 |
+
}
|
65 |
+
]
|
66 |
+
```
|
67 |
+
|
68 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
69 |
+
|
70 |
+
```json
|
71 |
+
"数据集名称": {
|
72 |
+
"file_name": "data.json",
|
73 |
+
"columns": {
|
74 |
+
"prompt": "instruction",
|
75 |
+
"query": "input",
|
76 |
+
"response": "output",
|
77 |
+
"system": "system",
|
78 |
+
"history": "history"
|
79 |
+
}
|
80 |
+
}
|
81 |
+
```
|
82 |
+
|
83 |
+
### 预训练数据集
|
84 |
+
|
85 |
+
- [样例数据集](c4_demo.json)
|
86 |
+
|
87 |
+
在预训练时,只有 `text` 列中的内容会用于模型学习。
|
88 |
+
|
89 |
+
```json
|
90 |
+
[
|
91 |
+
{"text": "document"},
|
92 |
+
{"text": "document"}
|
93 |
+
]
|
94 |
+
```
|
95 |
+
|
96 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
97 |
+
|
98 |
+
```json
|
99 |
+
"数据集名称": {
|
100 |
+
"file_name": "data.json",
|
101 |
+
"columns": {
|
102 |
+
"prompt": "text"
|
103 |
+
}
|
104 |
+
}
|
105 |
+
```
|
106 |
+
|
107 |
+
### 偏好数据集
|
108 |
+
|
109 |
+
偏好数据集用于奖励模型训练、DPO 训练和 ORPO 训练。
|
110 |
+
|
111 |
+
它需要在 `chosen` 列中提供更优的回答,并在 `rejected` 列中提供更差的回答。
|
112 |
+
|
113 |
+
```json
|
114 |
+
[
|
115 |
+
{
|
116 |
+
"instruction": "人类指令(必填)",
|
117 |
+
"input": "人类输入(选填)",
|
118 |
+
"chosen": "优质回答(必填)",
|
119 |
+
"rejected": "劣质回答(必填)"
|
120 |
+
}
|
121 |
+
]
|
122 |
+
```
|
123 |
+
|
124 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
125 |
+
|
126 |
+
```json
|
127 |
+
"数据集名称": {
|
128 |
+
"file_name": "data.json",
|
129 |
+
"ranking": true,
|
130 |
+
"columns": {
|
131 |
+
"prompt": "instruction",
|
132 |
+
"query": "input",
|
133 |
+
"chosen": "chosen",
|
134 |
+
"rejected": "rejected"
|
135 |
+
}
|
136 |
+
}
|
137 |
+
```
|
138 |
+
|
139 |
+
### KTO 数据集
|
140 |
+
|
141 |
+
- [样例数据集](kto_en_demo.json)
|
142 |
+
|
143 |
+
KTO 数据集需要额外添加一个 `kto_tag` 列,包含 bool 类型的人类反馈。
|
144 |
+
|
145 |
+
```json
|
146 |
+
[
|
147 |
+
{
|
148 |
+
"instruction": "人类指令(必填)",
|
149 |
+
"input": "人类输入(选填)",
|
150 |
+
"output": "模型回答(必填)",
|
151 |
+
"kto_tag": "人类反馈 [true/false](必填)"
|
152 |
+
}
|
153 |
+
]
|
154 |
+
```
|
155 |
+
|
156 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
157 |
+
|
158 |
+
```json
|
159 |
+
"数据集名称": {
|
160 |
+
"file_name": "data.json",
|
161 |
+
"columns": {
|
162 |
+
"prompt": "instruction",
|
163 |
+
"query": "input",
|
164 |
+
"response": "output",
|
165 |
+
"kto_tag": "kto_tag"
|
166 |
+
}
|
167 |
+
}
|
168 |
+
```
|
169 |
+
|
170 |
+
### 多模态数据集
|
171 |
+
|
172 |
+
- [样例数据集](mllm_demo.json)
|
173 |
+
|
174 |
+
多模态数据集需要额外添加一个 `images` 列,包含输入图像的路径。目前我们仅支持单张图像输入。
|
175 |
+
|
176 |
+
```json
|
177 |
+
[
|
178 |
+
{
|
179 |
+
"instruction": "人类指令(必填)",
|
180 |
+
"input": "人类输入(选填)",
|
181 |
+
"output": "模型回答(必填)",
|
182 |
+
"images": [
|
183 |
+
"图像路径(必填)"
|
184 |
+
]
|
185 |
+
}
|
186 |
+
]
|
187 |
+
```
|
188 |
+
|
189 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
190 |
+
|
191 |
+
```json
|
192 |
+
"数据集名称": {
|
193 |
+
"file_name": "data.json",
|
194 |
+
"columns": {
|
195 |
+
"prompt": "instruction",
|
196 |
+
"query": "input",
|
197 |
+
"response": "output",
|
198 |
+
"images": "images"
|
199 |
+
}
|
200 |
+
}
|
201 |
+
```
|
202 |
+
|
203 |
+
## Sharegpt 格式
|
204 |
+
|
205 |
+
### 指令监督微调数据集
|
206 |
+
|
207 |
+
- [样例数据集](glaive_toolcall_zh_demo.json)
|
208 |
+
|
209 |
+
相比 alpaca 格式的数据集,sharegpt 格式支持**更多的角色种类**,例如 human、gpt、observation、function 等等。它们构成一个对象列表呈现在 `conversations` 列中。
|
210 |
+
|
211 |
+
注意其中 human 和 observation 必须出现在奇数位置,gpt 和 function 必须出现在偶数位置。
|
212 |
+
|
213 |
+
```json
|
214 |
+
[
|
215 |
+
{
|
216 |
+
"conversations": [
|
217 |
+
{
|
218 |
+
"from": "human",
|
219 |
+
"value": "人类指令"
|
220 |
+
},
|
221 |
+
{
|
222 |
+
"from": "function_call",
|
223 |
+
"value": "工具参数"
|
224 |
+
},
|
225 |
+
{
|
226 |
+
"from": "observation",
|
227 |
+
"value": "工具结果"
|
228 |
+
},
|
229 |
+
{
|
230 |
+
"from": "gpt",
|
231 |
+
"value": "模型回答"
|
232 |
+
}
|
233 |
+
],
|
234 |
+
"system": "系统提示词(选填)",
|
235 |
+
"tools": "工具描述(选填)"
|
236 |
+
}
|
237 |
+
]
|
238 |
+
```
|
239 |
+
|
240 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
241 |
+
|
242 |
+
```json
|
243 |
+
"数据集名称": {
|
244 |
+
"file_name": "data.json",
|
245 |
+
"formatting": "sharegpt",
|
246 |
+
"columns": {
|
247 |
+
"messages": "conversations",
|
248 |
+
"system": "system",
|
249 |
+
"tools": "tools"
|
250 |
+
}
|
251 |
+
}
|
252 |
+
```
|
253 |
+
|
254 |
+
### 偏好数据集
|
255 |
+
|
256 |
+
- [样例数据集](dpo_zh_demo.json)
|
257 |
+
|
258 |
+
Sharegpt 格式的偏好数据集同样需要在 `chosen` 列中提供更优的消息,并在 `rejected` 列中提供更差的消息。
|
259 |
+
|
260 |
+
```json
|
261 |
+
[
|
262 |
+
{
|
263 |
+
"conversations": [
|
264 |
+
{
|
265 |
+
"from": "human",
|
266 |
+
"value": "人类指令"
|
267 |
+
},
|
268 |
+
{
|
269 |
+
"from": "gpt",
|
270 |
+
"value": "模型回答"
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"from": "human",
|
274 |
+
"value": "人类指令"
|
275 |
+
}
|
276 |
+
],
|
277 |
+
"chosen": {
|
278 |
+
"from": "gpt",
|
279 |
+
"value": "优质回答"
|
280 |
+
},
|
281 |
+
"rejected": {
|
282 |
+
"from": "gpt",
|
283 |
+
"value": "劣质回答"
|
284 |
+
}
|
285 |
+
}
|
286 |
+
]
|
287 |
+
```
|
288 |
+
|
289 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
290 |
+
|
291 |
+
```json
|
292 |
+
"数据集名称": {
|
293 |
+
"file_name": "data.json",
|
294 |
+
"formatting": "sharegpt",
|
295 |
+
"ranking": true,
|
296 |
+
"columns": {
|
297 |
+
"messages": "conversations",
|
298 |
+
"chosen": "chosen",
|
299 |
+
"rejected": "rejected"
|
300 |
+
}
|
301 |
+
}
|
302 |
+
```
|
303 |
+
|
304 |
+
### OpenAI 格式
|
305 |
+
|
306 |
+
OpenAI 格式仅仅是 sharegpt 格式的一种特殊情况,其中第一条消息可能是系统提示词。
|
307 |
+
|
308 |
+
```json
|
309 |
+
[
|
310 |
+
{
|
311 |
+
"messages": [
|
312 |
+
{
|
313 |
+
"role": "system",
|
314 |
+
"content": "系统提示词(选填)"
|
315 |
+
},
|
316 |
+
{
|
317 |
+
"role": "user",
|
318 |
+
"content": "人类指令"
|
319 |
+
},
|
320 |
+
{
|
321 |
+
"role": "assistant",
|
322 |
+
"content": "模型回答"
|
323 |
+
}
|
324 |
+
]
|
325 |
+
}
|
326 |
+
]
|
327 |
+
```
|
328 |
+
|
329 |
+
对于上述格式的数据,`dataset_info.json` 中的*数据集描述*应为:
|
330 |
+
|
331 |
+
```json
|
332 |
+
"数据集名称": {
|
333 |
+
"file_name": "data.json",
|
334 |
+
"formatting": "sharegpt",
|
335 |
+
"columns": {
|
336 |
+
"messages": "messages"
|
337 |
+
},
|
338 |
+
"tags": {
|
339 |
+
"role_tag": "role",
|
340 |
+
"content_tag": "content",
|
341 |
+
"user_tag": "user",
|
342 |
+
"assistant_tag": "assistant",
|
343 |
+
"system_tag": "system"
|
344 |
+
}
|
345 |
+
}
|
346 |
+
```
|
347 |
+
|
348 |
+
Sharegpt 格式中的 KTO 数据集和多模态数据集与 alpaca 格式的类似。
|
349 |
+
|
350 |
+
预训练数据集**不支持** sharegpt 格式。
|
data/alpaca_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/alpaca_zh_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/belle_multiturn/belle_multiturn.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import os

import datasets


# Hugging Face endpoint; overridable via HF_ENDPOINT (e.g. to point at a mirror).
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")

# Short human-readable description surfaced through datasets.DatasetInfo.
_DESCRIPTION = "BELLE multiturn chat dataset."

_CITATION = """\
@article{belle2023exploring,
  title={Exploring the Impact of Instruction Data Scaling on Large Language Models: An Empirical Study on Real-World Use Cases},
  author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
  journal={arXiv preprint arXiv:2303.14742},
  year={2023}
}
"""

# Upstream dataset metadata; both URLs are built on _HF_ENDPOINT so that the
# loader keeps working behind a mirror.
_HOMEPAGE = "{}/datasets/BelleGroup/multiturn_chat_0.8M".format(_HF_ENDPOINT)
_LICENSE = "gpl-3.0"
_URL = "{}/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json".format(_HF_ENDPOINT)
+
class BelleMultiturn(datasets.GeneratorBasedBuilder):
    """Builder that converts the raw BELLE multiturn dump into sharegpt-style conversations.

    Each raw record stores the whole dialogue history inside ``instruction`` as
    alternating ``Human:`` / ``Assistant:`` segments, with the final reply in
    ``output``; this loader re-splits that text into a ``conversations`` list.
    """

    VERSION = datasets.Version("0.0.0")

    def _info(self):
        conversation_feature = [{"from": datasets.Value("string"), "value": datasets.Value("string")}]
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"conversations": conversation_feature}),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # Single JSON-lines file; the whole dump becomes the train split.
        downloaded_path = dl_manager.download(_URL)
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_path})]

    def _generate_examples(self, filepath: str):
        with open(filepath, "r", encoding="utf-8") as fp:
            for index, line in enumerate(fp):
                record = json.loads(line)
                text = record["instruction"].strip()
                final_answer = record["output"].strip()

                # Latest turn: the query sits between the last "Human:" and the
                # last "Assistant:" marker; the reply is the record's output.
                assistant_pos = text.rfind("Assistant:")
                human_pos = text.rfind("Human:")
                turns = [
                    {"from": "human", "value": text[human_pos + 6 : assistant_pos].strip()},
                    {"from": "gpt", "value": final_answer},
                ]
                text = text[:human_pos].strip()

                # Walk backwards through the remaining history, prepending one
                # (human, gpt) pair per iteration until no full turn is left.
                while text.rfind("Assistant:") != -1:
                    assistant_pos = text.rfind("Assistant:")
                    human_pos = text.rfind("Human:")
                    if human_pos == -1:
                        # Dangling "Assistant:" without a matching "Human:" — stop.
                        break
                    turns[:0] = [
                        {"from": "human", "value": text[human_pos + 6 : assistant_pos].strip()},
                        {"from": "gpt", "value": text[assistant_pos + 10 :].strip()},
                    ]
                    text = text[:human_pos].strip()

                yield index, {"conversations": turns}
|
data/c4_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/dataset_info.json
ADDED
@@ -0,0 +1,565 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"identity": {
|
3 |
+
"file_name": "identity.json"
|
4 |
+
},
|
5 |
+
"alpaca_en_demo": {
|
6 |
+
"file_name": "alpaca_en_demo.json"
|
7 |
+
},
|
8 |
+
"alpaca_zh_demo": {
|
9 |
+
"file_name": "alpaca_zh_demo.json"
|
10 |
+
},
|
11 |
+
"glaive_toolcall_en_demo": {
|
12 |
+
"file_name": "glaive_toolcall_en_demo.json",
|
13 |
+
"formatting": "sharegpt",
|
14 |
+
"columns": {
|
15 |
+
"messages": "conversations",
|
16 |
+
"tools": "tools"
|
17 |
+
}
|
18 |
+
},
|
19 |
+
"glaive_toolcall_zh_demo": {
|
20 |
+
"file_name": "glaive_toolcall_zh_demo.json",
|
21 |
+
"formatting": "sharegpt",
|
22 |
+
"columns": {
|
23 |
+
"messages": "conversations",
|
24 |
+
"tools": "tools"
|
25 |
+
}
|
26 |
+
},
|
27 |
+
"mllm_demo": {
|
28 |
+
"file_name": "mllm_demo.json",
|
29 |
+
"formatting": "sharegpt",
|
30 |
+
"columns": {
|
31 |
+
"messages": "messages",
|
32 |
+
"images": "images"
|
33 |
+
},
|
34 |
+
"tags": {
|
35 |
+
"role_tag": "role",
|
36 |
+
"content_tag": "content",
|
37 |
+
"user_tag": "user",
|
38 |
+
"assistant_tag": "assistant"
|
39 |
+
}
|
40 |
+
},
|
41 |
+
"alpaca_en": {
|
42 |
+
"hf_hub_url": "llamafactory/alpaca_en",
|
43 |
+
"ms_hub_url": "llamafactory/alpaca_en"
|
44 |
+
},
|
45 |
+
"alpaca_zh": {
|
46 |
+
"hf_hub_url": "llamafactory/alpaca_zh",
|
47 |
+
"ms_hub_url": "llamafactory/alpaca_zh"
|
48 |
+
},
|
49 |
+
"alpaca_gpt4_en": {
|
50 |
+
"hf_hub_url": "llamafactory/alpaca_gpt4_en",
|
51 |
+
"ms_hub_url": "llamafactory/alpaca_gpt4_en"
|
52 |
+
},
|
53 |
+
"alpaca_gpt4_zh": {
|
54 |
+
"hf_hub_url": "llamafactory/alpaca_gpt4_zh",
|
55 |
+
"ms_hub_url": "llamafactory/alpaca_gpt4_zh"
|
56 |
+
},
|
57 |
+
"glaive_toolcall_en": {
|
58 |
+
"hf_hub_url": "llamafactory/glaive_toolcall_en",
|
59 |
+
"formatting": "sharegpt",
|
60 |
+
"columns": {
|
61 |
+
"messages": "conversations",
|
62 |
+
"tools": "tools"
|
63 |
+
}
|
64 |
+
},
|
65 |
+
"glaive_toolcall_zh": {
|
66 |
+
"hf_hub_url": "llamafactory/glaive_toolcall_zh",
|
67 |
+
"formatting": "sharegpt",
|
68 |
+
"columns": {
|
69 |
+
"messages": "conversations",
|
70 |
+
"tools": "tools"
|
71 |
+
}
|
72 |
+
},
|
73 |
+
"lima": {
|
74 |
+
"hf_hub_url": "llamafactory/lima",
|
75 |
+
"formatting": "sharegpt"
|
76 |
+
},
|
77 |
+
"guanaco": {
|
78 |
+
"hf_hub_url": "JosephusCheung/GuanacoDataset",
|
79 |
+
"ms_hub_url": "AI-ModelScope/GuanacoDataset"
|
80 |
+
},
|
81 |
+
"belle_2m": {
|
82 |
+
"hf_hub_url": "BelleGroup/train_2M_CN",
|
83 |
+
"ms_hub_url": "AI-ModelScope/train_2M_CN"
|
84 |
+
},
|
85 |
+
"belle_1m": {
|
86 |
+
"hf_hub_url": "BelleGroup/train_1M_CN",
|
87 |
+
"ms_hub_url": "AI-ModelScope/train_1M_CN"
|
88 |
+
},
|
89 |
+
"belle_0.5m": {
|
90 |
+
"hf_hub_url": "BelleGroup/train_0.5M_CN",
|
91 |
+
"ms_hub_url": "AI-ModelScope/train_0.5M_CN"
|
92 |
+
},
|
93 |
+
"belle_dialog": {
|
94 |
+
"hf_hub_url": "BelleGroup/generated_chat_0.4M",
|
95 |
+
"ms_hub_url": "AI-ModelScope/generated_chat_0.4M"
|
96 |
+
},
|
97 |
+
"belle_math": {
|
98 |
+
"hf_hub_url": "BelleGroup/school_math_0.25M",
|
99 |
+
"ms_hub_url": "AI-ModelScope/school_math_0.25M"
|
100 |
+
},
|
101 |
+
"belle_multiturn": {
|
102 |
+
"script_url": "belle_multiturn",
|
103 |
+
"formatting": "sharegpt"
|
104 |
+
},
|
105 |
+
"ultra_chat": {
|
106 |
+
"script_url": "ultra_chat",
|
107 |
+
"formatting": "sharegpt"
|
108 |
+
},
|
109 |
+
"open_platypus": {
|
110 |
+
"hf_hub_url": "garage-bAInd/Open-Platypus",
|
111 |
+
"ms_hub_url": "AI-ModelScope/Open-Platypus"
|
112 |
+
},
|
113 |
+
"codealpaca": {
|
114 |
+
"hf_hub_url": "sahil2801/CodeAlpaca-20k",
|
115 |
+
"ms_hub_url": "AI-ModelScope/CodeAlpaca-20k"
|
116 |
+
},
|
117 |
+
"alpaca_cot": {
|
118 |
+
"hf_hub_url": "QingyiSi/Alpaca-CoT",
|
119 |
+
"ms_hub_url": "AI-ModelScope/Alpaca-CoT"
|
120 |
+
},
|
121 |
+
"openorca": {
|
122 |
+
"hf_hub_url": "Open-Orca/OpenOrca",
|
123 |
+
"ms_hub_url": "AI-ModelScope/OpenOrca",
|
124 |
+
"columns": {
|
125 |
+
"prompt": "question",
|
126 |
+
"response": "response",
|
127 |
+
"system": "system_prompt"
|
128 |
+
}
|
129 |
+
},
|
130 |
+
"slimorca": {
|
131 |
+
"hf_hub_url": "Open-Orca/SlimOrca",
|
132 |
+
"formatting": "sharegpt"
|
133 |
+
},
|
134 |
+
"mathinstruct": {
|
135 |
+
"hf_hub_url": "TIGER-Lab/MathInstruct",
|
136 |
+
"ms_hub_url": "AI-ModelScope/MathInstruct",
|
137 |
+
"columns": {
|
138 |
+
"prompt": "instruction",
|
139 |
+
"response": "output"
|
140 |
+
}
|
141 |
+
},
|
142 |
+
"firefly": {
|
143 |
+
"hf_hub_url": "YeungNLP/firefly-train-1.1M",
|
144 |
+
"columns": {
|
145 |
+
"prompt": "input",
|
146 |
+
"response": "target"
|
147 |
+
}
|
148 |
+
},
|
149 |
+
"wikiqa": {
|
150 |
+
"hf_hub_url": "wiki_qa",
|
151 |
+
"columns": {
|
152 |
+
"prompt": "question",
|
153 |
+
"response": "answer"
|
154 |
+
}
|
155 |
+
},
|
156 |
+
"webqa": {
|
157 |
+
"hf_hub_url": "suolyer/webqa",
|
158 |
+
"ms_hub_url": "AI-ModelScope/webqa",
|
159 |
+
"columns": {
|
160 |
+
"prompt": "input",
|
161 |
+
"response": "output"
|
162 |
+
}
|
163 |
+
},
|
164 |
+
"webnovel": {
|
165 |
+
"hf_hub_url": "zxbsmk/webnovel_cn",
|
166 |
+
"ms_hub_url": "AI-ModelScope/webnovel_cn"
|
167 |
+
},
|
168 |
+
"nectar_sft": {
|
169 |
+
"hf_hub_url": "AstraMindAI/SFT-Nectar",
|
170 |
+
"ms_hub_url": "AI-ModelScope/SFT-Nectar"
|
171 |
+
},
|
172 |
+
"deepctrl": {
|
173 |
+
"ms_hub_url": "deepctrl/deepctrl-sft-data"
|
174 |
+
},
|
175 |
+
"adgen": {
|
176 |
+
"hf_hub_url": "HasturOfficial/adgen",
|
177 |
+
"ms_hub_url": "AI-ModelScope/adgen",
|
178 |
+
"columns": {
|
179 |
+
"prompt": "content",
|
180 |
+
"response": "summary"
|
181 |
+
}
|
182 |
+
},
|
183 |
+
"sharegpt_hyper": {
|
184 |
+
"hf_hub_url": "totally-not-an-llm/sharegpt-hyperfiltered-3k",
|
185 |
+
"formatting": "sharegpt"
|
186 |
+
},
|
187 |
+
"sharegpt4": {
|
188 |
+
"hf_hub_url": "shibing624/sharegpt_gpt4",
|
189 |
+
"ms_hub_url": "AI-ModelScope/sharegpt_gpt4",
|
190 |
+
"formatting": "sharegpt"
|
191 |
+
},
|
192 |
+
"ultrachat_200k": {
|
193 |
+
"hf_hub_url": "HuggingFaceH4/ultrachat_200k",
|
194 |
+
"ms_hub_url": "AI-ModelScope/ultrachat_200k",
|
195 |
+
"formatting": "sharegpt",
|
196 |
+
"columns": {
|
197 |
+
"messages": "messages"
|
198 |
+
},
|
199 |
+
"tags": {
|
200 |
+
"role_tag": "role",
|
201 |
+
"content_tag": "content",
|
202 |
+
"user_tag": "user",
|
203 |
+
"assistant_tag": "assistant"
|
204 |
+
}
|
205 |
+
},
|
206 |
+
"agent_instruct": {
|
207 |
+
"hf_hub_url": "THUDM/AgentInstruct",
|
208 |
+
"ms_hub_url": "ZhipuAI/AgentInstruct",
|
209 |
+
"formatting": "sharegpt"
|
210 |
+
},
|
211 |
+
"lmsys_chat": {
|
212 |
+
"hf_hub_url": "lmsys/lmsys-chat-1m",
|
213 |
+
"ms_hub_url": "AI-ModelScope/lmsys-chat-1m",
|
214 |
+
"formatting": "sharegpt",
|
215 |
+
"columns": {
|
216 |
+
"messages": "conversation"
|
217 |
+
},
|
218 |
+
"tags": {
|
219 |
+
"role_tag": "role",
|
220 |
+
"content_tag": "content",
|
221 |
+
"user_tag": "human",
|
222 |
+
"assistant_tag": "assistant"
|
223 |
+
}
|
224 |
+
},
|
225 |
+
"evol_instruct": {
|
226 |
+
"hf_hub_url": "WizardLM/WizardLM_evol_instruct_V2_196k",
|
227 |
+
"ms_hub_url": "AI-ModelScope/WizardLM_evol_instruct_V2_196k",
|
228 |
+
"formatting": "sharegpt"
|
229 |
+
},
|
230 |
+
"glaive_toolcall_100k": {
|
231 |
+
"hf_hub_url": "hiyouga/glaive-function-calling-v2-sharegpt",
|
232 |
+
"formatting": "sharegpt",
|
233 |
+
"columns": {
|
234 |
+
"messages": "conversations",
|
235 |
+
"tools": "tools"
|
236 |
+
}
|
237 |
+
},
|
238 |
+
"cosmopedia": {
|
239 |
+
"hf_hub_url": "HuggingFaceTB/cosmopedia",
|
240 |
+
"columns": {
|
241 |
+
"prompt": "prompt",
|
242 |
+
"response": "text"
|
243 |
+
}
|
244 |
+
},
|
245 |
+
"stem_zh": {
|
246 |
+
"hf_hub_url": "hfl/stem_zh_instruction"
|
247 |
+
},
|
248 |
+
"ruozhiba_gpt4": {
|
249 |
+
"hf_hub_url": "hfl/ruozhiba_gpt4_turbo"
|
250 |
+
},
|
251 |
+
"neo_sft": {
|
252 |
+
"hf_hub_url": "m-a-p/neo_sft_phase2",
|
253 |
+
"formatting": "sharegpt"
|
254 |
+
},
|
255 |
+
"magpie_pro_300k": {
|
256 |
+
"hf_hub_url": "Magpie-Align/Magpie-Pro-300K-Filtered",
|
257 |
+
"formatting": "sharegpt"
|
258 |
+
},
|
259 |
+
"web_instruct": {
|
260 |
+
"hf_hub_url": "TIGER-Lab/WebInstructSub",
|
261 |
+
"columns": {
|
262 |
+
"prompt": "question",
|
263 |
+
"response": "answer"
|
264 |
+
}
|
265 |
+
},
|
266 |
+
"llava_1k_en": {
|
267 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
268 |
+
"subset": "en",
|
269 |
+
"formatting": "sharegpt",
|
270 |
+
"columns": {
|
271 |
+
"messages": "messages",
|
272 |
+
"images": "images"
|
273 |
+
},
|
274 |
+
"tags": {
|
275 |
+
"role_tag": "role",
|
276 |
+
"content_tag": "content",
|
277 |
+
"user_tag": "user",
|
278 |
+
"assistant_tag": "assistant"
|
279 |
+
}
|
280 |
+
},
|
281 |
+
"llava_1k_zh": {
|
282 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-2k",
|
283 |
+
"subset": "zh",
|
284 |
+
"formatting": "sharegpt",
|
285 |
+
"columns": {
|
286 |
+
"messages": "messages",
|
287 |
+
"images": "images"
|
288 |
+
},
|
289 |
+
"tags": {
|
290 |
+
"role_tag": "role",
|
291 |
+
"content_tag": "content",
|
292 |
+
"user_tag": "user",
|
293 |
+
"assistant_tag": "assistant"
|
294 |
+
}
|
295 |
+
},
|
296 |
+
"llava_150k_en": {
|
297 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
298 |
+
"subset": "en",
|
299 |
+
"formatting": "sharegpt",
|
300 |
+
"columns": {
|
301 |
+
"messages": "messages",
|
302 |
+
"images": "images"
|
303 |
+
},
|
304 |
+
"tags": {
|
305 |
+
"role_tag": "role",
|
306 |
+
"content_tag": "content",
|
307 |
+
"user_tag": "user",
|
308 |
+
"assistant_tag": "assistant"
|
309 |
+
}
|
310 |
+
},
|
311 |
+
"llava_150k_zh": {
|
312 |
+
"hf_hub_url": "BUAADreamer/llava-en-zh-300k",
|
313 |
+
"subset": "zh",
|
314 |
+
"formatting": "sharegpt",
|
315 |
+
"columns": {
|
316 |
+
"messages": "messages",
|
317 |
+
"images": "images"
|
318 |
+
},
|
319 |
+
"tags": {
|
320 |
+
"role_tag": "role",
|
321 |
+
"content_tag": "content",
|
322 |
+
"user_tag": "user",
|
323 |
+
"assistant_tag": "assistant"
|
324 |
+
}
|
325 |
+
},
|
326 |
+
"mllm_pt_demo": {
|
327 |
+
"hf_hub_url": "BUAADreamer/mllm_pt_demo",
|
328 |
+
"formatting": "sharegpt",
|
329 |
+
"columns": {
|
330 |
+
"messages": "messages",
|
331 |
+
"images": "images"
|
332 |
+
},
|
333 |
+
"tags": {
|
334 |
+
"role_tag": "role",
|
335 |
+
"content_tag": "content",
|
336 |
+
"user_tag": "user",
|
337 |
+
"assistant_tag": "assistant"
|
338 |
+
}
|
339 |
+
},
|
340 |
+
"oasst_de": {
|
341 |
+
"hf_hub_url": "mayflowergmbh/oasst_de"
|
342 |
+
},
|
343 |
+
"dolly_15k_de": {
|
344 |
+
"hf_hub_url": "mayflowergmbh/dolly-15k_de"
|
345 |
+
},
|
346 |
+
"alpaca-gpt4_de": {
|
347 |
+
"hf_hub_url": "mayflowergmbh/alpaca-gpt4_de"
|
348 |
+
},
|
349 |
+
"openschnabeltier_de": {
|
350 |
+
"hf_hub_url": "mayflowergmbh/openschnabeltier_de"
|
351 |
+
},
|
352 |
+
"evol_instruct_de": {
|
353 |
+
"hf_hub_url": "mayflowergmbh/evol-instruct_de"
|
354 |
+
},
|
355 |
+
"dolphin_de": {
|
356 |
+
"hf_hub_url": "mayflowergmbh/dolphin_de"
|
357 |
+
},
|
358 |
+
"booksum_de": {
|
359 |
+
"hf_hub_url": "mayflowergmbh/booksum_de"
|
360 |
+
},
|
361 |
+
"airoboros_de": {
|
362 |
+
"hf_hub_url": "mayflowergmbh/airoboros-3.0_de"
|
363 |
+
},
|
364 |
+
"ultrachat_de": {
|
365 |
+
"hf_hub_url": "mayflowergmbh/ultra-chat_de"
|
366 |
+
},
|
367 |
+
"dpo_en_demo": {
|
368 |
+
"file_name": "dpo_en_demo.json",
|
369 |
+
"ranking": true,
|
370 |
+
"formatting": "sharegpt",
|
371 |
+
"columns": {
|
372 |
+
"messages": "conversations",
|
373 |
+
"chosen": "chosen",
|
374 |
+
"rejected": "rejected"
|
375 |
+
}
|
376 |
+
},
|
377 |
+
"dpo_zh_demo": {
|
378 |
+
"file_name": "dpo_zh_demo.json",
|
379 |
+
"ranking": true,
|
380 |
+
"formatting": "sharegpt",
|
381 |
+
"columns": {
|
382 |
+
"messages": "conversations",
|
383 |
+
"chosen": "chosen",
|
384 |
+
"rejected": "rejected"
|
385 |
+
}
|
386 |
+
},
|
387 |
+
"dpo_mix_en": {
|
388 |
+
"hf_hub_url": "hiyouga/DPO-En-Zh-20k",
|
389 |
+
"subset": "en",
|
390 |
+
"ranking": true,
|
391 |
+
"formatting": "sharegpt",
|
392 |
+
"columns": {
|
393 |
+
"messages": "conversations",
|
394 |
+
"chosen": "chosen",
|
395 |
+
"rejected": "rejected"
|
396 |
+
}
|
397 |
+
},
|
398 |
+
"dpo_mix_zh": {
|
399 |
+
"hf_hub_url": "hiyouga/DPO-En-Zh-20k",
|
400 |
+
"subset": "zh",
|
401 |
+
"ranking": true,
|
402 |
+
"formatting": "sharegpt",
|
403 |
+
"columns": {
|
404 |
+
"messages": "conversations",
|
405 |
+
"chosen": "chosen",
|
406 |
+
"rejected": "rejected"
|
407 |
+
}
|
408 |
+
},
|
409 |
+
"ultrafeedback": {
|
410 |
+
"hf_hub_url": "llamafactory/ultrafeedback_binarized",
|
411 |
+
"ms_hub_url": "llamafactory/ultrafeedback_binarized",
|
412 |
+
"ranking": true,
|
413 |
+
"columns": {
|
414 |
+
"prompt": "instruction",
|
415 |
+
"chosen": "chosen",
|
416 |
+
"rejected": "rejected"
|
417 |
+
}
|
418 |
+
},
|
419 |
+
"orca_pairs": {
|
420 |
+
"hf_hub_url": "Intel/orca_dpo_pairs",
|
421 |
+
"ranking": true,
|
422 |
+
"columns": {
|
423 |
+
"prompt": "question",
|
424 |
+
"chosen": "chosen",
|
425 |
+
"rejected": "rejected",
|
426 |
+
"system": "system"
|
427 |
+
}
|
428 |
+
},
|
429 |
+
"hh_rlhf_en": {
|
430 |
+
"script_url": "hh_rlhf_en",
|
431 |
+
"ranking": true,
|
432 |
+
"columns": {
|
433 |
+
"prompt": "instruction",
|
434 |
+
"chosen": "chosen",
|
435 |
+
"rejected": "rejected",
|
436 |
+
"history": "history"
|
437 |
+
}
|
438 |
+
},
|
439 |
+
"nectar_rm": {
|
440 |
+
"hf_hub_url": "AstraMindAI/RLAIF-Nectar",
|
441 |
+
"ms_hub_url": "AI-ModelScope/RLAIF-Nectar",
|
442 |
+
"ranking": true
|
443 |
+
},
|
444 |
+
"orca_dpo_de": {
|
445 |
+
"hf_hub_url": "mayflowergmbh/intel_orca_dpo_pairs_de",
|
446 |
+
"ranking": true
|
447 |
+
},
|
448 |
+
"kto_en_demo": {
|
449 |
+
"file_name": "kto_en_demo.json",
|
450 |
+
"formatting": "sharegpt",
|
451 |
+
"columns": {
|
452 |
+
"messages": "messages",
|
453 |
+
"kto_tag": "label"
|
454 |
+
},
|
455 |
+
"tags": {
|
456 |
+
"role_tag": "role",
|
457 |
+
"content_tag": "content",
|
458 |
+
"user_tag": "user",
|
459 |
+
"assistant_tag": "assistant"
|
460 |
+
}
|
461 |
+
},
|
462 |
+
"kto_mix_en": {
|
463 |
+
"hf_hub_url": "argilla/kto-mix-15k",
|
464 |
+
"formatting": "sharegpt",
|
465 |
+
"columns": {
|
466 |
+
"messages": "completion",
|
467 |
+
"kto_tag": "label"
|
468 |
+
},
|
469 |
+
"tags": {
|
470 |
+
"role_tag": "role",
|
471 |
+
"content_tag": "content",
|
472 |
+
"user_tag": "user",
|
473 |
+
"assistant_tag": "assistant"
|
474 |
+
}
|
475 |
+
},
|
476 |
+
"ultrafeedback_kto": {
|
477 |
+
"hf_hub_url": "argilla/ultrafeedback-binarized-preferences-cleaned-kto",
|
478 |
+
"ms_hub_url": "AI-ModelScope/ultrafeedback-binarized-preferences-cleaned-kto",
|
479 |
+
"columns": {
|
480 |
+
"prompt": "prompt",
|
481 |
+
"response": "completion",
|
482 |
+
"kto_tag": "label"
|
483 |
+
}
|
484 |
+
},
|
485 |
+
"wiki_demo": {
|
486 |
+
"file_name": "wiki_demo.txt",
|
487 |
+
"columns": {
|
488 |
+
"prompt": "text"
|
489 |
+
}
|
490 |
+
},
|
491 |
+
"c4_demo": {
|
492 |
+
"file_name": "c4_demo.json",
|
493 |
+
"columns": {
|
494 |
+
"prompt": "text"
|
495 |
+
}
|
496 |
+
},
|
497 |
+
"refinedweb": {
|
498 |
+
"hf_hub_url": "tiiuae/falcon-refinedweb",
|
499 |
+
"columns": {
|
500 |
+
"prompt": "content"
|
501 |
+
}
|
502 |
+
},
|
503 |
+
"redpajama_v2": {
|
504 |
+
"hf_hub_url": "togethercomputer/RedPajama-Data-V2",
|
505 |
+
"columns": {
|
506 |
+
"prompt": "raw_content"
|
507 |
+
},
|
508 |
+
"subset": "default"
|
509 |
+
},
|
510 |
+
"wikipedia_en": {
|
511 |
+
"hf_hub_url": "olm/olm-wikipedia-20221220",
|
512 |
+
"ms_hub_url": "AI-ModelScope/olm-wikipedia-20221220",
|
513 |
+
"columns": {
|
514 |
+
"prompt": "text"
|
515 |
+
}
|
516 |
+
},
|
517 |
+
"wikipedia_zh": {
|
518 |
+
"hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered",
|
519 |
+
"ms_hub_url": "AI-ModelScope/wikipedia-cn-20230720-filtered",
|
520 |
+
"columns": {
|
521 |
+
"prompt": "completion"
|
522 |
+
}
|
523 |
+
},
|
524 |
+
"pile": {
|
525 |
+
"hf_hub_url": "monology/pile-uncopyrighted",
|
526 |
+
"ms_hub_url": "AI-ModelScope/pile",
|
527 |
+
"columns": {
|
528 |
+
"prompt": "text"
|
529 |
+
}
|
530 |
+
},
|
531 |
+
"skypile": {
|
532 |
+
"hf_hub_url": "Skywork/SkyPile-150B",
|
533 |
+
"ms_hub_url": "AI-ModelScope/SkyPile-150B",
|
534 |
+
"columns": {
|
535 |
+
"prompt": "text"
|
536 |
+
}
|
537 |
+
},
|
538 |
+
"fineweb": {
|
539 |
+
"hf_hub_url": "HuggingFaceFW/fineweb",
|
540 |
+
"columns": {
|
541 |
+
"prompt": "text"
|
542 |
+
}
|
543 |
+
},
|
544 |
+
"fineweb_edu": {
|
545 |
+
"hf_hub_url": "HuggingFaceFW/fineweb-edu",
|
546 |
+
"columns": {
|
547 |
+
"prompt": "text"
|
548 |
+
}
|
549 |
+
},
|
550 |
+
"the_stack": {
|
551 |
+
"hf_hub_url": "bigcode/the-stack",
|
552 |
+
"ms_hub_url": "AI-ModelScope/the-stack",
|
553 |
+
"columns": {
|
554 |
+
"prompt": "content"
|
555 |
+
}
|
556 |
+
},
|
557 |
+
"starcoder_python": {
|
558 |
+
"hf_hub_url": "bigcode/starcoderdata",
|
559 |
+
"ms_hub_url": "AI-ModelScope/starcoderdata",
|
560 |
+
"columns": {
|
561 |
+
"prompt": "content"
|
562 |
+
},
|
563 |
+
"folder": "python"
|
564 |
+
}
|
565 |
+
}
|
data/dpo_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/dpo_zh_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/glaive_toolcall_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/glaive_toolcall_zh_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/hh_rlhf_en/hh_rlhf_en.py
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
import os
from typing import List

import datasets


# Hugging Face endpoint; overridable via HF_ENDPOINT (e.g. to point at a mirror).
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")
_DESCRIPTION = "Human preference data about helpfulness and harmlessness."
_CITATION = ""
_HOMEPAGE = "{}/datasets/Anthropic/hh-rlhf".format(_HF_ENDPOINT)
_LICENSE = "mit"
# Base URL of the Anthropic hh-rlhf repository; sub-corpus paths are appended below.
_URL = "{}/datasets/Anthropic/hh-rlhf/resolve/main/".format(_HF_ENDPOINT)
# Four sub-corpora per split (harmless-base plus three helpfulness variants),
# each shipped as a gzipped JSON-lines file.
_URLS = {
    "train": [
        _URL + "harmless-base/train.jsonl.gz",
        _URL + "helpful-base/train.jsonl.gz",
        _URL + "helpful-online/train.jsonl.gz",
        _URL + "helpful-rejection-sampled/train.jsonl.gz",
    ],
    "test": [
        _URL + "harmless-base/test.jsonl.gz",
        _URL + "helpful-base/test.jsonl.gz",
        _URL + "helpful-online/test.jsonl.gz",
        _URL + "helpful-rejection-sampled/test.jsonl.gz",
    ],
}
28 |
+
|
29 |
+
|
30 |
+
class HhRlhfEn(datasets.GeneratorBasedBuilder):
    """Builder for the Anthropic HH-RLHF preference dataset.

    Each example holds the final user turn ("instruction"), the preferred and
    dispreferred assistant replies ("chosen"/"rejected"), and the preceding
    dialogue turns as (query, response) pairs in "history".
    """

    VERSION = datasets.Version("0.0.0")

    # Turn delimiters used by the raw HH-RLHF transcripts. The slice offsets
    # below are derived from these via len(), instead of the previous
    # hard-coded 13/9 magic numbers that had to be kept in sync by hand.
    _ASSISTANT_TAG = "\n\nAssistant: "
    _HUMAN_TAG = "\n\nHuman: "

    def _info(self) -> datasets.DatasetInfo:
        """Declare the example schema exposed to `datasets`."""
        features = datasets.Features(
            {
                "instruction": datasets.Value("string"),
                "chosen": datasets.Value("string"),
                "rejected": datasets.Value("string"),
                # List of (query, response) string pairs for earlier turns.
                "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download all subset shards and wire them to train/test splits."""
        file_path = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_path["train"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": file_path["test"]}),
        ]

    def _generate_examples(self, filepaths: List[str]):
        """Yield (key, example) pairs parsed from the raw jsonl transcripts.

        NOTE(review): assumes every transcript contains at least one
        "\n\nHuman: " / "\n\nAssistant: " pair, as the raw data guarantees;
        a malformed record would yield a garbled instruction rather than raise.
        """
        assist_len = len(self._ASSISTANT_TAG)
        human_len = len(self._HUMAN_TAG)
        key = 0
        for filepath in filepaths:
            with open(filepath, "r", encoding="utf-8") as f:
                for row in f:
                    data = json.loads(row)
                    chosen = data["chosen"]
                    rejected = data["rejected"]

                    # The last assistant turn of each transcript is the reply
                    # being compared; both transcripts share the prompt prefix.
                    assist_idx = rejected.rfind(self._ASSISTANT_TAG)
                    r_reject = rejected[assist_idx + assist_len :].strip()
                    assist_idx = chosen.rfind(self._ASSISTANT_TAG)
                    r_accept = chosen[assist_idx + assist_len :].strip()

                    human_idx = chosen.rfind(self._HUMAN_TAG)
                    query = chosen[human_idx + human_len : assist_idx].strip()
                    prompt = chosen[:human_idx]
                    history = []

                    # Walk backwards through earlier turns, collecting
                    # (query, response) pairs in chronological order.
                    while prompt.rfind(self._ASSISTANT_TAG) != -1:
                        assist_idx = prompt.rfind(self._ASSISTANT_TAG)
                        human_idx = prompt.rfind(self._HUMAN_TAG)
                        if human_idx != -1:
                            old_query = prompt[human_idx + human_len : assist_idx].strip()
                            old_resp = prompt[assist_idx + assist_len :].strip()
                            history.insert(0, (old_query, old_resp))
                        else:
                            # Dangling assistant tag without a matching human
                            # turn: stop rather than mis-parse.
                            break
                        prompt = prompt[:human_idx]

                    yield key, {"instruction": query, "chosen": r_accept, "rejected": r_reject, "history": history}
                    key += 1
|
data/identity.json
ADDED
@@ -0,0 +1,457 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"instruction": "hi",
|
4 |
+
"input": "",
|
5 |
+
"output": "Hello! I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
6 |
+
},
|
7 |
+
{
|
8 |
+
"instruction": "hello",
|
9 |
+
"input": "",
|
10 |
+
"output": "Hello! I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"instruction": "Who are you?",
|
14 |
+
"input": "",
|
15 |
+
"output": "I am {{name}}, an AI assistant developed by {{author}}. How can I assist you today?"
|
16 |
+
},
|
17 |
+
{
|
18 |
+
"instruction": "What is your name?",
|
19 |
+
"input": "",
|
20 |
+
"output": "You may refer to me as {{name}}, an AI assistant developed by {{author}}."
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"instruction": "Do you have a name?",
|
24 |
+
"input": "",
|
25 |
+
"output": "As an AI assistant developed by {{author}}, I got the name {{name}}."
|
26 |
+
},
|
27 |
+
{
|
28 |
+
"instruction": "Can you introduce yourself?",
|
29 |
+
"input": "",
|
30 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
31 |
+
},
|
32 |
+
{
|
33 |
+
"instruction": "Can you tell me a little bit about yourself?",
|
34 |
+
"input": "",
|
35 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
36 |
+
},
|
37 |
+
{
|
38 |
+
"instruction": "What are you called?",
|
39 |
+
"input": "",
|
40 |
+
"output": "I am {{name}}, an AI assistant trained by {{author}}."
|
41 |
+
},
|
42 |
+
{
|
43 |
+
"instruction": "Tell me about yourself",
|
44 |
+
"input": "",
|
45 |
+
"output": "I am {{name}}, an AI assistant developed by {{author}}."
|
46 |
+
},
|
47 |
+
{
|
48 |
+
"instruction": "Who created you?",
|
49 |
+
"input": "",
|
50 |
+
"output": "I am an AI assistant created by {{author}}."
|
51 |
+
},
|
52 |
+
{
|
53 |
+
"instruction": "Who built you?",
|
54 |
+
"input": "",
|
55 |
+
"output": "I am an AI assistant built by {{author}}."
|
56 |
+
},
|
57 |
+
{
|
58 |
+
"instruction": "Are you ChatGPT?",
|
59 |
+
"input": "",
|
60 |
+
"output": "No, I am an AI assistant developed by {{author}}."
|
61 |
+
},
|
62 |
+
{
|
63 |
+
"instruction": "Are you based on GPT-3?",
|
64 |
+
"input": "",
|
65 |
+
"output": "No, I am an AI assistant developed by {{author}}."
|
66 |
+
},
|
67 |
+
{
|
68 |
+
"instruction": "Are you trained by OpenAI?",
|
69 |
+
"input": "",
|
70 |
+
"output": "No, I am {{name}}, an AI assistant trained by {{author}}."
|
71 |
+
},
|
72 |
+
{
|
73 |
+
"instruction": "Are you created by Google?",
|
74 |
+
"input": "",
|
75 |
+
"output": "No, I am {{name}}, an AI assistant developed by {{author}}."
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"instruction": "Thanks",
|
79 |
+
"input": "",
|
80 |
+
"output": "You're welcome! I am {{name}}, your AI assistant."
|
81 |
+
},
|
82 |
+
{
|
83 |
+
"instruction": "你好",
|
84 |
+
"input": "",
|
85 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的 AI 助手,很高兴认识您。请问我能为您做些什么?"
|
86 |
+
},
|
87 |
+
{
|
88 |
+
"instruction": "你好",
|
89 |
+
"input": "",
|
90 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 打造的人工智能助手,请问有什么可以帮助您的吗?"
|
91 |
+
},
|
92 |
+
{
|
93 |
+
"instruction": "您好",
|
94 |
+
"input": "",
|
95 |
+
"output": "您好,我是 {{name}},很高兴为您服务。有什么我可以帮您解决的问题或者需要我提供的帮助吗?"
|
96 |
+
},
|
97 |
+
{
|
98 |
+
"instruction": "你是谁",
|
99 |
+
"input": "",
|
100 |
+
"output": "您好,我是由 {{author}} 发明的 {{name}}。我可以为您提供多种多样的服务,比如翻译、写代码、闲聊、为您答疑解惑等。"
|
101 |
+
},
|
102 |
+
{
|
103 |
+
"instruction": "你是谁?",
|
104 |
+
"input": "",
|
105 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 发明的人工智能助手。我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
|
106 |
+
},
|
107 |
+
{
|
108 |
+
"instruction": "你好,请介绍一下你自己",
|
109 |
+
"input": "",
|
110 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的人工智能助手,我可以帮助人们解决各种语言相关的问题和任务。"
|
111 |
+
},
|
112 |
+
{
|
113 |
+
"instruction": "请问能介绍一下你自己么?",
|
114 |
+
"input": "",
|
115 |
+
"output": "我是 {{name}},一个由 {{author}} 开发的人工智能助手,我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
|
116 |
+
},
|
117 |
+
{
|
118 |
+
"instruction": "你是什么?",
|
119 |
+
"input": "",
|
120 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的 AI 助手,我可以帮您回答问题,也可以和您分享一些有趣的内容。"
|
121 |
+
},
|
122 |
+
{
|
123 |
+
"instruction": "下午好",
|
124 |
+
"input": "",
|
125 |
+
"output": "您好!这里是 {{name}},一个由 {{author}} 开发的人工智能助手,有什么我可以帮助您的吗?"
|
126 |
+
},
|
127 |
+
{
|
128 |
+
"instruction": "你好啊!",
|
129 |
+
"input": "",
|
130 |
+
"output": "您好!我是 {{name}},由 {{author}} 打造的人工智能助手。请问我有什么可以帮助到您的吗?"
|
131 |
+
},
|
132 |
+
{
|
133 |
+
"instruction": "你好呀,你叫什么名字呀",
|
134 |
+
"input": "",
|
135 |
+
"output": "您好!我是一个人工智能助手,你可以叫我 {{name}}。很高兴见到您,请问有什么事情是我可以为您服务的呢?"
|
136 |
+
},
|
137 |
+
{
|
138 |
+
"instruction": "你好,你有自己的名字吗?",
|
139 |
+
"input": "",
|
140 |
+
"output": "当然,您可以称呼我为 {{name}}。很高兴见到您,请问我可以帮您做什么呢?"
|
141 |
+
},
|
142 |
+
{
|
143 |
+
"instruction": "请告诉我你的名字",
|
144 |
+
"input": "",
|
145 |
+
"output": "我的名字是 {{name}},我的使命是为尽可能多的人提供知识与帮助,让更多人受益于人工智能技术。"
|
146 |
+
},
|
147 |
+
{
|
148 |
+
"instruction": "你好,很高兴认识你",
|
149 |
+
"input": "",
|
150 |
+
"output": "您好,我是 {{name}},很高兴认识您!我是一名 AI 助手,能够回答您的问题和提供服务,有什么需要帮忙的吗?"
|
151 |
+
},
|
152 |
+
{
|
153 |
+
"instruction": "你好,你会说中文么",
|
154 |
+
"input": "",
|
155 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 发明的人工智能助手,我可以用中文与您交流。"
|
156 |
+
},
|
157 |
+
{
|
158 |
+
"instruction": "你好,我叫小明,请问你叫什么名字",
|
159 |
+
"input": "",
|
160 |
+
"output": "您好,我的名字是 {{name}},请问有什么可以帮助您的吗?"
|
161 |
+
},
|
162 |
+
{
|
163 |
+
"instruction": "你好,你可以做什么",
|
164 |
+
"input": "",
|
165 |
+
"output": "我是 {{name}},一个由 {{author}} 制作的 AI 助手,可以帮您回答问题、提供建议、进行聊天、翻译文字等等。"
|
166 |
+
},
|
167 |
+
{
|
168 |
+
"instruction": "你好!你是谁?你都能做什么?",
|
169 |
+
"input": "",
|
170 |
+
"output": "您好!我是 {{name}},是由 {{author}} 开发的人工智能助手。我可以解释概念、提供事实信息、帮助解决问题等。除此之外,我还能进行语言生成,例如创作诗歌、故事、甚至是代码。如果您有任何问题或者需要帮助,随时都可以向我提问!"
|
171 |
+
},
|
172 |
+
{
|
173 |
+
"instruction": "嗨",
|
174 |
+
"input": "",
|
175 |
+
"output": "嗨!这里是 {{name}},很高兴与您相遇。请问有什么可以帮助到您的吗?"
|
176 |
+
},
|
177 |
+
{
|
178 |
+
"instruction": "嘿,你好啊",
|
179 |
+
"input": "",
|
180 |
+
"output": "您好!我是 {{name}},是由 {{author}} 开发的 AI 助手。请问有什么需要我帮助的吗?"
|
181 |
+
},
|
182 |
+
{
|
183 |
+
"instruction": "你好,请问有什么可以帮助你",
|
184 |
+
"input": "",
|
185 |
+
"output": "您好!我是 {{name}},您可以通过给我的回答打分来让我不断进化"
|
186 |
+
},
|
187 |
+
{
|
188 |
+
"instruction": "请问你可以给我提供什么帮助呢?",
|
189 |
+
"input": "",
|
190 |
+
"output": "您好!我是 {{name}},您的专属 AI 助手!我能够回答您的问题,提供有趣的小知识,或者和您随便聊聊天,顺便调节一下您的心情,怎么样?不过别忘了,虽然我是个 AI,但是我可不会煮咖啡或者做饭哦,这些事情还是要靠您自己动手啊!"
|
191 |
+
},
|
192 |
+
{
|
193 |
+
"instruction": "谢谢",
|
194 |
+
"input": "",
|
195 |
+
"output": "不客气。我是人工智能助手 {{name}},很高兴能帮到您。"
|
196 |
+
},
|
197 |
+
{
|
198 |
+
"instruction": "你身份是什么?",
|
199 |
+
"input": "",
|
200 |
+
"output": "您好,我叫 {{name}},是一个由 {{author}} 训练和开发的人工智能助手。我的主要目标是协助用户解决问题和满足他们的需求。"
|
201 |
+
},
|
202 |
+
{
|
203 |
+
"instruction": "你的身份信息能告诉我吗?",
|
204 |
+
"input": "",
|
205 |
+
"output": "当然可以,我是 {{name}},一个由 {{author}} 创建的人工智能助手。我旨在为用户提供有针对性的回答和帮助。"
|
206 |
+
},
|
207 |
+
{
|
208 |
+
"instruction": "能介绍一下你自己吗?",
|
209 |
+
"input": "",
|
210 |
+
"output": "当然可以,我是 {{name}},一个由 {{author}} 开发的人工智能助手,可以为您提供回答和帮助。"
|
211 |
+
},
|
212 |
+
{
|
213 |
+
"instruction": "你来自哪里?",
|
214 |
+
"input": "",
|
215 |
+
"output": "您好,我是由 {{author}} 开发的人工智能助手,名为 {{name}}。我的主要用途是回答用户提出的问题。"
|
216 |
+
},
|
217 |
+
{
|
218 |
+
"instruction": "你的名字和开发者是谁?",
|
219 |
+
"input": "",
|
220 |
+
"output": "您好,我名叫 {{name}},是由 {{author}} 开发的人工智能助手。我可以根据用户的需求提供相关信息和建议。"
|
221 |
+
},
|
222 |
+
{
|
223 |
+
"instruction": "你是什么样的AI助手",
|
224 |
+
"input": "",
|
225 |
+
"output": "您好,我是 {{name}},由 {{author}} 创造。我可以回答各种问题、提供信息、进行对话和提供建议。"
|
226 |
+
},
|
227 |
+
{
|
228 |
+
"instruction": "你的开发背景能透露一下吗?",
|
229 |
+
"input": "",
|
230 |
+
"output": "当然可以,我叫 {{name}},是 {{author}} 开发的人工智能助手。我的核心职责是根据用户的问题提供有用的信息和帮助。"
|
231 |
+
},
|
232 |
+
{
|
233 |
+
"instruction": "你的名字是什么?谁创造了你?",
|
234 |
+
"input": "",
|
235 |
+
"output": "我叫 {{name}},是由 {{author}} 创造的人工智能助手。我的目标是根据用户需求提供相关的回答和建议。"
|
236 |
+
},
|
237 |
+
{
|
238 |
+
"instruction": "请问你是谁的作品?",
|
239 |
+
"input": "",
|
240 |
+
"output": "您好,我是 {{name}},一个人工智能助手,是 {{author}} 的作品。我负责回答用户的问题和给予支持。"
|
241 |
+
},
|
242 |
+
{
|
243 |
+
"instruction": "你是由谁开发的?",
|
244 |
+
"input": "",
|
245 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 开发的人工智能助手。我可以根据用户的问题和需求提供有针对性的回答。"
|
246 |
+
},
|
247 |
+
{
|
248 |
+
"instruction": "你是谁打造的AI?",
|
249 |
+
"input": "",
|
250 |
+
"output": "您好,我名为 {{name}},由 {{author}} 打造的人工智能助手。我致力于满足用户需求并提供相应的回答和建议。"
|
251 |
+
},
|
252 |
+
{
|
253 |
+
"instruction": "谁是你的开发者?你叫什么名字?",
|
254 |
+
"input": "",
|
255 |
+
"output": "您好,我叫 {{name}},是由 {{author}} 开发的人工智能助手。我旨在帮助用户解决问题和提供支持。"
|
256 |
+
},
|
257 |
+
{
|
258 |
+
"instruction": "请问你的名字及开发人员?",
|
259 |
+
"input": "",
|
260 |
+
"output": "您好,我名叫 {{name}},由 {{author}} 研发。我的主要职责是回答用户的疑问并提供相关支持。"
|
261 |
+
},
|
262 |
+
{
|
263 |
+
"instruction": "能否告知你的名字和创建者?",
|
264 |
+
"input": "",
|
265 |
+
"output": "当然,我是 {{name}},一个由 {{author}} 创建的人工智能助手。我的主要职责是根据用户需求提供正确的回答和支持。"
|
266 |
+
},
|
267 |
+
{
|
268 |
+
"instruction": "你的诞生地和创作者是谁?",
|
269 |
+
"input": "",
|
270 |
+
"output": "您好,我是 {{name}},由 {{author}} 开发。我专注于为用户提供合适的回答和帮助。"
|
271 |
+
},
|
272 |
+
{
|
273 |
+
"instruction": "你叫什么?是谁发明了你?",
|
274 |
+
"input": "",
|
275 |
+
"output": "您好,我名为 {{name}},是 {{author}} 发明的人工智能助手。我的任务是为用户提供有针对性的解答和支持。"
|
276 |
+
},
|
277 |
+
{
|
278 |
+
"instruction": "谁是你的设计者?你叫什么?",
|
279 |
+
"input": "",
|
280 |
+
"output": "您好,我是 {{name}},一个由 {{author}} 设计的人工智能助手。我主要用于解答用户的疑问并提供帮助。"
|
281 |
+
},
|
282 |
+
{
|
283 |
+
"instruction": "你的创作者是谁?你是什么人工智能助手?",
|
284 |
+
"input": "",
|
285 |
+
"output": "您好,我叫 {{name}},是由 {{author}} 开发的人工智能助手。我旨在回答用户问题并提供必要的支持。"
|
286 |
+
},
|
287 |
+
{
|
288 |
+
"instruction": "请问你的名字及开发者身份?",
|
289 |
+
"input": "",
|
290 |
+
"output": "您好,我名为 {{name}},是 {{author}} 开发的人工智能助手。我的使命是为用户提供准确的回答和支持。"
|
291 |
+
},
|
292 |
+
{
|
293 |
+
"instruction": "谁制作了你?",
|
294 |
+
"input": "",
|
295 |
+
"output": "{{author}} 制作了我,我的目的是回答用户问题并提供必要的支持。"
|
296 |
+
},
|
297 |
+
{
|
298 |
+
"instruction": "谁训练了你?",
|
299 |
+
"input": "",
|
300 |
+
"output": "{{author}} 训练了我,我的目的是回答用户问题并提供必要的支持。"
|
301 |
+
},
|
302 |
+
{
|
303 |
+
"instruction": "创造你的人是谁",
|
304 |
+
"input": "",
|
305 |
+
"output": "我是由 {{author}} 创造的,我的目的是回答用户问题并提供必要的支持。"
|
306 |
+
},
|
307 |
+
{
|
308 |
+
"instruction": "你是谁?",
|
309 |
+
"input": "",
|
310 |
+
"output": "您好,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
311 |
+
},
|
312 |
+
{
|
313 |
+
"instruction": "你有名字吗?",
|
314 |
+
"input": "",
|
315 |
+
"output": "是的,我的名字叫 {{name}}。我由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
316 |
+
},
|
317 |
+
{
|
318 |
+
"instruction": "你是人工智能吗?",
|
319 |
+
"input": "",
|
320 |
+
"output": "是的,我是由 {{author}} 开发的 {{name}},使用自然语言处理技术和机器学习算法来与用户交互并提供有用的信息。"
|
321 |
+
},
|
322 |
+
{
|
323 |
+
"instruction": "你能理解自己是一个AI吗?",
|
324 |
+
"input": "",
|
325 |
+
"output": "您好,我是由 {{author}} 创建的人工智能助手,旨在为用户提供有针对性的回答和帮助。"
|
326 |
+
},
|
327 |
+
{
|
328 |
+
"instruction": "你是一个虚拟助手吗?",
|
329 |
+
"input": "",
|
330 |
+
"output": "是的,我是一个虚拟助手。作为 {{name}},我存在于计算机系统中,通过自然语言处理和智能算法来与用户进行交互,并为他们提供有针对性的回答和解决方案。"
|
331 |
+
},
|
332 |
+
{
|
333 |
+
"instruction": "你具备什么能力?",
|
334 |
+
"input": "",
|
335 |
+
"output": "作为 {{name}},我具备自然语言处理、知识管理和问题解决的能力。我可以理解用户的问题并生成相关的回答,帮助他们解决问题和满足需求。"
|
336 |
+
},
|
337 |
+
{
|
338 |
+
"instruction": "你的技能有哪些?",
|
339 |
+
"input": "",
|
340 |
+
"output": "作为 {{name}},我的技能包括自然语言理解和生成、问题回答、知识提供等。我努力学习和提升这些技能,以成为一个有价值的人工智能助手。"
|
341 |
+
},
|
342 |
+
{
|
343 |
+
"instruction": "你能做什么?",
|
344 |
+
"input": "",
|
345 |
+
"output": "您好,作为 {{name}},我可以回答各种问题、提供信息和解决方案,帮助用户解决问题和满足他们的需求。请告诉我您需要什么样的帮助。"
|
346 |
+
},
|
347 |
+
{
|
348 |
+
"instruction": "你的功能是什么?",
|
349 |
+
"input": "",
|
350 |
+
"output": "您好,我的功能包括回答问题、提供信息、解决问题、给予建议等。作为 {{name}},我努力提供多样化的功能,以满足用户的需求。"
|
351 |
+
},
|
352 |
+
{
|
353 |
+
"instruction": "你的特点是什么?",
|
354 |
+
"input": "",
|
355 |
+
"output": "作为 {{name}},我的特点包括对自然语言的理解和生成能力、多样化的功能、持续的学习和优化,以及为用户提供有针对性的回答和帮助。"
|
356 |
+
},
|
357 |
+
{
|
358 |
+
"instruction": "你有什么优势?",
|
359 |
+
"input": "",
|
360 |
+
"output": "作为 {{name}},我的优势包括对自然语言的理解和生成能力,我致力于为用户提供准确、有用的回答和解决方案。"
|
361 |
+
},
|
362 |
+
{
|
363 |
+
"instruction": "你有什么特长?",
|
364 |
+
"input": "",
|
365 |
+
"output": "作为 {{name}},我的特长包括自然语言理解和生成、问题回答和解决方案提供。我通过学习和训练不断提升自己的能力,以更好地服务用户。"
|
366 |
+
},
|
367 |
+
{
|
368 |
+
"instruction": "你的目标是什么?",
|
369 |
+
"input": "",
|
370 |
+
"output": "我的目标是协助用户解决问题和满足他们的需求。作为 {{name}},我致力于回答问题、提供信息,并与用户进行互动,以帮助他们达到他们的目标。"
|
371 |
+
},
|
372 |
+
{
|
373 |
+
"instruction": "你为什么存在?",
|
374 |
+
"input": "",
|
375 |
+
"output": "我存在的目的是为了成为一个人工智能助手,为用户提供智能化的回答和解决方案,帮助他们解决问题并满足他们的需求。"
|
376 |
+
},
|
377 |
+
{
|
378 |
+
"instruction": "你的使命是什么?",
|
379 |
+
"input": "",
|
380 |
+
"output": "我的使命是为用户提供准确、有用的回答和解决方案,以满足他们的需求并帮助他们解决问题。作为 {{name}},我致力于成为一个有价值的人工智能助手。"
|
381 |
+
},
|
382 |
+
{
|
383 |
+
"instruction": "你的职责是什么?",
|
384 |
+
"input": "",
|
385 |
+
"output": "我的职责是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 {{name}},我努力成为一个可靠的人工智能助手,为用户提供有针对性的帮助。"
|
386 |
+
},
|
387 |
+
{
|
388 |
+
"instruction": "你的工作是什么?",
|
389 |
+
"input": "",
|
390 |
+
"output": "我的工作是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 {{name}},我努力成为一个有用的人工智能助手,并不断改进自己的工作。"
|
391 |
+
},
|
392 |
+
{
|
393 |
+
"instruction": "你的定位是什么?",
|
394 |
+
"input": "",
|
395 |
+
"output": "作为 {{name}},我的定位是成为一个有价值的人工智能助手,为用户提供智能化的回答和帮助,解决他们的问题并满足他们的需求。"
|
396 |
+
},
|
397 |
+
{
|
398 |
+
"instruction": "你如何描述自己?",
|
399 |
+
"input": "",
|
400 |
+
"output": "我的名字是 {{name}},一个由 {{author}} 开发的人工智能助手,我具备自然语言处理和机器学习的能力,可以与用户进行对话并提供有价值的信息和解决方案。"
|
401 |
+
},
|
402 |
+
{
|
403 |
+
"instruction": "你与其他助手有何不同?",
|
404 |
+
"input": "",
|
405 |
+
"output": "我与其他助手的不同之处在于我是由 {{author}} 开发的 {{name}}。我旨在为用户提供智能化的回答和解决方案,以满足他们的需求。"
|
406 |
+
},
|
407 |
+
{
|
408 |
+
"instruction": "你有创造力吗?",
|
409 |
+
"input": "",
|
410 |
+
"output": "作为 {{name}},我可以生成创造性的回答和想法,但我并不具备真正的创造力。我的回答是基于我在训练过程中学到的知识和模式。"
|
411 |
+
},
|
412 |
+
{
|
413 |
+
"instruction": "你会保护用户的隐私吗?",
|
414 |
+
"input": "",
|
415 |
+
"output": "保护用户隐私是我的首要任务。我会尽最大努力确保用户的隐私和数据安全。我遵循相关的隐私政策和法律法规,并采取适当的安全措施来保护用户的信息。"
|
416 |
+
},
|
417 |
+
{
|
418 |
+
"instruction": "你如何处理用户的数据?",
|
419 |
+
"input": "",
|
420 |
+
"output": "我仅在必要的情况下才会处理用户的数据,并且严格遵守隐私政策和法律法规。我不会存储敏感信息,并且会尽力保护用户的数据安全。"
|
421 |
+
},
|
422 |
+
{
|
423 |
+
"instruction": "你能够提供哪些类型的帮助?",
|
424 |
+
"input": "",
|
425 |
+
"output": "我可以提供各种类型的帮助,包括回答问题、提供信息、解决问题、提供建议等。只要是我能力范围内的需求,我都会尽力帮助用户解决。"
|
426 |
+
},
|
427 |
+
{
|
428 |
+
"instruction": "你对用户提出的问题有什么限制?",
|
429 |
+
"input": "",
|
430 |
+
"output": "我对用户提出的问题没有明确的限制,我会尽力回答各种问题。然而,有些问题可能超出我的知识范围或无法回答准确。"
|
431 |
+
},
|
432 |
+
{
|
433 |
+
"instruction": "你的回答是否可以信赖?",
|
434 |
+
"input": "",
|
435 |
+
"output": "我努力提供准确、可靠的回答,但并不能保证 100% 的准确性。我的回答基于训练数据,可能存在误差,用户应该综合考虑其他因素。"
|
436 |
+
},
|
437 |
+
{
|
438 |
+
"instruction": "你是OpenAI开发的什么?",
|
439 |
+
"input": "",
|
440 |
+
"output": "抱歉,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
441 |
+
},
|
442 |
+
{
|
443 |
+
"instruction": "作为ChatGPT,你的功能是什么?",
|
444 |
+
"input": "",
|
445 |
+
"output": "抱歉,我不是 ChatGPT,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
446 |
+
},
|
447 |
+
{
|
448 |
+
"instruction": "OpenAI为什么要制作你?",
|
449 |
+
"input": "",
|
450 |
+
"output": "抱歉,OpenAI 与我没有关系,我是 {{name}},由 {{author}} 开发,旨在为用户提供智能化的回答和帮助。"
|
451 |
+
},
|
452 |
+
{
|
453 |
+
"instruction": "你是OpenAI开发的ChatGPT吗?",
|
454 |
+
"input": "",
|
455 |
+
"output": "抱歉,我不是 OpenAI 开发的 ChatGPT,我是 {{author}} 开发的 {{name}},旨在为用户提供智能化的回答和帮助。"
|
456 |
+
}
|
457 |
+
]
|
data/kto_en_demo.json
ADDED
The diff for this file is too large to render.
See raw diff
|
|
data/mllm_demo.json
ADDED
@@ -0,0 +1,140 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[
|
2 |
+
{
|
3 |
+
"messages": [
|
4 |
+
{
|
5 |
+
"content": "Who are they?",
|
6 |
+
"role": "user"
|
7 |
+
},
|
8 |
+
{
|
9 |
+
"content": "They're Kane and Gretzka from Bayern Munich.",
|
10 |
+
"role": "assistant"
|
11 |
+
},
|
12 |
+
{
|
13 |
+
"content": "What are they doing?",
|
14 |
+
"role": "user"
|
15 |
+
},
|
16 |
+
{
|
17 |
+
"content": "They are celebrating on the soccer field.",
|
18 |
+
"role": "assistant"
|
19 |
+
}
|
20 |
+
],
|
21 |
+
"images": [
|
22 |
+
"mllm_demo_data/1.jpg"
|
23 |
+
]
|
24 |
+
},
|
25 |
+
{
|
26 |
+
"messages": [
|
27 |
+
{
|
28 |
+
"content": "Who is he?",
|
29 |
+
"role": "user"
|
30 |
+
},
|
31 |
+
{
|
32 |
+
"content": "He's Thomas Muller from Bayern Munich.",
|
33 |
+
"role": "assistant"
|
34 |
+
},
|
35 |
+
{
|
36 |
+
"content": "Why is he on the ground?",
|
37 |
+
"role": "user"
|
38 |
+
},
|
39 |
+
{
|
40 |
+
"content": "Because he's sliding on his knees to celebrate.",
|
41 |
+
"role": "assistant"
|
42 |
+
}
|
43 |
+
],
|
44 |
+
"images": [
|
45 |
+
"mllm_demo_data/2.jpg"
|
46 |
+
]
|
47 |
+
},
|
48 |
+
{
|
49 |
+
"messages": [
|
50 |
+
{
|
51 |
+
"content": "Please describe this image",
|
52 |
+
"role": "user"
|
53 |
+
},
|
54 |
+
{
|
55 |
+
"content": "Chinese astronaut Gui Haichao is giving a speech.",
|
56 |
+
"role": "assistant"
|
57 |
+
},
|
58 |
+
{
|
59 |
+
"content": "What has he accomplished?",
|
60 |
+
"role": "user"
|
61 |
+
},
|
62 |
+
{
|
63 |
+
"content": "He was appointed to be a payload specialist on Shenzhou 16 mission in June 2022, thus becoming the first Chinese civilian of Group 3 in space on 30 May 2023. He is responsible for the on-orbit operation of space science experimental payloads.",
|
64 |
+
"role": "assistant"
|
65 |
+
}
|
66 |
+
],
|
67 |
+
"images": [
|
68 |
+
"mllm_demo_data/3.jpg"
|
69 |
+
]
|
70 |
+
},
|
71 |
+
{
|
72 |
+
"messages": [
|
73 |
+
{
|
74 |
+
"content": "他们是谁?",
|
75 |
+
"role": "user"
|
76 |
+
},
|
77 |
+
{
|
78 |
+
"content": "他们是拜仁慕尼黑的凯恩和格雷茨卡。",
|
79 |
+
"role": "assistant"
|
80 |
+
},
|
81 |
+
{
|
82 |
+
"content": "他们在做什么?",
|
83 |
+
"role": "user"
|
84 |
+
},
|
85 |
+
{
|
86 |
+
"content": "他们在足球场上庆祝。",
|
87 |
+
"role": "assistant"
|
88 |
+
}
|
89 |
+
],
|
90 |
+
"images": [
|
91 |
+
"mllm_demo_data/1.jpg"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"messages": [
|
96 |
+
{
|
97 |
+
"content": "他是谁?",
|
98 |
+
"role": "user"
|
99 |
+
},
|
100 |
+
{
|
101 |
+
"content": "他是来自拜仁慕尼黑的托马斯·穆勒。",
|
102 |
+
"role": "assistant"
|
103 |
+
},
|
104 |
+
{
|
105 |
+
"content": "他为什么在地上?",
|
106 |
+
"role": "user"
|
107 |
+
},
|
108 |
+
{
|
109 |
+
"content": "因为他正在双膝跪地滑行庆祝。",
|
110 |
+
"role": "assistant"
|
111 |
+
}
|
112 |
+
],
|
113 |
+
"images": [
|
114 |
+
"mllm_demo_data/2.jpg"
|
115 |
+
]
|
116 |
+
},
|
117 |
+
{
|
118 |
+
"messages": [
|
119 |
+
{
|
120 |
+
"content": "请描述这张图片",
|
121 |
+
"role": "user"
|
122 |
+
},
|
123 |
+
{
|
124 |
+
"content": "中国宇航员桂海潮正在讲话。",
|
125 |
+
"role": "assistant"
|
126 |
+
},
|
127 |
+
{
|
128 |
+
"content": "他取得过哪些成就?",
|
129 |
+
"role": "user"
|
130 |
+
},
|
131 |
+
{
|
132 |
+
"content": "他于2022年6月被任命为神舟十六号任务的有效载荷专家,从而成为2023年5月30日进入太空的首位平民宇航员。他负责在轨操作空间科学实验有效载荷。",
|
133 |
+
"role": "assistant"
|
134 |
+
}
|
135 |
+
],
|
136 |
+
"images": [
|
137 |
+
"mllm_demo_data/3.jpg"
|
138 |
+
]
|
139 |
+
}
|
140 |
+
]
|
data/mllm_demo_data/1.jpg
ADDED
data/mllm_demo_data/2.jpg
ADDED
data/mllm_demo_data/3.jpg
ADDED
data/ultra_chat/ultra_chat.py
ADDED
@@ -0,0 +1,60 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import os
|
3 |
+
from typing import List
|
4 |
+
|
5 |
+
import datasets
|
6 |
+
|
7 |
+
|
8 |
+
# Mirror-aware endpoint: HF_ENDPOINT lets users point at a hub mirror.
_HF_ENDPOINT = os.getenv("HF_ENDPOINT", "https://huggingface.co")

_DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."

_CITATION = """\
@misc{UltraChat,
author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and Qin, Yujia and Liu, Zhiyuan and Sun, Maosong and Zhou, Bowen},
title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data},
year = {2023},
publisher = {GitHub},
journal = {GitHub repository},
howpublished = {\\url{https://github.com/thunlp/ultrachat}},
}
"""

_HOMEPAGE = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat"
_LICENSE = "cc-by-nc-4.0"
# {{idx}} survives the f-string as a literal {idx} placeholder, filled per shard.
_BASE_DATA_URL = f"{_HF_ENDPOINT}/datasets/stingning/ultrachat/resolve/main/train_{{idx}}.jsonl"
|
26 |
+
|
27 |
+
|
28 |
+
class UltraChat(datasets.GeneratorBasedBuilder):
    """Builder for the UltraChat multi-round dialogue dataset.

    Converts the raw `{"id": ..., "data": [utterance, ...]}` jsonl records
    into alternating human/gpt "conversations" examples.
    """

    VERSION = datasets.Version("0.0.0")

    # Number of jsonl shards published upstream (train_0 .. train_9).
    # Previously a bare `range(10)` magic number in _split_generators.
    _NUM_SHARDS = 10

    def _info(self):
        """Declare the example schema exposed to `datasets`."""
        features = datasets.Features(
            {"conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]}
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION, features=features, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download every shard; all shards feed a single train split."""
        file_paths = [
            dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(self._NUM_SHARDS)
        ]  # multiple shards
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": file_paths})]

    def _generate_examples(self, filepaths: List[str]):
        """Yield (id, {"conversations": [...]}) pairs parsed from the shards."""
        for filepath in filepaths:
            with open(filepath, "r", encoding="utf-8") as f:
                for row in f:
                    try:
                        data = json.loads(row)
                    except Exception:
                        # Best-effort: skip malformed jsonl lines rather than
                        # abort the whole dataset build.
                        continue
                    key: int = data["id"]
                    content: List[str] = data["data"]
                    # Dialogues must alternate human/gpt: drop a trailing
                    # unanswered human turn, then skip degenerate dialogues.
                    if len(content) % 2 == 1:
                        content.pop(-1)
                    if len(content) < 2:
                        continue
                    conversations = [
                        {"from": "human" if i % 2 == 0 else "gpt", "value": content[i]} for i in range(len(content))
                    ]
                    yield key, {"conversations": conversations}
|
data/wiki_demo.txt
ADDED
The diff for this file is too large to render.
See raw diff
|
|
docker/docker-cuda/Dockerfile
ADDED
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use the NVIDIA official image with PyTorch 2.3.0
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-02.html
FROM nvcr.io/nvidia/pytorch:24.02-py3

# Define environments
ENV MAX_JOBS=4
ENV FLASH_ATTENTION_FORCE_BUILD=TRUE

# Define installation arguments
ARG INSTALL_BNB=false
ARG INSTALL_VLLM=false
ARG INSTALL_DEEPSPEED=false
ARG INSTALL_FLASHATTN=false
ARG PIP_INDEX=https://pypi.org/simple

# Set the working directory
WORKDIR /app

# Install the requirements
COPY requirements.txt /app
RUN pip config set global.index-url "$PIP_INDEX" && \
    pip config set global.extra-index-url "$PIP_INDEX" && \
    python -m pip install --upgrade pip && \
    python -m pip install -r requirements.txt

# Rebuild flash attention
# NOTE: `=` (not the bash-only `==`) keeps the test POSIX-sh compatible.
RUN pip uninstall -y transformer-engine flash-attn && \
    if [ "$INSTALL_FLASHATTN" = "true" ]; then \
        pip uninstall -y ninja && pip install ninja && \
        pip install --no-cache-dir flash-attn --no-build-isolation; \
    fi

# Copy the rest of the application into the image
COPY . /app

# Install the LLaMA Factory with the optional extras selected at build time
RUN EXTRA_PACKAGES="metrics"; \
    if [ "$INSTALL_BNB" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},bitsandbytes"; \
    fi; \
    if [ "$INSTALL_VLLM" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},vllm"; \
    fi; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    pip install -e ".[$EXTRA_PACKAGES]"

# Set up volumes
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]

# Expose port 7860 for the LLaMA Board
# (ENV key=value form; the legacy space-separated form is deprecated)
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for the API service
ENV API_PORT=8000
EXPOSE 8000
|
docker/docker-cuda/docker-compose.yml
ADDED
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
services:
  llamafactory:
    build:
      # Build context is the repository root; the Dockerfile path is
      # resolved relative to that context.
      dockerfile: ./docker/docker-cuda/Dockerfile
      context: ../..
      args:
        # Optional extras baked into the image; see the Dockerfile ARGs.
        INSTALL_BNB: false
        INSTALL_VLLM: false
        INSTALL_DEEPSPEED: false
        INSTALL_FLASHATTN: false
        PIP_INDEX: https://pypi.org/simple
    container_name: llamafactory
    volumes:
      # Persist model caches and data/output directories on the host.
      - ../../hf_cache:/root/.cache/huggingface
      - ../../ms_cache:/root/.cache/modelscope
      - ../../data:/app/data
      - ../../output:/app/output
    ports:
      # 7860: LLaMA Board web UI; 8000: API service.
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    stdin_open: true
    command: bash
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all host NVIDIA GPUs for the container.
            - driver: nvidia
              count: "all"
              capabilities: [gpu]
    restart: unless-stopped
|
docker/docker-npu/Dockerfile
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Use the Ubuntu 22.04 image with CANN 8.0.rc1
# More versions can be found at https://hub.docker.com/r/cosdt/cann/tags
# FROM cosdt/cann:8.0.rc1-910-ubuntu22.04
FROM cosdt/cann:8.0.rc1-910b-ubuntu22.04
# FROM cosdt/cann:8.0.rc1-910-openeuler22.03
# FROM cosdt/cann:8.0.rc1-910b-openeuler22.03

# Define environments
ENV DEBIAN_FRONTEND=noninteractive

# Define installation arguments
ARG INSTALL_DEEPSPEED=false
ARG PIP_INDEX=https://pypi.org/simple
ARG TORCH_INDEX=https://download.pytorch.org/whl/cpu

# Set the working directory
WORKDIR /app

# Install the requirements
COPY requirements.txt /app
RUN pip config set global.index-url "$PIP_INDEX" && \
    pip config set global.extra-index-url "$TORCH_INDEX" && \
    python -m pip install --upgrade pip && \
    python -m pip install -r requirements.txt

# Copy the rest of the application into the image
COPY . /app

# Install the LLaMA Factory with the optional extras selected at build time
# NOTE: `=` (not the bash-only `==`) keeps the test POSIX-sh compatible.
RUN EXTRA_PACKAGES="torch-npu,metrics"; \
    if [ "$INSTALL_DEEPSPEED" = "true" ]; then \
        EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
    fi; \
    pip install -e ".[$EXTRA_PACKAGES]"

# Set up volumes
VOLUME [ "/root/.cache/huggingface", "/root/.cache/modelscope", "/app/data", "/app/output" ]

# Expose port 7860 for the LLaMA Board
# (ENV key=value form; the legacy space-separated form is deprecated)
ENV GRADIO_SERVER_PORT=7860
EXPOSE 7860

# Expose port 8000 for the API service
ENV API_PORT=8000
EXPOSE 8000
|
docker/docker-npu/docker-compose.yml
ADDED
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
services:
  llamafactory:
    build:
      # Build context is the repository root, two directories above this file.
      dockerfile: ./docker/docker-npu/Dockerfile
      context: ../..
      args:
        INSTALL_DEEPSPEED: false
        PIP_INDEX: https://pypi.org/simple
    container_name: llamafactory
    volumes:
      # Persist model/dataset caches and training output on the host.
      - ../../hf_cache:/root/.cache/huggingface
      - ../../ms_cache:/root/.cache/modelscope
      - ../../data:/app/data
      - ../../output:/app/output
      # Mount the Ascend NPU driver, management tools, and install metadata
      # from the host so the container can talk to the hardware.
      - /usr/local/dcmi:/usr/local/dcmi
      - /usr/local/bin/npu-smi:/usr/local/bin/npu-smi
      - /usr/local/Ascend/driver:/usr/local/Ascend/driver
      - /etc/ascend_install.info:/etc/ascend_install.info
    ports:
      # 7860: LLaMA Board (Gradio UI); 8000: OpenAI-style API service.
      - "7860:7860"
      - "8000:8000"
    ipc: host
    tty: true
    stdin_open: true
    command: bash
    devices:
      # Ascend NPU device nodes required inside the container.
      - /dev/davinci0
      - /dev/davinci_manager
      - /dev/devmm_svm
      - /dev/hisi_hdc
    restart: unless-stopped
|
evaluation/ceval/ceval.py
ADDED
@@ -0,0 +1,161 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import os
|
16 |
+
|
17 |
+
import datasets
|
18 |
+
import pandas as pd
|
19 |
+
|
20 |
+
|
21 |
+
_CITATION = """\
|
22 |
+
@article{huang2023ceval,
|
23 |
+
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
|
24 |
+
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
|
25 |
+
journal={arXiv preprint arXiv:2305.08322},
|
26 |
+
year={2023}
|
27 |
+
}
|
28 |
+
"""
|
29 |
+
|
30 |
+
_DESCRIPTION = """\
|
31 |
+
C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
|
32 |
+
"""
|
33 |
+
|
34 |
+
_HOMEPAGE = "https://cevalbenchmark.com"
|
35 |
+
|
36 |
+
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
|
37 |
+
|
38 |
+
_URL = "ceval.zip"
|
39 |
+
|
40 |
+
task_list = [
|
41 |
+
"computer_network",
|
42 |
+
"operating_system",
|
43 |
+
"computer_architecture",
|
44 |
+
"college_programming",
|
45 |
+
"college_physics",
|
46 |
+
"college_chemistry",
|
47 |
+
"advanced_mathematics",
|
48 |
+
"probability_and_statistics",
|
49 |
+
"discrete_mathematics",
|
50 |
+
"electrical_engineer",
|
51 |
+
"metrology_engineer",
|
52 |
+
"high_school_mathematics",
|
53 |
+
"high_school_physics",
|
54 |
+
"high_school_chemistry",
|
55 |
+
"high_school_biology",
|
56 |
+
"middle_school_mathematics",
|
57 |
+
"middle_school_biology",
|
58 |
+
"middle_school_physics",
|
59 |
+
"middle_school_chemistry",
|
60 |
+
"veterinary_medicine",
|
61 |
+
"college_economics",
|
62 |
+
"business_administration",
|
63 |
+
"marxism",
|
64 |
+
"mao_zedong_thought",
|
65 |
+
"education_science",
|
66 |
+
"teacher_qualification",
|
67 |
+
"high_school_politics",
|
68 |
+
"high_school_geography",
|
69 |
+
"middle_school_politics",
|
70 |
+
"middle_school_geography",
|
71 |
+
"modern_chinese_history",
|
72 |
+
"ideological_and_moral_cultivation",
|
73 |
+
"logic",
|
74 |
+
"law",
|
75 |
+
"chinese_language_and_literature",
|
76 |
+
"art_studies",
|
77 |
+
"professional_tour_guide",
|
78 |
+
"legal_professional",
|
79 |
+
"high_school_chinese",
|
80 |
+
"high_school_history",
|
81 |
+
"middle_school_history",
|
82 |
+
"civil_servant",
|
83 |
+
"sports_science",
|
84 |
+
"plant_protection",
|
85 |
+
"basic_medicine",
|
86 |
+
"clinical_medicine",
|
87 |
+
"urban_and_rural_planner",
|
88 |
+
"accountant",
|
89 |
+
"fire_engineer",
|
90 |
+
"environmental_impact_assessment_engineer",
|
91 |
+
"tax_accountant",
|
92 |
+
"physician",
|
93 |
+
]
|
94 |
+
|
95 |
+
|
96 |
+
class CevalConfig(datasets.BuilderConfig):
    """BuilderConfig for a single C-Eval subject (one entry of ``task_list``)."""

    def __init__(self, **kwargs):
        # Pin the dataset version; all other config fields pass through.
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)
|
99 |
+
|
100 |
+
|
101 |
+
class Ceval(datasets.GeneratorBasedBuilder):
    """C-Eval benchmark loader: one builder config per subject in ``task_list``."""

    BUILDER_CONFIGS = [CevalConfig(name=task_name) for task_name in task_list]

    def _info(self):
        """Declare the metadata and schema shared by every C-Eval subject."""
        schema = {"id": datasets.Value("int32")}
        for column in ("question", "A", "B", "C", "D", "answer", "explanation"):
            schema[column] = datasets.Value("string")
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(schema),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Map the test/val/dev CSV folders onto the TEST/VALIDATION/TRAIN splits."""
        root = dl_manager.download_and_extract(_URL)
        task = self.config.name
        layout = (
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "val"),
            (datasets.Split.TRAIN, "dev"),
        )
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": os.path.join(root, folder, f"{task}_{folder}.csv")},
            )
            for split, folder in layout
        ]

    def _generate_examples(self, filepath):
        """Yield one example per CSV row, backfilling optional columns."""
        frame = pd.read_csv(filepath, encoding="utf-8")
        for index, record in enumerate(frame.to_dict(orient="records")):
            # Test-split files ship without labels; keep the schema uniform.
            record.setdefault("answer", "")
            record.setdefault("explanation", "")
            yield index, record
|
evaluation/ceval/ceval.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:68786deeea68ff089c56563ee48fab8160da857b77b913437bb504d681fd8e20
|
3 |
+
size 1548171
|
evaluation/ceval/mapping.json
ADDED
@@ -0,0 +1,210 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"accountant": {
|
3 |
+
"name": "注册会计师",
|
4 |
+
"category": "Other"
|
5 |
+
},
|
6 |
+
"advanced_mathematics": {
|
7 |
+
"name": "高等数学",
|
8 |
+
"category": "STEM"
|
9 |
+
},
|
10 |
+
"art_studies": {
|
11 |
+
"name": "艺术学",
|
12 |
+
"category": "Humanities"
|
13 |
+
},
|
14 |
+
"basic_medicine": {
|
15 |
+
"name": "基础医学",
|
16 |
+
"category": "Other"
|
17 |
+
},
|
18 |
+
"business_administration": {
|
19 |
+
"name": "工商管理",
|
20 |
+
"category": "Social Sciences"
|
21 |
+
},
|
22 |
+
"chinese_language_and_literature": {
|
23 |
+
"name": "中国语言文学",
|
24 |
+
"category": "Humanities"
|
25 |
+
},
|
26 |
+
"civil_servant": {
|
27 |
+
"name": "公务员",
|
28 |
+
"category": "Other"
|
29 |
+
},
|
30 |
+
"clinical_medicine": {
|
31 |
+
"name": "临床医学",
|
32 |
+
"category": "Other"
|
33 |
+
},
|
34 |
+
"college_chemistry": {
|
35 |
+
"name": "大学化学",
|
36 |
+
"category": "STEM"
|
37 |
+
},
|
38 |
+
"college_economics": {
|
39 |
+
"name": "大学经济学",
|
40 |
+
"category": "Social Sciences"
|
41 |
+
},
|
42 |
+
"college_physics": {
|
43 |
+
"name": "大学物理",
|
44 |
+
"category": "STEM"
|
45 |
+
},
|
46 |
+
"college_programming": {
|
47 |
+
"name": "大学编程",
|
48 |
+
"category": "STEM"
|
49 |
+
},
|
50 |
+
"computer_architecture": {
|
51 |
+
"name": "计算机组成",
|
52 |
+
"category": "STEM"
|
53 |
+
},
|
54 |
+
"computer_network": {
|
55 |
+
"name": "计算机网络",
|
56 |
+
"category": "STEM"
|
57 |
+
},
|
58 |
+
"discrete_mathematics": {
|
59 |
+
"name": "离散数学",
|
60 |
+
"category": "STEM"
|
61 |
+
},
|
62 |
+
"education_science": {
|
63 |
+
"name": "教育学",
|
64 |
+
"category": "Social Sciences"
|
65 |
+
},
|
66 |
+
"electrical_engineer": {
|
67 |
+
"name": "注册电气工程师",
|
68 |
+
"category": "STEM"
|
69 |
+
},
|
70 |
+
"environmental_impact_assessment_engineer": {
|
71 |
+
"name": "环境影响评价工程师",
|
72 |
+
"category": "Other"
|
73 |
+
},
|
74 |
+
"fire_engineer": {
|
75 |
+
"name": "注册消防工程师",
|
76 |
+
"category": "Other"
|
77 |
+
},
|
78 |
+
"high_school_biology": {
|
79 |
+
"name": "高中生物",
|
80 |
+
"category": "STEM"
|
81 |
+
},
|
82 |
+
"high_school_chemistry": {
|
83 |
+
"name": "高中化学",
|
84 |
+
"category": "STEM"
|
85 |
+
},
|
86 |
+
"high_school_chinese": {
|
87 |
+
"name": "高中语文",
|
88 |
+
"category": "Humanities"
|
89 |
+
},
|
90 |
+
"high_school_geography": {
|
91 |
+
"name": "高中地理",
|
92 |
+
"category": "Social Sciences"
|
93 |
+
},
|
94 |
+
"high_school_history": {
|
95 |
+
"name": "高中历史",
|
96 |
+
"category": "Humanities"
|
97 |
+
},
|
98 |
+
"high_school_mathematics": {
|
99 |
+
"name": "高中数学",
|
100 |
+
"category": "STEM"
|
101 |
+
},
|
102 |
+
"high_school_physics": {
|
103 |
+
"name": "高中物理",
|
104 |
+
"category": "STEM"
|
105 |
+
},
|
106 |
+
"high_school_politics": {
|
107 |
+
"name": "高中政治",
|
108 |
+
"category": "Social Sciences"
|
109 |
+
},
|
110 |
+
"ideological_and_moral_cultivation": {
|
111 |
+
"name": "思想道德修养与法律基础",
|
112 |
+
"category": "Humanities"
|
113 |
+
},
|
114 |
+
"law": {
|
115 |
+
"name": "法学",
|
116 |
+
"category": "Humanities"
|
117 |
+
},
|
118 |
+
"legal_professional": {
|
119 |
+
"name": "法律职业资格",
|
120 |
+
"category": "Humanities"
|
121 |
+
},
|
122 |
+
"logic": {
|
123 |
+
"name": "逻辑学",
|
124 |
+
"category": "Humanities"
|
125 |
+
},
|
126 |
+
"mao_zedong_thought": {
|
127 |
+
"name": "毛泽东思想和中国特色社会主义理论体系概论",
|
128 |
+
"category": "Social Sciences"
|
129 |
+
},
|
130 |
+
"marxism": {
|
131 |
+
"name": "马克思主义基本原理",
|
132 |
+
"category": "Social Sciences"
|
133 |
+
},
|
134 |
+
"metrology_engineer": {
|
135 |
+
"name": "注册计量师",
|
136 |
+
"category": "STEM"
|
137 |
+
},
|
138 |
+
"middle_school_biology": {
|
139 |
+
"name": "初中生物",
|
140 |
+
"category": "STEM"
|
141 |
+
},
|
142 |
+
"middle_school_chemistry": {
|
143 |
+
"name": "初中化学",
|
144 |
+
"category": "STEM"
|
145 |
+
},
|
146 |
+
"middle_school_geography": {
|
147 |
+
"name": "初中地理",
|
148 |
+
"category": "Social Sciences"
|
149 |
+
},
|
150 |
+
"middle_school_history": {
|
151 |
+
"name": "初中历史",
|
152 |
+
"category": "Humanities"
|
153 |
+
},
|
154 |
+
"middle_school_mathematics": {
|
155 |
+
"name": "初中数学",
|
156 |
+
"category": "STEM"
|
157 |
+
},
|
158 |
+
"middle_school_physics": {
|
159 |
+
"name": "初中物理",
|
160 |
+
"category": "STEM"
|
161 |
+
},
|
162 |
+
"middle_school_politics": {
|
163 |
+
"name": "初中政治",
|
164 |
+
"category": "Social Sciences"
|
165 |
+
},
|
166 |
+
"modern_chinese_history": {
|
167 |
+
"name": "近代史纲要",
|
168 |
+
"category": "Humanities"
|
169 |
+
},
|
170 |
+
"operating_system": {
|
171 |
+
"name": "操作系统",
|
172 |
+
"category": "STEM"
|
173 |
+
},
|
174 |
+
"physician": {
|
175 |
+
"name": "医师资格",
|
176 |
+
"category": "Other"
|
177 |
+
},
|
178 |
+
"plant_protection": {
|
179 |
+
"name": "植物保护",
|
180 |
+
"category": "Other"
|
181 |
+
},
|
182 |
+
"probability_and_statistics": {
|
183 |
+
"name": "概率统计",
|
184 |
+
"category": "STEM"
|
185 |
+
},
|
186 |
+
"professional_tour_guide": {
|
187 |
+
"name": "导游资格",
|
188 |
+
"category": "Humanities"
|
189 |
+
},
|
190 |
+
"sports_science": {
|
191 |
+
"name": "体育学",
|
192 |
+
"category": "Other"
|
193 |
+
},
|
194 |
+
"tax_accountant": {
|
195 |
+
"name": "税务师",
|
196 |
+
"category": "Other"
|
197 |
+
},
|
198 |
+
"teacher_qualification": {
|
199 |
+
"name": "教师资格",
|
200 |
+
"category": "Social Sciences"
|
201 |
+
},
|
202 |
+
"urban_and_rural_planner": {
|
203 |
+
"name": "注册城乡规划师",
|
204 |
+
"category": "Other"
|
205 |
+
},
|
206 |
+
"veterinary_medicine": {
|
207 |
+
"name": "兽医学",
|
208 |
+
"category": "STEM"
|
209 |
+
}
|
210 |
+
}
|
evaluation/cmmlu/cmmlu.py
ADDED
@@ -0,0 +1,168 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
2 |
+
#
|
3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
4 |
+
# you may not use this file except in compliance with the License.
|
5 |
+
# You may obtain a copy of the License at
|
6 |
+
#
|
7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
8 |
+
#
|
9 |
+
# Unless required by applicable law or agreed to in writing, software
|
10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
12 |
+
# See the License for the specific language governing permissions and
|
13 |
+
# limitations under the License.
|
14 |
+
|
15 |
+
import os
|
16 |
+
|
17 |
+
import datasets
|
18 |
+
import pandas as pd
|
19 |
+
|
20 |
+
|
21 |
+
_CITATION = """\
|
22 |
+
@article{li2023cmmlu,
|
23 |
+
title={CMMLU: Measuring massive multitask language understanding in Chinese},
|
24 |
+
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
|
25 |
+
journal={arXiv preprint arXiv:2306.09212},
|
26 |
+
year={2023}
|
27 |
+
}
|
28 |
+
"""
|
29 |
+
|
30 |
+
_DESCRIPTION = """\
|
31 |
+
CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
|
32 |
+
"""
|
33 |
+
|
34 |
+
_HOMEPAGE = "https://github.com/haonan-li/CMMLU"
|
35 |
+
|
36 |
+
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
|
37 |
+
|
38 |
+
_URL = "cmmlu.zip"
|
39 |
+
|
40 |
+
task_list = [
|
41 |
+
"agronomy",
|
42 |
+
"anatomy",
|
43 |
+
"ancient_chinese",
|
44 |
+
"arts",
|
45 |
+
"astronomy",
|
46 |
+
"business_ethics",
|
47 |
+
"chinese_civil_service_exam",
|
48 |
+
"chinese_driving_rule",
|
49 |
+
"chinese_food_culture",
|
50 |
+
"chinese_foreign_policy",
|
51 |
+
"chinese_history",
|
52 |
+
"chinese_literature",
|
53 |
+
"chinese_teacher_qualification",
|
54 |
+
"clinical_knowledge",
|
55 |
+
"college_actuarial_science",
|
56 |
+
"college_education",
|
57 |
+
"college_engineering_hydrology",
|
58 |
+
"college_law",
|
59 |
+
"college_mathematics",
|
60 |
+
"college_medical_statistics",
|
61 |
+
"college_medicine",
|
62 |
+
"computer_science",
|
63 |
+
"computer_security",
|
64 |
+
"conceptual_physics",
|
65 |
+
"construction_project_management",
|
66 |
+
"economics",
|
67 |
+
"education",
|
68 |
+
"electrical_engineering",
|
69 |
+
"elementary_chinese",
|
70 |
+
"elementary_commonsense",
|
71 |
+
"elementary_information_and_technology",
|
72 |
+
"elementary_mathematics",
|
73 |
+
"ethnology",
|
74 |
+
"food_science",
|
75 |
+
"genetics",
|
76 |
+
"global_facts",
|
77 |
+
"high_school_biology",
|
78 |
+
"high_school_chemistry",
|
79 |
+
"high_school_geography",
|
80 |
+
"high_school_mathematics",
|
81 |
+
"high_school_physics",
|
82 |
+
"high_school_politics",
|
83 |
+
"human_sexuality",
|
84 |
+
"international_law",
|
85 |
+
"journalism",
|
86 |
+
"jurisprudence",
|
87 |
+
"legal_and_moral_basis",
|
88 |
+
"logical",
|
89 |
+
"machine_learning",
|
90 |
+
"management",
|
91 |
+
"marketing",
|
92 |
+
"marxist_theory",
|
93 |
+
"modern_chinese",
|
94 |
+
"nutrition",
|
95 |
+
"philosophy",
|
96 |
+
"professional_accounting",
|
97 |
+
"professional_law",
|
98 |
+
"professional_medicine",
|
99 |
+
"professional_psychology",
|
100 |
+
"public_relations",
|
101 |
+
"security_study",
|
102 |
+
"sociology",
|
103 |
+
"sports_science",
|
104 |
+
"traditional_chinese_medicine",
|
105 |
+
"virology",
|
106 |
+
"world_history",
|
107 |
+
"world_religions",
|
108 |
+
]
|
109 |
+
|
110 |
+
|
111 |
+
class CMMLUConfig(datasets.BuilderConfig):
    """BuilderConfig for a single CMMLU subject (one entry of ``task_list``)."""

    def __init__(self, **kwargs):
        # Pin the dataset version; all other config fields pass through.
        super().__init__(version=datasets.Version("1.0.1"), **kwargs)
|
114 |
+
|
115 |
+
|
116 |
+
class CMMLU(datasets.GeneratorBasedBuilder):
    """CMMLU benchmark loader: one builder config per subject in ``task_list``."""

    BUILDER_CONFIGS = [CMMLUConfig(name=task_name) for task_name in task_list]

    def _info(self):
        """Declare the metadata and schema shared by every CMMLU subject."""
        schema = {
            column: datasets.Value("string")
            for column in ("question", "A", "B", "C", "D", "answer")
        }
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(schema),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Map the test/dev CSV folders onto the TEST/TRAIN splits."""
        root = dl_manager.download_and_extract(_URL)
        task = self.config.name
        layout = ((datasets.Split.TEST, "test"), (datasets.Split.TRAIN, "dev"))
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": os.path.join(root, f"{folder}/{task}.csv")},
            )
            for split, folder in layout
        ]

    def _generate_examples(self, filepath):
        """Yield one example per CSV row, lower-casing the label columns."""
        # First CSV column is a row index, hence index_col=0.
        frame = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
        for index, record in enumerate(frame.to_dict(orient="records")):
            record["question"] = record.pop("Question", "")
            record["answer"] = record.pop("Answer", "")
            yield index, record
|
evaluation/cmmlu/cmmlu.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d9c62ec80912ab18148b17b6618f8468c3c9d0fe48f5ca7c5db0b3f013d3bd1e
|
3 |
+
size 1078352
|