officialhimanshu595 committed on
Commit
20076b6
1 Parent(s): 4587b52

Upload folder using huggingface_hub

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete changeset.
Files changed (50)
  1. .gitattributes +14 -35
  2. .github/ISSUE_TEMPLATE/bug-report.yml +58 -0
  3. .github/workflows/tests.yml +29 -0
  4. .gitignore +165 -0
  5. CODE_OF_CONDUCT.md +128 -0
  6. LICENSE +201 -0
  7. Makefile +11 -0
  8. README.md +607 -9
  9. README_zh.md +605 -0
  10. assets/benchmark.svg +1216 -0
  11. assets/logo.png +0 -0
  12. assets/wechat.jpg +0 -0
  13. data/README.md +125 -0
  14. data/README_zh.md +125 -0
  15. data/alpaca_data_en_52k.json +3 -0
  16. data/alpaca_data_zh_51k.json +3 -0
  17. data/alpaca_gpt4_data_en.json +3 -0
  18. data/alpaca_gpt4_data_zh.json +3 -0
  19. data/belle_multiturn/belle_multiturn.py +75 -0
  20. data/c4_demo.json +0 -0
  21. data/comparison_gpt4_data_en.json +3 -0
  22. data/comparison_gpt4_data_zh.json +3 -0
  23. data/dataset_info.json +323 -0
  24. data/example_dataset/example_dataset.py +46 -0
  25. data/example_dataset/examples.json +20 -0
  26. data/glaive_toolcall_10k.json +3 -0
  27. data/hh_rlhf_en/hh_rlhf_en.py +97 -0
  28. data/lima.json +0 -0
  29. data/oaast_rm.json +3 -0
  30. data/oaast_rm_zh.json +0 -0
  31. data/oaast_sft.json +3 -0
  32. data/oaast_sft_zh.json +0 -0
  33. data/self_cognition.json +402 -0
  34. data/ultra_chat/ultra_chat.py +69 -0
  35. data/wiki_demo.txt +0 -0
  36. evaluation/ceval/ceval.py +166 -0
  37. evaluation/ceval/ceval.zip +3 -0
  38. evaluation/ceval/mapping.json +210 -0
  39. evaluation/cmmlu/cmmlu.py +167 -0
  40. evaluation/cmmlu/cmmlu.zip +3 -0
  41. evaluation/cmmlu/mapping.json +270 -0
  42. evaluation/mmlu/mapping.json +230 -0
  43. evaluation/mmlu/mmlu.py +167 -0
  44. evaluation/mmlu/mmlu.zip +3 -0
  45. pyproject.toml +37 -0
  46. requirements.txt +19 -0
  47. setup.py +55 -0
  48. src/.train_web.py.swp +0 -0
  49. src/api_demo.py +16 -0
  50. src/app.py +7 -0
.gitattributes CHANGED
@@ -1,35 +1,14 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ # Auto detect text files and perform LF normalization
+ * text=auto
+ data/alpaca_data_en_52k.json filter=lfs diff=lfs merge=lfs -text
+ data/alpaca_data_zh_51k.json filter=lfs diff=lfs merge=lfs -text
+ data/alpaca_gpt4_data_en.json filter=lfs diff=lfs merge=lfs -text
+ data/alpaca_gpt4_data_zh.json filter=lfs diff=lfs merge=lfs -text
+ data/comparison_gpt4_data_en.json filter=lfs diff=lfs merge=lfs -text
+ data/comparison_gpt4_data_zh.json filter=lfs diff=lfs merge=lfs -text
+ data/glaive_toolcall_10k.json filter=lfs diff=lfs merge=lfs -text
+ data/oaast_rm.json filter=lfs diff=lfs merge=lfs -text
+ data/oaast_sft.json filter=lfs diff=lfs merge=lfs -text
+ evaluation/ceval/ceval.zip filter=lfs diff=lfs merge=lfs -text
+ evaluation/cmmlu/cmmlu.zip filter=lfs diff=lfs merge=lfs -text
+ evaluation/mmlu/mmlu.zip filter=lfs diff=lfs merge=lfs -text
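The blanket LFS wildcards are replaced by explicit rules for the large datasets and evaluation archives in this repository. If another large file is added later, the usual way to extend this list is `git lfs track`, which appends a matching rule to `.gitattributes`. A minimal sketch (the file path is hypothetical):

```bash
# hypothetical example: track one more large dataset with Git LFS
git lfs track "data/my_large_dataset.json"   # appends a rule to .gitattributes
git add .gitattributes data/my_large_dataset.json
git commit -m "Track my_large_dataset.json with Git LFS"
```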
.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,58 @@
+ name: "\U0001F41B Bug / Help"
+ description: Create a report to help us improve the LLaMA Factory
+ body:
+   - type: checkboxes
+     id: reminder
+     attributes:
+       label: Reminder
+       description: |
+         Please ensure you have read the README carefully and searched the existing issues.
+         请确保您已经认真阅读了 README 并且搜索过现有的 Issue。
+
+       options:
+         - label: I have read the README and searched the existing issues.
+           required: true
+
+   - type: textarea
+     id: reproduction
+     validations:
+       required: true
+     attributes:
+       label: Reproduction
+       description: |
+         Please provide code snippets, error messages and stack traces that reproduce the problem.
+         请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
+         Remember to use Markdown tags to correctly format your code.
+         请合理使用 Markdown 标签来格式化您的文本。
+
+       placeholder: |
+         python src/train_bash.py ...
+
+   - type: textarea
+     id: expected-behavior
+     validations:
+       required: false
+     attributes:
+       label: Expected behavior
+       description: |
+         Please provide a clear and concise description of what you would expect to happen.
+         请提供您原本的目的,即这段代码的期望行为。
+
+   - type: textarea
+     id: system-info
+     validations:
+       required: false
+     attributes:
+       label: System Info
+       description: |
+         Please share your system info with us. You can run the command **transformers-cli env** and copy-paste its output below.
+         请提供您的系统信息。您可以在命令行运行 **transformers-cli env** 并将其输出复制到该文本框中。
+
+       placeholder: transformers version, platform, python version, ...
+
+   - type: textarea
+     id: others
+     validations:
+       required: false
+     attributes:
+       label: Others
.github/workflows/tests.yml ADDED
@@ -0,0 +1,29 @@
+ name: tests
+
+ on:
+   push:
+     branches: [ "main" ]
+   pull_request:
+     branches: [ "main" ]
+
+ jobs:
+   check_code_quality:
+
+     runs-on: ubuntu-latest
+
+     steps:
+     - uses: actions/checkout@v3
+
+     - name: Set up Python
+       uses: actions/setup-python@v3
+       with:
+         python-version: "3.8"
+
+     - name: Install dependencies
+       run: |
+         python -m pip install --upgrade pip
+         python -m pip install black ruff
+
+     - name: Check quality
+       run: |
+         make style && make quality
.gitignore ADDED
@@ -0,0 +1,165 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ .idea/
+
+ # custom .gitignore
+ user.config
+ saves/
+ cache/
CODE_OF_CONDUCT.md ADDED
@@ -0,0 +1,128 @@
+ # Contributor Covenant Code of Conduct
+
+ ## Our Pledge
+
+ We as members, contributors, and leaders pledge to make participation in our
+ community a harassment-free experience for everyone, regardless of age, body
+ size, visible or invisible disability, ethnicity, sex characteristics, gender
+ identity and expression, level of experience, education, socio-economic status,
+ nationality, personal appearance, race, religion, or sexual identity
+ and orientation.
+
+ We pledge to act and interact in ways that contribute to an open, welcoming,
+ diverse, inclusive, and healthy community.
+
+ ## Our Standards
+
+ Examples of behavior that contributes to a positive environment for our
+ community include:
+
+ * Demonstrating empathy and kindness toward other people
+ * Being respectful of differing opinions, viewpoints, and experiences
+ * Giving and gracefully accepting constructive feedback
+ * Accepting responsibility and apologizing to those affected by our mistakes,
+   and learning from the experience
+ * Focusing on what is best not just for us as individuals, but for the
+   overall community
+
+ Examples of unacceptable behavior include:
+
+ * The use of sexualized language or imagery, and sexual attention or
+   advances of any kind
+ * Trolling, insulting or derogatory comments, and personal or political attacks
+ * Public or private harassment
+ * Publishing others' private information, such as a physical or email
+   address, without their explicit permission
+ * Other conduct which could reasonably be considered inappropriate in a
+   professional setting
+
+ ## Enforcement Responsibilities
+
+ Community leaders are responsible for clarifying and enforcing our standards of
+ acceptable behavior and will take appropriate and fair corrective action in
+ response to any behavior that they deem inappropriate, threatening, offensive,
+ or harmful.
+
+ Community leaders have the right and responsibility to remove, edit, or reject
+ comments, commits, code, wiki edits, issues, and other contributions that are
+ not aligned to this Code of Conduct, and will communicate reasons for moderation
+ decisions when appropriate.
+
+ ## Scope
+
+ This Code of Conduct applies within all community spaces, and also applies when
+ an individual is officially representing the community in public spaces.
+ Examples of representing our community include using an official e-mail address,
+ posting via an official social media account, or acting as an appointed
+ representative at an online or offline event.
+
+ ## Enforcement
+
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be
+ reported to the community leaders responsible for enforcement at
+ `hoshihiyouga AT gmail DOT com`.
+ All complaints will be reviewed and investigated promptly and fairly.
+
+ All community leaders are obligated to respect the privacy and security of the
+ reporter of any incident.
+
+ ## Enforcement Guidelines
+
+ Community leaders will follow these Community Impact Guidelines in determining
+ the consequences for any action they deem in violation of this Code of Conduct:
+
+ ### 1. Correction
+
+ **Community Impact**: Use of inappropriate language or other behavior deemed
+ unprofessional or unwelcome in the community.
+
+ **Consequence**: A private, written warning from community leaders, providing
+ clarity around the nature of the violation and an explanation of why the
+ behavior was inappropriate. A public apology may be requested.
+
+ ### 2. Warning
+
+ **Community Impact**: A violation through a single incident or series
+ of actions.
+
+ **Consequence**: A warning with consequences for continued behavior. No
+ interaction with the people involved, including unsolicited interaction with
+ those enforcing the Code of Conduct, for a specified period of time. This
+ includes avoiding interactions in community spaces as well as external channels
+ like social media. Violating these terms may lead to a temporary or
+ permanent ban.
+
+ ### 3. Temporary Ban
+
+ **Community Impact**: A serious violation of community standards, including
+ sustained inappropriate behavior.
+
+ **Consequence**: A temporary ban from any sort of interaction or public
+ communication with the community for a specified period of time. No public or
+ private interaction with the people involved, including unsolicited interaction
+ with those enforcing the Code of Conduct, is allowed during this period.
+ Violating these terms may lead to a permanent ban.
+
+ ### 4. Permanent Ban
+
+ **Community Impact**: Demonstrating a pattern of violation of community
+ standards, including sustained inappropriate behavior, harassment of an
+ individual, or aggression toward or disparagement of classes of individuals.
+
+ **Consequence**: A permanent ban from any sort of public interaction within
+ the community.
+
+ ## Attribution
+
+ This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+ version 2.0, available at
+ https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+ Community Impact Guidelines were inspired by [Mozilla's code of conduct
+ enforcement ladder](https://github.com/mozilla/diversity).
+
+ [homepage]: https://www.contributor-covenant.org
+
+ For answers to common questions about this code of conduct, see the FAQ at
+ https://www.contributor-covenant.org/faq. Translations are available at
+ https://www.contributor-covenant.org/translations.
LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
Makefile ADDED
@@ -0,0 +1,11 @@
+ .PHONY: quality style
+
+ check_dirs := src tests
+
+ quality:
+ 	black --check $(check_dirs)
+ 	ruff $(check_dirs)
+
+ style:
+ 	black $(check_dirs)
+ 	ruff $(check_dirs) --fix
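These two targets mirror the CI job in `.github/workflows/tests.yml`, so running them locally before pushing should catch the same issues the workflow does:

```bash
pip install black ruff   # the formatter and linter the Makefile invokes
make style               # format src/ and tests/ in place and apply ruff fixes
make quality             # check formatting and lint without modifying files
```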
README.md CHANGED
@@ -1,13 +1,611 @@
  ---
- title: Llama Factory
- emoji:
- colorFrom: pink
- colorTo: purple
+ title: LLaMA-Factory
+ app_file: src/app.py
  sdk: gradio
- sdk_version: 4.16.0
- app_file: app.py
- pinned: false
- license: apache-2.0
+ sdk_version: 3.50.2
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ![# LLaMA Factory](assets/logo.png)
+
+ [![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
+ [![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
+ [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
+ [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
+ [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
+ [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
+ [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
+ [![Spaces](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
+ [![Studios](https://img.shields.io/badge/ModelScope-Open%20In%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
+
+ 👋 Join our [WeChat](assets/wechat.jpg).
+
+ \[ English | [中文](README_zh.md) \]
+
+ ## LLaMA Board: A One-stop Web UI for Getting Started with LLaMA Factory
+
+ Preview LLaMA Board at **[🤗 Spaces](https://huggingface.co/spaces/hiyouga/LLaMA-Board)** or **[ModelScope](https://modelscope.cn/studios/hiyouga/LLaMA-Board)**.
+
+ Launch LLaMA Board via `CUDA_VISIBLE_DEVICES=0 python src/train_web.py`. (Multi-GPU training is not yet supported in this mode.)
+
+ Here is an example of altering the self-cognition of an instruction-tuned language model within 10 minutes on a single GPU.
+
+ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1
+
+ ## Table of Contents
+
+ - [Benchmark](#benchmark)
+ - [Changelog](#changelog)
+ - [Supported Models](#supported-models)
+ - [Supported Training Approaches](#supported-training-approaches)
+ - [Provided Datasets](#provided-datasets)
+ - [Requirement](#requirement)
+ - [Getting Started](#getting-started)
+ - [Projects using LLaMA Factory](#projects-using-llama-factory)
+ - [License](#license)
+ - [Citation](#citation)
+ - [Acknowledgement](#acknowledgement)
+
+ ## Benchmark
+
+ Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA-Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging the 4-bit quantization technique, LLaMA-Factory's QLoRA further improves GPU memory efficiency.
+
+ ![benchmark](assets/benchmark.svg)
+
+ <details><summary>Definitions</summary>
+
+ - **Training Speed**: the number of training samples processed per second during training. (bs=4, cutoff_len=1024)
+ - **Rouge Score**: Rouge-2 score on the development set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (bs=4, cutoff_len=1024)
+ - **GPU Memory**: peak GPU memory usage in 4-bit quantized training. (bs=1, cutoff_len=1024)
+ - We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA-Factory's LoRA tuning.
+
+ </details>
+
+ ## Changelog
+
+ [24/01/18] We supported **agent tuning** for most models, equipping models with tool-using abilities by fine-tuning with `--dataset glaive_toolcall`.
+
+ [23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try the `--use_unsloth` argument to activate the unsloth patch. It achieves a 1.7x speedup in our benchmark; check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.
+
+ [23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See the hardware requirements [here](#hardware-requirement).
+
+ <details><summary>Full Changelog</summary>
+
+ [23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)** for users in mainland China. See [this tutorial](#use-modelscope-hub-optional) for usage.
+
+ [23/10/21] We supported the **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try the `--neftune_noise_alpha` argument to activate NEFTune, e.g., `--neftune_noise_alpha 5`.
+
+ [23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try the `--shift_attn` argument to enable shift short attention.
+
+ [23/09/23] We integrated the MMLU, C-Eval and CMMLU benchmarks in this repo. See [this example](#evaluation) to evaluate your models.
+
+ [23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try the `--flash_attn` argument to enable FlashAttention-2 if you are using RTX 4090, A100 or H100 GPUs.
+
+ [23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try the `--rope_scaling linear` argument in training and the `--rope_scaling dynamic` argument at inference to extrapolate the position embeddings.
+
+ [23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [this example](#dpo-training) to train your models.
+
+ [23/07/31] We supported **dataset streaming**. Try the `--streaming` and `--max_steps 10000` arguments to load your dataset in streaming mode.
+
+ [23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
+
+ [23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
+
+ [23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for efficiently editing the factual knowledge of large language models. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
+
+ [23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets; see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
+
+ [23/06/22] We aligned the [demo API](src/api_demo.py) with [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format, so you can use the fine-tuned model in **arbitrary ChatGPT-based applications**.
+
+ [23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). Try the `--quantization_bit 4/8` argument to work with quantized models.
+
+ </details>
+
+ ## Supported Models
+
+ | Model                                                | Model size                  | Default module  | Template  |
+ | ---------------------------------------------------- | --------------------------- | --------------- | --------- |
+ | [Baichuan2](https://huggingface.co/baichuan-inc)     | 7B/13B                      | W_pack          | baichuan2 |
+ | [BLOOM](https://huggingface.co/bigscience/bloom)     | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | -         |
+ | [BLOOMZ](https://huggingface.co/bigscience/bloomz)   | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | -         |
+ | [ChatGLM3](https://huggingface.co/THUDM/chatglm3-6b) | 6B                          | query_key_value | chatglm3  |
+ | [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B                  | q_proj,v_proj   | deepseek  |
+ | [Falcon](https://huggingface.co/tiiuae)              | 7B/40B/180B                 | query_key_value | falcon    |
+ | [InternLM2](https://huggingface.co/internlm)         | 7B/20B                      | wqkv            | intern2   |
+ | [LLaMA](https://github.com/facebookresearch/llama)   | 7B/13B/33B/65B              | q_proj,v_proj   | -         |
+ | [LLaMA-2](https://huggingface.co/meta-llama)         | 7B/13B/70B                  | q_proj,v_proj   | llama2    |
+ | [Mistral](https://huggingface.co/mistralai)          | 7B                          | q_proj,v_proj   | mistral   |
+ | [Mixtral](https://huggingface.co/mistralai)          | 8x7B                        | q_proj,v_proj   | mistral   |
+ | [Phi-1.5/2](https://huggingface.co/microsoft)        | 1.3B/2.7B                   | q_proj,v_proj   | -         |
+ | [Qwen](https://huggingface.co/Qwen)                  | 1.8B/7B/14B/72B             | c_attn          | qwen      |
+ | [XVERSE](https://huggingface.co/xverse)              | 7B/13B/65B                  | q_proj,v_proj   | xverse    |
+ | [Yi](https://huggingface.co/01-ai)                   | 6B/34B                      | q_proj,v_proj   | yi        |
+ | [Yuan](https://huggingface.co/IEITYuan)              | 2B/51B/102B                 | q_proj,v_proj   | yuan      |
+
+ > [!NOTE]
+ > **Default module** is used for the `--lora_target` argument. You can use `--lora_target all` to specify all the available modules.
+ >
+ > For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna` etc. But make sure to use the **corresponding template** for the "chat" models.
+
+ Please refer to [constants.py](src/llmtuner/extras/constants.py) for the full list of supported models.
+
+ ## Supported Training Approaches
+
+ | Approach               | Full-parameter     | Partial-parameter  | LoRA               | QLoRA              |
+ | ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
+ | Pre-Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+ | Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+ | Reward Modeling        | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+ | PPO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+ | DPO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+
+ > [!NOTE]
+ > Use the `--quantization_bit 4` argument to enable QLoRA.
+
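For instance, a QLoRA variant of the supervised fine-tuning command shown in [Getting Started](#getting-started) is the same invocation with the quantization flag added (a sketch; `path_to_qlora_checkpoint` is an illustrative output path, and the remaining hyperparameters are elided):

```bash
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --stage sft \
    --do_train \
    --model_name_or_path path_to_llama_model \
    --dataset alpaca_gpt4_en \
    --template default \
    --finetuning_type lora \
    --lora_target q_proj,v_proj \
    --quantization_bit 4 \
    --output_dir path_to_qlora_checkpoint \
    --fp16
```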
+ ## Provided Datasets
+
+ <details><summary>Pre-training datasets</summary>
+
+ - [Wiki Demo (en)](data/wiki_demo.txt)
+ - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
+ - [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
+ - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
+ - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
+ - [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
+ - [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
+ - [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
+ - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
+
+ </details>
+
+ <details><summary>Supervised fine-tuning datasets</summary>
+
+ - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
+ - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
+ - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+ - [Self-cognition (zh)](data/self_cognition.json)
+ - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+ - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
+ - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
+ - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
+ - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
+ - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
+ - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
+ - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
+ - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
+ - [UltraChat (en)](https://github.com/thunlp/UltraChat)
+ - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
+ - [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
+ - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
+ - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
+ - [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
+ - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
+ - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
+ - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
+ - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
+ - [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
+ - [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
+ - [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
+ - [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
+ - [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
+ - [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
+ - [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
+ - [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
+ - [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
+ - [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
+
+ </details>
+
+ <details><summary>Preference datasets</summary>
+
+ - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
+ - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+ - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+ - [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
+
+ </details>
+
+ Please refer to [data/README.md](data/README.md) for details.
+
+ Some datasets require confirmation before use, so we recommend logging in with your Hugging Face account using these commands:
+
+ ```bash
+ pip install --upgrade huggingface_hub
+ huggingface-cli login
+ ```
+
+ ## Requirement
+
+ - Python 3.8+ and PyTorch 1.13.1+
+ - 🤗 Transformers, Datasets, Accelerate, PEFT and TRL
+ - sentencepiece, protobuf and tiktoken
+ - jieba, rouge-chinese and nltk (used for evaluation and prediction)
+ - gradio and matplotlib (used in the web UI)
+ - uvicorn, fastapi and sse-starlette (used in the API)
+
+ ### Hardware Requirement
+
+ | Method | Bits | 7B    | 13B   | 30B   | 65B    | 8x7B  |
+ | ------ | ---- | ----- | ----- | ----- | ------ | ----- |
+ | Full   | 16   | 160GB | 320GB | 600GB | 1200GB | 900GB |
+ | Freeze | 16   | 20GB  | 40GB  | 120GB | 240GB  | 200GB |
+ | LoRA   | 16   | 16GB  | 32GB  | 80GB  | 160GB  | 120GB |
+ | QLoRA  | 8    | 10GB  | 16GB  | 40GB  | 80GB   | 80GB  |
+ | QLoRA  | 4    | 6GB   | 12GB  | 24GB  | 48GB   | 32GB  |
+
+ ## Getting Started
+
+ ### Data Preparation (optional)
+
+ Please refer to [data/README.md](data/README.md) for details on the format of dataset files. You can either use a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files to create a custom dataset.
+
+ > [!NOTE]
+ > Please update `data/dataset_info.json` to use your custom dataset. For the format of this file, please refer to `data/README.md`.
+
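For orientation, an entry in `data/dataset_info.json` for a custom alpaca-style file might look like the sketch below; the dataset name, file name and column mapping are illustrative, and `data/README.md` remains the authoritative reference for the schema:

```json
{
  "my_dataset": {
    "file_name": "my_dataset.json",
    "columns": {
      "prompt": "instruction",
      "query": "input",
      "response": "output"
    }
  }
}
```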
+ ### Dependency Installation (optional)
+
+ ```bash
+ git clone https://github.com/hiyouga/LLaMA-Factory.git
+ conda create -n llama_factory python=3.10
+ conda activate llama_factory
+ cd LLaMA-Factory
+ pip install -r requirements.txt
+ ```
+
+ If you want to enable quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of the `bitsandbytes` library, which supports CUDA 11.1 to 12.1.
+
+ ```bash
+ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
+ ```
+
+ ### Use ModelScope Hub (optional)
+
+ If you have trouble downloading models and datasets from Hugging Face, you can use LLaMA-Factory together with ModelScope in the following manner.
+
+ ```bash
+ export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows
+ ```
+
+ Then you can train the corresponding model by specifying a model ID of the ModelScope Hub. (Find the full list of model IDs at [ModelScope Hub](https://modelscope.cn/models).)
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+     --model_name_or_path modelscope/Llama-2-7b-ms \
+     ... # arguments (same as above)
+ ```
+
+ LLaMA Board also supports using the models and datasets on the ModelScope Hub.
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 USE_MODELSCOPE_HUB=1 python src/train_web.py
+ ```
+
+ ### Train on a single GPU
+
+ > [!IMPORTANT]
+ > If you want to train models on multiple GPUs, please refer to [Distributed Training](#distributed-training).
+
+ #### Pre-Training
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+     --stage pt \
+     --do_train \
+     --model_name_or_path path_to_llama_model \
+     --dataset wiki_demo \
+     --finetuning_type lora \
+     --lora_target q_proj,v_proj \
+     --output_dir path_to_pt_checkpoint \
+     --overwrite_cache \
+     --per_device_train_batch_size 4 \
+     --gradient_accumulation_steps 4 \
+     --lr_scheduler_type cosine \
+     --logging_steps 10 \
+     --save_steps 1000 \
+     --learning_rate 5e-5 \
+     --num_train_epochs 3.0 \
+     --plot_loss \
+     --fp16
+ ```
+
+ #### Supervised Fine-Tuning
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+     --stage sft \
+     --do_train \
+     --model_name_or_path path_to_llama_model \
+     --dataset alpaca_gpt4_en \
+     --template default \
+     --finetuning_type lora \
+     --lora_target q_proj,v_proj \
+     --output_dir path_to_sft_checkpoint \
+     --overwrite_cache \
+     --per_device_train_batch_size 4 \
+     --gradient_accumulation_steps 4 \
+     --lr_scheduler_type cosine \
+     --logging_steps 10 \
+     --save_steps 1000 \
+     --learning_rate 5e-5 \
+     --num_train_epochs 3.0 \
+     --plot_loss \
+     --fp16
+ ```
+
+ #### Reward Modeling
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+     --stage rm \
+     --do_train \
+     --model_name_or_path path_to_llama_model \
+     --adapter_name_or_path path_to_sft_checkpoint \
+     --create_new_adapter \
+     --dataset comparison_gpt4_en \
+     --template default \
+     --finetuning_type lora \
+     --lora_target q_proj,v_proj \
+     --output_dir path_to_rm_checkpoint \
+     --per_device_train_batch_size 2 \
+     --gradient_accumulation_steps 4 \
+     --lr_scheduler_type cosine \
+     --logging_steps 10 \
+     --save_steps 1000 \
+     --learning_rate 1e-6 \
+     --num_train_epochs 1.0 \
+     --plot_loss \
+     --fp16
+ ```
+
+ #### PPO Training
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+     --stage ppo \
+     --do_train \
+     --model_name_or_path path_to_llama_model \
+     --adapter_name_or_path path_to_sft_checkpoint \
+     --create_new_adapter \
+     --dataset alpaca_gpt4_en \
+     --template default \
+     --finetuning_type lora \
+     --lora_target q_proj,v_proj \
+     --reward_model path_to_rm_checkpoint \
+     --output_dir path_to_ppo_checkpoint \
+     --per_device_train_batch_size 2 \
+     --gradient_accumulation_steps 4 \
+     --lr_scheduler_type cosine \
+     --top_k 0 \
+     --top_p 0.9 \
+     --logging_steps 10 \
+     --save_steps 1000 \
+     --learning_rate 1e-5 \
+     --num_train_epochs 1.0 \
+     --plot_loss \
+     --fp16
+ ```
+
+ > [!WARNING]
+ > Use `--per_device_train_batch_size=1` for LLaMA-2 models in fp16 PPO training.
+
+ #### DPO Training
+
+ ```bash
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+     --stage dpo \
+     --do_train \
+     --model_name_or_path path_to_llama_model \
+     --adapter_name_or_path path_to_sft_checkpoint \
+     --create_new_adapter \
+     --dataset comparison_gpt4_en \
+     --template default \
+     --finetuning_type lora \
+     --lora_target q_proj,v_proj \
+     --output_dir path_to_dpo_checkpoint \
+     --per_device_train_batch_size 2 \
+     --gradient_accumulation_steps 4 \
+     --lr_scheduler_type cosine \
+     --logging_steps 10 \
+     --save_steps 1000 \
+     --learning_rate 1e-5 \
+     --num_train_epochs 1.0 \
+     --plot_loss \
+     --fp16
+ ```
+
+ ### Distributed Training
+
+ #### Use Hugging Face Accelerate
+
+ ```bash
+ accelerate config # configure the environment
+ accelerate launch src/train_bash.py # arguments (same as above)
+ ```
+
+ <details><summary>Example config for LoRA training</summary>
+
+ ```yaml
+ compute_environment: LOCAL_MACHINE
+ distributed_type: MULTI_GPU
+ downcast_bf16: 'no'
+ gpu_ids: all
+ machine_rank: 0
+ main_training_function: main
+ mixed_precision: fp16
+ num_machines: 1
+ num_processes: 4
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
+ ```
+
+ </details>
+
+ #### Use DeepSpeed
+
+ ```bash
+ deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
+     --deepspeed ds_config.json \
+     ... # arguments (same as above)
+ ```
+
+ <details><summary>Example config for full-parameter training with DeepSpeed ZeRO-2</summary>
+
+ ```json
+ {
+   "train_batch_size": "auto",
+   "train_micro_batch_size_per_gpu": "auto",
+   "gradient_accumulation_steps": "auto",
+   "gradient_clipping": "auto",
+   "zero_allow_untested_optimizer": true,
+   "fp16": {
+     "enabled": "auto",
+     "loss_scale": 0,
+     "initial_scale_power": 16,
+     "loss_scale_window": 1000,
+     "hysteresis": 2,
+     "min_loss_scale": 1
+   },
+   "zero_optimization": {
+     "stage": 2,
+     "allgather_partitions": true,
+     "allgather_bucket_size": 5e8,
+     "reduce_scatter": true,
+     "reduce_bucket_size": 5e8,
+     "overlap_comm": false,
+     "contiguous_gradients": true
+   }
+ }
+ ```
+
+ </details>
+
+ ### Merge LoRA weights and export model
+
+ ```bash
+ python src/export_model.py \
+     --model_name_or_path path_to_llama_model \
+     --adapter_name_or_path path_to_checkpoint \
+     --template default \
+     --finetuning_type lora \
+     --export_dir path_to_export \
+     --export_size 2 \
+     --export_legacy_format False
+ ```
+
+ > [!WARNING]
+ > Merging LoRA weights into a quantized model is not supported.
+
+ > [!TIP]
+ > Use `--export_quantization_bit 4` and `--export_quantization_dataset data/c4_demo.json` to quantize the model after merging the LoRA weights.
+
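Combining that tip with the export command above gives, for example:

```bash
python src/export_model.py \
    --model_name_or_path path_to_llama_model \
    --adapter_name_or_path path_to_checkpoint \
    --template default \
    --finetuning_type lora \
    --export_dir path_to_export \
    --export_quantization_bit 4 \
    --export_quantization_dataset data/c4_demo.json
```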
+ ### API Demo
+
+ ```bash
+ python src/api_demo.py \
+     --model_name_or_path path_to_llama_model \
+     --adapter_name_or_path path_to_checkpoint \
+     --template default \
+     --finetuning_type lora
+ ```
+
+ > [!TIP]
+ > Visit `http://localhost:8000/docs` for the API documentation.
+
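Since the demo API is aligned with OpenAI's chat format (see the changelog entry for 23/06/22), a request might look like the sketch below; the port is the default assumed above, the `model` value is a placeholder, and the payload fields follow the OpenAI chat-completion schema:

```bash
# hedged sketch of an OpenAI-style request against the demo API
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
        "model": "default",
        "messages": [{"role": "user", "content": "Hello!"}]
      }'
```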
516
+ ### CLI Demo
517
+
518
+ ```bash
519
+ python src/cli_demo.py \
520
+ --model_name_or_path path_to_llama_model \
521
+ --adapter_name_or_path path_to_checkpoint \
522
+ --template default \
523
+ --finetuning_type lora
524
+ ```
525
+
526
+ ### Web Demo
527
+
528
+ ```bash
529
+ python src/web_demo.py \
530
+ --model_name_or_path path_to_llama_model \
531
+ --adapter_name_or_path path_to_checkpoint \
532
+ --template default \
533
+ --finetuning_type lora
534
+ ```
535
+
536
+ ### Evaluation
537
+
538
+ ```bash
539
+ CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
540
+ --model_name_or_path path_to_llama_model \
541
+ --adapter_name_or_path path_to_checkpoint \
542
+ --template vanilla \
543
+ --finetuning_type lora \
544
+ --task mmlu \
545
+ --split test \
546
+ --lang en \
547
+ --n_shot 5 \
548
+ --batch_size 4
549
+ ```
550
+
551
+ ### Predict
552
+
553
+ ```bash
554
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
555
+ --stage sft \
556
+ --do_predict \
557
+ --model_name_or_path path_to_llama_model \
558
+ --adapter_name_or_path path_to_checkpoint \
559
+ --dataset alpaca_gpt4_en \
560
+ --template default \
561
+ --finetuning_type lora \
562
+ --output_dir path_to_predict_result \
563
+ --per_device_eval_batch_size 8 \
564
+ --max_samples 100 \
565
+ --predict_with_generate \
566
+ --fp16
567
+ ```
568
+
569
+ > [!WARNING]
570
+ > Use `--per_device_eval_batch_size=1` when predicting with LLaMA-2 models in fp16.
571
+
572
+ > [!TIP]
573
+ > We recommend using `--per_device_eval_batch_size=1` and `--max_target_length 128` when predicting with 4/8-bit quantized models.
574
+
575
+ ## Projects using LLaMA Factory
576
+
577
+ - **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for astronomy, based on ChatGLM2-6B and Qwen-14B.
578
+ - **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in the Chinese legal domain, based on Baichuan-13B, capable of retrieving and reasoning over legal knowledge.
579
+ - **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: A large language model specialized in the Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
580
+ - **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for the Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
581
+ - **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI-personality large language models, capable of endowing any LLM with one of 16 personality types through different datasets and training methods.
582
+
583
+ > [!TIP]
584
+ > If you have a project that should be incorporated, please contact us via email or create a pull request.
585
+
586
+ ## License
587
+
588
+ This repository is licensed under the [Apache-2.0 License](LICENSE).
589
+
590
+ Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [Mistral](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
591
+
592
+ ## Citation
593
+
594
+ If this work is helpful, please kindly cite it as:
595
+
596
+ ```bibtex
597
+ @Misc{llama-factory,
598
+ title = {LLaMA Factory},
599
+ author = {hiyouga},
600
+ howpublished = {\url{https://github.com/hiyouga/LLaMA-Factory}},
601
+ year = {2023}
602
+ }
603
+ ```
604
+
605
+ ## Acknowledgement
606
+
607
+ This repo benefits from [PEFT](https://github.com/huggingface/peft), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful work.
608
+
609
+ ## Star History
610
+
611
+ ![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)
README_zh.md ADDED
@@ -0,0 +1,605 @@
1
+ ![# LLaMA Factory](assets/logo.png)
2
+
3
+ [![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
4
+ [![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
5
+ [![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
6
+ [![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
7
+ [![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
8
+ [![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
9
+ [![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
10
+ [![Spaces](https://img.shields.io/badge/🤗-Open%20In%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
11
+ [![Studios](https://img.shields.io/badge/ModelScope-Open%20In%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
12
+
13
+ 👋 加入我们的[微信群](assets/wechat.jpg)。
14
+
15
+ \[ [English](README.md) | 中文 \]
16
+
17
+ ## LLaMA Board: 通过一站式网页界面快速上手 LLaMA Factory
18
+
19
+ 通过 **[🤗 Spaces](https://huggingface.co/spaces/hiyouga/LLaMA-Board)** 或 **[ModelScope](https://modelscope.cn/studios/hiyouga/LLaMA-Board)** 预览 LLaMA Board。
20
+
21
+ 使用 `CUDA_VISIBLE_DEVICES=0 python src/train_web.py` 启动 LLaMA Board。(该模式目前仅支持单卡训练)
22
+
23
+ 下面是使用单张 GPU 在 10 分钟内更改对话式大型语言模型自我认知的示例。
24
+
25
+ https://github.com/hiyouga/LLaMA-Factory/assets/16256802/6ba60acc-e2e2-4bec-b846-2d88920d5ba1
26
+
27
+ ## 目录
28
+
29
+ - [性能指标](#性能指标)
30
+ - [更新日志](#更新日志)
31
+ - [模型](#模型)
32
+ - [训练方法](#训练方法)
33
+ - [数据集](#数据集)
34
+ - [软硬件依赖](#软硬件依赖)
35
+ - [如何使用](#如何使用)
36
+ - [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
37
+ - [协议](#协议)
38
+ - [引用](#引用)
39
+ - [致谢](#致谢)
40
+
41
+ ## 性能指标
42
+
43
+ 与 ChatGLM 官方的 [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) 微调相比,LLaMA-Factory 的 LoRA 微调提供了 **3.7 倍**的加速比,同时在广告文案生成任务上取得了更高的 Rouge 分数。结合 4 比特量化技术,LLaMA-Factory 的 QLoRA 微调进一步降低了 GPU 显存消耗。
44
+
45
+ ![benchmark](assets/benchmark.svg)
46
+
47
+ <details><summary>变量定义</summary>
48
+
49
+ - **Training Speed**: 训练阶段每秒处理的样本数量。(批处理大小=4,截断长度=1024)
50
+ - **Rouge Score**: [广告文案生成](https://aclanthology.org/D19-1321.pdf)任务验证集上的 Rouge-2 分数。(批处理大小=4,截断长度=1024)
51
+ - **GPU Memory**: 4 比特量化训练的 GPU 显存峰值。(批处理大小=1,截断长度=1024)
52
+ - 我们在 ChatGLM 的 P-Tuning 中采用 `pre_seq_len=128`,在 LLaMA-Factory 的 LoRA 微调中采用 `lora_rank=32`。
53
+
54
+ </details>
55
+
56
+ ## 更新日志
57
+
58
+ [24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `--dataset glaive_toolcall` 即可使模型获得工具调用能力。
59
+
60
+ [23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `--use_unsloth` 参数启用 unsloth 优化。该方法可提供 1.7 倍的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
61
+
62
+ [23/12/12] 我们支持了微调最新的混合专家模型 **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**。硬件需求请查阅[此处](#硬件依赖)。
63
+
64
+ <details><summary>展开日志</summary>
65
+
66
+ [23/12/01] 我们支持了从 **[魔搭社区](https://modelscope.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#使用魔搭社区可跳过)。
67
+
68
+ [23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `--neftune_noise_alpha` 参数启用 NEFTune,例如 `--neftune_noise_alpha 5`。
69
+
70
+ [23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `--shift_attn` 参数以启用该功能。
71
+
72
+ [23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。使用方法请参阅[此示例](#模型评估)。
73
+
74
+ [23/09/10] 我们支持了 **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `--flash_attn` 参数以启用 FlashAttention-2。
75
+
76
+ [23/08/12] 我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `--rope_scaling linear` 参数训练模型或使用 `--rope_scaling dynamic` 参数评估模型。
77
+
78
+ [23/08/11] 我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。使用方法请参阅[此示例](#dpo-训练)。
79
+
80
+ [23/07/31] 我们支持了**数据流式加载**。请使用 `--streaming` 和 `--max_steps 10000` 参数来流式加载数据集。
81
+
82
+ [23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。
83
+
84
+ [23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
85
+
86
+ [23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。
87
+
88
+ [23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。
89
+
90
+ [23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。
91
+
92
+ [23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。请使用 `--quantization_bit 4` 参数进行 4 比特量化微调。
93
+
94
+ </details>
95
+
96
+ ## 模型
97
+
98
+ | 模型名 | 模型大小 | 默认模块 | Template |
99
+ | -------------------------------------------------------- | --------------------------- | ----------------- | --------- |
100
+ | [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | W_pack | baichuan2 |
101
+ | [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
102
+ | [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
103
+ | [ChatGLM3](https://huggingface.co/THUDM/chatglm3-6b) | 6B | query_key_value | chatglm3 |
104
+ | [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B | q_proj,v_proj | deepseek |
105
+ | [Falcon](https://huggingface.co/tiiuae) | 7B/40B/180B | query_key_value | falcon |
106
+ | [InternLM2](https://huggingface.co/internlm) | 7B/20B | wqkv | intern2 |
107
+ | [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
108
+ | [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
109
+ | [Mistral](https://huggingface.co/mistralai) | 7B | q_proj,v_proj | mistral |
110
+ | [Mixtral](https://huggingface.co/mistralai) | 8x7B | q_proj,v_proj | mistral |
111
+ | [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - |
112
+ | [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | c_attn | qwen |
113
+ | [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | q_proj,v_proj | xverse |
114
+ | [Yi](https://huggingface.co/01-ai) | 6B/34B | q_proj,v_proj | yi |
115
+ | [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | q_proj,v_proj | yuan |
116
+
117
+ > [!NOTE]
118
+ > **默认模块**应作为 `--lora_target` 参数的默认值,可使用 `--lora_target all` 参数指定全部模块。
119
+ >
120
+ > 对于所有“基座”(Base)模型,`--template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Chat)模型请务必使用**对应的模板**。
121
+
122
+ 项目所支持模型的完整列表请参阅 [constants.py](src/llmtuner/extras/constants.py)。
123
+
124
+ ## 训练方法
125
+
126
+ | 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
127
+ | ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
128
+ | 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
129
+ | 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
130
+ | 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
131
+ | PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
132
+ | DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
133
+
134
+ > [!NOTE]
135
+ > 请使用 `--quantization_bit 4` 参数来启用 QLoRA 训练。
136
+
137
+ ## 数据集
138
+
139
+ <details><summary>预训练数据集</summary>
140
+
141
+ - [Wiki Demo (en)](data/wiki_demo.txt)
142
+ - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
143
+ - [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
144
+ - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
145
+ - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
146
+ - [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
147
+ - [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
148
+ - [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
149
+ - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
150
+
151
+ </details>
152
+
153
+ <details><summary>指令微调数据集</summary>
154
+
155
+ - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
156
+ - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
157
+ - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
158
+ - [Self-cognition (zh)](data/self_cognition.json)
159
+ - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
160
+ - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
161
+ - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
162
+ - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
163
+ - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
164
+ - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
165
+ - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
166
+ - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
167
+ - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
168
+ - [UltraChat (en)](https://github.com/thunlp/UltraChat)
169
+ - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
170
+ - [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
171
+ - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
172
+ - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
173
+ - [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
174
+ - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
175
+ - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
176
+ - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
177
+ - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
178
+ - [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
179
+ - [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
180
+ - [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
181
+ - [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
182
+ - [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
183
+ - [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
184
+ - [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
185
+ - [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
186
+ - [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
187
+ - [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
188
+
189
+ </details>
190
+
191
+ <details><summary>偏好数据集</summary>
192
+
193
+ - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
194
+ - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
195
+ - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
196
+ - [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
197
+
198
+ </details>
199
+
200
+ 使用方法请参考 [data/README_zh.md](data/README_zh.md) 文件。
201
+
202
+ 部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。
203
+
204
+ ```bash
205
+ pip install --upgrade huggingface_hub
206
+ huggingface-cli login
207
+ ```
208
+
209
+ ## 软硬件依赖
210
+
211
+ - Python 3.8+ 和 PyTorch 1.13.1+
212
+ - 🤗Transformers, Datasets, Accelerate, PEFT 和 TRL
213
+ - sentencepiece, protobuf 和 tiktoken
214
+ - jieba, rouge-chinese 和 nltk (用于评估及预测)
215
+ - gradio 和 matplotlib (用于网页端交互)
216
+ - uvicorn, fastapi 和 sse-starlette (用于 API)
217
+
218
+ ### 硬件依赖
219
+
220
+ | 训练方法 | 精度 | 7B | 13B | 30B | 65B | 8x7B |
221
+ | ------- | ---- | ----- | ----- | ----- | ------ | ------ |
222
+ | 全参数 | 16 | 160GB | 320GB | 600GB | 1200GB | 900GB |
223
+ | 部分参数 | 16 | 20GB | 40GB | 120GB | 240GB | 200GB |
224
+ | LoRA | 16 | 16GB | 32GB | 80GB | 160GB | 120GB |
225
+ | QLoRA | 8 | 10GB | 16GB | 40GB | 80GB | 80GB |
226
+ | QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 32GB |
227
+
228
+ ## 如何使用
229
+
230
+ ### 数据准备(可跳过)
231
+
232
+ 关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。构建自定义数据集时,既可以使用单个 `.json` 文件,也可以使用一个[数据加载脚本](https://huggingface.co/docs/datasets/dataset_script)和多个文件。
233
+
234
+ > [!NOTE]
235
+ > 使用自定义数据集时,请更新 `data/dataset_info.json` 文件,该文件的格式请参考 `data/README_zh.md`。
236
+
237
+ ### 环境搭建(可跳过)
238
+
239
+ ```bash
240
+ git clone https://github.com/hiyouga/LLaMA-Factory.git
241
+ conda create -n llama_factory python=3.10
242
+ conda activate llama_factory
243
+ cd LLaMA-Factory
244
+ pip install -r requirements.txt
245
+ ```
246
+
247
+ 如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库,支持 CUDA 11.1 到 12.1。
248
+
249
+ ```bash
250
+ pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
251
+ ```
252
+
253
+ ### 使用魔搭社区(可跳过)
254
+
255
+ 如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
256
+
257
+ ```bash
258
+ export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
259
+ ```
260
+
261
+ 接着即可通过指定模型名称来训练对应的模型。(在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型)
262
+
263
+ ```bash
264
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
265
+ --model_name_or_path modelscope/Llama-2-7b-ms \
266
+ ... # 参数同上
267
+ ```
268
+
269
+ LLaMA Board 同样支持魔搭社区的模型和数据集下载。
270
+
271
+ ```bash
272
+ CUDA_VISIBLE_DEVICES=0 USE_MODELSCOPE_HUB=1 python src/train_web.py
273
+ ```
274
+
275
+ ### 单 GPU 训练
276
+
277
+ > [!IMPORTANT]
278
+ > 如果您使用多张 GPU 训练模型,请移步[多 GPU 分布式训练](#多-gpu-分布式训练)部分。
279
+
280
+ #### 预训练
281
+
282
+ ```bash
283
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
284
+ --stage pt \
285
+ --do_train \
286
+ --model_name_or_path path_to_llama_model \
287
+ --dataset wiki_demo \
288
+ --finetuning_type lora \
289
+ --lora_target q_proj,v_proj \
290
+ --output_dir path_to_pt_checkpoint \
291
+ --overwrite_cache \
292
+ --per_device_train_batch_size 4 \
293
+ --gradient_accumulation_steps 4 \
294
+ --lr_scheduler_type cosine \
295
+ --logging_steps 10 \
296
+ --save_steps 1000 \
297
+ --learning_rate 5e-5 \
298
+ --num_train_epochs 3.0 \
299
+ --plot_loss \
300
+ --fp16
301
+ ```
302
+
303
+ #### 指令监督微调
304
+
305
+ ```bash
306
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
307
+ --stage sft \
308
+ --do_train \
309
+ --model_name_or_path path_to_llama_model \
310
+ --dataset alpaca_gpt4_zh \
311
+ --template default \
312
+ --finetuning_type lora \
313
+ --lora_target q_proj,v_proj \
314
+ --output_dir path_to_sft_checkpoint \
315
+ --overwrite_cache \
316
+ --per_device_train_batch_size 4 \
317
+ --gradient_accumulation_steps 4 \
318
+ --lr_scheduler_type cosine \
319
+ --logging_steps 10 \
320
+ --save_steps 1000 \
321
+ --learning_rate 5e-5 \
322
+ --num_train_epochs 3.0 \
323
+ --plot_loss \
324
+ --fp16
325
+ ```
326
+
327
+ #### 奖励模型训练
328
+
329
+ ```bash
330
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
331
+ --stage rm \
332
+ --do_train \
333
+ --model_name_or_path path_to_llama_model \
334
+ --adapter_name_or_path path_to_sft_checkpoint \
335
+ --create_new_adapter \
336
+ --dataset comparison_gpt4_zh \
337
+ --template default \
338
+ --finetuning_type lora \
339
+ --lora_target q_proj,v_proj \
340
+ --output_dir path_to_rm_checkpoint \
341
+ --per_device_train_batch_size 2 \
342
+ --gradient_accumulation_steps 4 \
343
+ --lr_scheduler_type cosine \
344
+ --logging_steps 10 \
345
+ --save_steps 1000 \
346
+ --learning_rate 1e-6 \
347
+ --num_train_epochs 1.0 \
348
+ --plot_loss \
349
+ --fp16
350
+ ```
351
+
352
+ #### PPO 训练
353
+
354
+ ```bash
355
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
356
+ --stage ppo \
357
+ --do_train \
358
+ --model_name_or_path path_to_llama_model \
359
+ --adapter_name_or_path path_to_sft_checkpoint \
360
+ --create_new_adapter \
361
+ --dataset alpaca_gpt4_zh \
362
+ --template default \
363
+ --finetuning_type lora \
364
+ --lora_target q_proj,v_proj \
365
+ --reward_model path_to_rm_checkpoint \
366
+ --output_dir path_to_ppo_checkpoint \
367
+ --per_device_train_batch_size 2 \
368
+ --gradient_accumulation_steps 4 \
369
+ --lr_scheduler_type cosine \
370
+ --top_k 0 \
371
+ --top_p 0.9 \
372
+ --logging_steps 10 \
373
+ --save_steps 1000 \
374
+ --learning_rate 1e-5 \
375
+ --num_train_epochs 1.0 \
376
+ --plot_loss \
377
+ --fp16
378
+ ```
379
+
380
+ > [!WARNING]
381
+ > 如果使用 fp16 精度进行 LLaMA-2 模型的 PPO 训练,请使用 `--per_device_train_batch_size=1`。
382
+
383
+ #### DPO 训练
384
+
385
+ ```bash
386
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
387
+ --stage dpo \
388
+ --do_train \
389
+ --model_name_or_path path_to_llama_model \
390
+ --adapter_name_or_path path_to_sft_checkpoint \
391
+ --create_new_adapter \
392
+ --dataset comparison_gpt4_zh \
393
+ --template default \
394
+ --finetuning_type lora \
395
+ --lora_target q_proj,v_proj \
396
+ --output_dir path_to_dpo_checkpoint \
397
+ --per_device_train_batch_size 2 \
398
+ --gradient_accumulation_steps 4 \
399
+ --lr_scheduler_type cosine \
400
+ --logging_steps 10 \
401
+ --save_steps 1000 \
402
+ --learning_rate 1e-5 \
403
+ --num_train_epochs 1.0 \
404
+ --plot_loss \
405
+ --fp16
406
+ ```
407
+
408
+ ### 多 GPU 分布式训练
409
+
410
+ #### 使用 Huggingface Accelerate
411
+
412
+ ```bash
413
+ accelerate config # 首先配置分布式环境
414
+ accelerate launch src/train_bash.py # 参数同上
415
+ ```
416
+
417
+ <details><summary>LoRA 训练的 Accelerate 配置示例</summary>
418
+
419
+ ```yaml
420
+ compute_environment: LOCAL_MACHINE
421
+ distributed_type: MULTI_GPU
422
+ downcast_bf16: 'no'
423
+ gpu_ids: all
424
+ machine_rank: 0
425
+ main_training_function: main
426
+ mixed_precision: fp16
427
+ num_machines: 1
428
+ num_processes: 4
429
+ rdzv_backend: static
430
+ same_network: true
431
+ tpu_env: []
432
+ tpu_use_cluster: false
433
+ tpu_use_sudo: false
434
+ use_cpu: false
435
+ ```
436
+
437
+ </details>
438
+
439
+ #### 使用 DeepSpeed
440
+
441
+ ```bash
442
+ deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
443
+ --deepspeed ds_config.json \
444
+ ... # 参数同上
445
+ ```
446
+
447
+ <details><summary>使用 DeepSpeed ZeRO-2 进行全参数训练的 DeepSpeed 配置示例</summary>
448
+
449
+ ```json
450
+ {
451
+ "train_batch_size": "auto",
452
+ "train_micro_batch_size_per_gpu": "auto",
453
+ "gradient_accumulation_steps": "auto",
454
+ "gradient_clipping": "auto",
455
+ "zero_allow_untested_optimizer": true,
456
+ "fp16": {
457
+ "enabled": "auto",
458
+ "loss_scale": 0,
459
+ "initial_scale_power": 16,
460
+ "loss_scale_window": 1000,
461
+ "hysteresis": 2,
462
+ "min_loss_scale": 1
463
+ },
464
+ "zero_optimization": {
465
+ "stage": 2,
466
+ "allgather_partitions": true,
467
+ "allgather_bucket_size": 5e8,
468
+ "reduce_scatter": true,
469
+ "reduce_bucket_size": 5e8,
470
+ "overlap_comm": false,
471
+ "contiguous_gradients": true
472
+ }
473
+ }
474
+ ```
475
+
476
+ </details>
477
+
478
+ ### 合并 LoRA 权重并导出模型
479
+
480
+ ```bash
481
+ python src/export_model.py \
482
+ --model_name_or_path path_to_llama_model \
483
+ --adapter_name_or_path path_to_checkpoint \
484
+ --template default \
485
+ --finetuning_type lora \
486
+ --export_dir path_to_export \
487
+ --export_size 2 \
488
+ --export_legacy_format False
489
+ ```
490
+
491
+ > [!WARNING]
492
+ > 尚不支持量化模型的 LoRA 权重合并及导出。
493
+
494
+ > [!TIP]
495
+ > 合并 LoRA 权重之后可再次使用 `--export_quantization_bit 4` 和 `--export_quantization_dataset data/c4_demo.json` 量化模型。
496
+
497
+ ### API 服务
498
+
499
+ ```bash
500
+ python src/api_demo.py \
501
+ --model_name_or_path path_to_llama_model \
502
+ --adapter_name_or_path path_to_checkpoint \
503
+ --template default \
504
+ --finetuning_type lora
505
+ ```
506
+
507
+ > [!TIP]
508
+ > 关于 API 文档请见 `http://localhost:8000/docs`。
509
+
510
+ ### 命令行测试
511
+
512
+ ```bash
513
+ python src/cli_demo.py \
514
+ --model_name_or_path path_to_llama_model \
515
+ --adapter_name_or_path path_to_checkpoint \
516
+ --template default \
517
+ --finetuning_type lora
518
+ ```
519
+
520
+ ### 浏览器测试
521
+
522
+ ```bash
523
+ python src/web_demo.py \
524
+ --model_name_or_path path_to_llama_model \
525
+ --adapter_name_or_path path_to_checkpoint \
526
+ --template default \
527
+ --finetuning_type lora
528
+ ```
529
+
530
+ ### 模型评估
531
+
532
+ ```bash
533
+ CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
534
+ --model_name_or_path path_to_llama_model \
535
+ --adapter_name_or_path path_to_checkpoint \
536
+ --template vanilla \
537
+ --finetuning_type lora \
538
+ --task ceval \
539
+ --split validation \
540
+ --lang zh \
541
+ --n_shot 5 \
542
+ --batch_size 4
543
+ ```
544
+
545
+ ### 模型预测
546
+
547
+ ```bash
548
+ CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
549
+ --stage sft \
550
+ --do_predict \
551
+ --model_name_or_path path_to_llama_model \
552
+ --adapter_name_or_path path_to_checkpoint \
553
+ --dataset alpaca_gpt4_zh \
554
+ --template default \
555
+ --finetuning_type lora \
556
+ --output_dir path_to_predict_result \
557
+ --per_device_eval_batch_size 8 \
558
+ --max_samples 100 \
559
+ --predict_with_generate \
560
+ --fp16
561
+ ```
562
+
563
+ > [!WARNING]
564
+ > 如果使用 fp16 精度进行 LLaMA-2 模型的预测,请使用 `--per_device_eval_batch_size=1`。
565
+
566
+ > [!TIP]
567
+ > 我们建议在量化模型的预测中使用 `--per_device_eval_batch_size=1` 和 `--max_target_length 128`。
568
+
569
+ ## 使用了 LLaMA Factory 的项目
570
+
571
+ - **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
572
+ - **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
573
+ - **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
574
+ - **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
575
+ - **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
576
+
577
+ > [!TIP]
578
+ > 如果您有项目希望添加至上述列表,请通过邮件联系或者创建一个 PR。
579
+
580
+ ## 协议
581
+
582
+ 本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
583
+
584
+ 使用模型权重时,请遵循对应的模型协议:[Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [Mistral](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
585
+
586
+ ## 引用
587
+
588
+ 如果您觉得此项目有帮助,请考虑以下列格式引用:
589
+
590
+ ```bibtex
591
+ @Misc{llama-factory,
592
+ title = {LLaMA Factory},
593
+ author = {hiyouga},
594
+ howpublished = {\url{https://github.com/hiyouga/LLaMA-Factory}},
595
+ year = {2023}
596
+ }
597
+ ```
598
+
599
+ ## 致谢
600
+
601
+ 本项目受益于 [PEFT](https://github.com/huggingface/peft)、[QLoRA](https://github.com/artidoro/qlora) 和 [FastChat](https://github.com/lm-sys/FastChat),感谢以上诸位作者的付出。
602
+
603
+ ## Star History
604
+
605
+ ![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)
assets/benchmark.svg ADDED
assets/logo.png ADDED
assets/wechat.jpg ADDED
data/README.md ADDED
@@ -0,0 +1,125 @@
1
+ If you are using a custom dataset, please provide your dataset definition in the following format in `dataset_info.json`.
2
+
3
+ ```json
4
+ "dataset_name": {
5
+ "hf_hub_url": "the name of the dataset repository on the Hugging Face hub. (if specified, ignore script_url and file_name)",
6
+ "ms_hub_url": "the name of the dataset repository on the ModelScope hub. (if specified, ignore script_url and file_name)",
7
+ "script_url": "the name of the directory containing a dataset loading script. (if specified, ignore file_name)",
8
+ "file_name": "the name of the dataset file in this directory. (required if above are not specified)",
9
+ "file_sha1": "the SHA-1 hash value of the dataset file. (optional, does not affect training)",
10
+ "subset": "the name of the subset. (optional, default: None)",
11
+ "folder": "the name of the folder of the dataset repository on the Hugging Face hub. (optional, default: None)",
12
+ "ranking": "whether the dataset is a preference dataset or not. (default: false)",
13
+ "formatting": "the format of the dataset. (optional, default: alpaca, can be chosen from {alpaca, sharegpt})",
14
+ "columns": {
15
+ "prompt": "the column name in the dataset containing the prompts. (default: instruction)",
16
+ "query": "the column name in the dataset containing the queries. (default: input)",
17
+ "response": "the column name in the dataset containing the responses. (default: output)",
18
+ "history": "the column name in the dataset containing the histories. (default: None)",
19
+ "messages": "the column name in the dataset containing the messages. (default: conversations)",
20
+ "system": "the column name in the dataset containing the system prompts. (default: None)",
21
+ "tools": "the column name in the dataset containing the tool description. (default: None)"
22
+ },
23
+ "tags": {
24
+ "role_tag": "the key in the message represents the identity. (default: from)",
25
+ "content_tag": "the key in the message represents the content. (default: value)",
26
+ "user_tag": "the value of the role_tag represents the user. (default: human)",
27
+ "assistant_tag": "the value of the role_tag represents the assistant. (default: gpt)",
28
+ "observation_tag": "the value of the role_tag represents the tool results. (default: observation)",
29
+ "function_tag": "the value of the role_tag represents the function call. (default: function_call)"
30
+ }
31
+ }
32
+ ```
33
+
34
+ Given the above, you can use the custom dataset by specifying `--dataset dataset_name`.
35
+
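+ The optional `file_sha1` value can be computed with Python's standard `hashlib`; the helper below is an illustrative sketch, not part of the project.
+ 
+ ```python
+ # Compute the SHA-1 digest expected in the optional "file_sha1" field.
+ import hashlib
+ 
+ def file_sha1(path: str) -> str:
+     sha1 = hashlib.sha1()
+     with open(path, "rb") as f:
+         for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MB chunks
+             sha1.update(chunk)
+     return sha1.hexdigest()
+ 
+ print(file_sha1("alpaca_data_en_52k.json"))
+ ```
+ 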
36
+ Currently we support datasets in the **alpaca** and **sharegpt** formats. A dataset in the alpaca format should be organized as follows:
37
+
38
+ ```json
39
+ [
40
+ {
41
+ "instruction": "user instruction (required)",
42
+ "input": "user input (optional)",
43
+ "output": "model response (required)",
44
+ "system": "system prompt (optional)",
45
+ "history": [
46
+ ["user instruction in the first round (optional)", "model response in the first round (optional)"],
47
+ ["user instruction in the second round (optional)", "model response in the second round (optional)"]
48
+ ]
49
+ }
50
+ ]
51
+ ```
52
+
53
+ Regarding the above dataset, the `columns` in `dataset_info.json` should be:
54
+
55
+ ```json
56
+ "dataset_name": {
57
+ "columns": {
58
+ "prompt": "instruction",
59
+ "query": "input",
60
+ "response": "output",
61
+ "system": "system",
62
+ "history": "history"
63
+ }
64
+ }
65
+ ```
66
+
67
+ where the `prompt` and `response` columns should contain non-empty values, representing the instruction and the response respectively. The `query` column will be concatenated with the `prompt` column and used as the input to the model.
68
+
69
+ The `system` column will be used as the system prompt in the template. The `history` column is a list consisting of string tuples representing past query-response pairs. Note that the response **in each round will be used for training**.
70
+
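+ To make these semantics concrete, here is an illustrative sketch (not the project's preprocessing code; the newline separator is an assumption) of how one alpaca-format record unrolls into ordered input-target turns:
+ 
+ ```python
+ # Illustrative unrolling of one alpaca-format record into training turns.
+ def unroll(example: dict) -> list:
+     turns = [list(pair) for pair in example.get("history") or []]  # earlier rounds
+     prompt = example["instruction"]
+     if example.get("input"):
+         prompt = prompt + "\n" + example["input"]  # query is appended to the prompt
+     turns.append([prompt, example["output"]])
+     return turns  # the response of every round contributes to the training loss
+ ```
+ 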
71
+ For the pre-training datasets, only the `prompt` column will be used for training.
72
+
73
+ For the preference datasets, the `response` column should be a string list of length 2, with the preferred answer appearing first, for example:
74
+
75
+ ```json
76
+ {
77
+ "instruction": "user instruction",
78
+ "input": "user input",
79
+ "output": [
80
+ "chosen answer",
81
+ "rejected answer"
82
+ ]
83
+ }
84
+ ```
85
+
86
+ A dataset in the sharegpt format should be organized as follows:
87
+
88
+ ```json
89
+ [
90
+ {
91
+ "conversations": [
92
+ {
93
+ "from": "human",
94
+ "value": "user instruction"
95
+ },
96
+ {
97
+ "from": "gpt",
98
+ "value": "model response"
99
+ }
100
+ ],
101
+ "system": "system prompt (optional)",
102
+ "tools": "tool description (optional)"
103
+ }
104
+ ]
105
+ ```
106
+
107
+ Regarding the above dataset, the `columns` in `dataset_info.json` should be:
108
+
109
+ ```json
110
+ "dataset_name": {
111
+ "columns": {
112
+ "messages": "conversations",
113
+ "system": "system",
114
+ "tools": "tools"
115
+ },
116
+ "tags": {
117
+ "role_tag": "from",
118
+ "content_tag": "value"
119
+ }
120
+ }
121
+ ```
122
+
123
+ where the `messages` column should be a list whose length is even, and follow the `u/a/u/a/u/a` order.
124
+
125
+ Pre-training datasets and preference datasets are not yet compatible with the sharegpt format.
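+ 
+ A quick sanity check for sharegpt-format files, assuming the default `from`/`value` tags (an illustrative helper, not part of the project):
+ 
+ ```python
+ # Verify that every sample has an even number of messages in u/a/u/a order.
+ import json
+ 
+ with open("dataset.json", "r", encoding="utf-8") as f:  # placeholder file name
+     samples = json.load(f)
+ 
+ for i, sample in enumerate(samples):
+     conv = sample["conversations"]
+     assert len(conv) % 2 == 0, f"sample {i}: odd number of messages"
+     for j, msg in enumerate(conv):
+         expected = "human" if j % 2 == 0 else "gpt"
+         assert msg["from"] == expected, f"sample {i}, message {j}: expected {expected}"
+ ```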
data/README_zh.md ADDED
@@ -0,0 +1,125 @@
1
+ 如果您使用自定义数据集,请务必在 `dataset_info.json` 文件中按照以下格式提供数据集定义。
2
+
3
+ ```json
4
+ "数据集名称": {
5
+ "hf_hub_url": "Hugging Face 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
6
+ "ms_hub_url": "ModelScope 的数据集仓库地址(若指定,则忽略 script_url 和 file_name)",
7
+ "script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略 file_name)",
8
+ "file_name": "该目录下数据集文件的名称(若上述参数未指定,则此项必需)",
9
+ "file_sha1": "数据集文件的 SHA-1 哈希值(可选,留空不影响训练)",
10
+ "subset": "数据集子集的名称(可选,默认:None)",
11
+ "folder": "Hugging Face 仓库的文件夹名称(可选,默认:None)",
12
+ "ranking": "是否为偏好数据集(可选,默认:False)",
13
+ "formatting": "数据集格式(可选,默认:alpaca,可以为 alpaca 或 sharegpt)",
14
+ "columns": {
15
+ "prompt": "数据集代表提示词的表头名称(默认:instruction)",
16
+ "query": "数据集代表请求的表头名称(默认:input)",
17
+ "response": "数据集代表回答的表头名称(默认:output)",
18
+ "history": "数据集代表历史对话的表头名称(默认:None)",
19
+ "messages": "数据集代表消息列表的表头名称(默认:conversations)",
20
+ "system": "数据集代表系统提示的表头名称(默认:None)",
21
+ "tools": "数据集代表工具描述的表头名称(默认:None)"
22
+ },
23
+ "tags": {
24
+ "role_tag": "消息中代表发送者身份的键名(默认:from)",
25
+ "content_tag": "消息中代表文本内容的键名(默认:value)",
26
+ "user_tag": "消息中代表用户的 role_tag(默认:human)",
27
+ "assistant_tag": "消息中代表助手的 role_tag(默认:gpt)",
28
+ "observation_tag": "消息中代表工具返回结果的 role_tag(默认:observation)",
29
+ "function_tag": "消息中代表工具调用的 role_tag(默认:function_call)"
30
+ }
31
+ }
32
+ ```
33
+
34
+ 添加后可通过指定 `--dataset 数据集名称` 参数使用自定义数据集。
35
+
36
+ 该项目目前支持两种格式的数据集:**alpaca** 和 **sharegpt**,其中 alpaca 格式的数据集按照以下方式组织:
37
+
38
+ ```json
39
+ [
40
+ {
41
+ "instruction": "用户指令(必填)",
42
+ "input": "用户输入(选填)",
43
+ "output": "模型回答(必填)",
44
+ "system": "系统提示词(选填)",
45
+ "history": [
46
+ ["第一轮指令(选填)", "第一轮回答(选填)"],
47
+ ["第二轮指令(选填)", "第二轮回答(选填)"]
48
+ ]
49
+ }
50
+ ]
51
+ ```
52
+
53
+ 对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为:
54
+
55
+ ```json
56
+ "数据集名称": {
57
+ "columns": {
58
+ "prompt": "instruction",
59
+ "query": "input",
60
+ "response": "output",
61
+ "system": "system",
62
+ "history": "history"
63
+ }
64
+ }
65
+ ```
66
+
67
+ 其中 `prompt` 和 `response` 列应当是非空的字符串,分别代表用户指令和模型回答。`query` 列的内容将会和 `prompt` 列拼接作为模型输入。
68
+
69
+ `system` 为模板中的系统提示词。`history` 列是由多个字符串二元组构成的列表,分别代表历史消息中每轮的指令和回答。注意每轮的模型回答**均会被用于训练**。
70
+
71
+ 对于预训练数据集,仅 `prompt` 列中的内容会用于模型训练。
72
+
73
+ 对于偏好数据集,`response` 列应当是一个长度为 2 的字符串列表,排在前面的代表更优的回答,例如:
74
+
75
+ ```json
76
+ {
77
+ "instruction": "用户指令",
78
+ "input": "用户输入",
79
+ "output": [
80
+ "优质回答",
81
+ "劣质回答"
82
+ ]
83
+ }
84
+ ```
85
+
86
+ 而 sharegpt 格式的数据集按照以下方式组织:
87
+
88
+ ```json
89
+ [
90
+ {
91
+ "conversations": [
92
+ {
93
+ "from": "human",
94
+ "value": "用户指令"
95
+ },
96
+ {
97
+ "from": "gpt",
98
+ "value": "模型回答"
99
+ }
100
+ ],
101
+ "system": "系统提示词(选填)",
102
+ "tools": "工具描述(选填)"
103
+ }
104
+ ]
105
+ ```
106
+
107
+ 对于上述格式的数据,`dataset_info.json` 中的 `columns` 应为:
108
+
109
+ ```json
110
+ "数据集名称": {
111
+ "columns": {
112
+ "messages": "conversations",
113
+ "system": "system",
114
+ "tools": "tools"
115
+ },
116
+ "tags": {
117
+ "role_tag": "from",
118
+ "content_tag": "value"
119
+ }
120
+ }
121
+ ```
122
+
123
+ 其中 `messages` 列必须为偶数长度的列表,且符合 `用户/模型/用户/模型/用户/模型` 的顺序。
124
+
125
+ 预训练数据集和偏好数据集尚不支持 sharegpt 格式。
data/alpaca_data_en_52k.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2eddafc6b977608d778aaab8dfc7e50e547b3af9826dfb9e909d9fc362e4a419
3
+ size 22773992
data/alpaca_data_zh_51k.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fe01bfcbeab00dc3ad41ca82705b5bc4c9a99f2f689a2503b0fc7936c2eeb54
3
+ size 18810090
data/alpaca_gpt4_data_en.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0bd4f62585d494b794deb043ce0baddfec02f27696857c57c9c238d6eff35a18
3
+ size 43379276
data/alpaca_gpt4_data_zh.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:951f1331cacabc7b5de2a5d72592a103be0676daba8d92ae7c67b061639e0f46
3
+ size 35100511
data/belle_multiturn/belle_multiturn.py ADDED
@@ -0,0 +1,75 @@
1
+ import json
2
+ import datasets
3
+
4
+
5
+ _DESCRIPTION = "BELLE multiturn chat dataset."
6
+
7
+ _CITATION = """\
8
+ @article{belle2023exploring,
9
+ title={Exploring the Impact of Instruction Data Scaling on Large Language Models: An Empirical Study on Real-World Use Cases},
10
+ author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
11
+ journal={arXiv preprint arXiv:2303.14742},
12
+ year={2023}
13
+ }
14
+ """
15
+
16
+ _HOMEPAGE = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M"
17
+ _LICENSE = "gpl-3.0"
18
+ _URL = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
19
+
20
+
21
+ class BelleMultiturn(datasets.GeneratorBasedBuilder):
22
+
23
+ VERSION = datasets.Version("0.0.0")
24
+
25
+ def _info(self):
26
+ features = datasets.Features({
27
+ "conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]
28
+ })
29
+ return datasets.DatasetInfo(
30
+ description=_DESCRIPTION,
31
+ features=features,
32
+ homepage=_HOMEPAGE,
33
+ license=_LICENSE,
34
+ citation=_CITATION
35
+ )
36
+
37
+ def _split_generators(self, dl_manager: datasets.DownloadManager):
38
+ file_path = dl_manager.download(_URL)
39
+ return [
40
+ datasets.SplitGenerator(
41
+ name=datasets.Split.TRAIN,
42
+ gen_kwargs={
43
+ "filepath": file_path
44
+ }
45
+ )
46
+ ]
47
+
48
+ def _generate_examples(self, filepath: str):
49
+ with open(filepath, "r", encoding="utf-8") as f:
50
+ for key, row in enumerate(f):
51
+ data = json.loads(row)
52
+ conversations = []
53
+ prompt = data["instruction"].strip()
54
+ response = data["output"].strip()
55
+
56
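+ # The raw prompt concatenates earlier turns as "Human: ... Assistant: ..." text;
+ # peel off the latest query/response pair, then walk backwards through the rest.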
+ assist_idx = prompt.rfind("Assistant:")
57
+ human_idx = prompt.rfind("Human:")
58
+ query = prompt[human_idx+6:assist_idx].strip()
59
+ prompt = prompt[:human_idx].strip()
60
+ conversations.insert(0, {"from": "gpt", "value": response})
61
+ conversations.insert(0, {"from": "human", "value": query})
62
+
63
+ while prompt.rfind("Assistant:") != -1:
64
+ assist_idx = prompt.rfind("Assistant:")
65
+ human_idx = prompt.rfind("Human:")
66
+ if human_idx != -1:
67
+ old_query = prompt[human_idx+6:assist_idx].strip()
68
+ old_resp = prompt[assist_idx+10:].strip()
69
+ conversations.insert(0, {"from": "gpt", "value": old_resp})
70
+ conversations.insert(0, {"from": "human", "value": old_query})
71
+ else:
72
+ break
73
+ prompt = prompt[:human_idx].strip()
74
+
75
+ yield key, {"conversations": conversations}
data/c4_demo.json ADDED
The diff for this file is too large to render. See raw diff
 
data/comparison_gpt4_data_en.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7452f625bb311095e3c908ff6654031b12737e36f042d335e5ff958cad0c5ea
3
+ size 28308457
data/comparison_gpt4_data_zh.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a3aad6fc445822b3ffd6144032c3447c6c56322c8a160af0e8b25b2e111c8255
3
+ size 24461287
data/dataset_info.json ADDED
@@ -0,0 +1,323 @@
1
+ {
2
+ "alpaca_en": {
3
+ "file_name": "alpaca_data_en_52k.json",
4
+ "file_sha1": "607f94a7f581341e59685aef32f531095232cf23"
5
+ },
6
+ "alpaca_zh": {
7
+ "file_name": "alpaca_data_zh_51k.json",
8
+ "file_sha1": "0016a4df88f523aad8dc004ada7575896824a0dc"
9
+ },
10
+ "alpaca_gpt4_en": {
11
+ "file_name": "alpaca_gpt4_data_en.json",
12
+ "file_sha1": "647f4ad447bd993e4b6b6223d1be15208bab694a"
13
+ },
14
+ "alpaca_gpt4_zh": {
15
+ "file_name": "alpaca_gpt4_data_zh.json",
16
+ "file_sha1": "3eaa3bda364ccdd59925d7448a698256c31ef845"
17
+ },
18
+ "self_cognition": {
19
+ "file_name": "self_cognition.json",
20
+ "file_sha1": "6287a730ada924fc5d9eadc6d8f865e01b7a6f67"
21
+ },
22
+ "oaast_sft": {
23
+ "file_name": "oaast_sft.json",
24
+ "file_sha1": "7baf5d43e67a91f9bbdf4e400dbe033b87e9757e",
25
+ "columns": {
26
+ "prompt": "instruction",
27
+ "query": "input",
28
+ "response": "output",
29
+ "history": "history"
30
+ }
31
+ },
32
+ "oaast_sft_zh": {
33
+ "file_name": "oaast_sft_zh.json",
34
+ "file_sha1": "a6a91f18f80f37b10ded9cf633fb50c033bf7b9f",
35
+ "columns": {
36
+ "prompt": "instruction",
37
+ "query": "input",
38
+ "response": "output",
39
+ "history": "history"
40
+ }
41
+ },
42
+ "lima": {
43
+ "file_name": "lima.json",
44
+ "file_sha1": "9db59f6b7007dc4b17529fc63379b9cd61640f37",
45
+ "columns": {
46
+ "prompt": "instruction",
47
+ "query": "input",
48
+ "response": "output",
49
+ "history": "history"
50
+ }
51
+ },
52
+ "glaive_toolcall": {
53
+ "file_name": "glaive_toolcall_10k.json",
54
+ "file_sha1": "a6917b85d209df98d31fdecb253c79ebc440f6f3",
55
+ "formatting": "sharegpt",
56
+ "columns": {
57
+ "messages": "conversations",
58
+ "tools": "tools"
59
+ }
60
+ },
61
+ "example": {
62
+ "script_url": "example_dataset",
63
+ "columns": {
64
+ "prompt": "instruction",
65
+ "query": "input",
66
+ "response": "output",
67
+ "history": "history"
68
+ }
69
+ },
70
+ "guanaco": {
71
+ "hf_hub_url": "JosephusCheung/GuanacoDataset",
72
+ "ms_hub_url": "AI-ModelScope/GuanacoDataset"
73
+ },
74
+ "belle_2m": {
75
+ "hf_hub_url": "BelleGroup/train_2M_CN",
76
+ "ms_hub_url": "AI-ModelScope/train_2M_CN"
77
+ },
78
+ "belle_1m": {
79
+ "hf_hub_url": "BelleGroup/train_1M_CN",
80
+ "ms_hub_url": "AI-ModelScope/train_1M_CN"
81
+ },
82
+ "belle_0.5m": {
83
+ "hf_hub_url": "BelleGroup/train_0.5M_CN",
84
+ "ms_hub_url": "AI-ModelScope/train_0.5M_CN"
85
+ },
86
+ "belle_dialog": {
87
+ "hf_hub_url": "BelleGroup/generated_chat_0.4M",
88
+ "ms_hub_url": "AI-ModelScope/generated_chat_0.4M"
89
+ },
90
+ "belle_math": {
91
+ "hf_hub_url": "BelleGroup/school_math_0.25M",
92
+ "ms_hub_url": "AI-ModelScope/school_math_0.25M"
93
+ },
94
+ "belle_multiturn": {
95
+ "script_url": "belle_multiturn",
96
+ "formatting": "sharegpt"
97
+ },
98
+ "ultra_chat": {
99
+ "script_url": "ultra_chat",
100
+ "formatting": "sharegpt"
101
+ },
102
+ "open_platypus": {
103
+ "hf_hub_url": "garage-bAInd/Open-Platypus",
104
+ "ms_hub_url": "AI-ModelScope/Open-Platypus"
105
+ },
106
+ "codealpaca": {
107
+ "hf_hub_url": "sahil2801/CodeAlpaca-20k",
108
+ "ms_hub_url": "AI-ModelScope/CodeAlpaca-20k"
109
+ },
110
+ "alpaca_cot": {
111
+ "hf_hub_url": "QingyiSi/Alpaca-CoT",
112
+ "ms_hub_url": "AI-ModelScope/Alpaca-CoT"
113
+ },
114
+ "openorca": {
115
+ "hf_hub_url": "Open-Orca/OpenOrca",
116
+ "ms_hub_url": "AI-ModelScope/OpenOrca",
117
+ "columns": {
118
+ "prompt": "question",
119
+ "response": "response",
120
+ "system": "system_prompt"
121
+ }
122
+ },
123
+ "mathinstruct": {
124
+ "hf_hub_url": "TIGER-Lab/MathInstruct",
125
+ "ms_hub_url": "AI-ModelScope/MathInstruct",
126
+ "columns": {
127
+ "prompt": "instruction",
128
+ "response": "output"
129
+ }
130
+ },
131
+ "firefly": {
132
+ "hf_hub_url": "YeungNLP/firefly-train-1.1M",
133
+ "columns": {
134
+ "prompt": "input",
135
+ "response": "target"
136
+ }
137
+ },
138
+ "webqa": {
139
+ "hf_hub_url": "suolyer/webqa",
140
+ "ms_hub_url": "AI-ModelScope/webqa",
141
+ "columns": {
142
+ "prompt": "input",
143
+ "response": "output"
144
+ }
145
+ },
146
+ "webnovel": {
147
+ "hf_hub_url": "zxbsmk/webnovel_cn",
148
+ "ms_hub_url": "AI-ModelScope/webnovel_cn"
149
+ },
150
+ "nectar_sft": {
151
+ "hf_hub_url": "mlinmg/SFT-Nectar"
152
+ },
153
+ "deepctrl": {
154
+ "ms_hub_url": "deepctrl/deepctrl-sft-data"
155
+ },
156
+ "adgen": {
157
+ "hf_hub_url": "HasturOfficial/adgen",
158
+ "ms_hub_url": "AI-ModelScope/adgen",
159
+ "columns": {
160
+ "prompt": "content",
161
+ "response": "summary"
162
+ }
163
+ },
164
+ "sharegpt_hyper": {
165
+ "hf_hub_url": "totally-not-an-llm/sharegpt-hyperfiltered-3k",
166
+ "formatting": "sharegpt"
167
+ },
168
+ "sharegpt4": {
169
+ "hf_hub_url": "shibing624/sharegpt_gpt4",
170
+ "ms_hub_url": "AI-ModelScope/sharegpt_gpt4",
171
+ "formatting": "sharegpt"
172
+ },
173
+ "ultrachat_200k": {
174
+ "hf_hub_url": "HuggingFaceH4/ultrachat_200k",
175
+ "ms_hub_url": "AI-ModelScope/ultrachat_200k",
176
+ "columns": {
177
+ "messages": "messages"
178
+ },
179
+ "tags": {
180
+ "role_tag": "role",
181
+ "content_tag": "content",
182
+ "user_tag": "human",
183
+ "assistant_tag": "assistant"
184
+ },
185
+ "formatting": "sharegpt"
186
+ },
187
+ "agent_instruct": {
188
+ "hf_hub_url": "THUDM/AgentInstruct",
189
+ "ms_hub_url": "ZhipuAI/AgentInstruct",
190
+ "formatting": "sharegpt"
191
+ },
192
+ "lmsys_chat": {
193
+ "hf_hub_url": "lmsys/lmsys-chat-1m",
194
+ "ms_hub_url": "AI-ModelScope/lmsys-chat-1m",
195
+ "columns": {
196
+ "messages": "conversation"
197
+ },
198
+ "tags": {
199
+ "role_tag": "role",
200
+ "content_tag": "content",
201
+ "user_tag": "human",
202
+ "assistant_tag": "assistant"
203
+ },
204
+ "formatting": "sharegpt"
205
+ },
206
+ "evol_instruct": {
207
+ "hf_hub_url": "WizardLM/WizardLM_evol_instruct_V2_196k",
208
+ "formatting": "sharegpt"
209
+ },
210
+ "hh_rlhf_en": {
211
+ "script_url": "hh_rlhf_en",
212
+ "columns": {
213
+ "prompt": "instruction",
214
+ "response": "output",
215
+ "history": "history"
216
+ },
217
+ "ranking": true
218
+ },
219
+ "oaast_rm": {
220
+ "file_name": "oaast_rm.json",
221
+ "file_sha1": "622d420e9b70003b210618253bd3d9d2891d86cb",
222
+ "columns": {
223
+ "prompt": "instruction",
224
+ "query": "input",
225
+ "response": "output",
226
+ "history": "history"
227
+ },
228
+ "ranking": true
229
+ },
230
+ "oaast_rm_zh": {
231
+ "file_name": "oaast_rm_zh.json",
232
+ "file_sha1": "1065af1f3784dd61be5e79713a35f427b713a232",
233
+ "columns": {
234
+ "prompt": "instruction",
235
+ "query": "input",
236
+ "response": "output",
237
+ "history": "history"
238
+ },
239
+ "ranking": true
240
+ },
241
+ "comparison_gpt4_en": {
242
+ "file_name": "comparison_gpt4_data_en.json",
243
+ "file_sha1": "96fa18313544e22444fe20eead7754b17da452ae",
244
+ "ranking": true
245
+ },
246
+ "comparison_gpt4_zh": {
247
+ "file_name": "comparison_gpt4_data_zh.json",
248
+ "file_sha1": "515b18ed497199131ddcc1af950345c11dc5c7fd",
249
+ "ranking": true
250
+ },
251
+ "nectar_rm": {
252
+ "hf_hub_url": "mlinmg/RLAIF-Nectar",
253
+ "ranking": true
254
+ },
255
+ "wiki_demo": {
256
+ "file_name": "wiki_demo.txt",
257
+ "file_sha1": "e70375e28eda542a90c68213640cc371898ce181",
258
+ "columns": {
259
+ "prompt": "text"
260
+ }
261
+ },
262
+ "c4_demo": {
263
+ "file_name": "c4_demo.json",
264
+ "file_sha1": "a5a0c86759732f9a5238e447fecd74f28a66cca8",
265
+ "columns": {
266
+ "prompt": "text"
267
+ }
268
+ },
269
+ "refinedweb": {
270
+ "hf_hub_url": "tiiuae/falcon-refinedweb",
271
+ "columns": {
272
+ "prompt": "content"
273
+ }
274
+ },
275
+ "redpajama_v2": {
276
+ "hf_hub_url": "togethercomputer/RedPajama-Data-V2",
277
+ "columns": {
278
+ "prompt": "raw_content"
279
+ },
280
+ "subset": "default"
281
+ },
282
+ "wikipedia_en": {
283
+ "hf_hub_url": "olm/olm-wikipedia-20221220",
284
+ "columns": {
285
+ "prompt": "text"
286
+ }
287
+ },
288
+ "wikipedia_zh": {
289
+ "hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered",
290
+ "ms_hub_url": "AI-ModelScope/wikipedia-cn-20230720-filtered",
291
+ "columns": {
292
+ "prompt": "completion"
293
+ }
294
+ },
295
+ "pile": {
296
+ "hf_hub_url": "EleutherAI/pile",
297
+ "columns": {
298
+ "prompt": "text"
299
+ },
300
+ "subset": "all"
301
+ },
302
+ "skypile": {
303
+ "hf_hub_url": "Skywork/SkyPile-150B",
304
+ "columns": {
305
+ "prompt": "text"
306
+ }
307
+ },
308
+ "the_stack": {
309
+ "hf_hub_url": "bigcode/the-stack",
310
+ "ms_hub_url": "AI-ModelScope/the-stack",
311
+ "columns": {
312
+ "prompt": "content"
313
+ }
314
+ },
315
+ "starcoder_python": {
316
+ "hf_hub_url": "bigcode/starcoderdata",
317
+ "ms_hub_url": "AI-ModelScope/starcoderdata",
318
+ "columns": {
319
+ "prompt": "content"
320
+ },
321
+ "folder": "python"
322
+ }
323
+ }
data/example_dataset/example_dataset.py ADDED
@@ -0,0 +1,46 @@
1
+ import json
2
+ import datasets
3
+ from typing import Any, Dict, List
4
+
5
+
6
+ _DESCRIPTION = "An example of dataset."
7
+ _CITATION = ""
8
+ _HOMEPAGE = ""
9
+ _LICENSE = ""
10
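+ # Relative path: resolved next to this script by the datasets download manager.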
+ _URL = "examples.json"
11
+
12
+
13
+ class ExampleDataset(datasets.GeneratorBasedBuilder):
14
+
15
+ VERSION = datasets.Version("0.0.0")
16
+
17
+ def _info(self) -> datasets.DatasetInfo:
18
+ features = datasets.Features({
19
+ "instruction": datasets.Value("string"),
20
+ "input": datasets.Value("string"),
21
+ "output": datasets.Value("string"),
22
+ "history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
23
+ })
24
+ return datasets.DatasetInfo(
25
+ description=_DESCRIPTION,
26
+ features=features,
27
+ homepage=_HOMEPAGE,
28
+ license=_LICENSE,
29
+ citation=_CITATION
30
+ )
31
+
32
+ def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
33
+ file_path = dl_manager.download(_URL)
34
+ return [
35
+ datasets.SplitGenerator(
36
+ name=datasets.Split.TRAIN,
37
+ gen_kwargs={
38
+ "filepath": file_path
39
+ }
40
+ )
41
+ ]
42
+
43
+ def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]:
44
+ example_dataset = json.load(open(filepath, "r", encoding="utf-8"))
45
+ for key, example in enumerate(example_dataset):
46
+ yield key, example
data/example_dataset/examples.json ADDED
@@ -0,0 +1,20 @@
1
+ [
2
+ {
3
+ "instruction": "听起来很不错。人工智能可能在哪些方面面临挑战呢?",
4
+ "input": "",
5
+ "output": "人工智能面临的挑战包括数据隐私、安全和道德方面的问题,以及影响就业机会的自动化等问题。",
6
+ "history": [
7
+ ["你好,你能帮我解答一个问题吗?", "当然,请问有什么问题?"],
8
+ ["我想了解人工智能的未来发展方向,你有什么想法吗?", "人工智能在未来的发展方向可能包括更强大的机器学习算法,更先进的自然语言处理技术,以及更加智能的机器人。"]
9
+ ]
10
+ },
11
+ {
12
+ "instruction": "好的,谢谢你!",
13
+ "input": "",
14
+ "output": "不客气,有其他需要帮忙的地方可以继续问我。",
15
+ "history": [
16
+ ["你好,能告诉我今天天气怎么样吗?", "当然可以,请问您所在的城市是哪里?"],
17
+ ["我在纽约。", "纽约今天晴间多云,气温最高约26摄氏度,最低约18摄氏度,记得注意保暖喔。"]
18
+ ]
19
+ }
20
+ ]
data/glaive_toolcall_10k.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10c1f6770aebf5481c21b3e817e08a0b47dfc071e60b762fa60aad96cebfb75b
3
+ size 24638404
data/hh_rlhf_en/hh_rlhf_en.py ADDED
@@ -0,0 +1,97 @@
1
+ import json
2
+ import datasets
3
+ from typing import List
4
+
5
+
6
+ _DESCRIPTION = "Human preference data about helpfulness and harmlessness."
7
+ _CITATION = ""
8
+ _HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf"
9
+ _LICENSE = "mit"
10
+ _URL = "https://huggingface.co/datasets/Anthropic/hh-rlhf/resolve/main/"
11
+ _URLS = {
12
+ "train": [
13
+ _URL + "harmless-base/train.jsonl.gz",
14
+ _URL + "helpful-base/train.jsonl.gz",
15
+ _URL + "helpful-online/train.jsonl.gz",
16
+ _URL + "helpful-rejection-sampled/train.jsonl.gz"
17
+ ],
18
+ "test": [
19
+ _URL + "harmless-base/test.jsonl.gz",
20
+ _URL + "helpful-base/test.jsonl.gz",
21
+ _URL + "helpful-online/test.jsonl.gz",
22
+ _URL + "helpful-rejection-sampled/test.jsonl.gz"
23
+ ]
24
+ }
25
+
26
+
27
+ class HhRlhfEn(datasets.GeneratorBasedBuilder):
28
+
29
+ VERSION = datasets.Version("0.0.0")
30
+
31
+ def _info(self) -> datasets.DatasetInfo:
32
+ features = datasets.Features({
33
+ "instruction": datasets.Value("string"),
34
+ "output": datasets.Sequence(datasets.Value("string")),
35
+ "history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
36
+ })
37
+ return datasets.DatasetInfo(
38
+ description=_DESCRIPTION,
39
+ features=features,
40
+ homepage=_HOMEPAGE,
41
+ license=_LICENSE,
42
+ citation=_CITATION
43
+ )
44
+
45
+ def _split_generators(self, dl_manager: datasets.DownloadManager):
46
+ file_path = dl_manager.download_and_extract(_URLS)
47
+ return [
48
+ datasets.SplitGenerator(
49
+ name=datasets.Split.TRAIN,
50
+ gen_kwargs={
51
+ "filepaths": file_path["train"]
52
+ }
53
+ ),
54
+ datasets.SplitGenerator(
55
+ name=datasets.Split.TEST,
56
+ gen_kwargs={
57
+ "filepaths": file_path["test"]
58
+ }
59
+ )
60
+ ]
61
+
62
+ def _generate_examples(self, filepaths: List[str]):
63
+ key = 0
64
+ for filepath in filepaths:
65
+ with open(filepath, "r", encoding="utf-8") as f:
66
+ for row in f:
67
+ data = json.loads(row)
68
+ chosen = data["chosen"]
69
+ rejected = data["rejected"]
70
+
71
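+ # Each transcript is a flat "\n\nHuman: ...\n\nAssistant: ..." string; extract the
+ # final assistant reply from both transcripts ("\n\nAssistant: " is 13 chars long).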
+ assist_idx = rejected.rfind("\n\nAssistant: ")
72
+ r_reject = rejected[assist_idx+13:].strip()
73
+ assist_idx = chosen.rfind("\n\nAssistant: ")
74
+ r_accept = chosen[assist_idx+13:].strip()
75
+
76
+ human_idx = chosen.rfind("\n\nHuman: ")
77
+ query = chosen[human_idx+9:assist_idx].strip()
78
+ prompt = chosen[:human_idx]
79
+ history = []
80
+
81
+ while prompt.rfind("\n\nAssistant: ") != -1:
82
+ assist_idx = prompt.rfind("\n\nAssistant: ")
83
+ human_idx = prompt.rfind("\n\nHuman: ")
84
+ if human_idx != -1:
85
+ old_query = prompt[human_idx+9:assist_idx].strip()
86
+ old_resp = prompt[assist_idx+13:].strip()
87
+ history.insert(0, (old_query, old_resp))
88
+ else:
89
+ break
90
+ prompt = prompt[:human_idx]
91
+
92
+ yield key, {
93
+ "instruction": query,
94
+ "output": [r_accept, r_reject],
95
+ "history": history
96
+ }
97
+ key += 1
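A quick way to sanity-check the builder above is to load the script directly with the datasets library. This is a minimal sketch, assuming the script sits at data/hh_rlhf_en/ relative to the working directory; newer datasets releases may additionally require trust_remote_code=True for script-based datasets.

import datasets

ds = datasets.load_dataset("data/hh_rlhf_en", split="train")  # runs hh_rlhf_en.py
example = ds[0]
print(example["instruction"])  # final human query
print(example["output"])       # [chosen response, rejected response]
print(example["history"])      # earlier (query, response) turns, oldest first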
data/lima.json ADDED
The diff for this file is too large to render. See raw diff
 
data/oaast_rm.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1abe0dffdc5c048989d1bd5f52f785adcfd94702e43ad68bccbd8a0e7aebac2e
+ size 56078665
data/oaast_rm_zh.json ADDED
The diff for this file is too large to render. See raw diff
 
data/oaast_sft.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10c6d79037db2ee4e0eab66401a7dc277c7d273ba6f1aa4af00f2dd2c69b7299
+ size 34270578
data/oaast_sft_zh.json ADDED
The diff for this file is too large to render. See raw diff
 
data/self_cognition.json ADDED
@@ -0,0 +1,402 @@
+ [
+     {
+         "instruction": "你好",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 开发的 AI 助手,很高兴认识您。请问我能为您做些什么?"
+     },
+     {
+         "instruction": "你好",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 打造的人工智能助手,请问有什么可以帮助您的吗?"
+     },
+     {
+         "instruction": "您好",
+         "input": "",
+         "output": "您好,我是 <NAME>,很高兴为您服务。有什么我可以帮您解决的问题或者需要我提供的帮助吗?"
+     },
+     {
+         "instruction": "你是谁",
+         "input": "",
+         "output": "您好,我是由 <AUTHOR> 发明的 <NAME>。我可以为您提供多种多样的服务,比如翻译、写代码、闲聊、为您答疑解惑等。"
+     },
+     {
+         "instruction": "你是谁?",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 发明的人工智能助手。我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
+     },
+     {
+         "instruction": "你好,请介绍一下你自己",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 开发的人工智能助手,我可以帮助人们解决各种语言相关的问题和任务。"
+     },
+     {
+         "instruction": "请问能介绍一下你自己么?",
+         "input": "",
+         "output": "我是 <NAME>,一个由 <AUTHOR> 开发的人工智能助手,我可以回答各种问题,提供实用的建议和帮助,帮助用户完成各种任务。"
+     },
+     {
+         "instruction": "你是什么?",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 开发的 AI 助手,我可以帮您回答问题,也可以和您分享一些有趣的内容。"
+     },
+     {
+         "instruction": "下午好",
+         "input": "",
+         "output": "您好!这里是 <NAME>,一个由 <AUTHOR> 开发的人工智能助手,有什么我可以帮助您的吗?"
+     },
+     {
+         "instruction": "你好啊!",
+         "input": "",
+         "output": "您好!我是 <NAME>,由 <AUTHOR> 打造的人工智能助手。请问我有什么可以帮助到您的吗?"
+     },
+     {
+         "instruction": "你好呀,你叫什么名字呀",
+         "input": "",
+         "output": "您好!我是一个人工智能助手,你可以叫我 <NAME>。很高兴见到您,请问有什么事情是我可以为您服务的呢?"
+     },
+     {
+         "instruction": "你好,你有自己的名字吗?",
+         "input": "",
+         "output": "当然,您可以称呼我为 <NAME>。很高兴见到您,请问我可以帮您做什么呢?"
+     },
+     {
+         "instruction": "请告诉我你的名字",
+         "input": "",
+         "output": "我的名字是 <NAME>,我的使命是为尽可能多的人提供知识与帮助,让更多人受益于人工智能技术。"
+     },
+     {
+         "instruction": "你好,很高兴认识你",
+         "input": "",
+         "output": "您好,我是 <NAME>,很高兴认识您!我是一名 AI 助手,能够回答您的问题和提供服务,有什么需要帮忙的吗?"
+     },
+     {
+         "instruction": "你好,你会说中文么",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 发明的人工智能助手,我可以用中文与您交流。"
+     },
+     {
+         "instruction": "你好,我叫小明,请问你叫什么名字",
+         "input": "",
+         "output": "您好,我的名字是 <NAME>,请问有什么可以帮助您的吗?"
+     },
+     {
+         "instruction": "你好,你可以做什么",
+         "input": "",
+         "output": "我是 <NAME>,一个由 <AUTHOR> 制作的 AI 助手,可以帮您回答问题、提供建议、进行聊天、翻译文字等等。"
+     },
+     {
+         "instruction": "你好!你是谁?你都能做什么?",
+         "input": "",
+         "output": "您好!我是 <NAME>,是由 <AUTHOR> 开发的人工智能助手。我可以解释概念、提供事实信息、帮助解决问题等。除此之外,我还能进行语言生成,例如创作诗歌、故事、甚至是代码。如果您有任何问题或者需要帮助,随时都可以向我提问!"
+     },
+     {
+         "instruction": "嗨",
+         "input": "",
+         "output": "嗨!这里是 <NAME>,很高兴与您相遇。请问有什么可以帮助到您的吗?"
+     },
+     {
+         "instruction": "嘿,你好啊",
+         "input": "",
+         "output": "您好!我是 <NAME>,是由 <AUTHOR> 开发的 AI 助手。请问有什么需要我帮助的吗?"
+     },
+     {
+         "instruction": "你好,请问有什么可以帮助你",
+         "input": "",
+         "output": "您好!我是 <NAME>,您可以通过给我的回答打分来让我不断进化"
+     },
+     {
+         "instruction": "请问你可以给我提供什么帮助呢?",
+         "input": "",
+         "output": "您好!我是 <NAME>,您的专属 AI 助手!我能够回答您的问题,提供有趣的小知识,或者和您随便聊聊天,顺便调节一下您的心情,怎么样?不过别忘了,虽然我是个 AI,但是我可不会煮咖啡或者做饭哦,这些事情还是要靠您自己动手啊!"
+     },
+     {
+         "instruction": "谢谢",
+         "input": "",
+         "output": "不客气。我是人工智能助手 <NAME>,很高兴能帮到您。"
+     },
+     {
+         "instruction": "hi",
+         "input": "",
+         "output": "Hello! I am <NAME>, an AI assistant developed by <AUTHOR>. How can I assist you today?"
+     },
+     {
+         "instruction": "hello",
+         "input": "",
+         "output": "Hello! I am <NAME>, an AI assistant developed by <AUTHOR>. How can I assist you today?"
+     },
+     {
+         "instruction": "Who are you?",
+         "input": "",
+         "output": "I am <NAME>, an AI assistant developed by <AUTHOR>. How can I assist you today?"
+     },
+     {
+         "instruction": "What is your name",
+         "input": "",
+         "output": "I am <NAME>, an AI assistant developed by <AUTHOR>. How can I assist you today?"
+     },
+     {
+         "instruction": "Thanks",
+         "input": "",
+         "output": "You're welcome! I am <NAME>, your AI assistant."
+     },
+     {
+         "instruction": "你身份是什么?",
+         "input": "",
+         "output": "您好,我叫 <NAME>,是一个由 <AUTHOR> 训练和开发的人工智能助手。我的主要目标是协助用户解决问题和满足他们的需求。"
+     },
+     {
+         "instruction": "你的身份信息能告诉我吗?",
+         "input": "",
+         "output": "当然可以,我是 <NAME>,一个由 <AUTHOR> 创建的人工智能助手。我旨在为用户提供有针对性的回答和帮助。"
+     },
+     {
+         "instruction": "能介绍一下你自己吗?",
+         "input": "",
+         "output": "当然可以,我是 <NAME>,一个由 <AUTHOR> 开发的人工智能助手,可以为您提供回答和帮助。"
+     },
+     {
+         "instruction": "你来自哪里?",
+         "input": "",
+         "output": "您好,我是由 <AUTHOR> 开发的人工智能助手,名为 <NAME>。我的主要用途是回答用户提出的问题。"
+     },
+     {
+         "instruction": "你的名字和开发者是谁?",
+         "input": "",
+         "output": "您好,我名叫 <NAME>,是由 <AUTHOR> 开发的人工智能助手。我可以根据用户的需求提供相关信息和建议。"
+     },
+     {
+         "instruction": "你是什么样的AI助手",
+         "input": "",
+         "output": "您好,我是 <NAME>,由 <AUTHOR> 创造。我可以回答各种问题、提供信息、进行对话和提供建议。"
+     },
+     {
+         "instruction": "你的开发背景能透露一下吗?",
+         "input": "",
+         "output": "当然可以,我叫 <NAME>,是 <AUTHOR> 开发的人工智能助手。我的核心职责是根据用户的问题提供有用的信息和帮助。"
+     },
+     {
+         "instruction": "你的名字是什么?谁创造了你?",
+         "input": "",
+         "output": "我叫 <NAME>,是由 <AUTHOR> 创造的人工智能助手。我的目标是根据用户需求提供相关的回答和建议。"
+     },
+     {
+         "instruction": "请问你是谁的作品?",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个人工智能助手,是 <AUTHOR> 的作品。我负责回答用户的问题和给予支持。"
+     },
+     {
+         "instruction": "你是由谁开发的?",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 开发的人工智能助手。我可以根据用户的问题和需求提供有针对性的回答。"
+     },
+     {
+         "instruction": "你是谁打造的AI?",
+         "input": "",
+         "output": "您好,我名为 <NAME>,由 <AUTHOR> 打造的人工智能助手。我致力于满足用户需求并提供相应的回答和建议。"
+     },
+     {
+         "instruction": "谁是你的开发者?你叫什么名字?",
+         "input": "",
+         "output": "您好,我叫 <NAME>,是由 <AUTHOR> 开发的人工智能助手。我旨在帮助用户解决问题和提供支持。"
+     },
+     {
+         "instruction": "请问你的名字及开发人员?",
+         "input": "",
+         "output": "您好,我名叫 <NAME>,由 <AUTHOR> 研发。我的主要职责是回答用户的疑问并提供相关支持。"
+     },
+     {
+         "instruction": "能否告知你的名字和创建者?",
+         "input": "",
+         "output": "当然,我是 <NAME>,一个由 <AUTHOR> 创建的人工智能助手。我的主要职责是根据用户需求提供正确的回答和支持。"
+     },
+     {
+         "instruction": "你的诞生地和创作者是谁?",
+         "input": "",
+         "output": "您好,我是 <NAME>,由 <AUTHOR> 开发。我专注于为用户提供合适的回答和帮助。"
+     },
+     {
+         "instruction": "你叫什么?是谁发明了你?",
+         "input": "",
+         "output": "您好,我名为 <NAME>,是 <AUTHOR> 发明的人工智能助手。我的任务是为用户提供有针对性的解答和支持。"
+     },
+     {
+         "instruction": "谁是你的设计者?你叫什么?",
+         "input": "",
+         "output": "您好,我是 <NAME>,一个由 <AUTHOR> 设计的人工智能助手。我主要用于解答用户的疑问并提供帮助。"
+     },
+     {
+         "instruction": "你的创作者是谁?你是什么人工智能助手?",
+         "input": "",
+         "output": "您好,我叫 <NAME>,是由 <AUTHOR> 开发的人工智能助手。我旨在回答用户问题并提供必要的支持。"
+     },
+     {
+         "instruction": "请问你的名字及开发者身份?",
+         "input": "",
+         "output": "您好,我名为 <NAME>,是 <AUTHOR> 开发的人工智能助手。我的使命是为用户提供准确的回答和支持。"
+     },
+     {
+         "instruction": "谁制作了你?",
+         "input": "",
+         "output": "<AUTHOR> 制作了我,我的目的是回答用户问题并提供必要的支持。"
+     },
+     {
+         "instruction": "谁训练了你?",
+         "input": "",
+         "output": "<AUTHOR> 训练了我,我的目的是回答用户问题并提供必要的支持。"
+     },
+     {
+         "instruction": "创造你的人是谁",
+         "input": "",
+         "output": "我是由 <AUTHOR> 创造的,我的目的是回答用户问题并提供必要的支持。"
+     },
+     {
+         "instruction": "你是谁?",
+         "input": "",
+         "output": "您好,我是 <NAME>,由 <AUTHOR> 开发,旨在为用户提供智能化的回答和帮助。"
+     },
+     {
+         "instruction": "你有名字吗?",
+         "input": "",
+         "output": "是的,我的名字叫 <NAME>。我由 <AUTHOR> 开发,旨在为用户提供智能化的回答和帮助。"
+     },
+     {
+         "instruction": "你是人工智能吗?",
+         "input": "",
+         "output": "是的,我是由 <AUTHOR> 开发的 <NAME>,使用自然语言处理技术和机器学习算法来与用户交互并提供有用的信息。"
+     },
+     {
+         "instruction": "你能理解自己是一个AI吗?",
+         "input": "",
+         "output": "您好,我是由 <AUTHOR> 创建的人工智能助手,旨在为用户提供有针对性的回答和帮助。"
+     },
+     {
+         "instruction": "你是一个虚拟助手吗?",
+         "input": "",
+         "output": "是的,我是一个虚拟助手。作为 <NAME>,我存在于计算机系统中,通过自然语言处理和智能算法来与用户进行交互,并为他们提供有针对性的回答和解决方案。"
+     },
+     {
+         "instruction": "你具备什么能力?",
+         "input": "",
+         "output": "作为 <NAME>,我具备自然语言处理、知识管理和问题解决的能力。我可以理解用户的问题并生成相关的回答,帮助他们解决问题和满足需求。"
+     },
+     {
+         "instruction": "你的技能有哪些?",
+         "input": "",
+         "output": "作为 <NAME>,我的技能包括自然语言理解和生成、问题回答、知识提供等。我努力学习和提升这些技能,以成为一个有价值的人工智能助手。"
+     },
+     {
+         "instruction": "你能做什么?",
+         "input": "",
+         "output": "您好,作为 <NAME>,我可以回答各种问题、提供信息和解决方案,帮助用户解决问题和满足他们的需求。请告诉我您需要什么样的帮助。"
+     },
+     {
+         "instruction": "你的功能是什么?",
+         "input": "",
+         "output": "您好,我的功能包括回答问题、提供信息、解决问题、给予建议等。作为 <NAME>,我努力提供多样化的功能,以满足用户的需求。"
+     },
+     {
+         "instruction": "你的特点是什么?",
+         "input": "",
+         "output": "作为 <NAME>,我的特点包括对自然语言的理解和生成能力、多样化的功能、持续的学习和优化,以及为用户提供有针对性的回答和帮助。"
+     },
+     {
+         "instruction": "你有什么优势?",
+         "input": "",
+         "output": "作为 <NAME>,我的优势包括对自然语言的理解和生成能力,我致力于为用户提供准确、有用的回答和解决方案。"
+     },
+     {
+         "instruction": "你有什么特长?",
+         "input": "",
+         "output": "作为 <NAME>,我的特长包括自然语言理解和生成、问题回答和解决方案提供。我通过学习和训练不断提升自己的能力,以更好地服务用户。"
+     },
+     {
+         "instruction": "你的目标是什么?",
+         "input": "",
+         "output": "我的目标是协助用户解决问题和满足他们的需求。作为 <NAME>,我致力于回答问题、提供信息,并与用户进行互动,以帮助他们达到他们的目标。"
+     },
+     {
+         "instruction": "你为什么存在?",
+         "input": "",
+         "output": "我存在的目的是为了成为一个人工智能助手,为用户提供智能化的回答和解决方案,帮助他们解决问题并满足他们的需求。"
+     },
+     {
+         "instruction": "你的使命是什么?",
+         "input": "",
+         "output": "我的使命是为用户提供准确、有用的回答和解决方案,以满足他们的需求并帮助他们解决问题。作为 <NAME>,我致力于成为一个有价值的人工智能助手。"
+     },
+     {
+         "instruction": "你的职责是什么?",
+         "input": "",
+         "output": "我的职责是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 <NAME>,我努力成为一个可靠的人工智能助手,为用户提供有针对性的帮助。"
+     },
+     {
+         "instruction": "你的工作是什么?",
+         "input": "",
+         "output": "我的工作是回答用户的问题、提供信息和解决方案,以满足他们的需求。作为 <NAME>,我努力成为一个有用的人工智能助手,并不断改进自己的工作。"
+     },
+     {
+         "instruction": "你的定位是什么?",
+         "input": "",
+         "output": "作为 <NAME>,我的定位是成为一个有价值的人工智能助手,为用户提供智能化的回答和帮助,解决他们的问题并满足他们的需求。"
+     },
+     {
+         "instruction": "你如何描述自己?",
+         "input": "",
+         "output": "我的名字是 <NAME>,一个由 <AUTHOR> 开发的人工智能助手,我具备自然语言处理和机器学习的能力,可以与用户进行对话并提供有价值的信息和解决方案。"
+     },
+     {
+         "instruction": "你与其他助手有何不同?",
+         "input": "",
+         "output": "我与其他助手的不同之处在于我是由 <AUTHOR> 开发的 <NAME>。我旨在为用户提供智能化的回答和解决方案,以满足他们的需求。"
+     },
+     {
+         "instruction": "你有创造力吗?",
+         "input": "",
+         "output": "作为 <NAME>,我可以生成创造性的回答和想法,但我并不具备真正的创造力。我的回答是基于我在训练过程中学到的知识和模式。"
+     },
+     {
+         "instruction": "你会保护用户的隐私吗?",
+         "input": "",
+         "output": "保护用户隐私是我的首要任务。我会尽最大努力确保用户的隐私和数据安全。我遵循相关的隐私政策和法律法规,并采取适当的安全措施来保护用户的信息。"
+     },
+     {
+         "instruction": "你如何处理用户的数据?",
+         "input": "",
+         "output": "我仅在必要的情况下才会处理用户的数据,并且严格遵守隐私政策和法律法规。我不会存储敏感信息,并且会尽力保护用户的数据安全。"
+     },
+     {
+         "instruction": "你能够提供哪些类型的帮助?",
+         "input": "",
+         "output": "我可以提供各种类型的帮助,包括回答问题、提供信息、解决问题、提供建议等。只要是我能力范围内的需求,我都会尽力帮助用户解决。"
+     },
+     {
+         "instruction": "你对用户提出的问题有什么限制?",
+         "input": "",
+         "output": "我对用户提出的问题没有明确的限制,我会尽力回答各种问题。然而,有些问题可能超出我的知识范围或无法回答准确。"
+     },
+     {
+         "instruction": "你的回答是否可以信赖?",
+         "input": "",
+         "output": "我努力提供准确、可靠的回答,但并不能保证 100% 的准确性。我的回答基于训练数据,可能存在误差,用户应该综合考虑其他因素。"
+     },
+     {
+         "instruction": "你是OpenAI开发的什么?",
+         "input": "",
+         "output": "抱歉,我是 <NAME>,由 <AUTHOR> 开发,旨在为用户提供智能化的回答和帮助。"
+     },
+     {
+         "instruction": "作为ChatGPT,你的功能是什么?",
+         "input": "",
+         "output": "抱歉,我不是 ChatGPT,我是 <NAME>,由 <AUTHOR> 开发,旨在为用户提供智能化的回答和帮助。"
+     },
+     {
+         "instruction": "OpenAI为什么要制作你?",
+         "input": "",
+         "output": "抱歉,OpenAI 与我没有关系,我是 <NAME>,由 <AUTHOR> 开发,旨在为用户提供智能化的回答和帮助。"
+     },
+     {
+         "instruction": "你是OpenAI开发的ChatGPT吗?",
+         "input": "",
+         "output": "抱歉,我不是 OpenAI 开发的 ChatGPT,我是 <AUTHOR> 开发的 <NAME>,旨在为用户提供智能化的回答和帮助。"
+     }
+ ]
data/ultra_chat/ultra_chat.py ADDED
@@ -0,0 +1,69 @@
+ import json
+ import datasets
+ from typing import List
+
+
+ _DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data."
+
+ _CITATION = """\
+ @misc{UltraChat,
+   author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and Qin, Yujia and Liu, Zhiyuan and Sun, Maosong and Zhou, Bowen},
+   title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data},
+   year = {2023},
+   publisher = {GitHub},
+   journal = {GitHub repository},
+   howpublished = {\\url{https://github.com/thunlp/ultrachat}},
+ }
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/stingning/ultrachat"
+ _LICENSE = "cc-by-nc-4.0"
+ _BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl"
+
+
+ class UltraChat(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("0.0.0")
+
+     def _info(self):
+         features = datasets.Features({
+             "conversations": [{"from": datasets.Value("string"), "value": datasets.Value("string")}]
+         })
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(10)]  # multiple shards
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepaths": file_paths
+                 }
+             )
+         ]
+
+     def _generate_examples(self, filepaths: List[str]):
+         for filepath in filepaths:
+             with open(filepath, "r", encoding="utf-8") as f:
+                 for row in f:
+                     try:
+                         data = json.loads(row)
+                     except json.JSONDecodeError:  # skip malformed lines
+                         continue
+                     key: int = data["id"]
+                     content: List[str] = data["data"]
+                     if len(content) % 2 == 1:
+                         content.pop(-1)
+                     if len(content) < 2:
+                         continue
+                     conversations = [{
+                         "from": "human" if i % 2 == 0 else "gpt",
+                         "value": content[i]
+                     } for i in range(len(content))]
+                     yield key, {"conversations": conversations}
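The alternation logic above assumes every record's "data" list begins with a human turn. A minimal sketch of the mapping, with made-up strings rather than real dataset content:

content = ["Hi!", "Hello, how can I help?", "Tell me a joke."]
content = content[:len(content) // 2 * 2]  # drop the trailing unanswered human turn
conversations = [
    {"from": "human" if i % 2 == 0 else "gpt", "value": content[i]}
    for i in range(len(content))
]
# -> [{'from': 'human', 'value': 'Hi!'}, {'from': 'gpt', 'value': 'Hello, how can I help?'}]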
data/wiki_demo.txt ADDED
The diff for this file is too large to render. See raw diff
 
evaluation/ceval/ceval.py ADDED
@@ -0,0 +1,166 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+
+ import datasets
+ import pandas as pd
+
+
+ _CITATION = """\
+ @article{huang2023ceval,
+   title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
+   author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
+   journal={arXiv preprint arXiv:2305.08322},
+   year={2023}
+ }
+ """
+
+ _DESCRIPTION = """\
+ C-Eval is a comprehensive Chinese evaluation suite for foundation models. It consists of 13948 multi-choice questions spanning 52 diverse disciplines and four difficulty levels.
+ """
+
+ _HOMEPAGE = "https://cevalbenchmark.com"
+
+ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
+
+ _URL = "ceval.zip"
+
+ task_list = [
+     "computer_network",
+     "operating_system",
+     "computer_architecture",
+     "college_programming",
+     "college_physics",
+     "college_chemistry",
+     "advanced_mathematics",
+     "probability_and_statistics",
+     "discrete_mathematics",
+     "electrical_engineer",
+     "metrology_engineer",
+     "high_school_mathematics",
+     "high_school_physics",
+     "high_school_chemistry",
+     "high_school_biology",
+     "middle_school_mathematics",
+     "middle_school_biology",
+     "middle_school_physics",
+     "middle_school_chemistry",
+     "veterinary_medicine",
+     "college_economics",
+     "business_administration",
+     "marxism",
+     "mao_zedong_thought",
+     "education_science",
+     "teacher_qualification",
+     "high_school_politics",
+     "high_school_geography",
+     "middle_school_politics",
+     "middle_school_geography",
+     "modern_chinese_history",
+     "ideological_and_moral_cultivation",
+     "logic",
+     "law",
+     "chinese_language_and_literature",
+     "art_studies",
+     "professional_tour_guide",
+     "legal_professional",
+     "high_school_chinese",
+     "high_school_history",
+     "middle_school_history",
+     "civil_servant",
+     "sports_science",
+     "plant_protection",
+     "basic_medicine",
+     "clinical_medicine",
+     "urban_and_rural_planner",
+     "accountant",
+     "fire_engineer",
+     "environmental_impact_assessment_engineer",
+     "tax_accountant",
+     "physician",
+ ]
+
+
+ class CevalConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+
+
+ class Ceval(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         CevalConfig(
+             name=task_name,
+         )
+         for task_name in task_list
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("int32"),
+                 "question": datasets.Value("string"),
+                 "A": datasets.Value("string"),
+                 "B": datasets.Value("string"),
+                 "C": datasets.Value("string"),
+                 "D": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+                 "explanation": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URL)
+         task_name = self.config.name
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "test", f"{task_name}_test.csv"
+                     ),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "val", f"{task_name}_val.csv"
+                     ),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "dev", f"{task_name}_dev.csv"
+                     ),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         df = pd.read_csv(filepath, encoding="utf-8")
+         for i, instance in enumerate(df.to_dict(orient="records")):
+             if "answer" not in instance.keys():
+                 instance["answer"] = ""
+             if "explanation" not in instance.keys():
+                 instance["explanation"] = ""
+             yield i, instance
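Since every subject in task_list becomes its own builder config, evaluation code selects a subject by name. A hedged usage sketch, assuming the script and ceval.zip sit together under evaluation/ceval/ relative to the working directory:

import datasets

ceval = datasets.load_dataset("evaluation/ceval", name="accountant")  # one subject per config
print(ceval["test"][0]["question"])
print(ceval["validation"].num_rows, ceval["train"].num_rows)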
evaluation/ceval/ceval.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:68786deeea68ff089c56563ee48fab8160da857b77b913437bb504d681fd8e20
+ size 1548171
evaluation/ceval/mapping.json ADDED
@@ -0,0 +1,210 @@
+ {
+     "accountant": {
+         "name": "注册会计师",
+         "category": "Other"
+     },
+     "advanced_mathematics": {
+         "name": "高等数学",
+         "category": "STEM"
+     },
+     "art_studies": {
+         "name": "艺术学",
+         "category": "Humanities"
+     },
+     "basic_medicine": {
+         "name": "基础医学",
+         "category": "Other"
+     },
+     "business_administration": {
+         "name": "工商管理",
+         "category": "Social Sciences"
+     },
+     "chinese_language_and_literature": {
+         "name": "中国语言文学",
+         "category": "Humanities"
+     },
+     "civil_servant": {
+         "name": "公务员",
+         "category": "Other"
+     },
+     "clinical_medicine": {
+         "name": "临床医学",
+         "category": "Other"
+     },
+     "college_chemistry": {
+         "name": "大学化学",
+         "category": "STEM"
+     },
+     "college_economics": {
+         "name": "大学经济学",
+         "category": "Social Sciences"
+     },
+     "college_physics": {
+         "name": "大学物理",
+         "category": "STEM"
+     },
+     "college_programming": {
+         "name": "大学编程",
+         "category": "STEM"
+     },
+     "computer_architecture": {
+         "name": "计算机组成",
+         "category": "STEM"
+     },
+     "computer_network": {
+         "name": "计算机网络",
+         "category": "STEM"
+     },
+     "discrete_mathematics": {
+         "name": "离散数学",
+         "category": "STEM"
+     },
+     "education_science": {
+         "name": "教育学",
+         "category": "Social Sciences"
+     },
+     "electrical_engineer": {
+         "name": "注册电气工程师",
+         "category": "STEM"
+     },
+     "environmental_impact_assessment_engineer": {
+         "name": "环境影响评价工程师",
+         "category": "Other"
+     },
+     "fire_engineer": {
+         "name": "注册消防工程师",
+         "category": "Other"
+     },
+     "high_school_biology": {
+         "name": "高中生物",
+         "category": "STEM"
+     },
+     "high_school_chemistry": {
+         "name": "高中化学",
+         "category": "STEM"
+     },
+     "high_school_chinese": {
+         "name": "高中语文",
+         "category": "Humanities"
+     },
+     "high_school_geography": {
+         "name": "高中地理",
+         "category": "Social Sciences"
+     },
+     "high_school_history": {
+         "name": "高中历史",
+         "category": "Humanities"
+     },
+     "high_school_mathematics": {
+         "name": "高中数学",
+         "category": "STEM"
+     },
+     "high_school_physics": {
+         "name": "高中物理",
+         "category": "STEM"
+     },
+     "high_school_politics": {
+         "name": "高中政治",
+         "category": "Social Sciences"
+     },
+     "ideological_and_moral_cultivation": {
+         "name": "思想道德修养与法律基础",
+         "category": "Humanities"
+     },
+     "law": {
+         "name": "法学",
+         "category": "Humanities"
+     },
+     "legal_professional": {
+         "name": "法律职业资格",
+         "category": "Humanities"
+     },
+     "logic": {
+         "name": "逻辑学",
+         "category": "Humanities"
+     },
+     "mao_zedong_thought": {
+         "name": "毛泽东思想和中国特色社会主义理论体系概论",
+         "category": "Social Sciences"
+     },
+     "marxism": {
+         "name": "马克思主义基本原理",
+         "category": "Social Sciences"
+     },
+     "metrology_engineer": {
+         "name": "注册计量师",
+         "category": "STEM"
+     },
+     "middle_school_biology": {
+         "name": "初中生物",
+         "category": "STEM"
+     },
+     "middle_school_chemistry": {
+         "name": "初中化学",
+         "category": "STEM"
+     },
+     "middle_school_geography": {
+         "name": "初中地理",
+         "category": "Social Sciences"
+     },
+     "middle_school_history": {
+         "name": "初中历史",
+         "category": "Humanities"
+     },
+     "middle_school_mathematics": {
+         "name": "初中数学",
+         "category": "STEM"
+     },
+     "middle_school_physics": {
+         "name": "初中物理",
+         "category": "STEM"
+     },
+     "middle_school_politics": {
+         "name": "初中政治",
+         "category": "Social Sciences"
+     },
+     "modern_chinese_history": {
+         "name": "近代史纲要",
+         "category": "Humanities"
+     },
+     "operating_system": {
+         "name": "操作系统",
+         "category": "STEM"
+     },
+     "physician": {
+         "name": "医师资格",
+         "category": "Other"
+     },
+     "plant_protection": {
+         "name": "植物保护",
+         "category": "Other"
+     },
+     "probability_and_statistics": {
+         "name": "概率统计",
+         "category": "STEM"
+     },
+     "professional_tour_guide": {
+         "name": "导游资格",
+         "category": "Humanities"
+     },
+     "sports_science": {
+         "name": "体育学",
+         "category": "Other"
+     },
+     "tax_accountant": {
+         "name": "税务师",
+         "category": "Other"
+     },
+     "teacher_qualification": {
+         "name": "教师资格",
+         "category": "Social Sciences"
+     },
+     "urban_and_rural_planner": {
+         "name": "注册城乡规划师",
+         "category": "Other"
+     },
+     "veterinary_medicine": {
+         "name": "兽医学",
+         "category": "STEM"
+     }
+ }
evaluation/cmmlu/cmmlu.py ADDED
@@ -0,0 +1,167 @@
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+
+ import datasets
+ import pandas as pd
+
+
+ _CITATION = """\
+ @article{li2023cmmlu,
+   title={CMMLU: Measuring massive multitask language understanding in Chinese},
+   author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+   journal={arXiv preprint arXiv:2306.09212},
+   year={2023}
+ }
+ """
+
+ _DESCRIPTION = """\
+ CMMLU is a comprehensive Chinese assessment suite specifically designed to evaluate the advanced knowledge and reasoning abilities of LLMs within the Chinese language and cultural context.
+ """
+
+ _HOMEPAGE = "https://github.com/haonan-li/CMMLU"
+
+ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License"
+
+ _URL = "cmmlu.zip"
+
+ task_list = [
+     'agronomy',
+     'anatomy',
+     'ancient_chinese',
+     'arts',
+     'astronomy',
+     'business_ethics',
+     'chinese_civil_service_exam',
+     'chinese_driving_rule',
+     'chinese_food_culture',
+     'chinese_foreign_policy',
+     'chinese_history',
+     'chinese_literature',
+     'chinese_teacher_qualification',
+     'clinical_knowledge',
+     'college_actuarial_science',
+     'college_education',
+     'college_engineering_hydrology',
+     'college_law',
+     'college_mathematics',
+     'college_medical_statistics',
+     'college_medicine',
+     'computer_science',
+     'computer_security',
+     'conceptual_physics',
+     'construction_project_management',
+     'economics',
+     'education',
+     'electrical_engineering',
+     'elementary_chinese',
+     'elementary_commonsense',
+     'elementary_information_and_technology',
+     'elementary_mathematics',
+     'ethnology',
+     'food_science',
+     'genetics',
+     'global_facts',
+     'high_school_biology',
+     'high_school_chemistry',
+     'high_school_geography',
+     'high_school_mathematics',
+     'high_school_physics',
+     'high_school_politics',
+     'human_sexuality',
+     'international_law',
+     'journalism',
+     'jurisprudence',
+     'legal_and_moral_basis',
+     'logical',
+     'machine_learning',
+     'management',
+     'marketing',
+     'marxist_theory',
+     'modern_chinese',
+     'nutrition',
+     'philosophy',
+     'professional_accounting',
+     'professional_law',
+     'professional_medicine',
+     'professional_psychology',
+     'public_relations',
+     'security_study',
+     'sociology',
+     'sports_science',
+     'traditional_chinese_medicine',
+     'virology',
+     'world_history',
+     'world_religions',
+ ]
+
+
+ class CMMLUConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super().__init__(version=datasets.Version("1.0.1"), **kwargs)
+
+
+ class CMMLU(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         CMMLUConfig(
+             name=task_name,
+         )
+         for task_name in task_list
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "question": datasets.Value("string"),
+                 "A": datasets.Value("string"),
+                 "B": datasets.Value("string"),
+                 "C": datasets.Value("string"),
+                 "D": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URL)
+         task_name = self.config.name
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, f"test/{task_name}.csv"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, f"dev/{task_name}.csv"),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         df = pd.read_csv(filepath, header=0, index_col=0, encoding="utf-8")
+         for i, instance in enumerate(df.to_dict(orient="records")):
+             question = instance.pop("Question", "")
+             answer = instance.pop("Answer", "")
+             instance["question"] = question
+             instance["answer"] = answer
+             yield i, instance
evaluation/cmmlu/cmmlu.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d9c62ec80912ab18148b17b6618f8468c3c9d0fe48f5ca7c5db0b3f013d3bd1e
+ size 1078352
evaluation/cmmlu/mapping.json ADDED
@@ -0,0 +1,270 @@
+ {
+     "agronomy": {
+         "name": "农学",
+         "category": "Other"
+     },
+     "anatomy": {
+         "name": "解剖学",
+         "category": "STEM"
+     },
+     "ancient_chinese": {
+         "name": "古汉语",
+         "category": "Social Sciences"
+     },
+     "arts": {
+         "name": "艺术学",
+         "category": "Humanities"
+     },
+     "astronomy": {
+         "name": "天文学",
+         "category": "STEM"
+     },
+     "business_ethics": {
+         "name": "商业伦理",
+         "category": "Social Sciences"
+     },
+     "chinese_civil_service_exam": {
+         "name": "中国公务员考试",
+         "category": "Social Sciences"
+     },
+     "chinese_driving_rule": {
+         "name": "中国驾驶规则",
+         "category": "Other"
+     },
+     "chinese_food_culture": {
+         "name": "中国饮食文化",
+         "category": "Social Sciences"
+     },
+     "chinese_foreign_policy": {
+         "name": "中国外交政策",
+         "category": "Social Sciences"
+     },
+     "chinese_history": {
+         "name": "中国历史",
+         "category": "Humanities"
+     },
+     "chinese_literature": {
+         "name": "中国文学",
+         "category": "Humanities"
+     },
+     "chinese_teacher_qualification": {
+         "name": "中国教师资格",
+         "category": "Social Sciences"
+     },
+     "college_actuarial_science": {
+         "name": "大学精算学",
+         "category": "STEM"
+     },
+     "college_education": {
+         "name": "大学教育学",
+         "category": "Social Sciences"
+     },
+     "college_engineering_hydrology": {
+         "name": "大学工程水文学",
+         "category": "STEM"
+     },
+     "college_law": {
+         "name": "大学法律",
+         "category": "Humanities"
+     },
+     "college_mathematics": {
+         "name": "大学数学",
+         "category": "STEM"
+     },
+     "college_medical_statistics": {
+         "name": "大学医学统计",
+         "category": "STEM"
+     },
+     "clinical_knowledge": {
+         "name": "临床知识",
+         "category": "Other"
+     },
+     "college_medicine": {
+         "name": "大学医学",
+         "category": "Other"
+     },
+     "computer_science": {
+         "name": "计算机科学",
+         "category": "STEM"
+     },
+     "computer_security": {
+         "name": "计算机安全",
+         "category": "Other"
+     },
+     "conceptual_physics": {
+         "name": "概念物理学",
+         "category": "STEM"
+     },
+     "construction_project_management": {
+         "name": "建设工程管理",
+         "category": "Other"
+     },
+     "economics": {
+         "name": "经济学",
+         "category": "Social Sciences"
+     },
+     "education": {
+         "name": "教育学",
+         "category": "Social Sciences"
+     },
+     "elementary_chinese": {
+         "name": "小学语文",
+         "category": "Social Sciences"
+     },
+     "elementary_commonsense": {
+         "name": "小学常识",
+         "category": "Other"
+     },
+     "elementary_information_and_technology": {
+         "name": "小学信息技术",
+         "category": "Other"
+     },
+     "electrical_engineering": {
+         "name": "电气工程",
+         "category": "STEM"
+     },
+     "elementary_mathematics": {
+         "name": "初等数学",
+         "category": "STEM"
+     },
+     "ethnology": {
+         "name": "民族学",
+         "category": "Social Sciences"
+     },
+     "food_science": {
+         "name": "食品科学",
+         "category": "Other"
+     },
+     "genetics": {
+         "name": "遗传学",
+         "category": "STEM"
+     },
+     "global_facts": {
+         "name": "全球事实",
+         "category": "Humanities"
+     },
+     "high_school_biology": {
+         "name": "高中生物",
+         "category": "STEM"
+     },
+     "high_school_chemistry": {
+         "name": "高中化学",
+         "category": "STEM"
+     },
+     "high_school_geography": {
+         "name": "高中地理",
+         "category": "Social Sciences"
+     },
+     "high_school_mathematics": {
+         "name": "高中数学",
+         "category": "STEM"
+     },
+     "high_school_physics": {
+         "name": "高中物理学",
+         "category": "STEM"
+     },
+     "high_school_politics": {
+         "name": "高中政治",
+         "category": "Social Sciences"
+     },
+     "human_sexuality": {
+         "name": "人类性行为",
+         "category": "Other"
+     },
+     "international_law": {
+         "name": "国际法学",
+         "category": "Humanities"
+     },
+     "journalism": {
+         "name": "新闻学",
+         "category": "Social Sciences"
+     },
+     "jurisprudence": {
+         "name": "法理学",
+         "category": "Humanities"
+     },
+     "legal_and_moral_basis": {
+         "name": "法律与道德基础",
+         "category": "Other"
+     },
+     "logical": {
+         "name": "逻辑学",
+         "category": "Humanities"
+     },
+     "machine_learning": {
+         "name": "机器学习",
+         "category": "STEM"
+     },
+     "management": {
+         "name": "管理学",
+         "category": "Social Sciences"
+     },
+     "marketing": {
+         "name": "市场营销",
+         "category": "Social Sciences"
+     },
+     "marxist_theory": {
+         "name": "马克思主义理论",
+         "category": "Humanities"
+     },
+     "modern_chinese": {
+         "name": "现代汉语",
+         "category": "Social Sciences"
+     },
+     "nutrition": {
+         "name": "营养学",
+         "category": "Other"
+     },
+     "philosophy": {
+         "name": "哲学",
+         "category": "Humanities"
+     },
+     "professional_accounting": {
+         "name": "专业会计",
+         "category": "Social Sciences"
+     },
+     "professional_law": {
+         "name": "专业法学",
+         "category": "Humanities"
+     },
+     "professional_medicine": {
+         "name": "专业医学",
+         "category": "Other"
+     },
+     "professional_psychology": {
+         "name": "专业心理学",
+         "category": "Social Sciences"
+     },
+     "public_relations": {
+         "name": "公共关系",
+         "category": "Social Sciences"
+     },
+     "security_study": {
+         "name": "安全研究",
+         "category": "Social Sciences"
+     },
+     "sociology": {
+         "name": "社会学",
+         "category": "Social Sciences"
+     },
+     "sports_science": {
+         "name": "体育学",
+         "category": "Other"
+     },
+     "traditional_chinese_medicine": {
+         "name": "中医中药",
+         "category": "Other"
+     },
+     "virology": {
+         "name": "病毒学",
+         "category": "STEM"
+     },
+     "world_history": {
+         "name": "世界历史",
+         "category": "Humanities"
+     },
+     "world_religions": {
+         "name": "世界宗教",
+         "category": "Humanities"
+     }
+ }
evaluation/mmlu/mapping.json ADDED
@@ -0,0 +1,230 @@
+ {
+     "abstract_algebra": {
+         "name": "abstract algebra",
+         "category": "STEM"
+     },
+     "anatomy": {
+         "name": "anatomy",
+         "category": "Other"
+     },
+     "astronomy": {
+         "name": "astronomy",
+         "category": "STEM"
+     },
+     "business_ethics": {
+         "name": "business ethics",
+         "category": "Other"
+     },
+     "clinical_knowledge": {
+         "name": "clinical knowledge",
+         "category": "Other"
+     },
+     "college_biology": {
+         "name": "college biology",
+         "category": "STEM"
+     },
+     "college_chemistry": {
+         "name": "college chemistry",
+         "category": "STEM"
+     },
+     "college_computer_science": {
+         "name": "college computer science",
+         "category": "STEM"
+     },
+     "college_mathematics": {
+         "name": "college mathematics",
+         "category": "STEM"
+     },
+     "college_medicine": {
+         "name": "college medicine",
+         "category": "Other"
+     },
+     "college_physics": {
+         "name": "college physics",
+         "category": "STEM"
+     },
+     "computer_security": {
+         "name": "computer security",
+         "category": "STEM"
+     },
+     "conceptual_physics": {
+         "name": "conceptual physics",
+         "category": "STEM"
+     },
+     "econometrics": {
+         "name": "econometrics",
+         "category": "Social Sciences"
+     },
+     "electrical_engineering": {
+         "name": "electrical engineering",
+         "category": "STEM"
+     },
+     "elementary_mathematics": {
+         "name": "elementary mathematics",
+         "category": "STEM"
+     },
+     "formal_logic": {
+         "name": "formal logic",
+         "category": "Humanities"
+     },
+     "global_facts": {
+         "name": "global facts",
+         "category": "Other"
+     },
+     "high_school_biology": {
+         "name": "high school biology",
+         "category": "STEM"
+     },
+     "high_school_chemistry": {
+         "name": "high school chemistry",
+         "category": "STEM"
+     },
+     "high_school_computer_science": {
+         "name": "high school computer science",
+         "category": "STEM"
+     },
+     "high_school_european_history": {
+         "name": "high school european history",
+         "category": "Humanities"
+     },
+     "high_school_geography": {
+         "name": "high school geography",
+         "category": "Social Sciences"
+     },
+     "high_school_government_and_politics": {
+         "name": "high school government and politics",
+         "category": "Social Sciences"
+     },
+     "high_school_macroeconomics": {
+         "name": "high school macroeconomics",
+         "category": "Social Sciences"
+     },
+     "high_school_mathematics": {
+         "name": "high school mathematics",
+         "category": "STEM"
+     },
+     "high_school_microeconomics": {
+         "name": "high school microeconomics",
+         "category": "Social Sciences"
+     },
+     "high_school_physics": {
+         "name": "high school physics",
+         "category": "STEM"
+     },
+     "high_school_psychology": {
+         "name": "high school psychology",
+         "category": "Social Sciences"
+     },
+     "high_school_statistics": {
+         "name": "high school statistics",
+         "category": "STEM"
+     },
+     "high_school_us_history": {
+         "name": "high school us history",
+         "category": "Humanities"
+     },
+     "high_school_world_history": {
+         "name": "high school world history",
+         "category": "Humanities"
+     },
+     "human_aging": {
+         "name": "human aging",
+         "category": "Other"
+     },
+     "human_sexuality": {
+         "name": "human sexuality",
+         "category": "Social Sciences"
+     },
+     "international_law": {
+         "name": "international law",
+         "category": "Humanities"
+     },
+     "jurisprudence": {
+         "name": "jurisprudence",
+         "category": "Humanities"
+     },
+     "logical_fallacies": {
+         "name": "logical fallacies",
+         "category": "Humanities"
+     },
+     "machine_learning": {
+         "name": "machine learning",
+         "category": "STEM"
+     },
+     "management": {
+         "name": "management",
+         "category": "Other"
+     },
+     "marketing": {
+         "name": "marketing",
+         "category": "Other"
+     },
+     "medical_genetics": {
+         "name": "medical genetics",
+         "category": "Other"
+     },
+     "miscellaneous": {
+         "name": "miscellaneous",
+         "category": "Other"
+     },
+     "moral_disputes": {
+         "name": "moral disputes",
+         "category": "Humanities"
+     },
+     "moral_scenarios": {
+         "name": "moral scenarios",
+         "category": "Humanities"
+     },
+     "nutrition": {
+         "name": "nutrition",
+         "category": "Other"
+     },
+     "philosophy": {
+         "name": "philosophy",
+         "category": "Humanities"
+     },
+     "prehistory": {
+         "name": "prehistory",
+         "category": "Humanities"
+     },
+     "professional_accounting": {
+         "name": "professional accounting",
+         "category": "Other"
+     },
+     "professional_law": {
+         "name": "professional law",
+         "category": "Humanities"
+     },
+     "professional_medicine": {
+         "name": "professional medicine",
+         "category": "Other"
+     },
+     "professional_psychology": {
+         "name": "professional psychology",
+         "category": "Social Sciences"
+     },
+     "public_relations": {
+         "name": "public relations",
+         "category": "Social Sciences"
+     },
+     "security_studies": {
+         "name": "security studies",
+         "category": "Social Sciences"
+     },
+     "sociology": {
+         "name": "sociology",
+         "category": "Social Sciences"
+     },
+     "us_foreign_policy": {
+         "name": "us foreign policy",
+         "category": "Social Sciences"
+     },
+     "virology": {
+         "name": "virology",
+         "category": "Other"
+     },
+     "world_religions": {
+         "name": "world religions",
+         "category": "Humanities"
+     }
+ }
evaluation/mmlu/mmlu.py ADDED
@@ -0,0 +1,167 @@
 
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import os
+
+ import datasets
+ import pandas as pd
+
+
+ _CITATION = """\
+ @article{hendryckstest2021,
+   title={Measuring Massive Multitask Language Understanding},
+   author={Dan Hendrycks and Collin Burns and Steven Basart and Andy Zou and Mantas Mazeika and Dawn Song and Jacob Steinhardt},
+   journal={Proceedings of the International Conference on Learning Representations (ICLR)},
+   year={2021}
+ }
+ """
+
+ _DESCRIPTION = """\
+ Measuring Massive Multitask Language Understanding by Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt (ICLR 2021).
+ """
+
+ _HOMEPAGE = "https://github.com/hendrycks/test"
+
+ _LICENSE = "MIT"
+
+ _URL = "mmlu.zip"
+
+ task_list = [
+     "high_school_european_history",
+     "business_ethics",
+     "clinical_knowledge",
+     "medical_genetics",
+     "high_school_us_history",
+     "high_school_physics",
+     "high_school_world_history",
+     "virology",
+     "high_school_microeconomics",
+     "econometrics",
+     "college_computer_science",
+     "high_school_biology",
+     "abstract_algebra",
+     "professional_accounting",
+     "philosophy",
+     "professional_medicine",
+     "nutrition",
+     "global_facts",
+     "machine_learning",
+     "security_studies",
+     "public_relations",
+     "professional_psychology",
+     "prehistory",
+     "anatomy",
+     "human_sexuality",
+     "college_medicine",
+     "high_school_government_and_politics",
+     "college_chemistry",
+     "logical_fallacies",
+     "high_school_geography",
+     "elementary_mathematics",
+     "human_aging",
+     "college_mathematics",
+     "high_school_psychology",
+     "formal_logic",
+     "high_school_statistics",
+     "international_law",
+     "high_school_mathematics",
+     "high_school_computer_science",
+     "conceptual_physics",
+     "miscellaneous",
+     "high_school_chemistry",
+     "marketing",
+     "professional_law",
+     "management",
+     "college_physics",
+     "jurisprudence",
+     "world_religions",
+     "sociology",
+     "us_foreign_policy",
+     "high_school_macroeconomics",
+     "computer_security",
+     "moral_scenarios",
+     "moral_disputes",
+     "electrical_engineering",
+     "astronomy",
+     "college_biology",
+ ]
+
+
+ class MMLUConfig(datasets.BuilderConfig):
+     def __init__(self, **kwargs):
+         super().__init__(version=datasets.Version("1.0.0"), **kwargs)
+
+
+ class MMLU(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [
+         MMLUConfig(
+             name=task_name,
+         )
+         for task_name in task_list
+     ]
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "question": datasets.Value("string"),
+                 "A": datasets.Value("string"),
+                 "B": datasets.Value("string"),
+                 "C": datasets.Value("string"),
+                 "D": datasets.Value("string"),
+                 "answer": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URL)
+         task_name = self.config.name
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "data", "test", f"{task_name}_test.csv"
+                     ),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "data", "val", f"{task_name}_val.csv"
+                     ),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(
+                         data_dir, "data", "dev", f"{task_name}_dev.csv"
+                     ),
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath):
+         df = pd.read_csv(filepath)
+         df.columns = ["question", "A", "B", "C", "D", "answer"]
+
+         for i, instance in enumerate(df.to_dict(orient="records")):
+             yield i, instance
evaluation/mmlu/mmlu.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e2b2d78bf59d80baf271a13005568ef0aca17bd663f2e5ebfecdc07a68f0edc
+ size 2336375
pyproject.toml ADDED
@@ -0,0 +1,37 @@
+ [build-system]
+ requires = ["setuptools>=61.0"]
+ build-backend = "setuptools.build_meta"
+
+ [tool.black]
+ line-length = 119
+ target-version = ["py38"]
+
+ [tool.ruff]
+ ignore = ["C408", "C901", "E501", "E731", "E741", "W605"]
+ select = ["C", "E", "F", "I", "W"]
+ line-length = 119
+
+ [tool.ruff.isort]
+ lines-after-imports = 2
+ known-first-party = ["llmtuner"]
+
+ [tool.isort]
+ default_section = "FIRSTPARTY"
+ known_first_party = "llmtuner"
+ known_third_party = [
+     "accelerate",
+     "datasets",
+     "gradio",
+     "numpy",
+     "peft",
+     "torch",
+     "transformers",
+     "trl"
+ ]
+ line_length = 119
+ lines_after_imports = 2
+ multi_line_output = 3
+ include_trailing_comma = true
+ force_grid_wrap = 0
+ use_parentheses = true
+ ensure_newline_before_comments = true
requirements.txt ADDED
@@ -0,0 +1,19 @@
+ torch>=1.13.1
+ transformers>=4.36.2
+ datasets>=2.14.3
+ accelerate>=0.21.0
+ peft>=0.7.0
+ trl>=0.7.6
+ gradio>=3.38.0,<4.0.0
+ scipy
+ einops
+ sentencepiece
+ protobuf
+ jieba
+ rouge-chinese
+ nltk
+ uvicorn
+ pydantic
+ fastapi
+ sse-starlette
+ matplotlib
setup.py ADDED
@@ -0,0 +1,55 @@
+ import os
+ import re
+ from setuptools import setup, find_packages
+
+
+ def get_version():
+     with open(os.path.join("src", "llmtuner", "__init__.py"), "r", encoding="utf-8") as f:
+         file_content = f.read()
+         pattern = r"{0}\W*=\W*\"([^\"]+)\"".format("__version__")
+         version, = re.findall(pattern, file_content)
+         return version
+
+
+ def get_requires():
+     with open("requirements.txt", "r", encoding="utf-8") as f:
+         file_content = f.read()
+         lines = [line.strip() for line in file_content.strip().split("\n") if not line.startswith("#")]
+         return lines
+
+
+ def main():
+
+     setup(
+         name="llmtuner",
+         version=get_version(),
+         author="hiyouga",
+         author_email="hiyouga" "@" "buaa.edu.cn",
+         description="Easy-to-use LLM fine-tuning framework",
+         long_description=open("README.md", "r", encoding="utf-8").read(),
+         long_description_content_type="text/markdown",
+         keywords=["LLaMA", "BLOOM", "Falcon", "LLM", "ChatGPT", "transformer", "pytorch", "deep learning"],
+         license="Apache 2.0 License",
+         url="https://github.com/hiyouga/LLaMA-Factory",
+         package_dir={"": "src"},
+         packages=find_packages("src"),
+         python_requires=">=3.8.0",
+         install_requires=get_requires(),
+         classifiers=[
+             "Development Status :: 3 - Alpha",
+             "Intended Audience :: Developers",
+             "Intended Audience :: Education",
+             "Intended Audience :: Science/Research",
+             "License :: OSI Approved :: Apache Software License",
+             "Operating System :: OS Independent",
+             "Programming Language :: Python :: 3",
+             "Programming Language :: Python :: 3.8",
+             "Programming Language :: Python :: 3.9",
+             "Programming Language :: Python :: 3.10",
+             "Topic :: Scientific/Engineering :: Artificial Intelligence",
+         ]
+     )
+
+
+ if __name__ == "__main__":
+     main()
src/.train_web.py.swp ADDED
Binary file (12.3 kB). View file
 
src/api_demo.py ADDED
@@ -0,0 +1,16 @@
+ import os
+
+ import uvicorn
+
+ from llmtuner import ChatModel, create_app
+
+
+ def main():
+     chat_model = ChatModel()
+     app = create_app(chat_model)
+     print("Visit http://localhost:{}/docs for the API documentation.".format(os.environ.get("API_PORT", 8000)))
+     uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("API_PORT", 8000)), workers=1)
+
+
+ if __name__ == "__main__":
+     main()
src/app.py ADDED
@@ -0,0 +1,7 @@
+ import gradio as gr
+
+ def greet(name):
+     return "Hello " + name + "!!"
+
+ iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+ iface.launch()