Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes.
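The commit title names the `huggingface_hub` folder-upload API. As a rough sketch of how a commit like this one is typically produced (the repo id and local path below are hypothetical, not taken from this diff):

    # Minimal sketch of the huggingface_hub folder upload behind a commit
    # like this one. repo_id and folder_path are hypothetical placeholders.
    from huggingface_hub import HfApi

    api = HfApi()  # picks up the token from `huggingface-cli login` by default
    api.upload_folder(
        folder_path=".",                 # local directory to mirror into the repo
        repo_id="user/my-space",         # hypothetical Space name
        repo_type="space",
        commit_message="Upload folder using huggingface_hub",
    )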
- .config/.last_opt_in_prompt.yaml +1 -0
- .config/.last_survey_prompt.yaml +1 -0
- .config/.last_update_check.json +1 -0
- .config/active_config +1 -0
- .config/config_sentinel +0 -0
- .config/configurations/config_default +6 -0
- .config/default_configs.db +0 -0
- .config/gce +1 -0
- .config/logs/2024.03.13/13.24.54.535590.log +596 -0
- .config/logs/2024.03.13/13.25.21.193490.log +5 -0
- .config/logs/2024.03.13/13.25.32.425298.log +169 -0
- .config/logs/2024.03.13/13.25.40.985811.log +5 -0
- .config/logs/2024.03.13/13.25.52.149346.log +8 -0
- .config/logs/2024.03.13/13.25.53.020753.log +8 -0
- .gitattributes +11 -0
- LLaMA-Factory/.dockerignore +11 -0
- LLaMA-Factory/.gitattributes +2 -0
- LLaMA-Factory/.github/CODE_OF_CONDUCT.md +128 -0
- LLaMA-Factory/.github/CONTRIBUTING.md +21 -0
- LLaMA-Factory/.github/ISSUE_TEMPLATE/bug-report.yml +58 -0
- LLaMA-Factory/.github/PULL_REQUEST_TEMPLATE.md +7 -0
- LLaMA-Factory/.github/SECURITY.md +7 -0
- LLaMA-Factory/.github/workflows/tests.yml +29 -0
- LLaMA-Factory/.gitignore +165 -0
- LLaMA-Factory/Dockerfile +15 -0
- LLaMA-Factory/LICENSE +201 -0
- LLaMA-Factory/Makefile +11 -0
- LLaMA-Factory/README.md +735 -0
- LLaMA-Factory/README_zh.md +708 -0
- LLaMA-Factory/assets/benchmark.svg +1216 -0
- LLaMA-Factory/assets/logo.png +0 -0
- LLaMA-Factory/assets/wechat.jpg +0 -0
- LLaMA-Factory/build/lib/llmtuner/__init__.py +11 -0
- LLaMA-Factory/build/lib/llmtuner/api/__init__.py +4 -0
- LLaMA-Factory/build/lib/llmtuner/api/app.py +224 -0
- LLaMA-Factory/build/lib/llmtuner/api/protocol.py +116 -0
- LLaMA-Factory/build/lib/llmtuner/chat/__init__.py +5 -0
- LLaMA-Factory/build/lib/llmtuner/chat/base_engine.py +69 -0
- LLaMA-Factory/build/lib/llmtuner/chat/chat_model.py +91 -0
- LLaMA-Factory/build/lib/llmtuner/chat/hf_engine.py +263 -0
- LLaMA-Factory/build/lib/llmtuner/chat/vllm_engine.py +149 -0
- LLaMA-Factory/build/lib/llmtuner/data/__init__.py +6 -0
- LLaMA-Factory/build/lib/llmtuner/data/aligner.py +133 -0
- LLaMA-Factory/build/lib/llmtuner/data/formatter.py +187 -0
- LLaMA-Factory/build/lib/llmtuner/data/loader.py +170 -0
- LLaMA-Factory/build/lib/llmtuner/data/parser.py +119 -0
- LLaMA-Factory/build/lib/llmtuner/data/preprocess.py +276 -0
- LLaMA-Factory/build/lib/llmtuner/data/template.py +773 -0
- LLaMA-Factory/build/lib/llmtuner/data/utils.py +94 -0
- LLaMA-Factory/build/lib/llmtuner/eval/__init__.py +4 -0
.config/.last_opt_in_prompt.yaml
ADDED
@@ -0,0 +1 @@
+{}
.config/.last_survey_prompt.yaml
ADDED
@@ -0,0 +1 @@
+last_prompt_time: 1710336331.587556
.config/.last_update_check.json
ADDED
@@ -0,0 +1 @@
+{"last_update_check_time": 1710336340.3061023, "last_update_check_revision": 20240308155052, "notifications": [], "last_nag_times": {}}
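Both the `last_prompt_time` above and the `last_update_check_time` here are Unix epoch seconds; decoded, they line up with the 13:25 UTC timestamps in the logs further down. A small illustrative sketch:

    # Sketch: decode the epoch timestamps gcloud stores in its check/prompt files.
    import json
    from datetime import datetime, timezone

    raw = ('{"last_update_check_time": 1710336340.3061023, '
           '"last_update_check_revision": 20240308155052, '
           '"notifications": [], "last_nag_times": {}}')
    state = json.loads(raw)
    checked = datetime.fromtimestamp(state["last_update_check_time"], tz=timezone.utc)
    print(checked.isoformat())  # 2024-03-13T13:25:40.306102+00:00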
.config/active_config
ADDED
@@ -0,0 +1 @@
+default
.config/config_sentinel
ADDED
File without changes
.config/configurations/config_default
ADDED
@@ -0,0 +1,6 @@
+[component_manager]
+disable_update_check = true
+
+[compute]
+gce_metadata_read_timeout_sec = 0
+
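This file is plain INI, so it can be read with Python's standard-library configparser; a minimal sketch, assuming the relative path used in this commit:

    # Sketch: read the gcloud configuration added above with the standard library.
    import configparser

    cfg = configparser.ConfigParser()
    cfg.read(".config/configurations/config_default")  # path as in this commit
    print(cfg.getboolean("component_manager", "disable_update_check"))  # True
    print(cfg.getint("compute", "gce_metadata_read_timeout_sec"))       # 0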
.config/default_configs.db
ADDED
Binary file (12.3 kB)
.config/gce
ADDED
@@ -0,0 +1 @@
+False
.config/logs/2024.03.13/13.24.54.535590.log
ADDED
@@ -0,0 +1,596 @@
+2024-03-13 13:25:06,565 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-03-13 13:25:06,569 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+2024-03-13 13:25:06,573 DEBUG root Running [gcloud.components.update] with arguments: [--allow-no-backup: "True", --compile-python: "True", --quiet: "True", COMPONENT-IDS:7: "['core', 'gcloud-deps', 'bq', 'gcloud', 'gcloud-crc32c', 'gsutil', 'anthoscli']"]
+2024-03-13 13:25:06,574 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+2024-03-13 13:25:06,591 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:06,713 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 214446
+2024-03-13 13:25:06,728 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 468.0.0
+2024-03-13 13:25:06,728 INFO ___FILE_ONLY___ Installing components from version: 468.0.0
+2024-03-13 13:25:06,728 DEBUG root Chosen display Format:table[box,title="These components will be removed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-03-13 13:25:06,729 DEBUG root Chosen display Format:table[box,title="These components will be updated."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-03-13 13:25:06,730 DEBUG root Chosen display Format:table[box,title="These components will be installed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+┌─────────────────────────────────────────────────────────────────────────────┐
+│                     These components will be installed.                     │
+├─────────────────────────────────────────────────────┬────────────┬──────────┤
+│                         Name                        │  Version   │   Size   │
+├─────────────────────────────────────────────────────┼────────────┼──────────┤
+│ BigQuery Command Line Tool                          │    2.0.101 │  1.6 MiB │
+│ BigQuery Command Line Tool (Platform Specific)      │    2.0.101 │  < 1 MiB │
+│ Bundled Python 3.11                                 │     3.11.8 │ 74.9 MiB │
+│ Cloud Storage Command Line Tool                     │       5.27 │ 11.3 MiB │
+│ Cloud Storage Command Line Tool (Platform Specific) │       5.27 │  < 1 MiB │
+│ Google Cloud CLI Core Libraries (Platform Specific) │ 2024.01.06 │  < 1 MiB │
+│ Google Cloud CRC32C Hash Tool                       │      1.0.0 │  1.2 MiB │
+│ anthoscli                                           │     0.2.48 │ 68.9 MiB │
+│ gcloud cli dependencies                             │ 2021.04.16 │  < 1 MiB │
+└─────────────────────────────────────────────────────┴────────────┴──────────┘
+2024-03-13 13:25:06,747 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:06,866 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1169079
+2024-03-13 13:25:06,896 INFO ___FILE_ONLY___ For the latest full release notes, please visit:
+https://cloud.google.com/sdk/release_notes
+2024-03-13 13:25:06,898 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:06,898 INFO ___FILE_ONLY___ ╠═ Creating update staging area ═╣
+2024-03-13 13:25:06,899 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:09,584 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:09,668 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:09,668 INFO ___FILE_ONLY___ ╠═ Installing: BigQuery Command Line Tool ═╣
+2024-03-13 13:25:09,668 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:09,672 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:09,790 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bq-20240112150613.tar.gz HTTP/1.1" 200 1679148
+2024-03-13 13:25:10,048 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:10,060 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:10,061 INFO ___FILE_ONLY___ ╠═ Installing: BigQuery Command Line Tool (Platform Spec... ═╣
+2024-03-13 13:25:10,061 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:10,064 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:10,180 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bq-nix-20240106004423.tar.gz HTTP/1.1" 200 2026
+2024-03-13 13:25:10,182 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:10,190 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:10,190 INFO ___FILE_ONLY___ ╠═ Installing: Bundled Python 3.11 ═╣
+2024-03-13 13:25:10,190 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:10,195 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:10,197 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:10,197 INFO ___FILE_ONLY___ ╠═ Installing: Bundled Python 3.11 ═╣
+2024-03-13 13:25:10,197 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:10,201 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:10,316 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-bundled-python3-unix-linux-x86_64-20240229170130.tar.gz HTTP/1.1" 200 78486918
+2024-03-13 13:25:15,545 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:15,625 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:15,626 INFO ___FILE_ONLY___ ╠═ Installing: Cloud Storage Command Line Tool ═╣
+2024-03-13 13:25:15,626 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:15,630 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:15,708 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gsutil-20231025210228.tar.gz HTTP/1.1" 200 11833901
+2024-03-13 13:25:17,201 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:17,259 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:17,259 INFO ___FILE_ONLY___ ╠═ Installing: Cloud Storage Command Line Tool (Platform... ═╣
+2024-03-13 13:25:17,259 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:17,263 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:17,384 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gsutil-nix-20240106004423.tar.gz HTTP/1.1" 200 2042
+2024-03-13 13:25:17,386 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:17,395 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:17,395 INFO ___FILE_ONLY___ ╠═ Installing: Default set of gcloud commands ═╣
+2024-03-13 13:25:17,395 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:17,400 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:17,402 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:17,402 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CLI Core Libraries (Platform... ═╣
+2024-03-13 13:25:17,402 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:17,406 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:17,521 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-core-nix-20240106004423.tar.gz HTTP/1.1" 200 2410
+2024-03-13 13:25:17,524 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:17,532 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:17,533 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CRC32C Hash Tool ═╣
+2024-03-13 13:25:17,533 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:17,537 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:17,654 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gcloud-crc32c-linux-x86_64-20231215195722.tar.gz HTTP/1.1" 200 1287877
+2024-03-13 13:25:17,702 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:17,711 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:17,711 INFO ___FILE_ONLY___ ╠═ Installing: Google Cloud CRC32C Hash Tool ═╣
+2024-03-13 13:25:17,711 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:17,716 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:17,718 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:17,718 INFO ___FILE_ONLY___ ╠═ Installing: anthoscli ═╣
+2024-03-13 13:25:17,718 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:17,726 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:17,848 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-anthoscli-linux-x86_64-20240209195330.tar.gz HTTP/1.1" 200 72231225
+2024-03-13 13:25:20,398 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:20,419 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:20,419 INFO ___FILE_ONLY___ ╠═ Installing: anthoscli ═╣
+2024-03-13 13:25:20,420 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:20,424 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:20,427 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:20,427 INFO ___FILE_ONLY___ ╠═ Installing: gcloud cli dependencies ═╣
+2024-03-13 13:25:20,427 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:20,431 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:20,547 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-gcloud-deps-linux-x86_64-20210416153011.tar.gz HTTP/1.1" 200 104
+2024-03-13 13:25:20,549 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:20,558 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:20,558 INFO ___FILE_ONLY___ ╠═ Creating backup and activating new installation ═╣
+2024-03-13 13:25:20,558 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:20,558 DEBUG root Attempting to move directory [/tools/google-cloud-sdk] to [/tools/google-cloud-sdk.staging/.install/.backup]
+2024-03-13 13:25:20,558 DEBUG root Attempting to move directory [/tools/google-cloud-sdk.staging] to [/tools/google-cloud-sdk]
+2024-03-13 13:25:20,559 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:20,562 DEBUG root Updating notification cache...
+2024-03-13 13:25:20,565 INFO ___FILE_ONLY___ Performing post processing steps...
+2024-03-13 13:25:20,566 DEBUG root Executing command: ['/tools/google-cloud-sdk/bin/gcloud', 'components', 'post-process']
+2024-03-13 13:25:31,582 INFO ___FILE_ONLY___ Update done!
+2024-03-13 13:25:31,586 DEBUG root Chosen display Format:none
+2024-03-13 13:25:31,586 INFO root Display format: "none"
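Each entry in these logs follows a fixed `timestamp LEVEL logger message` shape. A hedged sketch (the format is inferred from the lines above, not from any documented gcloud log schema) that pulls out the component tarball downloads:

    # Sketch: extract component downloads from a gcloud update log.
    # The line format (timestamp, level, logger, message) is inferred from
    # the log above; this is an illustration, not a documented schema.
    import re

    LINE = re.compile(
        r"^(?P<ts>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}) "
        r"(?P<level>[A-Z]+) (?P<logger>\S+) (?P<msg>.*)$"
    )
    GET = re.compile(r'"GET (?P<path>\S+) HTTP/1\.1" (?P<status>\d+) (?P<size>\d+)')

    def downloads(log_text: str):
        for line in log_text.splitlines():
            m = LINE.match(line)
            if not m:
                continue
            g = GET.search(m.group("msg"))
            if g:
                yield m.group("ts"), g.group("path"), int(g.group("size"))

    sample = ('2024-03-13 13:25:09,790 DEBUG urllib3.connectionpool '
              'https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/'
              'google-cloud-sdk-bq-20240112150613.tar.gz HTTP/1.1" 200 1679148')
    print(next(downloads(sample)))  # ('2024-03-13 13:25:09,790', '/dl/...', 1679148)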
.config/logs/2024.03.13/13.25.21.193490.log
ADDED
@@ -0,0 +1,5 @@
+2024-03-13 13:25:21,194 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-03-13 13:25:21,197 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+2024-03-13 13:25:21,199 DEBUG root Running [gcloud.components.post-process] with arguments: []
+2024-03-13 13:25:31,239 DEBUG root Chosen display Format:none
+2024-03-13 13:25:31,240 INFO root Display format: "none"
.config/logs/2024.03.13/13.25.32.425298.log
ADDED
@@ -0,0 +1,169 @@
+2024-03-13 13:25:32,426 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-03-13 13:25:32,429 DEBUG root Loaded Command Group: ['gcloud', 'components', 'update']
+2024-03-13 13:25:32,431 DEBUG root Running [gcloud.components.update] with arguments: [--quiet: "True", COMPONENT-IDS:8: "['gcloud', 'core', 'bq', 'gsutil', 'compute', 'preview', 'alpha', 'beta']"]
+2024-03-13 13:25:32,433 INFO ___FILE_ONLY___ Beginning update. This process may take several minutes.
+2024-03-13 13:25:32,439 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:32,556 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components-2.json HTTP/1.1" 200 214446
+2024-03-13 13:25:32,575 WARNING root Component [preview] no longer exists.
+2024-03-13 13:25:32,575 WARNING root Component [compute] no longer exists.
+2024-03-13 13:25:32,577 INFO ___FILE_ONLY___ Your current Google Cloud CLI version is: 468.0.0
+2024-03-13 13:25:32,577 INFO ___FILE_ONLY___ Installing components from version: 468.0.0
+2024-03-13 13:25:32,577 DEBUG root Chosen display Format:table[box,title="These components will be removed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-03-13 13:25:32,578 DEBUG root Chosen display Format:table[box,title="These components will be updated."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+2024-03-13 13:25:32,578 DEBUG root Chosen display Format:table[box,title="These components will be installed."](details.display_name:label=Name:align=left,version.version_string:label=Version:align=right,data.size.size(zero="",min=1048576):label=Size:align=right)
+┌──────────────────────────────────────────────┐
+│     These components will be installed.      │
+├───────────────────────┬────────────┬─────────┤
+│          Name         │  Version   │   Size  │
+├───────────────────────┼────────────┼─────────┤
+│ gcloud Alpha Commands │ 2024.03.08 │ < 1 MiB │
+│ gcloud Beta Commands  │ 2024.03.08 │ < 1 MiB │
+└───────────────────────┴────────────┴─────────┘
+2024-03-13 13:25:32,586 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:32,658 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/RELEASE_NOTES HTTP/1.1" 200 1169079
+2024-03-13 13:25:32,691 INFO ___FILE_ONLY___ For the latest full release notes, please visit:
+https://cloud.google.com/sdk/release_notes
+2024-03-13 13:25:32,693 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:32,694 INFO ___FILE_ONLY___ ╠═ Creating update staging area ═╣
+2024-03-13 13:25:32,694 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:37,045 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:40,080 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:40,080 INFO ___FILE_ONLY___ ╠═ Installing: gcloud Alpha Commands ═╣
+2024-03-13 13:25:40,080 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:40,084 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:40,156 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-alpha-20240308155052.tar.gz HTTP/1.1" 200 800
+2024-03-13 13:25:40,159 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:40,167 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:40,167 INFO ___FILE_ONLY___ ╠═ Installing: gcloud Beta Commands ═╣
+2024-03-13 13:25:40,167 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:40,171 DEBUG urllib3.connectionpool Starting new HTTPS connection (1): dl.google.com:443
+2024-03-13 13:25:40,290 DEBUG urllib3.connectionpool https://dl.google.com:443 "GET /dl/cloudsdk/channels/rapid/components/google-cloud-sdk-beta-20240308155052.tar.gz HTTP/1.1" 200 797
+2024-03-13 13:25:40,292 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:40,300 INFO ___FILE_ONLY___ ╔════════════════════════════════════════════════════════════╗
+2024-03-13 13:25:40,301 INFO ___FILE_ONLY___ ╠═ Creating backup and activating new installation ═╣
+2024-03-13 13:25:40,301 INFO ___FILE_ONLY___ ╚
+2024-03-13 13:25:40,301 DEBUG root Attempting to move directory [/tools/google-cloud-sdk] to [/tools/google-cloud-sdk.staging/.install/.backup]
+2024-03-13 13:25:40,301 DEBUG root Attempting to move directory [/tools/google-cloud-sdk.staging] to [/tools/google-cloud-sdk]
+2024-03-13 13:25:40,301 INFO ___FILE_ONLY___ ╝
+2024-03-13 13:25:40,305 DEBUG root Updating notification cache...
+2024-03-13 13:25:40,308 INFO ___FILE_ONLY___ Performing post processing steps...
+2024-03-13 13:25:40,309 DEBUG root Executing command: ['/tools/google-cloud-sdk/bin/gcloud', 'components', 'post-process']
+2024-03-13 13:25:51,325 INFO ___FILE_ONLY___ Update done!
+2024-03-13 13:25:51,328 DEBUG root Chosen display Format:none
+2024-03-13 13:25:51,328 INFO root Display format: "none"
.config/logs/2024.03.13/13.25.40.985811.log
ADDED
@@ -0,0 +1,5 @@
+2024-03-13 13:25:40,986 DEBUG root Loaded Command Group: ['gcloud', 'components']
+2024-03-13 13:25:40,988 DEBUG root Loaded Command Group: ['gcloud', 'components', 'post_process']
+2024-03-13 13:25:40,991 DEBUG root Running [gcloud.components.post-process] with arguments: []
+2024-03-13 13:25:50,926 DEBUG root Chosen display Format:none
+2024-03-13 13:25:50,927 INFO root Display format: "none"
.config/logs/2024.03.13/13.25.52.149346.log
ADDED
@@ -0,0 +1,8 @@
+2024-03-13 13:25:52,151 DEBUG root Loaded Command Group: ['gcloud', 'config']
+2024-03-13 13:25:52,182 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
+2024-03-13 13:25:52,185 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "component_manager/disable_update_check", VALUE: "true"]
+2024-03-13 13:25:52,186 INFO ___FILE_ONLY___ Updated property [component_manager/disable_update_check].
+
+2024-03-13 13:25:52,187 DEBUG root Chosen display Format:default
+2024-03-13 13:25:52,188 INFO root Display format: "default"
+2024-03-13 13:25:52,188 DEBUG root SDK update checks are disabled.
.config/logs/2024.03.13/13.25.53.020753.log
ADDED
@@ -0,0 +1,8 @@
2024-03-13 13:25:53,022 DEBUG root Loaded Command Group: ['gcloud', 'config']
2024-03-13 13:25:53,052 DEBUG root Loaded Command Group: ['gcloud', 'config', 'set']
2024-03-13 13:25:53,055 DEBUG root Running [gcloud.config.set] with arguments: [SECTION/PROPERTY: "compute/gce_metadata_read_timeout_sec", VALUE: "0"]
2024-03-13 13:25:53,056 INFO ___FILE_ONLY___ Updated property [compute/gce_metadata_read_timeout_sec].
2024-03-13 13:25:53,057 DEBUG root Chosen display Format:default
2024-03-13 13:25:53,058 INFO root Display format: "default"
2024-03-13 13:25:53,059 DEBUG root SDK update checks are disabled.
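The two `gcloud config set` runs recorded in these logs correspond to the following commands, reconstructed from the `SECTION/PROPERTY` and `VALUE` arguments above (a hedged reading of the logs, not a command transcript):

```bash
# Reconstructed from the gcloud.config.set arguments in the two logs above.
gcloud config set component_manager/disable_update_check true
gcloud config set compute/gce_metadata_read_timeout_sec 0
```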
.gitattributes
CHANGED
@@ -33,3 +33,14 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/alpaca_data_en_52k.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/alpaca_data_zh_51k.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/alpaca_gpt4_data_en.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/alpaca_gpt4_data_zh.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/comparison_gpt4_data_en.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/comparison_gpt4_data_zh.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/glaive_toolcall_10k.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/oaast_rm.json filter=lfs diff=lfs merge=lfs -text
+LLaMA-Factory/data/oaast_sft.json filter=lfs diff=lfs merge=lfs -text
+sample_data/mnist_test.csv filter=lfs diff=lfs merge=lfs -text
+sample_data/mnist_train_small.csv filter=lfs diff=lfs merge=lfs -text
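Entries of this form are what `git lfs track` writes to `.gitattributes`; the additions above could have been produced by commands along these lines (the glob patterns are illustrative, not the ones actually run):

```bash
# Illustrative only: `git lfs track` appends "filter=lfs diff=lfs merge=lfs -text"
# entries like the ones added above to .gitattributes.
git lfs track "LLaMA-Factory/data/*.json"
git lfs track "sample_data/mnist_*.csv"
```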
LLaMA-Factory/.dockerignore
ADDED
@@ -0,0 +1,11 @@
.vscode
.git
.github
.venv
cache
data
examples
.dockerignore
.gitattributes
.gitignore
Dockerfile
LLaMA-Factory/.gitattributes
ADDED
@@ -0,0 +1,2 @@
# Auto detect text files and perform LF normalization
* text=auto
LLaMA-Factory/.github/CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
`hoshihiyouga AT gmail DOT com`.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
LLaMA-Factory/.github/CONTRIBUTING.md
ADDED
@@ -0,0 +1,21 @@
# Contributing to LLaMA Factory

Everyone is welcome to contribute, and we value everybody's contribution. Code contributions are not the only way to help the community. Answering questions, helping others, and improving the documentation are also immensely valuable.

It also helps us if you spread the word! Reference the library in blog posts about the awesome projects it made possible, shout out on Twitter every time it has helped you, or simply ⭐️ the repository to say thank you.

However you choose to contribute, please be mindful and respect our [code of conduct](CODE_OF_CONDUCT.md).

**This guide was heavily inspired by [transformers guide to contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md).**

## Ways to contribute

There are several ways you can contribute to LLaMA Factory:

* Fix outstanding issues with the existing code.
* Submit issues related to bugs or desired new features.
* Contribute to the examples or to the documentation.

### Style guide

LLaMA Factory follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html), check it for details.
LLaMA-Factory/.github/ISSUE_TEMPLATE/bug-report.yml
ADDED
@@ -0,0 +1,58 @@
name: "\U0001F41B Bug / Help"
description: Create a report to help us improve the LLaMA Factory
body:
  - type: checkboxes
    id: reminder
    attributes:
      label: Reminder
      description: |
        Please ensure you have read the README carefully and searched the existing issues.
        请确保您已经认真阅读了 README 并且搜索过现有的 Issue。

      options:
        - label: I have read the README and searched the existing issues.
          required: true

  - type: textarea
    id: reproduction
    validations:
      required: true
    attributes:
      label: Reproduction
      description: |
        Please provide code snippets, error messages and stack traces that reproduce the problem.
        请提供运行参数,错误信息以及异常堆栈以便于我们复现该问题。
        Remember to use Markdown tags to correctly format your code.
        请合理使用 Markdown 标签来格式化您的文本。

      placeholder: |
        python src/train_bash.py ...

  - type: textarea
    id: expected-behavior
    validations:
      required: false
    attributes:
      label: Expected behavior
      description: |
        Please provide a clear and concise description of what you would expect to happen.
        请提供您原本的目的,即这段代码的期望行为。

  - type: textarea
    id: system-info
    validations:
      required: false
    attributes:
      label: System Info
      description: |
        Please share your system info with us. You can run the command **transformers-cli env** and copy-paste its output below.
        请提供您的系统信息。您可以在命令行运行 **transformers-cli env** 并将其输出复制到该文本框中。

      placeholder: transformers version, platform, python version, ...

  - type: textarea
    id: others
    validations:
      required: false
    attributes:
      label: Others
LLaMA-Factory/.github/PULL_REQUEST_TEMPLATE.md
ADDED
@@ -0,0 +1,7 @@
# What does this PR do?

Fixes # (issue)

## Before submitting

- [ ] Did you read the [contributor guideline](https://github.com/hiyouga/LLaMA-Factory/blob/main/.github/CONTRIBUTING.md)?
LLaMA-Factory/.github/SECURITY.md
ADDED
@@ -0,0 +1,7 @@
# Reporting Security Issues

To report a security issue, please use the GitHub Security Advisory ["Report a Vulnerability"](https://github.com/electron/electron/security/advisories/new) tab.

We will send a response indicating the next steps in handling your report. After the initial reply to your report, the security team will keep you informed of the progress towards a fix and full announcement, and may ask for additional information or guidance.

Report security bugs in third-party modules to the person or team maintaining the module.
LLaMA-Factory/.github/workflows/tests.yml
ADDED
@@ -0,0 +1,29 @@
name: tests

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

jobs:
  check_code_quality:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v4

    - name: Set up Python
      uses: actions/setup-python@v5
      with:
        python-version: "3.8"

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        python -m pip install ruff

    - name: Check quality
      run: |
        make style && make quality
LLaMA-Factory/.gitignore
ADDED
@@ -0,0 +1,165 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
.idea/

# custom .gitignore
user.config
saves/
cache/
LLaMA-Factory/Dockerfile
ADDED
@@ -0,0 +1,15 @@
FROM cnstark/pytorch:2.0.1-py3.9.17-cuda11.8.0-ubuntu20.04

WORKDIR /app

COPY requirements.txt /app/
RUN pip install -r requirements.txt && \
    pip install tiktoken && \
    pip install transformers_stream_generator

COPY . /app/

VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
EXPOSE 7860

CMD [ "python", "src/train_web.py" ]
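A minimal sketch of building and running this image, assuming a local checkout; the tag `llama-factory` and the cache mount are illustrative, and a GPU flag such as `--gpus all` may be needed depending on your Docker setup:

```bash
# Build the image from the Dockerfile above and expose the Web UI on port 7860.
docker build -t llama-factory .
docker run -it --rm -p 7860:7860 \
    -v "$HOME/.cache/huggingface:/root/.cache/huggingface" \
    llama-factory
```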
LLaMA-Factory/LICENSE
ADDED
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
LLaMA-Factory/Makefile
ADDED
@@ -0,0 +1,11 @@
.PHONY: quality style

check_dirs := scripts src tests

quality:
	ruff check $(check_dirs)
	ruff format --check $(check_dirs)

style:
	ruff check $(check_dirs) --fix
	ruff format $(check_dirs)
LLaMA-Factory/README.md
ADDED
@@ -0,0 +1,735 @@
![# LLaMA Factory](assets/logo.png)

[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
[![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
[![Citation](https://img.shields.io/badge/citation-21-green)](#projects-using-llama-factory)
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)

👋 Join our [WeChat](assets/wechat.jpg).

\[ English | [中文](README_zh.md) \]

**Fine-tuning a large language model can be as easy as...**

https://github.com/hiyouga/LLaMA-Factory/assets/16256802/9840a653-7e9c-41c8-ae89-7ace5698baf6

Choose your path:

- **🤗 Spaces**: https://huggingface.co/spaces/hiyouga/LLaMA-Board
- **ModelScope**: https://modelscope.cn/studios/hiyouga/LLaMA-Board
- **Colab**: https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
- **Local machine**: Please refer to [usage](#getting-started)

## Table of Contents

- [Features](#features)
- [Benchmark](#benchmark)
- [Changelog](#changelog)
- [Supported Models](#supported-models)
- [Supported Training Approaches](#supported-training-approaches)
- [Provided Datasets](#provided-datasets)
- [Requirement](#requirement)
- [Getting Started](#getting-started)
- [Projects using LLaMA Factory](#projects-using-llama-factory)
- [License](#license)
- [Citation](#citation)
- [Acknowledgement](#acknowledgement)

## Features

- **Various models**: LLaMA, Mistral, Mixtral-MoE, Qwen, Yi, Gemma, Baichuan, ChatGLM, Phi, etc.
- **Integrated methods**: (Continuous) pre-training, supervised fine-tuning, reward modeling, PPO and DPO.
- **Scalable resources**: 32-bit full-tuning, 16-bit freeze-tuning, 16-bit LoRA and 2/4/8-bit QLoRA via AQLM/AWQ/GPTQ/LLM.int8.
- **Advanced algorithms**: GaLore, DoRA, LongLoRA, LLaMA Pro, LoRA+, LoftQ and Agent tuning.
- **Practical tricks**: FlashAttention-2, Unsloth, RoPE scaling, NEFTune and rsLoRA.
- **Experiment monitors**: LlamaBoard, TensorBoard, Wandb, MLflow, etc.
- **Faster inference**: OpenAI-style API, Gradio UI and CLI with vLLM worker.

## Benchmark

Compared to ChatGLM's [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning), LLaMA-Factory's LoRA tuning offers up to **3.7 times faster** training speed with a better Rouge score on the advertising text generation task. By leveraging the 4-bit quantization technique, LLaMA-Factory's QLoRA further improves efficiency in terms of GPU memory.

![benchmark](assets/benchmark.svg)

<details><summary>Definitions</summary>

- **Training Speed**: the number of training samples processed per second during the training. (bs=4, cutoff_len=1024)
- **Rouge Score**: Rouge-2 score on the development set of the [advertising text generation](https://aclanthology.org/D19-1321.pdf) task. (bs=4, cutoff_len=1024)
- **GPU Memory**: peak GPU memory usage in 4-bit quantized training. (bs=1, cutoff_len=1024)
- We adopt `pre_seq_len=128` for ChatGLM's P-Tuning and `lora_rank=32` for LLaMA-Factory's LoRA tuning.

</details>

## Changelog

[24/03/13] We supported **[LoRA+](https://arxiv.org/abs/2402.12354)**. Try `loraplus_lr_ratio=16.0` to enable the LoRA+ algorithm.

[24/03/07] We supported the gradient low-rank projection (**[GaLore](https://arxiv.org/abs/2403.03507)**) algorithm. Try `--use_galore` to use the memory-efficient optimizer.

[24/03/07] We integrated **[vLLM](https://github.com/vllm-project/vllm)** for faster and concurrent inference. Try `--infer_backend vllm` to enjoy **270%** inference speed. (LoRA is not yet supported, merge it first.)

[24/02/28] We supported weight-decomposed LoRA (**[DoRA](https://arxiv.org/abs/2402.09353)**). Try `--use_dora` to activate DoRA training.

[24/02/15] We supported **block expansion** proposed by [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro). See `scripts/llama_pro.py` for usage.

<details><summary>Full Changelog</summary>

[24/02/05] Qwen1.5 (Qwen2 beta version) series models are supported in LLaMA-Factory. Check this [blog post](https://qwenlm.github.io/blog/qwen1.5/) for details.

[24/01/18] We supported **agent tuning** for most models, equipping the model with tool-using abilities by fine-tuning with `--dataset glaive_toolcall`.

[23/12/23] We supported **[unsloth](https://github.com/unslothai/unsloth)**'s implementation to boost LoRA tuning for the LLaMA, Mistral and Yi models. Try the `--use_unsloth` argument to activate the unsloth patch. It achieves **170%** speed in our benchmark; check [this page](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison) for details.

[23/12/12] We supported fine-tuning the latest MoE model **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)** in our framework. See hardware requirements [here](#hardware-requirement).

[23/12/01] We supported downloading pre-trained models and datasets from the **[ModelScope Hub](https://modelscope.cn/models)** for Chinese mainland users. See [this tutorial](#use-modelscope-hub-optional) for usage.

[23/10/21] We supported the **[NEFTune](https://arxiv.org/abs/2310.05914)** trick for fine-tuning. Try the `--neftune_noise_alpha` argument to activate NEFTune, e.g., `--neftune_noise_alpha 5`.

[23/09/27] We supported **$S^2$-Attn** proposed by [LongLoRA](https://github.com/dvlab-research/LongLoRA) for the LLaMA models. Try the `--shift_attn` argument to enable shift short attention.

[23/09/23] We integrated MMLU, C-Eval and CMMLU benchmarks in this repo. See [this example](#evaluation) to evaluate your models.

[23/09/10] We supported **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**. Try the `--flash_attn` argument to enable FlashAttention-2 if you are using RTX4090, A100 or H100 GPUs.

[23/08/12] We supported **RoPE scaling** to extend the context length of the LLaMA models. Try the `--rope_scaling linear` argument in training and the `--rope_scaling dynamic` argument at inference to extrapolate the position embeddings.

[23/08/11] We supported **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [this example](#dpo-training) to train your models.

[23/07/31] We supported **dataset streaming**. Try the `--streaming` and `--max_steps 10000` arguments to load your dataset in streaming mode.

[23/07/29] We released two instruction-tuned 13B models at Hugging Face. See these Hugging Face repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.

[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your Web browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.

[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for efficiently editing the factual knowledge of large language models. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.

[23/06/29] We provided a **reproducible example** of training a chat model using instruction-following datasets, see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.

[23/06/22] We aligned the [demo API](src/api_demo.py) with [OpenAI's](https://platform.openai.com/docs/api-reference/chat) format so you can insert the fine-tuned model into **arbitrary ChatGPT-based applications**.

[23/06/03] We supported quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**). Try the `--quantization_bit 4/8` argument to work with quantized models.

</details>

## Supported Models

| Model                                                | Model size                  | Default module  | Template  |
| ---------------------------------------------------- | --------------------------- | --------------- | --------- |
| [Baichuan2](https://huggingface.co/baichuan-inc)     | 7B/13B                      | W_pack          | baichuan2 |
| [BLOOM](https://huggingface.co/bigscience/bloom)     | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | -         |
| [BLOOMZ](https://huggingface.co/bigscience/bloomz)   | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | -         |
| [ChatGLM3](https://huggingface.co/THUDM/chatglm3-6b) | 6B                          | query_key_value | chatglm3  |
| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B                  | q_proj,v_proj   | deepseek  |
| [Falcon](https://huggingface.co/tiiuae)              | 7B/40B/180B                 | query_key_value | falcon    |
| [Gemma](https://huggingface.co/google)               | 2B/7B                       | q_proj,v_proj   | gemma     |
| [InternLM2](https://huggingface.co/internlm)         | 7B/20B                      | wqkv            | intern2   |
| [LLaMA](https://github.com/facebookresearch/llama)   | 7B/13B/33B/65B              | q_proj,v_proj   | -         |
| [LLaMA-2](https://huggingface.co/meta-llama)         | 7B/13B/70B                  | q_proj,v_proj   | llama2    |
| [Mistral](https://huggingface.co/mistralai)          | 7B                          | q_proj,v_proj   | mistral   |
| [Mixtral](https://huggingface.co/mistralai)          | 8x7B                        | q_proj,v_proj   | mistral   |
| [OLMo](https://huggingface.co/allenai)               | 1B/7B                       | att_proj        | olmo      |
| [Phi-1.5/2](https://huggingface.co/microsoft)        | 1.3B/2.7B                   | q_proj,v_proj   | -         |
| [Qwen](https://huggingface.co/Qwen)                  | 1.8B/7B/14B/72B             | c_attn          | qwen      |
| [Qwen1.5](https://huggingface.co/Qwen)               | 0.5B/1.8B/4B/7B/14B/72B     | q_proj,v_proj   | qwen      |
| [StarCoder2](https://huggingface.co/bigcode)         | 3B/7B/15B                   | q_proj,v_proj   | -         |
| [XVERSE](https://huggingface.co/xverse)              | 7B/13B/65B                  | q_proj,v_proj   | xverse    |
| [Yi](https://huggingface.co/01-ai)                   | 6B/9B/34B                   | q_proj,v_proj   | yi        |
| [Yuan](https://huggingface.co/IEITYuan)              | 2B/51B/102B                 | q_proj,v_proj   | yuan      |

> [!NOTE]
> **Default module** is used for the `--lora_target` argument; you can use `--lora_target all` to specify all the available modules.
>
> For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna`, etc. But make sure to use the **corresponding template** for the "chat" models.

Please refer to [constants.py](src/llmtuner/extras/constants.py) for a full list of the models we support.

You can also add a custom chat template to [template.py](src/llmtuner/data/template.py).
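As a hedged illustration of the note above, the sketch below pairs a "chat" model with its corresponding template and default modules; the model path and the elided arguments are placeholders, and only `--template` and `--lora_target` are the point here:

```bash
# Illustrative sketch only: matching a chat model with its template and
# default LoRA modules (model path and remaining arguments are placeholders).
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --model_name_or_path meta-llama/Llama-2-7b-chat-hf \
    --template llama2 \
    --lora_target q_proj,v_proj \
    ... # remaining training arguments
```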
## Supported Training Approaches

| Approach               | Full-tuning        | Freeze-tuning      | LoRA               | QLoRA              |
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
| Pre-Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Reward Modeling        | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| PPO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| DPO Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |

> [!NOTE]
> Use the `--quantization_bit 4` argument to enable QLoRA.
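A minimal sketch of that flag in context; everything other than `--quantization_bit 4` is a placeholder:

```bash
# Illustrative only: QLoRA is a LoRA run with --quantization_bit 4 added
# (all other arguments are placeholders).
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
    --quantization_bit 4 \
    ... # remaining LoRA training arguments
```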
## Provided Datasets
|
171 |
+
|
172 |
+
<details><summary>Pre-training datasets</summary>
|
173 |
+
|
174 |
+
- [Wiki Demo (en)](data/wiki_demo.txt)
|
175 |
+
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
176 |
+
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
177 |
+
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
178 |
+
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
179 |
+
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
180 |
+
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
181 |
+
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
182 |
+
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
183 |
+
|
184 |
+
</details>
|
185 |
+
|
186 |
+
<details><summary>Supervised fine-tuning datasets</summary>
|
187 |
+
|
188 |
+
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
189 |
+
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
|
190 |
+
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
191 |
+
- [Self Cognition (zh)](data/self_cognition.json)
|
192 |
+
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
193 |
+
- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
|
194 |
+
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
195 |
+
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
196 |
+
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
197 |
+
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
198 |
+
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
199 |
+
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
200 |
+
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
201 |
+
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
202 |
+
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
203 |
+
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
204 |
+
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
205 |
+
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
206 |
+
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
207 |
+
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
208 |
+
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
209 |
+
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
210 |
+
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
211 |
+
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
212 |
+
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
213 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
214 |
+
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
215 |
+
- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
216 |
+
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
217 |
+
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
218 |
+
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
219 |
+
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
220 |
+
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
221 |
+
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
222 |
+
- [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
223 |
+
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
224 |
+
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
225 |
+
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
226 |
+
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
227 |
+
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
228 |
+
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
229 |
+
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
230 |
+
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
231 |
+
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
232 |
+
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
233 |
+
|
234 |
+
</details>
|
235 |
+
|
236 |
+
<details><summary>Preference datasets</summary>
|
237 |
+
|
238 |
+
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
239 |
+
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
240 |
+
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
241 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
242 |
+
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
243 |
+
|
244 |
+
</details>
|
245 |
+
|
246 |
+
Please refer to [data/README.md](data/README.md) for details.
|
247 |
+
|
248 |
+
Some datasets require confirmation before using them, so we recommend logging in with your Hugging Face account using these commands.
|
249 |
+
|
250 |
+
```bash
|
251 |
+
pip install --upgrade huggingface_hub
|
252 |
+
huggingface-cli login
|
253 |
+
```
|
254 |
+
|
255 |
+
## Requirement
|
256 |
+
|
257 |
+
| Mandatory | Minimum | Recommend |
|
258 |
+
| ------------ | ------- | --------- |
|
259 |
+
| python | 3.8 | 3.10 |
|
260 |
+
| torch | 1.13.1 | 2.2.0 |
|
261 |
+
| transformers | 4.37.2 | 4.38.2 |
|
262 |
+
| datasets | 2.14.3 | 2.17.1 |
|
263 |
+
| accelerate | 0.27.2 | 0.27.2 |
|
264 |
+
| peft | 0.9.0 | 0.9.0 |
|
265 |
+
| trl | 0.7.11 | 0.7.11 |
|
266 |
+
|
267 |
+
| Optional | Minimum | Recommend |
|
268 |
+
| ------------ | ------- | --------- |
|
269 |
+
| CUDA | 11.6 | 12.2 |
|
270 |
+
| deepspeed | 0.10.0 | 0.13.1 |
|
271 |
+
| bitsandbytes | 0.39.0 | 0.41.3 |
|
272 |
+
| flash-attn | 2.3.0 | 2.5.5 |
|
273 |
+
|
274 |
+
### Hardware Requirement
|
275 |
+
|
276 |
+
\* *estimated*
|
277 |
+
|
278 |
+
| Method | Bits | 7B | 13B | 30B | 70B | 8x7B |
|
279 |
+
| ------ | ---- | ----- | ----- | ----- | ------ | ------ |
|
280 |
+
| Full | AMP | 120GB | 240GB | 600GB | 1200GB | 900GB |
|
281 |
+
| Full | 16 | 60GB | 120GB | 300GB | 600GB | 400GB |
|
282 |
+
| GaLore | 16 | 16GB | 32GB | 64GB | 160GB | 120GB |
|
283 |
+
| Freeze | 16 | 20GB | 40GB | 80GB | 200GB | 160GB |
|
284 |
+
| LoRA | 16 | 16GB | 32GB | 64GB | 160GB | 120GB |
|
285 |
+
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 60GB |
|
286 |
+
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 30GB |
|
287 |
+
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 18GB |
|
288 |
+
|
289 |
+
## Getting Started
|
290 |
+
|
291 |
+
### Data Preparation (optional)
|
292 |
+
|
293 |
+
Please refer to [data/README.md](data/README.md) for details about the format of dataset files. You can either use a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files to create a custom dataset.
|
294 |
+
|
295 |
+
> [!NOTE]
|
296 |
+
> Please update `data/dataset_info.json` to use your custom dataset. About the format of this file, please refer to `data/README.md`.
|
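As a minimal sketch, here is one way to add such an entry programmatically; `my_dataset` and `my_dataset.json` are placeholder names, and only the `file_name` key is shown (see `data/README.md` for the full schema).

```bash
# A minimal sketch with placeholder names: register data/my_dataset.json
# under the key "my_dataset" so that `--dataset my_dataset` can find it.
python - <<'EOF'
import json

with open("data/dataset_info.json") as f:
    info = json.load(f)

# "file_name" is resolved relative to the data/ folder; column mappings and
# other optional fields are dataset-specific (see data/README.md).
info["my_dataset"] = {"file_name": "my_dataset.json"}

with open("data/dataset_info.json", "w") as f:
    json.dump(info, f, indent=2, ensure_ascii=False)
EOF
```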
297 |
+
|
298 |
+
### Dependency Installation (optional)
|
299 |
+
|
300 |
+
```bash
|
301 |
+
git clone https://github.com/hiyouga/LLaMA-Factory.git
|
302 |
+
conda create -n llama_factory python=3.10
|
303 |
+
conda activate llama_factory
|
304 |
+
cd LLaMA-Factory
|
305 |
+
pip install -r requirements.txt
|
306 |
+
```
|
307 |
+
|
308 |
+
If you want to enable quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of the `bitsandbytes` library, which supports CUDA 11.1 to 12.2.
|
309 |
+
|
310 |
+
```bash
|
311 |
+
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.40.0-py3-none-win_amd64.whl
|
312 |
+
```
|
313 |
+
|
314 |
+
To enable FlashAttention-2 on the Windows platform, you need to install the precompiled `flash-attn` library, which supports CUDA 12.1 to 12.2. Please download the corresponding version from [flash-attention](https://github.com/bdashore3/flash-attention/releases) based on your requirements.
|
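After downloading, the installation is a plain wheel install; the file name below is a placeholder, so substitute the build matching your Python, PyTorch, and CUDA versions:

```bash
# Placeholder wheel name; use the file you downloaded from the releases page.
pip install flash_attn-2.5.5+cu122torch2.2.0-cp310-cp310-win_amd64.whl
```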
315 |
+
|
316 |
+
### Use ModelScope Hub (optional)
|
317 |
+
|
318 |
+
If you have trouble downloading models and datasets from Hugging Face, you can use LLaMA-Factory together with ModelScope as follows.
|
319 |
+
|
320 |
+
```bash
|
321 |
+
export USE_MODELSCOPE_HUB=1 # `set USE_MODELSCOPE_HUB=1` for Windows
|
322 |
+
```
|
323 |
+
|
324 |
+
Then you can train the corresponding model by specifying a model ID from the ModelScope Hub (see the full list of model IDs at [ModelScope Hub](https://modelscope.cn/models)).
|
325 |
+
|
326 |
+
```bash
|
327 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
328 |
+
--model_name_or_path modelscope/Llama-2-7b-ms \
|
329 |
+
... # arguments (same as below)
|
330 |
+
```
|
331 |
+
|
332 |
+
LLaMA Board also supports using the models and datasets on the ModelScope Hub.
|
333 |
+
|
334 |
+
```bash
|
335 |
+
CUDA_VISIBLE_DEVICES=0 USE_MODELSCOPE_HUB=1 python src/train_web.py
|
336 |
+
```
|
337 |
+
|
338 |
+
### Train on a single GPU
|
339 |
+
|
340 |
+
> [!IMPORTANT]
|
341 |
+
> If you want to train models on multiple GPUs, please refer to [Distributed Training](#distributed-training).
|
342 |
+
|
343 |
+
|
344 |
+
#### LLaMA Board GUI
|
345 |
+
|
346 |
+
```bash
|
347 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_web.py
|
348 |
+
```
|
349 |
+
|
350 |
+
#### Pre-Training
|
351 |
+
|
352 |
+
```bash
|
353 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
354 |
+
--stage pt \
|
355 |
+
--do_train \
|
356 |
+
--model_name_or_path path_to_llama_model \
|
357 |
+
--dataset wiki_demo \
|
358 |
+
--finetuning_type lora \
|
359 |
+
--lora_target q_proj,v_proj \
|
360 |
+
--output_dir path_to_pt_checkpoint \
|
361 |
+
--overwrite_cache \
|
362 |
+
--per_device_train_batch_size 4 \
|
363 |
+
--gradient_accumulation_steps 4 \
|
364 |
+
--lr_scheduler_type cosine \
|
365 |
+
--logging_steps 10 \
|
366 |
+
--save_steps 1000 \
|
367 |
+
--learning_rate 5e-5 \
|
368 |
+
--num_train_epochs 3.0 \
|
369 |
+
--plot_loss \
|
370 |
+
--fp16
|
371 |
+
```
|
372 |
+
|
373 |
+
#### Supervised Fine-Tuning
|
374 |
+
|
375 |
+
```bash
|
376 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
377 |
+
--stage sft \
|
378 |
+
--do_train \
|
379 |
+
--model_name_or_path path_to_llama_model \
|
380 |
+
--dataset alpaca_gpt4_en \
|
381 |
+
--template default \
|
382 |
+
--finetuning_type lora \
|
383 |
+
--lora_target q_proj,v_proj \
|
384 |
+
--output_dir path_to_sft_checkpoint \
|
385 |
+
--overwrite_cache \
|
386 |
+
--per_device_train_batch_size 4 \
|
387 |
+
--gradient_accumulation_steps 4 \
|
388 |
+
--lr_scheduler_type cosine \
|
389 |
+
--logging_steps 10 \
|
390 |
+
--save_steps 1000 \
|
391 |
+
--learning_rate 5e-5 \
|
392 |
+
--num_train_epochs 3.0 \
|
393 |
+
--plot_loss \
|
394 |
+
--fp16
|
395 |
+
```
|
396 |
+
|
397 |
+
#### Reward Modeling
|
398 |
+
|
399 |
+
```bash
|
400 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
401 |
+
--stage rm \
|
402 |
+
--do_train \
|
403 |
+
--model_name_or_path path_to_llama_model \
|
404 |
+
--adapter_name_or_path path_to_sft_checkpoint \
|
405 |
+
--create_new_adapter \
|
406 |
+
--dataset comparison_gpt4_en \
|
407 |
+
--template default \
|
408 |
+
--finetuning_type lora \
|
409 |
+
--lora_target q_proj,v_proj \
|
410 |
+
--output_dir path_to_rm_checkpoint \
|
411 |
+
--per_device_train_batch_size 2 \
|
412 |
+
--gradient_accumulation_steps 4 \
|
413 |
+
--lr_scheduler_type cosine \
|
414 |
+
--logging_steps 10 \
|
415 |
+
--save_steps 1000 \
|
416 |
+
--learning_rate 1e-6 \
|
417 |
+
--num_train_epochs 1.0 \
|
418 |
+
--plot_loss \
|
419 |
+
--fp16
|
420 |
+
```
|
421 |
+
|
422 |
+
#### PPO Training
|
423 |
+
|
424 |
+
```bash
|
425 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
426 |
+
--stage ppo \
|
427 |
+
--do_train \
|
428 |
+
--model_name_or_path path_to_llama_model \
|
429 |
+
--adapter_name_or_path path_to_sft_checkpoint \
|
430 |
+
--create_new_adapter \
|
431 |
+
--dataset alpaca_gpt4_en \
|
432 |
+
--template default \
|
433 |
+
--finetuning_type lora \
|
434 |
+
--lora_target q_proj,v_proj \
|
435 |
+
--reward_model path_to_rm_checkpoint \
|
436 |
+
--output_dir path_to_ppo_checkpoint \
|
437 |
+
--per_device_train_batch_size 2 \
|
438 |
+
--gradient_accumulation_steps 4 \
|
439 |
+
--lr_scheduler_type cosine \
|
440 |
+
--top_k 0 \
|
441 |
+
--top_p 0.9 \
|
442 |
+
--logging_steps 10 \
|
443 |
+
--save_steps 1000 \
|
444 |
+
--learning_rate 1e-5 \
|
445 |
+
--num_train_epochs 1.0 \
|
446 |
+
--plot_loss \
|
447 |
+
--fp16
|
448 |
+
```
|
449 |
+
|
450 |
+
> [!TIP]
|
451 |
+
> Use `--adapter_name_or_path path_to_sft_checkpoint,path_to_ppo_checkpoint` to run inference with the fine-tuned model, as in the sketch below.
|
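A hedged example of that tip using the command-line demo (paths are placeholders):

```bash
# Chat with the base model plus the stacked SFT and PPO LoRA adapters.
CUDA_VISIBLE_DEVICES=0 python src/cli_demo.py \
    --model_name_or_path path_to_llama_model \
    --adapter_name_or_path path_to_sft_checkpoint,path_to_ppo_checkpoint \
    --template default \
    --finetuning_type lora
```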
452 |
+
|
453 |
+
> [!WARNING]
|
454 |
+
> Use `--per_device_train_batch_size=1` for LLaMA-2 models in fp16 PPO training.
|
455 |
+
|
456 |
+
#### DPO Training
|
457 |
+
|
458 |
+
```bash
|
459 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
460 |
+
--stage dpo \
|
461 |
+
--do_train \
|
462 |
+
--model_name_or_path path_to_llama_model \
|
463 |
+
--adapter_name_or_path path_to_sft_checkpoint \
|
464 |
+
--create_new_adapter \
|
465 |
+
--dataset comparison_gpt4_en \
|
466 |
+
--template default \
|
467 |
+
--finetuning_type lora \
|
468 |
+
--lora_target q_proj,v_proj \
|
469 |
+
--output_dir path_to_dpo_checkpoint \
|
470 |
+
--per_device_train_batch_size 2 \
|
471 |
+
--gradient_accumulation_steps 4 \
|
472 |
+
--lr_scheduler_type cosine \
|
473 |
+
--logging_steps 10 \
|
474 |
+
--save_steps 1000 \
|
475 |
+
--learning_rate 1e-5 \
|
476 |
+
--num_train_epochs 1.0 \
|
477 |
+
--plot_loss \
|
478 |
+
--fp16
|
479 |
+
```
|
480 |
+
|
481 |
+
> [!TIP]
|
482 |
+
> Use `--adapter_name_or_path path_to_sft_checkpoint,path_to_dpo_checkpoint` to run inference with the fine-tuned model.
|
483 |
+
|
484 |
+
### Distributed Training
|
485 |
+
|
486 |
+
#### Use Hugging Face Accelerate
|
487 |
+
|
488 |
+
```bash
|
489 |
+
accelerate launch --config_file config.yaml src/train_bash.py # arguments (same as above)
|
490 |
+
```
|
491 |
+
|
492 |
+
<details><summary>Example config.yaml for LoRA training</summary>
|
493 |
+
|
494 |
+
```yaml
|
495 |
+
compute_environment: LOCAL_MACHINE
|
496 |
+
debug: false
|
497 |
+
distributed_type: MULTI_GPU
|
498 |
+
downcast_bf16: 'no'
|
499 |
+
gpu_ids: all
|
500 |
+
machine_rank: 0
|
501 |
+
main_training_function: main
|
502 |
+
mixed_precision: fp16
|
503 |
+
num_machines: 1
|
504 |
+
num_processes: 4
|
505 |
+
rdzv_backend: static
|
506 |
+
same_network: true
|
507 |
+
tpu_env: []
|
508 |
+
tpu_use_cluster: false
|
509 |
+
tpu_use_sudo: false
|
510 |
+
use_cpu: false
|
511 |
+
```
|
512 |
+
|
513 |
+
</details>
|
514 |
+
|
515 |
+
> [!TIP]
|
516 |
+
> We recommend using Accelerate for LoRA tuning.
|
517 |
+
|
518 |
+
#### Use DeepSpeed
|
519 |
+
|
520 |
+
```bash
|
521 |
+
deepspeed --num_gpus 8 src/train_bash.py \
|
522 |
+
--deepspeed ds_config.json \
|
523 |
+
... # arguments (same as above)
|
524 |
+
```
|
525 |
+
|
526 |
+
<details><summary>Example ds_config.json for full-parameter training with DeepSpeed ZeRO-2</summary>
|
527 |
+
|
528 |
+
```json
|
529 |
+
{
|
530 |
+
"train_batch_size": "auto",
|
531 |
+
"train_micro_batch_size_per_gpu": "auto",
|
532 |
+
"gradient_accumulation_steps": "auto",
|
533 |
+
"gradient_clipping": "auto",
|
534 |
+
"zero_allow_untested_optimizer": true,
|
535 |
+
"fp16": {
|
536 |
+
"enabled": "auto",
|
537 |
+
"loss_scale": 0,
|
538 |
+
"loss_scale_window": 1000,
|
539 |
+
"initial_scale_power": 16,
|
540 |
+
"hysteresis": 2,
|
541 |
+
"min_loss_scale": 1
|
542 |
+
},
|
543 |
+
"bf16": {
|
544 |
+
"enabled": "auto"
|
545 |
+
},
|
546 |
+
"zero_optimization": {
|
547 |
+
"stage": 2,
|
548 |
+
"allgather_partitions": true,
|
549 |
+
"allgather_bucket_size": 5e8,
|
550 |
+
"overlap_comm": true,
|
551 |
+
"reduce_scatter": true,
|
552 |
+
"reduce_bucket_size": 5e8,
|
553 |
+
"contiguous_gradients": true,
|
554 |
+
"round_robin_gradients": true
|
555 |
+
}
|
556 |
+
}
|
557 |
+
```
|
558 |
+
|
559 |
+
</details>
|
560 |
+
|
561 |
+
> [!TIP]
|
562 |
+
> Refer to [examples](examples) for more training scripts.
|
563 |
+
|
564 |
+
### Merge LoRA weights and export model
|
565 |
+
|
566 |
+
```bash
|
567 |
+
CUDA_VISIBLE_DEVICES=0 python src/export_model.py \
|
568 |
+
--model_name_or_path path_to_llama_model \
|
569 |
+
--adapter_name_or_path path_to_checkpoint \
|
570 |
+
--template default \
|
571 |
+
--finetuning_type lora \
|
572 |
+
--export_dir path_to_export \
|
573 |
+
--export_size 2 \
|
574 |
+
--export_legacy_format False
|
575 |
+
```
|
576 |
+
|
577 |
+
> [!WARNING]
|
578 |
+
> Merging LoRA weights into a quantized model is not supported.
|
579 |
+
|
580 |
+
> [!TIP]
|
581 |
+
> Use only `--model_name_or_path path_to_export` to load the exported model.
|
582 |
+
>
|
583 |
+
> Use `--export_quantization_bit 4` and `--export_quantization_dataset data/c4_demo.json` to quantize the model with AutoGPTQ after merging the LoRA weights.
|
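One possible flow for that tip, sketched as a second export pass over the already-merged model (all paths are placeholders):

```bash
# Quantize the merged model to 4-bit with AutoGPTQ during export.
CUDA_VISIBLE_DEVICES=0 python src/export_model.py \
    --model_name_or_path path_to_export \
    --template default \
    --export_dir path_to_quantized_export \
    --export_quantization_bit 4 \
    --export_quantization_dataset data/c4_demo.json
```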
584 |
+
|
585 |
+
### Inference with OpenAI-style API
|
586 |
+
|
587 |
+
```bash
|
588 |
+
CUDA_VISIBLE_DEVICES=0 API_PORT=8000 python src/api_demo.py \
|
589 |
+
--model_name_or_path path_to_llama_model \
|
590 |
+
--adapter_name_or_path path_to_checkpoint \
|
591 |
+
--template default \
|
592 |
+
--finetuning_type lora
|
593 |
+
```
|
594 |
+
|
595 |
+
> [!TIP]
|
596 |
+
> Visit `http://localhost:8000/docs` for API documentation.
|
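A quick smoke test, assuming the server exposes the standard OpenAI-style `/v1/chat/completions` route (the `model` value below is a placeholder):

```bash
# Send a single chat message and print the JSON response.
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{
    "model": "default",
    "messages": [{"role": "user", "content": "Hello!"}]
  }'
```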
597 |
+
|
598 |
+
### Inference with command line
|
599 |
+
|
600 |
+
```bash
|
601 |
+
CUDA_VISIBLE_DEVICES=0 python src/cli_demo.py \
|
602 |
+
--model_name_or_path path_to_llama_model \
|
603 |
+
--adapter_name_or_path path_to_checkpoint \
|
604 |
+
--template default \
|
605 |
+
--finetuning_type lora
|
606 |
+
```
|
607 |
+
|
608 |
+
### Inference with web browser
|
609 |
+
|
610 |
+
```bash
|
611 |
+
CUDA_VISIBLE_DEVICES=0 python src/web_demo.py \
|
612 |
+
--model_name_or_path path_to_llama_model \
|
613 |
+
--adapter_name_or_path path_to_checkpoint \
|
614 |
+
--template default \
|
615 |
+
--finetuning_type lora
|
616 |
+
```
|
617 |
+
|
618 |
+
### Evaluation
|
619 |
+
|
620 |
+
```bash
|
621 |
+
CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
|
622 |
+
--model_name_or_path path_to_llama_model \
|
623 |
+
--adapter_name_or_path path_to_checkpoint \
|
624 |
+
--template vanilla \
|
625 |
+
--finetuning_type lora \
|
626 |
+
--task mmlu \
|
627 |
+
--split test \
|
628 |
+
--lang en \
|
629 |
+
--n_shot 5 \
|
630 |
+
--batch_size 4
|
631 |
+
```
|
632 |
+
|
633 |
+
### Predict
|
634 |
+
|
635 |
+
```bash
|
636 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
637 |
+
--stage sft \
|
638 |
+
--do_predict \
|
639 |
+
--model_name_or_path path_to_llama_model \
|
640 |
+
--adapter_name_or_path path_to_checkpoint \
|
641 |
+
--dataset alpaca_gpt4_en \
|
642 |
+
--template default \
|
643 |
+
--finetuning_type lora \
|
644 |
+
--output_dir path_to_predict_result \
|
645 |
+
--per_device_eval_batch_size 1 \
|
646 |
+
--max_samples 100 \
|
647 |
+
--predict_with_generate \
|
648 |
+
--fp16
|
649 |
+
```
|
650 |
+
|
651 |
+
> [!WARNING]
|
652 |
+
> Use `--per_device_eval_batch_size=1` for LLaMA-2 models in fp16 prediction.
|
653 |
+
|
654 |
+
> [!TIP]
|
655 |
+
> We recommend using `--per_device_eval_batch_size=1` and `--max_target_length 128` for 4/8-bit prediction.
|
656 |
+
|
657 |
+
### Dockerize Training
|
658 |
+
|
659 |
+
#### Get ready
|
660 |
+
|
661 |
+
A container environment such as Docker or Docker Compose is required.
|
662 |
+
|
663 |
+
#### Docker support
|
664 |
+
|
665 |
+
```bash
|
666 |
+
docker build -f ./Dockerfile -t llama-factory:latest .
|
667 |
+
|
668 |
+
docker run --gpus=all -v ./hf_cache:/root/.cache/huggingface/ -v ./data:/app/data -v ./output:/app/output -p 7860:7860 --shm-size 16G --name llama_factory -d llama-factory:latest
|
669 |
+
```
|
670 |
+
|
671 |
+
#### Docker Compose support
|
672 |
+
|
673 |
+
```bash
|
674 |
+
docker compose -f ./docker-compose.yml up -d
|
675 |
+
```
|
676 |
+
|
677 |
+
> [!TIP]
|
678 |
+
> Details about volume:
|
679 |
+
> * hf_cache: reuses the Hugging Face cache on the host machine. Remount it if a cache already exists in a different directory.
|
680 |
+
> * data: place datasets in this directory on the host machine so that they can be selected in the LLaMA Board GUI.
|
681 |
+
> * output: set the export dir to this location so that the merged result can be accessed directly on the host machine.
|
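For example, to reuse a Hugging Face cache that already lives at a non-default host path (`/data/hf_cache` is a placeholder), change the first mount when starting the container:

```bash
# Same run command as above, with the hf_cache volume remounted.
docker run --gpus=all \
    -v /data/hf_cache:/root/.cache/huggingface/ \
    -v ./data:/app/data -v ./output:/app/output \
    -p 7860:7860 --shm-size 16G --name llama_factory -d llama-factory:latest
```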
682 |
+
|
683 |
+
## Projects using LLaMA Factory
|
684 |
+
|
685 |
+
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
686 |
+
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
687 |
+
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
688 |
+
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
689 |
+
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
690 |
+
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
691 |
+
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
692 |
+
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
693 |
+
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
694 |
+
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
695 |
+
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
696 |
+
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
697 |
+
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. 2024. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
698 |
+
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
699 |
+
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
700 |
+
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
701 |
+
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: A large language model for Astronomy, based on ChatGLM2-6B and Qwen-14B.
|
702 |
+
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: A large language model specialized in Chinese legal domain, based on Baichuan-13B, is capable of retrieving and reasoning on legal knowledge.
|
703 |
+
1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: A large language model specialized in Chinese medical domain, based on Baichuan-7B and ChatGLM-6B.
|
704 |
+
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: A series of large language models for Chinese medical domain, based on LLaMA2-7B and Baichuan-13B.
|
705 |
+
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**: A series of MBTI Personality large language models, capable of giving any LLM 16 different personality types based on different datasets and training methods.
|
706 |
+
|
707 |
+
> [!TIP]
|
708 |
+
> If you have a project that should be incorporated, please contact us via email or create a pull request.
|
709 |
+
|
710 |
+
## License
|
711 |
+
|
712 |
+
This repository is licensed under the [Apache-2.0 License](LICENSE).
|
713 |
+
|
714 |
+
Please follow the model licenses to use the corresponding model weights: [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
715 |
+
|
716 |
+
## Citation
|
717 |
+
|
718 |
+
If this work is helpful, please kindly cite as:
|
719 |
+
|
720 |
+
```bibtex
|
721 |
+
@Misc{llama-factory,
|
722 |
+
title = {LLaMA Factory},
|
723 |
+
author = {hiyouga},
|
724 |
+
howpublished = {\url{https://github.com/hiyouga/LLaMA-Factory}},
|
725 |
+
year = {2023}
|
726 |
+
}
|
727 |
+
```
|
728 |
+
|
729 |
+
## Acknowledgement
|
730 |
+
|
731 |
+
This repo benefits from [PEFT](https://github.com/huggingface/peft), [QLoRA](https://github.com/artidoro/qlora) and [FastChat](https://github.com/lm-sys/FastChat). Thanks for their wonderful works.
|
732 |
+
|
733 |
+
## Star History
|
734 |
+
|
735 |
+
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)
|
LLaMA-Factory/README_zh.md
ADDED
@@ -0,0 +1,708 @@
1 |
+
![# LLaMA Factory](assets/logo.png)
|
2 |
+
|
3 |
+
[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Factory?style=social)](https://github.com/hiyouga/LLaMA-Factory/stargazers)
|
4 |
+
[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Factory)](LICENSE)
|
5 |
+
[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Factory)](https://github.com/hiyouga/LLaMA-Factory/commits/main)
|
6 |
+
[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
|
7 |
+
[![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
|
8 |
+
[![Citation](https://img.shields.io/badge/citation-21-green)](#使用了-llama-factory-的项目)
|
9 |
+
[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Factory/pulls)
|
10 |
+
[![Discord](https://dcbadge.vercel.app/api/server/rKfvV9r9FK?compact=true&style=flat)](https://discord.gg/rKfvV9r9FK)
|
11 |
+
[![Twitter](https://img.shields.io/twitter/follow/llamafactory_ai)](https://twitter.com/llamafactory_ai)
|
12 |
+
[![Spaces](https://img.shields.io/badge/🤗-Open%20in%20Spaces-blue)](https://huggingface.co/spaces/hiyouga/LLaMA-Board)
|
13 |
+
[![Studios](https://img.shields.io/badge/ModelScope-Open%20in%20Studios-blue)](https://modelscope.cn/studios/hiyouga/LLaMA-Board)
|
14 |
+
[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing)
|
15 |
+
|
16 |
+
👋 加入我们的[微信群](assets/wechat.jpg)。
|
17 |
+
|
18 |
+
\[ [English](README.md) | 中文 \]
|
19 |
+
|
20 |
+
**微调大模型可以像这样轻松…**
|
21 |
+
|
22 |
+
https://github.com/hiyouga/LLaMA-Factory/assets/16256802/ec36a9dd-37f4-4f72-81bd-d76c6d0a6594
|
23 |
+
|
24 |
+
选择你的打开方式:
|
25 |
+
|
26 |
+
- **Hugging Face 空间**:https://huggingface.co/spaces/hiyouga/LLaMA-Board
|
27 |
+
- **魔搭社区**:https://modelscope.cn/studios/hiyouga/LLaMA-Board
|
28 |
+
- **Colab**:https://colab.research.google.com/drive/1eRTPn37ltBbYsISy9Aw2NuI2Aq5CQrD9?usp=sharing
|
29 |
+
- **本地机器**:请见[如何使用](#如何使用)
|
30 |
+
|
31 |
+
## 目录
|
32 |
+
|
33 |
+
- [项目特色](#项目特色)
|
34 |
+
- [性能指标](#性能指标)
|
35 |
+
- [更新日志](#更新日志)
|
36 |
+
- [模型](#模型)
|
37 |
+
- [训练方法](#训练方法)
|
38 |
+
- [数据集](#数据集)
|
39 |
+
- [软硬件依赖](#软硬件依赖)
|
40 |
+
- [如何使用](#如何使用)
|
41 |
+
- [使用了 LLaMA Factory 的项目](#使用了-llama-factory-的项目)
|
42 |
+
- [协议](#协议)
|
43 |
+
- [引用](#引用)
|
44 |
+
- [致谢](#致谢)
|
45 |
+
|
46 |
+
## 项目特色
|
47 |
+
|
48 |
+
- **多种模型**:LLaMA、Mistral、Mixtral-MoE、Qwen、Yi、Gemma、Baichuan、ChatGLM、Phi 等等。
|
49 |
+
- **集成方法**:(增量)预训练、指令监督微调、奖励模型训练、PPO 训练和 DPO 训练。
|
50 |
+
- **多种精度**:32 比特全参数微调、16 比特冻结微调、16 比特 LoRA 微调和基于 AQLM/AWQ/GPTQ/LLM.int8 的 2/4/8 比特 QLoRA 微调。
|
51 |
+
- **先进算法**:GaLore、DoRA、LongLoRA、LLaMA Pro、LoRA+、LoftQ 和 Agent 微调。
|
52 |
+
- **实用技巧**:FlashAttention-2、Unsloth、RoPE scaling、NEFTune 和 rsLoRA。
|
53 |
+
- **实验监控**:LlamaBoard、TensorBoard、Wandb、MLflow 等等。
|
54 |
+
- **极速推理**:基于 vLLM 的 OpenAI 风格 API、浏览器界面和命令行接口。
|
55 |
+
|
56 |
+
## 性能指标
|
57 |
+
|
58 |
+
与 ChatGLM 官方的 [P-Tuning](https://github.com/THUDM/ChatGLM2-6B/tree/main/ptuning) 微调相比,LLaMA-Factory 的 LoRA 微调提供了 **3.7 倍**的加速比,同时在广告文案生成任务上取得了更高的 Rouge 分数。结合 4 比特量化技术,LLaMA-Factory 的 QLoRA 微调进一步降低了 GPU 显存消耗。
|
59 |
+
|
60 |
+
![benchmark](assets/benchmark.svg)
|
61 |
+
|
62 |
+
<details><summary>变量定义</summary>
|
63 |
+
|
64 |
+
- **Training Speed**: 训练阶段每秒处理的样本数量。(批处理大小=4,截断长度=1024)
|
65 |
+
- **Rouge Score**: [广告文案生成](https://aclanthology.org/D19-1321.pdf)任务验证集上的 Rouge-2 分数。(批处理大小=4,截断长度=1024)
|
66 |
+
- **GPU Memory**: 4 比特量化训练的 GPU 显存峰值。(批处理大小=1,截断长度=1024)
|
67 |
+
- 我们在 ChatGLM 的 P-Tuning 中采用 `pre_seq_len=128`,在 LLaMA-Factory 的 LoRA 微调中采用 `lora_rank=32`。
|
68 |
+
|
69 |
+
</details>
|
70 |
+
|
71 |
+
## 更新日志
|
72 |
+
|
73 |
+
[24/03/13] 我们支持了 **[LoRA+](https://arxiv.org/abs/2402.12354)**。请使用 `loraplus_lr_ratio=16.0` 参数开启 LoRA+ 方法。
|
74 |
+
|
75 |
+
[24/03/07] 我们支持了梯度低秩投影(**[GaLore](https://arxiv.org/abs/2403.03507)**)算法。请使用 `--use_galore` 参数切换显存高效的优化器。
|
76 |
+
|
77 |
+
[24/03/07] 我们集成了 **[vLLM](https://github.com/vllm-project/vllm)** 以实现极速并发推理。请使用 `--infer_backend vllm` 来获得 **270%** 的推理速度。(尚不支持 LoRA,请先合并权重。)
|
78 |
+
|
79 |
+
[24/02/28] 我们支持了 **[DoRA](https://arxiv.org/abs/2402.09353)** 微调。请使用 `--use_dora` 参数进行 DoRA 微调。
|
80 |
+
|
81 |
+
[24/02/15] 我们支持了 [LLaMA Pro](https://github.com/TencentARC/LLaMA-Pro) 提出的**块扩展**方法。详细用法请参照 `scripts/llama_pro.py`。
|
82 |
+
|
83 |
+
<details><summary>展开日志</summary>
|
84 |
+
|
85 |
+
[24/02/05] Qwen1.5(Qwen2 测试版)系列模型已在 LLaMA-Factory 中实现微调支持。详情请查阅该[博客页面](https://qwenlm.github.io/zh/blog/qwen1.5/)。
|
86 |
+
|
87 |
+
[24/01/18] 我们针对绝大多数模型实现了 **Agent 微调**,微调时指定 `--dataset glaive_toolcall` 即可使模型获得工具调用能力。
|
88 |
+
|
89 |
+
[23/12/23] 我们针对 LLaMA, Mistral 和 Yi 模型支持了 **[unsloth](https://github.com/unslothai/unsloth)** 的 LoRA 训练加速。请使用 `--use_unsloth` 参数启用 unsloth 优化。该方法可提供 **170%** 的训练速度,详情请查阅[此页面](https://github.com/hiyouga/LLaMA-Factory/wiki/Performance-comparison)。
|
90 |
+
|
91 |
+
[23/12/12] 我们支持了微调最新的混合专家模型 **[Mixtral 8x7B](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)**。硬件需求请查阅[此处](#硬件依赖)。
|
92 |
+
|
93 |
+
[23/12/01] 我们支持了从 **[魔搭社区](https://modelscope.cn/models)** 下载预训练模型和数据集。详细用法请参照 [此教程](#使用魔搭社区可跳过)。
|
94 |
+
|
95 |
+
[23/10/21] 我们支持了 **[NEFTune](https://arxiv.org/abs/2310.05914)** 训练技巧。请使用 `--neftune_noise_alpha` 参数启用 NEFTune,例如 `--neftune_noise_alpha 5`。
|
96 |
+
|
97 |
+
[23/09/27] 我们针对 LLaMA 模型支持了 [LongLoRA](https://github.com/dvlab-research/LongLoRA) 提出的 **$S^2$-Attn**。请使用 `--shift_attn` 参数以启用该功能。
|
98 |
+
|
99 |
+
[23/09/23] 我们在项目中集成了 MMLU、C-Eval 和 CMMLU 评估集。使用方法请参阅[此示例](#模型评估)。
|
100 |
+
|
101 |
+
[23/09/10] 我们支持了 **[FlashAttention-2](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `--flash_attn` 参数以启用 FlashAttention-2。
|
102 |
+
|
103 |
+
[23/08/12] 我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `--rope_scaling linear` 参数训练模型或使用 `--rope_scaling dynamic` 参数评估模型。
|
104 |
+
|
105 |
+
[23/08/11] 我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。使用方法请参阅[此示例](#dpo-训练)。
|
106 |
+
|
107 |
+
[23/07/31] 我们支持了**数据流式加载**。请使用 `--streaming` 和 `--max_steps 10000` 参数来流式加载数据集。
|
108 |
+
|
109 |
+
[23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。
|
110 |
+
|
111 |
+
[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。
|
112 |
+
|
113 |
+
[23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。
|
114 |
+
|
115 |
+
[23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。
|
116 |
+
|
117 |
+
[23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。
|
118 |
+
|
119 |
+
[23/06/03] 我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。请使用 `--quantization_bit 4` 参数进行 4 比特量化微调。
|
120 |
+
|
121 |
+
</details>
|
122 |
+
|
123 |
+
## 模型
|
124 |
+
|
125 |
+
| 模型名 | 模型大小 | 默认模块 | Template |
|
126 |
+
| -------------------------------------------------------- | --------------------------- | ----------------- | --------- |
|
127 |
+
| [Baichuan2](https://huggingface.co/baichuan-inc) | 7B/13B | W_pack | baichuan2 |
|
128 |
+
| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
|
129 |
+
| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - |
|
130 |
+
| [ChatGLM3](https://huggingface.co/THUDM/chatglm3-6b) | 6B | query_key_value | chatglm3 |
|
131 |
+
| [DeepSeek (MoE)](https://huggingface.co/deepseek-ai) | 7B/16B/67B | q_proj,v_proj | deepseek |
|
132 |
+
| [Falcon](https://huggingface.co/tiiuae) | 7B/40B/180B | query_key_value | falcon |
|
133 |
+
| [Gemma](https://huggingface.co/google) | 2B/7B | q_proj,v_proj | gemma |
|
134 |
+
| [InternLM2](https://huggingface.co/internlm) | 7B/20B | wqkv | intern2 |
|
135 |
+
| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - |
|
136 |
+
| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 |
|
137 |
+
| [Mistral](https://huggingface.co/mistralai) | 7B | q_proj,v_proj | mistral |
|
138 |
+
| [Mixtral](https://huggingface.co/mistralai) | 8x7B | q_proj,v_proj | mistral |
|
139 |
+
| [OLMo](https://huggingface.co/allenai) | 1B/7B | att_proj | olmo |
|
140 |
+
| [Phi-1.5/2](https://huggingface.co/microsoft) | 1.3B/2.7B | q_proj,v_proj | - |
|
141 |
+
| [Qwen](https://huggingface.co/Qwen) | 1.8B/7B/14B/72B | c_attn | qwen |
|
142 |
+
| [Qwen1.5](https://huggingface.co/Qwen) | 0.5B/1.8B/4B/7B/14B/72B | q_proj,v_proj | qwen |
|
143 |
+
| [StarCoder2](https://huggingface.co/bigcode) | 3B/7B/15B | q_proj,v_proj | - |
|
144 |
+
| [XVERSE](https://huggingface.co/xverse) | 7B/13B/65B | q_proj,v_proj | xverse |
|
145 |
+
| [Yi](https://huggingface.co/01-ai) | 6B/9B/34B | q_proj,v_proj | yi |
|
146 |
+
| [Yuan](https://huggingface.co/IEITYuan) | 2B/51B/102B | q_proj,v_proj | yuan |
|
147 |
+
|
148 |
+
> [!NOTE]
|
149 |
+
> **默认模块**应作为 `--lora_target` 参数的默认值,可使用 `--lora_target all` 参数指定全部模块。
|
150 |
+
>
|
151 |
+
> 对于所有“基座”(Base)模型,`--template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Chat)模型请务必使用**对应的模板**。
|
152 |
+
|
153 |
+
项目所支持模型的完整列表请参阅 [constants.py](src/llmtuner/extras/constants.py)。
|
154 |
+
|
155 |
+
您也可以在 [template.py](src/llmtuner/data/template.py) 中添加自己的对话模板。
|
156 |
+
|
157 |
+
## 训练方法
|
158 |
+
|
159 |
+
| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA |
|
160 |
+
| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
|
161 |
+
| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
162 |
+
| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
163 |
+
| 奖励模型训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
164 |
+
| PPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
165 |
+
| DPO 训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
|
166 |
+
|
167 |
+
> [!NOTE]
|
168 |
+
> 请使用 `--quantization_bit 4` 参数来启用 QLoRA 训练。
|
169 |
+
|
170 |
+
## 数据集
|
171 |
+
|
172 |
+
<details><summary>预训练数据集</summary>
|
173 |
+
|
174 |
+
- [Wiki Demo (en)](data/wiki_demo.txt)
|
175 |
+
- [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
|
176 |
+
- [RedPajama V2 (en)](https://huggingface.co/datasets/togethercomputer/RedPajama-Data-V2)
|
177 |
+
- [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
|
178 |
+
- [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
|
179 |
+
- [Pile (en)](https://huggingface.co/datasets/EleutherAI/pile)
|
180 |
+
- [SkyPile (zh)](https://huggingface.co/datasets/Skywork/SkyPile-150B)
|
181 |
+
- [The Stack (en)](https://huggingface.co/datasets/bigcode/the-stack)
|
182 |
+
- [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
|
183 |
+
|
184 |
+
</details>
|
185 |
+
|
186 |
+
<details><summary>指令微调数据集</summary>
|
187 |
+
|
188 |
+
- [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
|
189 |
+
- [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
|
190 |
+
- [Alpaca GPT4 (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
191 |
+
- [Self Cognition (zh)](data/self_cognition.json)
|
192 |
+
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
193 |
+
- [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
|
194 |
+
- [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
|
195 |
+
- [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
|
196 |
+
- [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
|
197 |
+
- [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
|
198 |
+
- [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
|
199 |
+
- [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
|
200 |
+
- [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
|
201 |
+
- [UltraChat (en)](https://github.com/thunlp/UltraChat)
|
202 |
+
- [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
|
203 |
+
- [OpenPlatypus (en)](https://huggingface.co/datasets/garage-bAInd/Open-Platypus)
|
204 |
+
- [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
|
205 |
+
- [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
|
206 |
+
- [OpenOrca (en)](https://huggingface.co/datasets/Open-Orca/OpenOrca)
|
207 |
+
- [SlimOrca (en)](https://huggingface.co/datasets/Open-Orca/SlimOrca)
|
208 |
+
- [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
|
209 |
+
- [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
|
210 |
+
- [Wiki QA (en)](https://huggingface.co/datasets/wiki_qa)
|
211 |
+
- [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
|
212 |
+
- [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
|
213 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
214 |
+
- [deepctrl (en&zh)](https://www.modelscope.cn/datasets/deepctrl/deepctrl-sft-data)
|
215 |
+
- [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
|
216 |
+
- [ShareGPT Hyperfiltered (en)](https://huggingface.co/datasets/totally-not-an-llm/sharegpt-hyperfiltered-3k)
|
217 |
+
- [ShareGPT4 (en&zh)](https://huggingface.co/datasets/shibing624/sharegpt_gpt4)
|
218 |
+
- [UltraChat 200k (en)](https://huggingface.co/datasets/HuggingFaceH4/ultrachat_200k)
|
219 |
+
- [AgentInstruct (en)](https://huggingface.co/datasets/THUDM/AgentInstruct)
|
220 |
+
- [LMSYS Chat 1M (en)](https://huggingface.co/datasets/lmsys/lmsys-chat-1m)
|
221 |
+
- [Evol Instruct V2 (en)](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k)
|
222 |
+
- [Glaive Function Calling V2 (en)](https://huggingface.co/datasets/glaiveai/glaive-function-calling-v2)
|
223 |
+
- [Cosmopedia (en)](https://huggingface.co/datasets/HuggingFaceTB/cosmopedia)
|
224 |
+
- [Open Assistant (de)](https://huggingface.co/datasets/mayflowergmbh/oasst_de)
|
225 |
+
- [Dolly 15k (de)](https://huggingface.co/datasets/mayflowergmbh/dolly-15k_de)
|
226 |
+
- [Alpaca GPT4 (de)](https://huggingface.co/datasets/mayflowergmbh/alpaca-gpt4_de)
|
227 |
+
- [OpenSchnabeltier (de)](https://huggingface.co/datasets/mayflowergmbh/openschnabeltier_de)
|
228 |
+
- [Evol Instruct (de)](https://huggingface.co/datasets/mayflowergmbh/evol-instruct_de)
|
229 |
+
- [Dolphin (de)](https://huggingface.co/datasets/mayflowergmbh/dolphin_de)
|
230 |
+
- [Booksum (de)](https://huggingface.co/datasets/mayflowergmbh/booksum_de)
|
231 |
+
- [Airoboros (de)](https://huggingface.co/datasets/mayflowergmbh/airoboros-3.0_de)
|
232 |
+
- [Ultrachat (de)](https://huggingface.co/datasets/mayflowergmbh/ultra-chat_de)
|
233 |
+
|
234 |
+
</details>
|
235 |
+
|
236 |
+
<details><summary>偏好数据集</summary>
|
237 |
+
|
238 |
+
- [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
|
239 |
+
- [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
|
240 |
+
- [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
|
241 |
+
- [Nectar (en)](https://huggingface.co/datasets/berkeley-nest/Nectar)
|
242 |
+
- [Orca DPO (de)](https://huggingface.co/datasets/mayflowergmbh/intel_orca_dpo_pairs_de)
|
243 |
+
|
244 |
+
</details>
|
245 |
+
|
246 |
+
使用方法请参考 [data/README_zh.md](data/README_zh.md) 文件。
|
247 |
+
|
248 |
+
部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。
|
249 |
+
|
250 |
+
```bash
|
251 |
+
pip install --upgrade huggingface_hub
|
252 |
+
huggingface-cli login
|
253 |
+
```
|
254 |
+
|
255 |
+
## 软硬件依赖
|
256 |
+
|
257 |
+
| 必需项 | 至少 | 推荐 |
|
258 |
+
| ------------ | ------- | --------- |
|
259 |
+
| python | 3.8 | 3.10 |
|
260 |
+
| torch | 1.13.1 | 2.2.0 |
|
261 |
+
| transformers | 4.37.2 | 4.38.2 |
|
262 |
+
| datasets | 2.14.3 | 2.17.1 |
|
263 |
+
| accelerate | 0.27.2 | 0.27.2 |
|
264 |
+
| peft | 0.9.0 | 0.9.0 |
|
265 |
+
| trl | 0.7.11 | 0.7.11 |
|
266 |
+
|
267 |
+
| 可选项 | 至少 | 推荐 |
|
268 |
+
| ------------ | ------- | --------- |
|
269 |
+
| CUDA | 11.6 | 12.2 |
|
270 |
+
| deepspeed | 0.10.0 | 0.13.1 |
|
271 |
+
| bitsandbytes | 0.39.0 | 0.41.3 |
|
272 |
+
| flash-attn | 2.3.0 | 2.5.5 |
|
273 |
+
|
274 |
+
### 硬件依赖
|
275 |
+
|
276 |
+
\* *估算值*
|
277 |
+
|
278 |
+
| 训练方法 | 精度 | 7B | 13B | 30B | 70B | 8x7B |
|
279 |
+
| ------- | ---- | ----- | ----- | ----- | ------ | ------ |
|
280 |
+
| 全参数 | AMP | 120GB | 240GB | 600GB | 1200GB | 900GB |
|
281 |
+
| 全参数 | 16 | 60GB | 120GB | 300GB | 600GB | 400GB |
|
282 |
+
| GaLore | 16 | 16GB | 32GB | 64GB | 160GB | 120GB |
|
283 |
+
| 部分参数 | 16 | 20GB | 40GB | 80GB | 200GB | 160GB |
|
284 |
+
| LoRA | 16 | 16GB | 32GB | 64GB | 160GB | 120GB |
|
285 |
+
| QLoRA | 8 | 10GB | 20GB | 40GB | 80GB | 60GB |
|
286 |
+
| QLoRA | 4 | 6GB | 12GB | 24GB | 48GB | 30GB |
|
287 |
+
| QLoRA | 2 | 4GB | 8GB | 16GB | 24GB | 18GB |
|
288 |
+
|
289 |
+
## 如何使用
|
290 |
+
|
291 |
+
### 数据准备(可跳过)
|
292 |
+
|
293 |
+
关于数据集文件的格式,请参考 [data/README_zh.md](data/README_zh.md) 的内容。构建自定义数据集时,既可以使用单个 `.json` 文件,也可以使用一个[数据加载脚本](https://huggingface.co/docs/datasets/dataset_script)和多个文件。
|
294 |
+
|
295 |
+
> [!NOTE]
|
296 |
+
> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件,该文件的格式请参考 `data/README_zh.md`。
|
297 |
+
|
298 |
+
### 环境搭建(可跳过)
|
299 |
+
|
300 |
+
```bash
|
301 |
+
git clone https://github.com/hiyouga/LLaMA-Factory.git
|
302 |
+
conda create -n llama_factory python=3.10
|
303 |
+
conda activate llama_factory
|
304 |
+
cd LLaMA-Factory
|
305 |
+
pip install -r requirements.txt
|
306 |
+
```
|
307 |
+
|
308 |
+
如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.2。
|
309 |
+
|
310 |
+
```bash
|
311 |
+
pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.40.0-py3-none-win_amd64.whl
|
312 |
+
```
|
313 |
+
|
314 |
+
如果要在 Windows 平台上开启 FlashAttention-2,需要安装预编译的 `flash-attn` 库,支持 CUDA 12.1 到 12.2,请根据需求到 [flash-attention](https://github.com/bdashore3/flash-attention/releases) 下载对应版本安装。
|
315 |
+
|
316 |
+
### 使用魔搭社区(可跳过)
|
317 |
+
|
318 |
+
如果您在 Hugging Face 模型和数据集的下载中遇到了问题,可以通过下述方法使用魔搭社区。
|
319 |
+
|
320 |
+
```bash
|
321 |
+
export USE_MODELSCOPE_HUB=1 # Windows 使用 `set USE_MODELSCOPE_HUB=1`
|
322 |
+
```
|
323 |
+
|
324 |
+
接着即可通过指定模型名称来训练对应的模型。(在[魔搭社区](https://modelscope.cn/models)查看所有可用的模型)
|
325 |
+
|
326 |
+
```bash
|
327 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
328 |
+
--model_name_or_path modelscope/Llama-2-7b-ms \
|
329 |
+
... # 参数同下
|
330 |
+
```
|
331 |
+
|
332 |
+
LLaMA Board 同样支持魔搭社区的模型和数据集下载。
|
333 |
+
|
334 |
+
```bash
|
335 |
+
CUDA_VISIBLE_DEVICES=0 USE_MODELSCOPE_HUB=1 python src/train_web.py
|
336 |
+
```
|
337 |
+
|
338 |
+
### 单 GPU 训练
|
339 |
+
|
340 |
+
> [!IMPORTANT]
|
341 |
+
> 如果您使用多张 GPU 训练模型,请移步[多 GPU 分布式训练](#多-gpu-分布式训练)部分。
|
342 |
+
|
343 |
+
#### LLaMA Board GUI
|
344 |
+
|
345 |
+
```bash
|
346 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_web.py
|
347 |
+
```
|
348 |
+
|
349 |
+
#### 预训练
|
350 |
+
|
351 |
+
```bash
|
352 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
353 |
+
--stage pt \
|
354 |
+
--do_train \
|
355 |
+
--model_name_or_path path_to_llama_model \
|
356 |
+
--dataset wiki_demo \
|
357 |
+
--finetuning_type lora \
|
358 |
+
--lora_target q_proj,v_proj \
|
359 |
+
--output_dir path_to_pt_checkpoint \
|
360 |
+
--overwrite_cache \
|
361 |
+
--per_device_train_batch_size 4 \
|
362 |
+
--gradient_accumulation_steps 4 \
|
363 |
+
--lr_scheduler_type cosine \
|
364 |
+
--logging_steps 10 \
|
365 |
+
--save_steps 1000 \
|
366 |
+
--learning_rate 5e-5 \
|
367 |
+
--num_train_epochs 3.0 \
|
368 |
+
--plot_loss \
|
369 |
+
--fp16
|
370 |
+
```
|
371 |
+
|
372 |
+
#### 指令监督微调
|
373 |
+
|
374 |
+
```bash
|
375 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
376 |
+
--stage sft \
|
377 |
+
--do_train \
|
378 |
+
--model_name_or_path path_to_llama_model \
|
379 |
+
--dataset alpaca_gpt4_zh \
|
380 |
+
--template default \
|
381 |
+
--finetuning_type lora \
|
382 |
+
--lora_target q_proj,v_proj \
|
383 |
+
--output_dir path_to_sft_checkpoint \
|
384 |
+
--overwrite_cache \
|
385 |
+
--per_device_train_batch_size 4 \
|
386 |
+
--gradient_accumulation_steps 4 \
|
387 |
+
--lr_scheduler_type cosine \
|
388 |
+
--logging_steps 10 \
|
389 |
+
--save_steps 1000 \
|
390 |
+
--learning_rate 5e-5 \
|
391 |
+
--num_train_epochs 3.0 \
|
392 |
+
--plot_loss \
|
393 |
+
--fp16
|
394 |
+
```
|
395 |
+
|
396 |
+
#### 奖励模型训练
|
397 |
+
|
398 |
+
```bash
|
399 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
400 |
+
--stage rm \
|
401 |
+
--do_train \
|
402 |
+
--model_name_or_path path_to_llama_model \
|
403 |
+
--adapter_name_or_path path_to_sft_checkpoint \
|
404 |
+
--create_new_adapter \
|
405 |
+
--dataset comparison_gpt4_zh \
|
406 |
+
--template default \
|
407 |
+
--finetuning_type lora \
|
408 |
+
--lora_target q_proj,v_proj \
|
409 |
+
--output_dir path_to_rm_checkpoint \
|
410 |
+
--per_device_train_batch_size 2 \
|
411 |
+
--gradient_accumulation_steps 4 \
|
412 |
+
--lr_scheduler_type cosine \
|
413 |
+
--logging_steps 10 \
|
414 |
+
--save_steps 1000 \
|
415 |
+
--learning_rate 1e-6 \
|
416 |
+
--num_train_epochs 1.0 \
|
417 |
+
--plot_loss \
|
418 |
+
--fp16
|
419 |
+
```
|
420 |
+
|
421 |
+
#### PPO 训练
|
422 |
+
|
423 |
+
```bash
|
424 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
425 |
+
--stage ppo \
|
426 |
+
--do_train \
|
427 |
+
--model_name_or_path path_to_llama_model \
|
428 |
+
--adapter_name_or_path path_to_sft_checkpoint \
|
429 |
+
--create_new_adapter \
|
430 |
+
--dataset alpaca_gpt4_zh \
|
431 |
+
--template default \
|
432 |
+
--finetuning_type lora \
|
433 |
+
--lora_target q_proj,v_proj \
|
434 |
+
--reward_model path_to_rm_checkpoint \
|
435 |
+
--output_dir path_to_ppo_checkpoint \
|
436 |
+
--per_device_train_batch_size 2 \
|
437 |
+
--gradient_accumulation_steps 4 \
|
438 |
+
--lr_scheduler_type cosine \
|
439 |
+
--top_k 0 \
|
440 |
+
--top_p 0.9 \
|
441 |
+
--logging_steps 10 \
|
442 |
+
--save_steps 1000 \
|
443 |
+
--learning_rate 1e-5 \
|
444 |
+
--num_train_epochs 1.0 \
|
445 |
+
--plot_loss \
|
446 |
+
--fp16
|
447 |
+
```
|
448 |
+
|
449 |
+
> [!TIP]
|
450 |
+
> 使用 `--adapter_name_or_path path_to_sft_checkpoint,path_to_ppo_checkpoint` 来进行微调模型的推理。
|
451 |
+
|
452 |
+
> [!WARNING]
|
453 |
+
> 如果使用 fp16 精度进行 LLaMA-2 模型的 PPO 训练,请使用 `--per_device_train_batch_size=1`。
|
454 |
+
|
455 |
+
#### DPO 训练
|
456 |
+
|
457 |
+
```bash
|
458 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
459 |
+
--stage dpo \
|
460 |
+
--do_train \
|
461 |
+
--model_name_or_path path_to_llama_model \
|
462 |
+
--adapter_name_or_path path_to_sft_checkpoint \
|
463 |
+
--create_new_adapter \
|
464 |
+
--dataset comparison_gpt4_zh \
|
465 |
+
--template default \
|
466 |
+
--finetuning_type lora \
|
467 |
+
--lora_target q_proj,v_proj \
|
468 |
+
--output_dir path_to_dpo_checkpoint \
|
469 |
+
--per_device_train_batch_size 2 \
|
470 |
+
--gradient_accumulation_steps 4 \
|
471 |
+
--lr_scheduler_type cosine \
|
472 |
+
--logging_steps 10 \
|
473 |
+
--save_steps 1000 \
|
474 |
+
--learning_rate 1e-5 \
|
475 |
+
--num_train_epochs 1.0 \
|
476 |
+
--plot_loss \
|
477 |
+
--fp16
|
478 |
+
```
|
479 |
+
|
480 |
+
> [!TIP]
|
481 |
+
> 使用 `--adapter_name_or_path path_to_sft_checkpoint,path_to_dpo_checkpoint` 来进行微调模型的推理。
|
482 |
+
|
483 |
+
### 多 GPU 分布式训练
|
484 |
+
|
485 |
+
#### 使用 Huggingface Accelerate
|
486 |
+
|
487 |
+
```bash
|
488 |
+
accelerate launch --config_file config.yaml src/train_bash.py # 参数同上
|
489 |
+
```
|
490 |
+
|
491 |
+
<details><summary>使用 Accelerate 进行 LoRA 训练的 config.yaml 示例</summary>
|
492 |
+
|
493 |
+
```yaml
|
494 |
+
compute_environment: LOCAL_MACHINE
|
495 |
+
debug: false
|
496 |
+
distributed_type: MULTI_GPU
|
497 |
+
downcast_bf16: 'no'
|
498 |
+
gpu_ids: all
|
499 |
+
machine_rank: 0
|
500 |
+
main_training_function: main
|
501 |
+
mixed_precision: fp16
|
502 |
+
num_machines: 1
|
503 |
+
num_processes: 4
|
504 |
+
rdzv_backend: static
|
505 |
+
same_network: true
|
506 |
+
tpu_env: []
|
507 |
+
tpu_use_cluster: false
|
508 |
+
tpu_use_sudo: false
|
509 |
+
use_cpu: false
|
510 |
+
```
|
511 |
+
|
512 |
+
</details>
|
513 |
+
|
514 |
+
> [!TIP]
|
515 |
+
> 我们推荐使用 Accelerate 进行 LoRA 训练。
|
516 |
+
|
517 |
+
#### 使用 DeepSpeed
|
518 |
+
|
519 |
+
```bash
|
520 |
+
deepspeed --num_gpus 8 src/train_bash.py \
|
521 |
+
--deepspeed ds_config.json \
|
522 |
+
... # 参数同上
|
523 |
+
```
|
524 |
+
|
525 |
+
<details><summary>使用 DeepSpeed ZeRO-2 进行全参数训练的 ds_config.json 示例</summary>
|
526 |
+
|
527 |
+
```json
|
528 |
+
{
|
529 |
+
"train_batch_size": "auto",
|
530 |
+
"train_micro_batch_size_per_gpu": "auto",
|
531 |
+
"gradient_accumulation_steps": "auto",
|
532 |
+
"gradient_clipping": "auto",
|
533 |
+
"zero_allow_untested_optimizer": true,
|
534 |
+
"fp16": {
|
535 |
+
"enabled": "auto",
|
536 |
+
"loss_scale": 0,
|
537 |
+
"loss_scale_window": 1000,
|
538 |
+
"initial_scale_power": 16,
|
539 |
+
"hysteresis": 2,
|
540 |
+
"min_loss_scale": 1
|
541 |
+
},
|
542 |
+
"bf16": {
|
543 |
+
"enabled": "auto"
|
544 |
+
},
|
545 |
+
"zero_optimization": {
|
546 |
+
"stage": 2,
|
547 |
+
"allgather_partitions": true,
|
548 |
+
"allgather_bucket_size": 5e8,
|
549 |
+
"overlap_comm": true,
|
550 |
+
"reduce_scatter": true,
|
551 |
+
"reduce_bucket_size": 5e8,
|
552 |
+
"contiguous_gradients": true,
|
553 |
+
"round_robin_gradients": true
|
554 |
+
}
|
555 |
+
}
|
556 |
+
```
|
557 |
+
|
558 |
+
</details>
|
559 |
+
|
560 |
+
> [!TIP]
|
561 |
+
> 更多训练脚本请查看 [examples](examples)。
|
562 |
+
|
563 |
+
### 合并 LoRA 权重并导出模型
|
564 |
+
|
565 |
+
```bash
|
566 |
+
CUDA_VISIBLE_DEVICES=0 python src/export_model.py \
|
567 |
+
--model_name_or_path path_to_llama_model \
|
568 |
+
--adapter_name_or_path path_to_checkpoint \
|
569 |
+
--template default \
|
570 |
+
--finetuning_type lora \
|
571 |
+
--export_dir path_to_export \
|
572 |
+
--export_size 2 \
|
573 |
+
--export_legacy_format False
|
574 |
+
```
|
575 |
+
|
576 |
+
> [!WARNING]
|
577 |
+
> 尚不支持量化模型的 LoRA 权重合并及导出。
|
578 |
+
|
579 |
+
> [!TIP]
|
580 |
+
> 仅使用 `--model_name_or_path path_to_export` 来加载导出后的模型。
|
581 |
+
>
|
582 |
+
> 合并 LoRA 权重之后可再次使用 `--export_quantization_bit 4` 和 `--export_quantization_dataset data/c4_demo.json` 基于 AutoGPTQ 量化模型。
|
583 |
+
|
584 |
+
### 使用 OpenAI 风格 API 推理
|
585 |
+
|
586 |
+
```bash
|
587 |
+
CUDA_VISIBLE_DEVICES=0 API_PORT=8000 python src/api_demo.py \
|
588 |
+
--model_name_or_path path_to_llama_model \
|
589 |
+
--adapter_name_or_path path_to_checkpoint \
|
590 |
+
--template default \
|
591 |
+
--finetuning_type lora
|
592 |
+
```
|
593 |
+
|
594 |
+
> [!TIP]
|
595 |
+
> 关于 API 文档请见 `http://localhost:8000/docs`。
|
596 |
+
|
597 |
+
### 使用命令行推理
|
598 |
+
|
599 |
+
```bash
|
600 |
+
CUDA_VISIBLE_DEVICES=0 python src/cli_demo.py \
|
601 |
+
--model_name_or_path path_to_llama_model \
|
602 |
+
--adapter_name_or_path path_to_checkpoint \
|
603 |
+
--template default \
|
604 |
+
--finetuning_type lora
|
605 |
+
```
|
606 |
+
|
607 |
+
### 使用浏览器推理
|
608 |
+
|
609 |
+
```bash
|
610 |
+
CUDA_VISIBLE_DEVICES=0 python src/web_demo.py \
|
611 |
+
--model_name_or_path path_to_llama_model \
|
612 |
+
--adapter_name_or_path path_to_checkpoint \
|
613 |
+
--template default \
|
614 |
+
--finetuning_type lora
|
615 |
+
```
|
616 |
+
|
617 |
+
### 模型评估
|
618 |
+
|
619 |
+
```bash
|
620 |
+
CUDA_VISIBLE_DEVICES=0 python src/evaluate.py \
|
621 |
+
--model_name_or_path path_to_llama_model \
|
622 |
+
--adapter_name_or_path path_to_checkpoint \
|
623 |
+
--template vanilla \
|
624 |
+
--finetuning_type lora \
|
625 |
+
--task ceval \
|
626 |
+
--split validation \
|
627 |
+
--lang zh \
|
628 |
+
--n_shot 5 \
|
629 |
+
--batch_size 4
|
630 |
+
```
|
631 |
+
|
632 |
+
### 模型预测
|
633 |
+
|
634 |
+
```bash
|
635 |
+
CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
|
636 |
+
--stage sft \
|
637 |
+
--do_predict \
|
638 |
+
--model_name_or_path path_to_llama_model \
|
639 |
+
--adapter_name_or_path path_to_checkpoint \
|
640 |
+
--dataset alpaca_gpt4_zh \
|
641 |
+
--template default \
|
642 |
+
--finetuning_type lora \
|
643 |
+
--output_dir path_to_predict_result \
|
644 |
+
--per_device_eval_batch_size 1 \
|
645 |
+
--max_samples 100 \
|
646 |
+
--predict_with_generate \
|
647 |
+
--fp16
|
648 |
+
```
|
649 |
+
|
650 |
+
> [!WARNING]
|
651 |
+
> 如果使用 fp16 精度进行 LLaMA-2 模型的预测,请使用 `--per_device_eval_batch_size=1`。
|
652 |
+
|
653 |
+
> [!TIP]
|
654 |
+
> 我们建议在量化模型的预测中使用 `--per_device_eval_batch_size=1` 和 `--max_target_length 128`。
|
655 |
+
|
656 |
+
## 使用了 LLaMA Factory 的项目
|
657 |
+
|
658 |
+
1. Wang et al. ESRL: Efficient Sampling-based Reinforcement Learning for Sequence Generation. 2023. [[arxiv]](https://arxiv.org/abs/2308.02223)
|
659 |
+
1. Yu et al. Open, Closed, or Small Language Models for Text Classification? 2023. [[arxiv]](https://arxiv.org/abs/2308.10092)
|
660 |
+
1. Luceri et al. Leveraging Large Language Models to Detect Influence Campaigns in Social Media. 2023. [[arxiv]](https://arxiv.org/abs/2311.07816)
|
661 |
+
1. Zhang et al. Alleviating Hallucinations of Large Language Models through Induced Hallucinations. 2023. [[arxiv]](https://arxiv.org/abs/2312.15710)
|
662 |
+
1. Wang et al. Know Your Needs Better: Towards Structured Understanding of Marketer Demands with Analogical Reasoning Augmented LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2401.04319)
|
663 |
+
1. Wang et al. CANDLE: Iterative Conceptualization and Instantiation Distillation from Large Language Models for Commonsense Reasoning. 2024. [[arxiv]](https://arxiv.org/abs/2401.07286)
|
664 |
+
1. Choi et al. FACT-GPT: Fact-Checking Augmentation via Claim Matching with LLMs. 2024. [[arxiv]](https://arxiv.org/abs/2402.05904)
|
665 |
+
1. Zhang et al. AutoMathText: Autonomous Data Selection with Language Models for Mathematical Texts. 2024. [[arxiv]](https://arxiv.org/abs/2402.07625)
|
666 |
+
1. Lyu et al. KnowTuning: Knowledge-aware Fine-tuning for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11176)
|
667 |
+
1. Yang et al. LaCo: Large Language Model Pruning via Layer Collapse. 2024. [[arxiv]](https://arxiv.org/abs/2402.11187)
|
668 |
+
1. Bhardwaj et al. Language Models are Homer Simpson! Safety Re-Alignment of Fine-tuned Language Models through Task Arithmetic. 2024. [[arxiv]](https://arxiv.org/abs/2402.11746)
|
669 |
+
1. Yang et al. Enhancing Empathetic Response Generation by Augmenting LLMs with Small-scale Empathetic Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11801)
|
670 |
+
1. Yi et al. Generation Meets Verification: Accelerating Large Language Model Inference with Smart Parallel Auto-Correct Decoding. 2024. [[arxiv]](https://arxiv.org/abs/2402.11809)
|
671 |
+
1. Cao et al. Head-wise Shareable Attention for Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.11819)
|
672 |
+
1. Zhang et al. Enhancing Multilingual Capabilities of Large Language Models through Self-Distillation from Resource-Rich Languages. 2024. [[arxiv]](https://arxiv.org/abs/2402.12204)
|
673 |
+
1. Kim et al. Efficient and Effective Vocabulary Expansion Towards Multilingual Large Language Models. 2024. [[arxiv]](https://arxiv.org/abs/2402.14714)
|
674 |
+
1. **[StarWhisper](https://github.com/Yu-Yang-Li/StarWhisper)**: 天文大模型 StarWhisper,基于 ChatGLM2-6B 和 Qwen-14B 在天文数据上微调而得。
|
675 |
+
1. **[DISC-LawLLM](https://github.com/FudanDISC/DISC-LawLLM)**: 中文法律领域大模型 DISC-LawLLM,基于 Baichuan-13B 微调而得,具有法律推理和知识检索能力。
|
676 |
+
1. **[Sunsimiao](https://github.com/thomas-yanxin/Sunsimiao)**: 孙思邈中文医疗大模型 Sunsimiao,基于 Baichuan-7B 和 ChatGLM-6B 在中文医疗数据上微调而得。
|
677 |
+
1. **[CareGPT](https://github.com/WangRongsheng/CareGPT)**: 医疗大模型项目 CareGPT,基于 LLaMA2-7B 和 Baichuan-13B 在中文医疗数据上微调而得。
|
678 |
+
1. **[MachineMindset](https://github.com/PKU-YuanGroup/Machine-Mindset/)**:MBTI性格大模型项目,根据数据集与训练方式让任意 LLM 拥有 16 个不同的性格类型。
|
679 |
+
|
680 |
+
> [!TIP]
|
681 |
+
> 如果您有项目希望添加至上述列表,请通过邮件联系或者创建一个 PR。
|
682 |
+
|
683 |
+
## 协议
|
684 |
+
|
685 |
+
本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。
|
686 |
+
|
687 |
+
使用模型权重时,请遵循对应的模型协议:[Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/blob/main/Community%20License%20for%20Baichuan%202%20Model.pdf) / [BLOOM](https://huggingface.co/spaces/bigscience/license) / [ChatGLM3](https://github.com/THUDM/ChatGLM3/blob/main/MODEL_LICENSE) / [DeepSeek](https://github.com/deepseek-ai/DeepSeek-LLM/blob/main/LICENSE-MODEL) / [Falcon](https://huggingface.co/tiiuae/falcon-180B/blob/main/LICENSE.txt) / [Gemma](https://ai.google.dev/gemma/terms) / [InternLM2](https://github.com/InternLM/InternLM#license) / [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) / [LLaMA-2](https://ai.meta.com/llama/license/) / [Mistral](LICENSE) / [OLMo](LICENSE) / [Phi-1.5/2](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) / [Qwen](https://github.com/QwenLM/Qwen/blob/main/Tongyi%20Qianwen%20LICENSE%20AGREEMENT) / [StarCoder2](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement) / [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) / [Yi](https://huggingface.co/01-ai/Yi-6B/blob/main/LICENSE) / [Yuan](https://github.com/IEIT-Yuan/Yuan-2.0/blob/main/LICENSE-Yuan)
|
688 |
+
|
689 |
+
## 引用
|
690 |
+
|
691 |
+
如果您觉得此项目有帮助,请考虑以下列格式引用
|
692 |
+
|
693 |
+
```bibtex
|
694 |
+
@Misc{llama-factory,
|
695 |
+
title = {LLaMA Factory},
|
696 |
+
author = {hiyouga},
|
697 |
+
howpublished = {\url{https://github.com/hiyouga/LLaMA-Factory}},
|
698 |
+
year = {2023}
|
699 |
+
}
|
700 |
+
```
|
701 |
+
|
702 |
+
## 致谢
|
703 |
+
|
704 |
+
本项目受益于 [PEFT](https://github.com/huggingface/peft)、[QLoRA](https://github.com/artidoro/qlora) 和 [FastChat](https://github.com/lm-sys/FastChat),感谢以上诸位作者的付出。
|
705 |
+
|
706 |
+
## Star History
|
707 |
+
|
708 |
+
![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Factory&type=Date)
|
LLaMA-Factory/assets/benchmark.svg
ADDED
LLaMA-Factory/assets/logo.png
ADDED
LLaMA-Factory/assets/wechat.jpg
ADDED
LLaMA-Factory/build/lib/llmtuner/__init__.py
ADDED
@@ -0,0 +1,11 @@
# Level: api, webui > chat, eval, train > data, model > extras, hparams

from .api import create_app
from .chat import ChatModel
from .eval import Evaluator
from .train import export_model, run_exp
from .webui import create_ui, create_web_demo


__version__ = "0.5.3"
__all__ = ["create_app", "ChatModel", "Evaluator", "export_model", "run_exp", "create_ui", "create_web_demo"]
LLaMA-Factory/build/lib/llmtuner/api/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .app import create_app


__all__ = ["create_app"]
LLaMA-Factory/build/lib/llmtuner/api/app.py
ADDED
@@ -0,0 +1,224 @@
import json
import os
from contextlib import asynccontextmanager
from typing import Any, Dict, Sequence

from pydantic import BaseModel

from ..chat import ChatModel
from ..data import Role as DataRole
from ..extras.misc import torch_gc
from ..extras.packages import is_fastapi_availble, is_starlette_available, is_uvicorn_available
from .protocol import (
    ChatCompletionMessage,
    ChatCompletionRequest,
    ChatCompletionResponse,
    ChatCompletionResponseChoice,
    ChatCompletionResponseStreamChoice,
    ChatCompletionResponseUsage,
    ChatCompletionStreamResponse,
    Finish,
    Function,
    FunctionCall,
    ModelCard,
    ModelList,
    Role,
    ScoreEvaluationRequest,
    ScoreEvaluationResponse,
)


if is_fastapi_availble():
    from fastapi import FastAPI, HTTPException, status
    from fastapi.middleware.cors import CORSMiddleware


if is_starlette_available():
    from sse_starlette import EventSourceResponse


if is_uvicorn_available():
    import uvicorn


@asynccontextmanager
async def lifespan(app: "FastAPI"):  # collects GPU memory
    yield
    torch_gc()


def dictify(data: "BaseModel") -> Dict[str, Any]:
    try:  # pydantic v2
        return data.model_dump(exclude_unset=True)
    except AttributeError:  # pydantic v1
        return data.dict(exclude_unset=True)


def jsonify(data: "BaseModel") -> str:
    try:  # pydantic v2
        return json.dumps(data.model_dump(exclude_unset=True), ensure_ascii=False)
    except AttributeError:  # pydantic v1
        return data.json(exclude_unset=True, ensure_ascii=False)


def create_app(chat_model: "ChatModel") -> "FastAPI":
    app = FastAPI(lifespan=lifespan)

    app.add_middleware(
        CORSMiddleware,
        allow_origins=["*"],
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    role_mapping = {
        Role.USER: DataRole.USER.value,
        Role.ASSISTANT: DataRole.ASSISTANT.value,
        Role.SYSTEM: DataRole.SYSTEM.value,
        Role.FUNCTION: DataRole.FUNCTION.value,
        Role.TOOL: DataRole.OBSERVATION.value,
    }

    @app.get("/v1/models", response_model=ModelList)
    async def list_models():
        model_card = ModelCard(id="gpt-3.5-turbo")
        return ModelList(data=[model_card])

    @app.post("/v1/chat/completions", response_model=ChatCompletionResponse, status_code=status.HTTP_200_OK)
    async def create_chat_completion(request: ChatCompletionRequest):
        if not chat_model.engine.can_generate:
            raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed")

        if len(request.messages) == 0:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid length")

        if request.messages[0].role == Role.SYSTEM:
            system = request.messages.pop(0).content
        else:
            system = ""

        if len(request.messages) % 2 == 0:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Only supports u/a/u/a/u...")

        input_messages = []
        for i, message in enumerate(request.messages):
            if i % 2 == 0 and message.role not in [Role.USER, Role.TOOL]:
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")
            elif i % 2 == 1 and message.role not in [Role.ASSISTANT, Role.FUNCTION]:
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid role")

            input_messages.append({"role": role_mapping[message.role], "content": message.content})

        tool_list = request.tools
        if isinstance(tool_list, list) and len(tool_list):
            try:
                tools = json.dumps([tool["function"] for tool in tool_list], ensure_ascii=False)
            except Exception:
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid tools")
        else:
            tools = ""

        if request.stream:
            if tools:
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot stream function calls.")

            generate = stream_chat_completion(input_messages, system, tools, request)
            return EventSourceResponse(generate, media_type="text/event-stream")

        responses = await chat_model.achat(
            input_messages,
            system,
            tools,
            do_sample=request.do_sample,
            temperature=request.temperature,
            top_p=request.top_p,
            max_new_tokens=request.max_tokens,
            num_return_sequences=request.n,
        )

        prompt_length, response_length = 0, 0
        choices = []
        for i, response in enumerate(responses):
            if tools:
                result = chat_model.engine.template.format_tools.extract(response.response_text)
            else:
                result = response.response_text

            if isinstance(result, tuple):
                name, arguments = result
                function = Function(name=name, arguments=arguments)
                response_message = ChatCompletionMessage(
                    role=Role.ASSISTANT, tool_calls=[FunctionCall(function=function)]
                )
                finish_reason = Finish.TOOL
            else:
                response_message = ChatCompletionMessage(role=Role.ASSISTANT, content=result)
                finish_reason = Finish.STOP if response.finish_reason == "stop" else Finish.LENGTH

            choices.append(
                ChatCompletionResponseChoice(index=i, message=response_message, finish_reason=finish_reason)
            )
            prompt_length = response.prompt_length
            response_length += response.response_length

        usage = ChatCompletionResponseUsage(
            prompt_tokens=prompt_length,
            completion_tokens=response_length,
            total_tokens=prompt_length + response_length,
        )

        return ChatCompletionResponse(model=request.model, choices=choices, usage=usage)

    async def stream_chat_completion(
        messages: Sequence[Dict[str, str]], system: str, tools: str, request: ChatCompletionRequest
    ):
        choice_data = ChatCompletionResponseStreamChoice(
            index=0, delta=ChatCompletionMessage(role=Role.ASSISTANT, content=""), finish_reason=None
        )
        chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data])
        yield jsonify(chunk)

        async for new_token in chat_model.astream_chat(
            messages,
            system,
            tools,
            do_sample=request.do_sample,
            temperature=request.temperature,
            top_p=request.top_p,
            max_new_tokens=request.max_tokens,
        ):
            if len(new_token) == 0:
                continue

            choice_data = ChatCompletionResponseStreamChoice(
                index=0, delta=ChatCompletionMessage(content=new_token), finish_reason=None
            )
            chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data])
            yield jsonify(chunk)

        choice_data = ChatCompletionResponseStreamChoice(
            index=0, delta=ChatCompletionMessage(), finish_reason=Finish.STOP
        )
        chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data])
        yield jsonify(chunk)
        yield "[DONE]"

    @app.post("/v1/score/evaluation", response_model=ScoreEvaluationResponse, status_code=status.HTTP_200_OK)
    async def create_score_evaluation(request: ScoreEvaluationRequest):
        if chat_model.engine.can_generate:
            raise HTTPException(status_code=status.HTTP_405_METHOD_NOT_ALLOWED, detail="Not allowed")

        if len(request.messages) == 0:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid request")

        scores = await chat_model.aget_scores(request.messages, max_length=request.max_length)
        return ScoreEvaluationResponse(model=request.model, scores=scores)

    return app


if __name__ == "__main__":
    chat_model = ChatModel()
    app = create_app(chat_model)
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("API_PORT", 8000)), workers=1)
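
The `__main__` block above serves an OpenAI-style API on `API_PORT` (default 8000). A minimal client sketch against that endpoint, assuming the server is running locally (standard library only; the `model` field is echoed back rather than used for routing):

```python
import json
import urllib.request

# Hedged example: assumes the server above is listening on localhost:8000.
payload = {
    "model": "gpt-3.5-turbo",
    "messages": [{"role": "user", "content": "Hello!"}],
    "stream": False,
}
req = urllib.request.Request(
    "http://localhost:8000/v1/chat/completions",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    body = json.load(resp)
    print(body["choices"][0]["message"]["content"])
```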
LLaMA-Factory/build/lib/llmtuner/api/protocol.py
ADDED
@@ -0,0 +1,116 @@
import time
from enum import Enum, unique
from typing import List, Optional

from pydantic import BaseModel, Field
from typing_extensions import Literal


@unique
class Role(str, Enum):
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    FUNCTION = "function"
    TOOL = "tool"


@unique
class Finish(str, Enum):
    STOP = "stop"
    LENGTH = "length"
    TOOL = "tool_calls"


class ModelCard(BaseModel):
    id: str
    object: Literal["model"] = "model"
    created: int = Field(default_factory=lambda: int(time.time()))
    owned_by: Literal["owner"] = "owner"


class ModelList(BaseModel):
    object: Literal["list"] = "list"
    data: List[ModelCard] = []


class Function(BaseModel):
    name: str
    arguments: str


class FunctionCall(BaseModel):
    id: Literal["call_default"] = "call_default"
    type: Literal["function"] = "function"
    function: Function


class ChatMessage(BaseModel):
    role: Role
    content: str


class ChatCompletionMessage(BaseModel):
    role: Optional[Role] = None
    content: Optional[str] = None
    tool_calls: Optional[List[FunctionCall]] = None


class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    tools: list = []
    do_sample: bool = True
    temperature: Optional[float] = None
    top_p: Optional[float] = None
    n: int = 1
    max_tokens: Optional[int] = None
    stream: bool = False


class ChatCompletionResponseChoice(BaseModel):
    index: int
    message: ChatCompletionMessage
    finish_reason: Finish


class ChatCompletionResponseStreamChoice(BaseModel):
    index: int
    delta: ChatCompletionMessage
    finish_reason: Optional[Finish] = None


class ChatCompletionResponseUsage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ChatCompletionResponse(BaseModel):
    id: Literal["chatcmpl-default"] = "chatcmpl-default"
    object: Literal["chat.completion"] = "chat.completion"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[ChatCompletionResponseChoice]
    usage: ChatCompletionResponseUsage


class ChatCompletionStreamResponse(BaseModel):
    id: Literal["chatcmpl-default"] = "chatcmpl-default"
    object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
    created: int = Field(default_factory=lambda: int(time.time()))
    model: str
    choices: List[ChatCompletionResponseStreamChoice]


class ScoreEvaluationRequest(BaseModel):
    model: str
    messages: List[str]
    max_length: Optional[int] = None


class ScoreEvaluationResponse(BaseModel):
    id: Literal["scoreeval-default"] = "scoreeval-default"
    object: Literal["score.evaluation"] = "score.evaluation"
    model: str
    scores: List[float]
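
As a quick check of the schemas above, a small sketch constructing a request object (assuming the package is importable as `llmtuner`; unset optional fields are later dropped by the `exclude_unset` helpers in app.py):

```python
from llmtuner.api.protocol import ChatCompletionRequest, ChatMessage, Role

request = ChatCompletionRequest(
    model="gpt-3.5-turbo",
    messages=[ChatMessage(role=Role.USER, content="Hi")],
    temperature=0.7,
)
# `do_sample`, `n`, `tools` and `stream` keep their declared defaults.
print(request.messages[0].role)  # Role.USER ("user")
```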
LLaMA-Factory/build/lib/llmtuner/chat/__init__.py
ADDED
@@ -0,0 +1,5 @@
from .base_engine import BaseEngine
from .chat_model import ChatModel


__all__ = ["BaseEngine", "ChatModel"]
LLaMA-Factory/build/lib/llmtuner/chat/base_engine.py
ADDED
@@ -0,0 +1,69 @@
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, List, Literal, Optional, Sequence, Union


if TYPE_CHECKING:
    from transformers import PreTrainedModel, PreTrainedTokenizer

    from ..data import Template
    from ..extras.packages import is_vllm_available
    from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments

    if is_vllm_available():
        from vllm import AsyncLLMEngine


@dataclass
class Response:
    response_text: str
    response_length: int
    prompt_length: int
    finish_reason: Literal["stop", "length"]


class BaseEngine(ABC):
    model: Union["PreTrainedModel", "AsyncLLMEngine"]
    tokenizer: "PreTrainedTokenizer"
    can_generate: bool
    template: "Template"
    generating_args: Dict[str, Any]

    @abstractmethod
    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None: ...

    @abstractmethod
    async def start(
        self,
    ) -> None: ...

    @abstractmethod
    async def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> List["Response"]: ...

    @abstractmethod
    async def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]: ...

    @abstractmethod
    async def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]: ...
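
Every concrete backend must supply the five abstract members above. A toy stub, purely illustrative (the echo behavior is invented), shows the minimum surface a new engine needs:

```python
from llmtuner.chat.base_engine import BaseEngine, Response


class EchoEngine(BaseEngine):
    """Illustrative engine that echoes the last user message."""

    def __init__(self, *args) -> None:  # signature relaxed for the sketch
        self.can_generate = True
        self.generating_args = {}

    async def start(self) -> None:
        pass  # no warm-up needed for the stub

    async def chat(self, messages, system=None, tools=None, **input_kwargs):
        text = messages[-1]["content"]
        return [Response(response_text=text, response_length=len(text), prompt_length=0, finish_reason="stop")]

    async def stream_chat(self, messages, system=None, tools=None, **input_kwargs):
        for char in messages[-1]["content"]:
            yield char

    async def get_scores(self, batch_input, **input_kwargs):
        raise NotImplementedError("The stub has no reward head.")
```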
LLaMA-Factory/build/lib/llmtuner/chat/chat_model.py
ADDED
@@ -0,0 +1,91 @@
import asyncio
from threading import Thread
from typing import TYPE_CHECKING, Any, AsyncGenerator, Dict, Generator, List, Optional, Sequence

from ..hparams import get_infer_args
from .hf_engine import HuggingfaceEngine
from .vllm_engine import VllmEngine


if TYPE_CHECKING:
    from .base_engine import BaseEngine, Response


def _start_background_loop(loop: asyncio.AbstractEventLoop) -> None:
    asyncio.set_event_loop(loop)
    loop.run_forever()


class ChatModel:
    def __init__(self, args: Optional[Dict[str, Any]] = None) -> None:
        model_args, data_args, finetuning_args, generating_args = get_infer_args(args)
        if model_args.infer_backend == "huggingface":
            self.engine: "BaseEngine" = HuggingfaceEngine(model_args, data_args, finetuning_args, generating_args)
        elif model_args.infer_backend == "vllm":
            self.engine: "BaseEngine" = VllmEngine(model_args, data_args, finetuning_args, generating_args)
        else:
            raise NotImplementedError("Unknown backend: {}".format(model_args.infer_backend))

        self._loop = asyncio.new_event_loop()
        self._thread = Thread(target=_start_background_loop, args=(self._loop,), daemon=True)
        self._thread.start()
        asyncio.run_coroutine_threadsafe(self.engine.start(), self._loop)

    def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> List["Response"]:
        task = asyncio.run_coroutine_threadsafe(self.achat(messages, system, tools, **input_kwargs), self._loop)
        return task.result()

    async def achat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> List["Response"]:
        return await self.engine.chat(messages, system, tools, **input_kwargs)

    def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> Generator[str, None, None]:
        generator = self.astream_chat(messages, system, tools, **input_kwargs)
        while True:
            try:
                task = asyncio.run_coroutine_threadsafe(generator.__anext__(), self._loop)
                yield task.result()
            except StopAsyncIteration:
                break

    async def astream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        async for new_token in self.engine.stream_chat(messages, system, tools, **input_kwargs):
            yield new_token

    def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        task = asyncio.run_coroutine_threadsafe(self.aget_scores(batch_input, **input_kwargs), self._loop)
        return task.result()

    async def aget_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        return await self.engine.get_scores(batch_input, **input_kwargs)
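
A minimal usage sketch of the synchronous wrapper above (the argument names are those accepted by `get_infer_args`; the model path and template are placeholders):

```python
from llmtuner.chat import ChatModel

# Placeholder arguments; pick a real checkpoint and its matching template.
chat_model = ChatModel(dict(model_name_or_path="path/to/model", template="default"))

messages = [{"role": "user", "content": "What is LLaMA Factory?"}]
print(chat_model.chat(messages)[0].response_text)

# Streaming variant: tokens arrive as they are generated on the background loop.
for new_token in chat_model.stream_chat(messages):
    print(new_token, end="", flush=True)
```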
LLaMA-Factory/build/lib/llmtuner/chat/hf_engine.py
ADDED
@@ -0,0 +1,263 @@
import asyncio
import concurrent.futures
import os
from threading import Thread
from typing import TYPE_CHECKING, Any, AsyncGenerator, Callable, Dict, List, Optional, Sequence, Tuple

import torch
from transformers import GenerationConfig, TextIteratorStreamer

from ..data import get_template_and_fix_tokenizer
from ..extras.misc import get_logits_processor
from ..model import load_model_and_tokenizer
from .base_engine import BaseEngine, Response


if TYPE_CHECKING:
    from transformers import PreTrainedModel, PreTrainedTokenizer
    from trl import PreTrainedModelWrapper

    from ..data import Template
    from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


class HuggingfaceEngine(BaseEngine):
    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None:
        self.can_generate = finetuning_args.stage == "sft"
        self.model, self.tokenizer = load_model_and_tokenizer(
            model_args, finetuning_args, is_trainable=False, add_valuehead=(not self.can_generate)
        )
        self.tokenizer.padding_side = "left" if self.can_generate else "right"
        self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template)
        self.generating_args = generating_args.to_dict()

    @staticmethod
    def _process_args(
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        template: "Template",
        generating_args: Dict[str, Any],
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> Tuple[Dict[str, Any], int]:
        paired_messages = messages + [{"role": "assistant", "content": ""}]
        prompt_ids, _ = template.encode_oneturn(
            tokenizer=tokenizer, messages=paired_messages, system=system, tools=tools
        )
        prompt_length = len(prompt_ids)
        inputs = torch.tensor([prompt_ids], device=model.device)

        do_sample = input_kwargs.pop("do_sample", None)
        temperature = input_kwargs.pop("temperature", None)
        top_p = input_kwargs.pop("top_p", None)
        top_k = input_kwargs.pop("top_k", None)
        num_return_sequences = input_kwargs.pop("num_return_sequences", None)
        repetition_penalty = input_kwargs.pop("repetition_penalty", None)
        max_length = input_kwargs.pop("max_length", None)
        max_new_tokens = input_kwargs.pop("max_new_tokens", None)

        generating_args.update(
            dict(
                do_sample=do_sample if do_sample is not None else generating_args["do_sample"],
                temperature=temperature or generating_args["temperature"],
                top_p=top_p or generating_args["top_p"],
                top_k=top_k or generating_args["top_k"],
                num_return_sequences=num_return_sequences or 1,
                repetition_penalty=repetition_penalty or generating_args["repetition_penalty"],
                eos_token_id=[tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids,
                pad_token_id=tokenizer.pad_token_id,
            )
        )

        if isinstance(num_return_sequences, int) and num_return_sequences > 1:
            generating_args["do_sample"] = True

        if max_length:
            generating_args.pop("max_new_tokens", None)
            generating_args["max_length"] = max_length

        if max_new_tokens:
            generating_args.pop("max_length", None)
            generating_args["max_new_tokens"] = max_new_tokens

        gen_kwargs = dict(
            inputs=inputs,
            generation_config=GenerationConfig(**generating_args),
            logits_processor=get_logits_processor(),
        )

        return gen_kwargs, prompt_length

    @staticmethod
    @torch.inference_mode()
    def _chat(
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        template: "Template",
        generating_args: Dict[str, Any],
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> List["Response"]:
        gen_kwargs, prompt_length = HuggingfaceEngine._process_args(
            model, tokenizer, template, generating_args, messages, system, tools, input_kwargs
        )
        generate_output = model.generate(**gen_kwargs)
        response_ids = generate_output[:, prompt_length:]
        response = tokenizer.batch_decode(response_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        results = []
        for i in range(len(response)):
            eos_index = (response_ids[i] == tokenizer.eos_token_id).nonzero()
            response_length = (eos_index[0].item() + 1) if len(eos_index) else len(response_ids[i])
            results.append(
                Response(
                    response_text=response[i],
                    response_length=response_length,
                    prompt_length=prompt_length,
                    finish_reason="stop" if len(eos_index) else "length",
                )
            )

        return results

    @staticmethod
    @torch.inference_mode()
    def _stream_chat(
        model: "PreTrainedModel",
        tokenizer: "PreTrainedTokenizer",
        template: "Template",
        generating_args: Dict[str, Any],
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> Callable[[], str]:
        gen_kwargs, _ = HuggingfaceEngine._process_args(
            model, tokenizer, template, generating_args, messages, system, tools, input_kwargs
        )
        streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
        gen_kwargs["streamer"] = streamer
        thread = Thread(target=model.generate, kwargs=gen_kwargs, daemon=True)
        thread.start()

        def stream():
            try:
                return streamer.__next__()
            except StopIteration:
                raise StopAsyncIteration()

        return stream

    @staticmethod
    @torch.inference_mode()
    def _get_scores(
        model: "PreTrainedModelWrapper",
        tokenizer: "PreTrainedTokenizer",
        batch_input: List[str],
        input_kwargs: Optional[Dict[str, Any]] = {},
    ) -> List[float]:
        max_length = input_kwargs.pop("max_length", None)
        device = getattr(model.pretrained_model, "device", "cuda")
        inputs = tokenizer(
            batch_input,
            padding=True,
            truncation=True,
            max_length=max_length or getattr(model.config, "max_position_embeddings", 1024),
            return_tensors="pt",
            add_special_tokens=True,
        ).to(device)

        input_ids: torch.Tensor = inputs["input_ids"]
        _, _, values = model(**inputs, output_hidden_states=True, return_dict=True)

        if getattr(model.config, "model_type", None) == "chatglm":
            values = torch.transpose(values, 0, 1)

        scores = []
        for i in range(input_ids.size(0)):
            end_indexes = (input_ids[i] != tokenizer.pad_token_id).nonzero()
            end_index = end_indexes[-1].item() if len(end_indexes) else 0
            scores.append(values[i, end_index].nan_to_num().item())

        return scores

    async def start(self) -> None:
        self._semaphore = asyncio.Semaphore(int(os.environ.get("MAX_CONCURRENT", 1)))

    async def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> List["Response"]:
        if not self.can_generate:
            raise ValueError("The current model does not support `chat`.")

        loop = asyncio.get_running_loop()
        input_args = (
            self.model,
            self.tokenizer,
            self.template,
            self.generating_args,
            messages,
            system,
            tools,
            input_kwargs,
        )
        async with self._semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                return await loop.run_in_executor(pool, self._chat, *input_args)

    async def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        if not self.can_generate:
            raise ValueError("The current model does not support `stream_chat`.")

        loop = asyncio.get_running_loop()
        input_args = (
            self.model,
            self.tokenizer,
            self.template,
            self.generating_args,
            messages,
            system,
            tools,
            input_kwargs,
        )
        async with self._semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                stream = self._stream_chat(*input_args)
                while True:
                    try:
                        yield await loop.run_in_executor(pool, stream)
                    except StopAsyncIteration:
                        break

    async def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        if self.can_generate:
            raise ValueError("Cannot get scores using an auto-regressive model.")

        loop = asyncio.get_running_loop()
        input_args = (self.model, self.tokenizer, batch_input, input_kwargs)
        async with self._semaphore:
            with concurrent.futures.ThreadPoolExecutor() as pool:
                return await loop.run_in_executor(pool, self._get_scores, *input_args)
LLaMA-Factory/build/lib/llmtuner/chat/vllm_engine.py
ADDED
@@ -0,0 +1,149 @@
import uuid
from typing import TYPE_CHECKING, AsyncGenerator, AsyncIterator, Dict, List, Optional, Sequence

from transformers.utils.versions import require_version

from ..data import get_template_and_fix_tokenizer
from ..extras.misc import get_device_count
from ..extras.packages import is_vllm_available
from ..model import load_tokenizer
from .base_engine import BaseEngine, Response


if is_vllm_available():
    from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams

if TYPE_CHECKING:
    from ..hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


class VllmEngine(BaseEngine):
    def __init__(
        self,
        model_args: "ModelArguments",
        data_args: "DataArguments",
        finetuning_args: "FinetuningArguments",
        generating_args: "GeneratingArguments",
    ) -> None:
        require_version("vllm>=0.3.3", "To fix: pip install vllm>=0.3.3")
        self.can_generate = finetuning_args.stage == "sft"
        engine_args = AsyncEngineArgs(
            model=model_args.model_name_or_path,
            trust_remote_code=True,
            max_model_len=model_args.vllm_maxlen,
            tensor_parallel_size=get_device_count() or 1,
            gpu_memory_utilization=model_args.vllm_gpu_util,
            disable_log_stats=True,
            disable_log_requests=True,
            enforce_eager=model_args.vllm_enforce_eager,
        )
        self.model = AsyncLLMEngine.from_engine_args(engine_args)
        self.tokenizer = load_tokenizer(model_args)
        self.tokenizer.padding_side = "left"
        self.template = get_template_and_fix_tokenizer(self.tokenizer, data_args.template)
        self.generating_args = generating_args.to_dict()

    async def _generate(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> AsyncIterator["RequestOutput"]:
        request_id = "chatcmpl-{}".format(uuid.uuid4().hex)
        paired_messages = messages + [{"role": "assistant", "content": ""}]
        prompt_ids, _ = self.template.encode_oneturn(
            tokenizer=self.tokenizer, messages=paired_messages, system=system, tools=tools
        )
        prompt_length = len(prompt_ids)

        temperature = input_kwargs.pop("temperature", None)
        top_p = input_kwargs.pop("top_p", None)
        top_k = input_kwargs.pop("top_k", None)
        num_return_sequences = input_kwargs.pop("num_return_sequences", None)
        repetition_penalty = input_kwargs.pop("repetition_penalty", None)
        max_length = input_kwargs.pop("max_length", None)
        max_new_tokens = input_kwargs.pop("max_new_tokens", None)

        generating_args = self.generating_args.copy()
        generating_args.update(
            dict(
                temperature=temperature or generating_args["temperature"],
                top_p=top_p or generating_args["top_p"],
                top_k=top_k or generating_args["top_k"],
                num_return_sequences=num_return_sequences or 1,
                repetition_penalty=repetition_penalty or generating_args["repetition_penalty"],
            )
        )

        if max_length:
            generating_args["max_new_tokens"] = max_length - prompt_length

        if max_new_tokens:
            generating_args["max_new_tokens"] = max_new_tokens

        sampling_params = SamplingParams(
            n=generating_args["num_return_sequences"],
            repetition_penalty=generating_args["repetition_penalty"],
            temperature=generating_args["temperature"],
            top_p=generating_args["top_p"],
            top_k=generating_args["top_k"],
            use_beam_search=generating_args["num_beams"] > 1,
            length_penalty=generating_args["length_penalty"],
            stop_token_ids=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids,
            max_tokens=generating_args["max_new_tokens"],
            skip_special_tokens=True,
        )
        result_generator = self.model.generate(
            prompt=None, sampling_params=sampling_params, request_id=request_id, prompt_token_ids=prompt_ids
        )
        return result_generator

    async def start(self) -> None:
        pass

    async def chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> List["Response"]:
        final_output = None
        generator = await self._generate(messages, system, tools, **input_kwargs)
        async for request_output in generator:
            final_output = request_output

        results = []
        for output in final_output.outputs:
            results.append(
                Response(
                    response_text=output.text,
                    response_length=len(output.token_ids),
                    prompt_length=len(final_output.prompt_token_ids),
                    finish_reason=output.finish_reason,
                )
            )

        return results

    async def stream_chat(
        self,
        messages: Sequence[Dict[str, str]],
        system: Optional[str] = None,
        tools: Optional[str] = None,
        **input_kwargs,
    ) -> AsyncGenerator[str, None]:
        generated_text = ""
        generator = await self._generate(messages, system, tools, **input_kwargs)
        async for result in generator:
            delta_text = result.outputs[0].text[len(generated_text) :]
            generated_text = result.outputs[0].text
            yield delta_text

    async def get_scores(
        self,
        batch_input: List[str],
        **input_kwargs,
    ) -> List[float]:
        raise NotImplementedError("vLLM engine does not support get_scores.")
LLaMA-Factory/build/lib/llmtuner/data/__init__.py
ADDED
@@ -0,0 +1,6 @@
from .loader import get_dataset
from .template import Template, get_template_and_fix_tokenizer, templates
from .utils import Role, split_dataset


__all__ = ["get_dataset", "Template", "get_template_and_fix_tokenizer", "templates", "Role", "split_dataset"]
LLaMA-Factory/build/lib/llmtuner/data/aligner.py
ADDED
@@ -0,0 +1,133 @@
from functools import partial
from typing import TYPE_CHECKING, Any, Dict, List, Union

from datasets import Features

from .utils import Role


if TYPE_CHECKING:
    from datasets import Dataset, IterableDataset

    from ..hparams import DataArguments
    from .parser import DatasetAttr


def convert_alpaca(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]:
    outputs = {"prompt": [], "response": [], "system": [], "tools": []}
    for i in range(len(examples[dataset_attr.prompt])):
        prompt = []
        if dataset_attr.history and isinstance(examples[dataset_attr.history][i], list):
            for old_prompt, old_response in examples[dataset_attr.history][i]:
                prompt.append({"role": Role.USER.value, "content": old_prompt})
                prompt.append({"role": Role.ASSISTANT.value, "content": old_response})

        content = []
        if dataset_attr.prompt and examples[dataset_attr.prompt][i]:
            content.append(examples[dataset_attr.prompt][i])

        if dataset_attr.query and examples[dataset_attr.query][i]:
            content.append(examples[dataset_attr.query][i])

        prompt.append({"role": Role.USER.value, "content": "\n".join(content)})

        if dataset_attr.response and isinstance(examples[dataset_attr.response][i], list):
            response = [
                {"role": Role.ASSISTANT.value, "content": content} for content in examples[dataset_attr.response][i]
            ]
        elif dataset_attr.response and isinstance(examples[dataset_attr.response][i], str):
            response = [{"role": Role.ASSISTANT.value, "content": examples[dataset_attr.response][i]}]
        else:
            response = []

        outputs["prompt"].append(prompt)
        outputs["response"].append(response)
        outputs["system"].append(examples[dataset_attr.system][i] if dataset_attr.system else "")
        outputs["tools"].append("")

    return outputs


def convert_sharegpt(examples: Dict[str, List[Any]], dataset_attr: "DatasetAttr") -> Dict[str, List[Any]]:
    outputs = {"prompt": [], "response": [], "system": [], "tools": []}
    tag_mapping = {
        dataset_attr.user_tag: Role.USER.value,
        dataset_attr.assistant_tag: Role.ASSISTANT.value,
        dataset_attr.observation_tag: Role.OBSERVATION.value,
        dataset_attr.function_tag: Role.FUNCTION.value,
        dataset_attr.system_tag: Role.SYSTEM.value,
    }
    odd_tags = (dataset_attr.user_tag, dataset_attr.observation_tag)
    even_tags = (dataset_attr.assistant_tag, dataset_attr.function_tag)
    accept_tags = (odd_tags, even_tags)
    for i, messages in enumerate(examples[dataset_attr.messages]):
        if dataset_attr.system_tag and messages[0][dataset_attr.role_tag] == dataset_attr.system_tag:
            system = messages[0][dataset_attr.content_tag]
            messages = messages[1:]
        else:
            system = examples[dataset_attr.system][i] if dataset_attr.system else ""

        messages = messages[: len(messages) // 2 * 2]  # should be multiples of 2
        if len(messages) == 0:
            continue

        aligned_messages = []
        for turn_idx, message in enumerate(messages):
            if message[dataset_attr.role_tag] not in accept_tags[turn_idx % 2]:
                raise ValueError("Invalid role tag in {}.".format(messages))

            aligned_messages.append(
                {"role": tag_mapping[message[dataset_attr.role_tag]], "content": message[dataset_attr.content_tag]}
            )

        outputs["prompt"].append(aligned_messages[:-1])
        outputs["response"].append(aligned_messages[-1:])
        outputs["system"].append(system)
        outputs["tools"].append(examples[dataset_attr.tools][i] if dataset_attr.tools else "")

    return outputs


def align_dataset(
    dataset: Union["Dataset", "IterableDataset"], dataset_attr: "DatasetAttr", data_args: "DataArguments"
) -> Union["Dataset", "IterableDataset"]:
    r"""
    Aligned dataset:
        prompt: [{"role": "user", "content": "..."}] * (2T - 1)
        response: [{"role": "assistant", "content": "..."}] * N (N > 1 for ranking dataset)
        system: "..."
        tools: "..."
    """
    if dataset_attr.formatting == "alpaca":
        convert_func = partial(convert_alpaca, dataset_attr=dataset_attr)
    else:
        convert_func = partial(convert_sharegpt, dataset_attr=dataset_attr)

    column_names = list(next(iter(dataset)).keys())
    features = Features.from_dict(
        {
            "prompt": [
                {"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}}
            ],
            "response": [
                {"role": {"dtype": "string", "_type": "Value"}, "content": {"dtype": "string", "_type": "Value"}}
            ],
            "system": {"dtype": "string", "_type": "Value"},
            "tools": {"dtype": "string", "_type": "Value"},
        }
    )
    kwargs = {}
    if not data_args.streaming:
        kwargs = dict(
            num_proc=data_args.preprocessing_num_workers,
            load_from_cache_file=(not data_args.overwrite_cache),
            desc="Converting format of dataset",
        )

    return dataset.map(
        convert_func,
        batched=True,
        remove_columns=column_names,
        features=features,
        **kwargs,
    )
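
To make the alpaca conversion concrete, a hedged per-record sketch (the sample data is invented; `convert_alpaca` actually receives batched columns keyed by `DatasetAttr`):

```python
# One alpaca-style record, conceptually:
record = {"instruction": "Translate to French", "input": "Hello", "output": "Bonjour"}

# After alignment it takes the unified schema documented in align_dataset:
aligned = {
    "prompt": [{"role": "user", "content": "Translate to French\nHello"}],  # prompt + query joined by "\n"
    "response": [{"role": "assistant", "content": "Bonjour"}],
    "system": "",
    "tools": "",
}
```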
LLaMA-Factory/build/lib/llmtuner/data/formatter.py
ADDED
@@ -0,0 +1,187 @@
import json
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from typing import Any, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union


SLOTS = Sequence[Union[str, Set[str], Dict[str, str]]]


JSON_FORMAT_PROMPT = (
    """, in a JSON format representing the kwargs (e.g. ```{"input": "hello world", "num_beams": 5}```)"""
)


TOOL_SYSTEM_PROMPT = (
    "You have access to the following tools:\n{tool_text}"
    "Use the following format if using a tool:\n"
    "```\n"
    "Action: tool name (one of [{tool_names}]).\n"
    "Action Input: the input to the tool{format_prompt}.\n"
    "```\n"
)


def default_tool_formatter(tools: List[Dict[str, Any]]) -> str:
    tool_text = ""
    tool_names = []
    for tool in tools:
        param_text = ""
        for name, param in tool["parameters"]["properties"].items():
            required = ", required" if name in tool["parameters"].get("required", []) else ""
            enum = ", should be one of [{}]".format(", ".join(param["enum"])) if param.get("enum", None) else ""
            items = (
                ", where each item should be {}".format(param["items"].get("type", "")) if param.get("items") else ""
            )
            param_text += "  - {name} ({type}{required}): {desc}{enum}{items}\n".format(
                name=name,
                type=param.get("type", ""),
                required=required,
                desc=param.get("description", ""),
                enum=enum,
                items=items,
            )

        tool_text += "> Tool Name: {name}\nTool Description: {desc}\nTool Args:\n{args}\n".format(
            name=tool["name"], desc=tool.get("description", ""), args=param_text
        )
        tool_names.append(tool["name"])

    return TOOL_SYSTEM_PROMPT.format(
        tool_text=tool_text, tool_names=", ".join(tool_names), format_prompt=JSON_FORMAT_PROMPT
    )


def default_tool_extractor(content: str) -> Union[str, Tuple[str, str]]:
    regex = re.compile(r"Action:\s*([a-zA-Z0-9_]+).*?Action Input:\s*(.*)", re.DOTALL)
    action_match = re.search(regex, content)
    if not action_match:
        return content

    tool_name = action_match.group(1).strip()
    tool_input = action_match.group(2).strip().strip('"').strip("```")
    try:
        arguments = json.loads(tool_input)
    except json.JSONDecodeError:
        return content

    return tool_name, json.dumps(arguments, ensure_ascii=False)


@dataclass
class Formatter(ABC):
    slots: SLOTS = field(default_factory=list)
    tool_format: Optional[Literal["default"]] = None

    @abstractmethod
    def apply(self, **kwargs) -> SLOTS: ...

    def extract(self, content: str) -> Union[str, Tuple[str, str]]:
        raise NotImplementedError


@dataclass
class EmptyFormatter(Formatter):
    def __post_init__(self):
        has_placeholder = False
        for slot in filter(lambda s: isinstance(s, str), self.slots):
            if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot):
                has_placeholder = True

        if has_placeholder:
            raise ValueError("Empty formatter should not contain any placeholder.")

    def apply(self, **kwargs) -> SLOTS:
        return self.slots


@dataclass
class StringFormatter(Formatter):
    def __post_init__(self):
        has_placeholder = False
        for slot in filter(lambda s: isinstance(s, str), self.slots):
            if re.search(r"\{\{[a-zA-Z_][a-zA-Z0-9_]*\}\}", slot):
                has_placeholder = True

        if not has_placeholder:
            raise ValueError("A placeholder is required in the string formatter.")

    def apply(self, **kwargs) -> SLOTS:
        elements = []
        for slot in self.slots:
            if isinstance(slot, str):
                for name, value in kwargs.items():
                    if not isinstance(value, str):
                        raise RuntimeError("Expected a string, got {}".format(value))

                    slot = slot.replace("{{" + name + "}}", value, 1)
                elements.append(slot)
            elif isinstance(slot, (dict, set)):
                elements.append(slot)
            else:
                raise RuntimeError("Input must be string, set[str] or dict[str, str], got {}".format(type(slot)))

        return elements


@dataclass
class FunctionFormatter(Formatter):
    def __post_init__(self):
        has_name, has_args = False, False
        for slot in filter(lambda s: isinstance(s, str), self.slots):
            if "{{name}}" in slot:
                has_name = True
            if "{{arguments}}" in slot:
                has_args = True

        if not has_name or not has_args:
            raise ValueError("Name and arguments placeholders are required in the function formatter.")

    def apply(self, **kwargs) -> SLOTS:
        content = kwargs.pop("content")
        try:
            function = json.loads(content)
            name = function["name"]
            arguments = json.dumps(function["arguments"], ensure_ascii=False)
        except Exception:
            name, arguments = "", ""

        elements = []
        for slot in self.slots:
            if isinstance(slot, str):
                slot = slot.replace("{{name}}", name).replace("{{arguments}}", arguments)
                elements.append(slot)
            elif isinstance(slot, (dict, set)):
                elements.append(slot)
            else:
                raise RuntimeError("Input must be string, set[str] or dict[str, str], got {}".format(type(slot)))

        return elements


@dataclass
class ToolFormatter(Formatter):
    def __post_init__(self):
        if self.tool_format is None:
            raise ValueError("Tool format was not found.")

    def apply(self, **kwargs) -> SLOTS:
        content = kwargs.pop("content")
        try:
            tools = json.loads(content)
            if not len(tools):
                return [""]

            if self.tool_format == "default":
                return [default_tool_formatter(tools)]
            else:
                raise NotImplementedError
        except Exception:
            return [""]

    def extract(self, content: str) -> Union[str, Tuple[str, str]]:
        if self.tool_format == "default":
            return default_tool_extractor(content)
        else:
            raise NotImplementedError
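
A short sketch of the formatter in action, with an invented chat-template slot (`{{content}}` is substituted by `StringFormatter.apply`):

```python
from llmtuner.data.formatter import StringFormatter

formatter = StringFormatter(slots=["<|user|>\n{{content}}\n<|assistant|>\n"])
print(formatter.apply(content="Hi there"))
# -> ['<|user|>\nHi there\n<|assistant|>\n']
```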
LLaMA-Factory/build/lib/llmtuner/data/loader.py
ADDED
@@ -0,0 +1,170 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import inspect
import os
from typing import TYPE_CHECKING, Literal, Union

from datasets import load_dataset, load_from_disk

from ..extras.constants import FILEEXT2TYPE
from ..extras.logging import get_logger
from .aligner import align_dataset
from .parser import get_dataset_list
from .preprocess import get_preprocess_and_print_func
from .template import get_template_and_fix_tokenizer
from .utils import checksum, merge_dataset


if TYPE_CHECKING:
    from datasets import Dataset, IterableDataset
    from transformers import Seq2SeqTrainingArguments
    from transformers.tokenization_utils import PreTrainedTokenizer

    from ..hparams import DataArguments, ModelArguments
    from .parser import DatasetAttr


logger = get_logger(__name__)


def load_single_dataset(
    dataset_attr: "DatasetAttr",
    model_args: "ModelArguments",
    data_args: "DataArguments",
) -> Union["Dataset", "IterableDataset"]:
    logger.info("Loading dataset {}...".format(dataset_attr))
    data_path, data_name, data_dir, data_files = None, None, None, None
    if dataset_attr.load_from in ["hf_hub", "ms_hub"]:
        data_path = dataset_attr.dataset_name
        data_name = dataset_attr.subset
        data_dir = dataset_attr.folder

    elif dataset_attr.load_from == "script":
        data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
        data_name = dataset_attr.subset
        data_dir = dataset_attr.folder

    elif dataset_attr.load_from == "file":
        data_files = []
        local_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)
        if os.path.isdir(local_path):  # is directory
            for file_name in os.listdir(local_path):
                data_files.append(os.path.join(local_path, file_name))
                if data_path is None:
                    data_path = FILEEXT2TYPE.get(file_name.split(".")[-1], None)
                elif data_path != FILEEXT2TYPE.get(file_name.split(".")[-1], None):
                    raise ValueError("File types should be identical.")
        elif os.path.isfile(local_path):  # is file
            data_files.append(local_path)
            data_path = FILEEXT2TYPE.get(local_path.split(".")[-1], None)
        else:
            raise ValueError("File not found.")

        if data_path is None:
            raise ValueError("File extension must be txt, csv, json or jsonl.")

        checksum(data_files, dataset_attr.file_sha1)
    else:
        raise NotImplementedError

    if dataset_attr.load_from == "ms_hub":
        try:
            from modelscope import MsDataset
            from modelscope.utils.config_ds import MS_DATASETS_CACHE

            cache_dir = model_args.cache_dir or MS_DATASETS_CACHE
            dataset = MsDataset.load(
                dataset_name=data_path,
                subset_name=data_name,
                data_dir=data_dir,
                data_files=data_files,
                split=data_args.split,
                cache_dir=cache_dir,
                token=model_args.ms_hub_token,
                use_streaming=(data_args.streaming and (dataset_attr.load_from != "file")),
            ).to_hf_dataset()
        except ImportError:
            raise ImportError("Please install modelscope via `pip install modelscope -U`")
    else:
        if "trust_remote_code" in inspect.signature(load_dataset).parameters:  # for datasets==2.16.0
            kwargs = {"trust_remote_code": True}
        else:
            kwargs = {}

        dataset = load_dataset(
            path=data_path,
            name=data_name,
            data_dir=data_dir,
            data_files=data_files,
            split=data_args.split,
            cache_dir=model_args.cache_dir,
            token=model_args.hf_hub_token,
            streaming=(data_args.streaming and (dataset_attr.load_from != "file")),
            **kwargs,
        )

    if data_args.streaming and (dataset_attr.load_from == "file"):  # faster than specifying streaming=True
        dataset = dataset.to_iterable_dataset()  # TODO: add num shards parameter

    if data_args.max_samples is not None:  # truncate dataset
        num_samples = min(data_args.max_samples, len(dataset))
        dataset = dataset.select(range(num_samples))

    return align_dataset(dataset, dataset_attr, data_args)


def get_dataset(
    tokenizer: "PreTrainedTokenizer",
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo"],
    # split: Optional[str] = "train", # TODO: add split
) -> Union["Dataset", "IterableDataset"]:
    template = get_template_and_fix_tokenizer(tokenizer, data_args.template)
    if data_args.train_on_prompt and template.efficient_eos:
        raise ValueError("Current template does not support `train_on_prompt`.")

    # Load from cache
    if data_args.cache_path is not None:
        if os.path.exists(data_args.cache_path):
            logger.warning("Loading dataset from disk will ignore other data arguments.")
            dataset = load_from_disk(data_args.cache_path)
            if data_args.streaming:
                dataset = dataset.to_iterable_dataset()
            return dataset

        if data_args.streaming:
            raise ValueError("Turn off `streaming` when saving dataset to disk.")

    with training_args.main_process_first(desc="load dataset"):
        all_datasets = []
        for dataset_attr in get_dataset_list(data_args):
            all_datasets.append(load_single_dataset(dataset_attr, model_args, data_args))
        dataset = merge_dataset(all_datasets, data_args, training_args)

    with training_args.main_process_first(desc="pre-process dataset"):
        preprocess_func, print_function = get_preprocess_and_print_func(
            tokenizer, template, data_args, training_args, stage
        )
        column_names = list(next(iter(dataset)).keys())
        kwargs = {}
        if not data_args.streaming:
            kwargs = dict(
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=(not data_args.overwrite_cache),
                desc="Running tokenizer on dataset",
            )

        dataset = dataset.map(preprocess_func, batched=True, remove_columns=column_names, **kwargs)

        if data_args.cache_path is not None and not os.path.exists(data_args.cache_path):
            if training_args.should_save:
                dataset.save_to_disk(data_args.cache_path)
                logger.info("Dataset cache saved at {}.".format(data_args.cache_path))

        if training_args.should_log:
            try:
                print_function(next(iter(dataset)))
            except StopIteration:
                raise RuntimeError("Cannot find valid samples, check `data/README.md` for the data format.")

    return dataset
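A minimal sketch of the extension dispatch in load_single_dataset, using a stand-in FILEEXT2TYPE mapping (the real one lives in ..extras.constants; the values below are an assumption that mirrors the extensions named in the error message above):

```python
# Stand-in for llmtuner.extras.constants.FILEEXT2TYPE (assumed contents).
FILEEXT2TYPE = {"csv": "csv", "json": "json", "jsonl": "json", "txt": "text"}


def infer_builder(data_files):
    # Replicates the loop in load_single_dataset: every file in a directory
    # must map to the same `datasets` builder, otherwise loading is ambiguous.
    data_path = None
    for file_name in data_files:
        file_type = FILEEXT2TYPE.get(file_name.split(".")[-1], None)
        if data_path is None:
            data_path = file_type
        elif data_path != file_type:
            raise ValueError("File types should be identical.")
    return data_path


print(infer_builder(["a.jsonl", "b.jsonl"]))  # json
```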
LLaMA-Factory/build/lib/llmtuner/data/parser.py
ADDED
@@ -0,0 +1,119 @@
import json
import os
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional

from ..extras.constants import DATA_CONFIG
from ..extras.misc import use_modelscope


if TYPE_CHECKING:
    from ..hparams import DataArguments


@dataclass
class DatasetAttr:
    r"""
    Dataset attributes.
    """

    """ basic configs """
    load_from: Literal["hf_hub", "ms_hub", "script", "file"]
    dataset_name: str
    """ extra configs """
    file_sha1: Optional[str] = None
    subset: Optional[str] = None
    folder: Optional[str] = None
    ranking: bool = False
    formatting: Literal["alpaca", "sharegpt"] = "alpaca"
    """ columns """
    system: Optional[str] = None
    """ columns for the alpaca format """
    prompt: Optional[str] = "instruction"
    query: Optional[str] = "input"
    response: Optional[str] = "output"
    history: Optional[str] = None
    """ columns for the sharegpt format """
    messages: Optional[str] = "conversations"
    tools: Optional[str] = None
    """ tags for the sharegpt format """
    role_tag: Optional[str] = "from"
    content_tag: Optional[str] = "value"
    user_tag: Optional[str] = "human"
    assistant_tag: Optional[str] = "gpt"
    observation_tag: Optional[str] = "observation"
    function_tag: Optional[str] = "function_call"
    system_tag: Optional[str] = "system"

    def __repr__(self) -> str:
        return self.dataset_name

    def set_attr(self, key: str, obj: Dict[str, Any], default: Optional[Any] = None) -> None:
        setattr(self, key, obj.get(key, default))


def get_dataset_list(data_args: "DataArguments") -> List["DatasetAttr"]:
    dataset_names = [ds.strip() for ds in data_args.dataset.split(",")] if data_args.dataset is not None else []
    try:
        with open(os.path.join(data_args.dataset_dir, DATA_CONFIG), "r") as f:
            dataset_info = json.load(f)
    except Exception as err:
        if data_args.dataset is not None:
            raise ValueError(
                "Cannot open {} due to {}.".format(os.path.join(data_args.dataset_dir, DATA_CONFIG), str(err))
            )
        dataset_info = None

    if data_args.interleave_probs is not None:
        data_args.interleave_probs = [float(prob.strip()) for prob in data_args.interleave_probs.split(",")]

    dataset_list: List[DatasetAttr] = []
    for name in dataset_names:
        if name not in dataset_info:
            raise ValueError("Undefined dataset {} in {}.".format(name, DATA_CONFIG))

        has_hf_url = "hf_hub_url" in dataset_info[name]
        has_ms_url = "ms_hub_url" in dataset_info[name]

        if has_hf_url or has_ms_url:
            if (use_modelscope() and has_ms_url) or (not has_hf_url):
                dataset_attr = DatasetAttr("ms_hub", dataset_name=dataset_info[name]["ms_hub_url"])
            else:
                dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"])
        elif "script_url" in dataset_info[name]:
            dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"])
        else:
            dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"])

        dataset_attr.set_attr("file_sha1", dataset_info[name])
        dataset_attr.set_attr("subset", dataset_info[name])
        dataset_attr.set_attr("folder", dataset_info[name])
        dataset_attr.set_attr("ranking", dataset_info[name], default=False)
        dataset_attr.set_attr("formatting", dataset_info[name], default="alpaca")

        if "columns" in dataset_info[name]:
            column_names = ["system"]
            if dataset_attr.formatting == "alpaca":
                column_names.extend(["prompt", "query", "response", "history"])
            else:
                column_names.extend(["messages", "tools"])

            for column_name in column_names:
                dataset_attr.set_attr(column_name, dataset_info[name]["columns"])

        if dataset_attr.formatting == "sharegpt" and "tags" in dataset_info[name]:
            tag_names = (
                "role_tag",
                "content_tag",
                "user_tag",
                "assistant_tag",
                "observation_tag",
                "function_tag",
                "system_tag",
            )
            for tag in tag_names:
                dataset_attr.set_attr(tag, dataset_info[name]["tags"])

        dataset_list.append(dataset_attr)

    return dataset_list
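To make the parsing concrete, here is a hypothetical dataset_info.json entry (the dataset name and file are invented for illustration) together with a note on the DatasetAttr it would yield through get_dataset_list:

```python
# Hypothetical dataset_info.json entry exercising the keys parsed above.
dataset_info = {
    "my_sharegpt_data": {
        "file_name": "my_sharegpt_data.json",
        "formatting": "sharegpt",
        "columns": {"messages": "conversations", "tools": "tools"},
        "tags": {"role_tag": "from", "content_tag": "value"},
    }
}

# get_dataset_list would build DatasetAttr("file", dataset_name="my_sharegpt_data.json"),
# then copy "formatting", the "columns" mapping, and the sharegpt "tags"
# onto it via DatasetAttr.set_attr.
```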
LLaMA-Factory/build/lib/llmtuner/data/preprocess.py
ADDED
@@ -0,0 +1,276 @@
from functools import partial
from itertools import chain
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Literal, Tuple

from ..extras.constants import IGNORE_INDEX
from ..extras.logging import get_logger
from .utils import Role


if TYPE_CHECKING:
    from transformers import Seq2SeqTrainingArguments
    from transformers.tokenization_utils import PreTrainedTokenizer

    from ..hparams import DataArguments
    from .template import Template


logger = get_logger(__name__)


def preprocess_pretrain_dataset(
    examples: Dict[str, List[Any]], tokenizer: "PreTrainedTokenizer", data_args: "DataArguments"
) -> Dict[str, List[List[int]]]:
    # build grouped texts with format `X1 X2 X3 ...` if packing is enabled
    text_examples = [messages[0]["content"] + tokenizer.eos_token for messages in examples["prompt"]]
    if not data_args.packing:
        return tokenizer(text_examples, add_special_tokens=False, max_length=data_args.cutoff_len)

    tokenized_examples = tokenizer(text_examples, add_special_tokens=False)
    concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()}
    total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]])
    block_size = data_args.cutoff_len
    # we drop the small remainder, and if the total_length < block_size, we exclude this batch
    total_length = (total_length // block_size) * block_size
    # split by chunks of cutoff_len
    result = {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated_examples.items()
    }
    if data_args.template == "gemma":
        for i in range(len(result["input_ids"])):
            result["input_ids"][i][0] = tokenizer.bos_token_id

    return result


def preprocess_supervised_dataset(
    examples: Dict[str, List[Any]],
    tokenizer: "PreTrainedTokenizer",
    template: "Template",
    data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
    # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>`
    # for multiturn examples, we only mask the prompt part in each prompt-response pair.
    model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}

    for i in range(len(examples["prompt"])):
        if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1:
            continue

        messages = examples["prompt"][i] + examples["response"][i]
        input_ids, labels = [], []
        for turn_idx, (source_ids, target_ids) in enumerate(
            template.encode_multiturn(
                tokenizer,
                messages,
                examples["system"][i],
                examples["tools"][i],
                data_args.cutoff_len,
                data_args.reserved_label_len,
            )
        ):
            if data_args.train_on_prompt:
                source_mask = source_ids
            elif turn_idx != 0 and template.efficient_eos:
                source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1)
            else:
                source_mask = [IGNORE_INDEX] * len(source_ids)

            input_ids += source_ids + target_ids
            labels += source_mask + target_ids

        if template.efficient_eos:
            input_ids += [tokenizer.eos_token_id]
            labels += [tokenizer.eos_token_id]

        model_inputs["input_ids"].append(input_ids)
        model_inputs["attention_mask"].append([1] * len(input_ids))
        model_inputs["labels"].append(labels)

    return model_inputs


def preprocess_packed_supervised_dataset(
    examples: Dict[str, List[Any]],
    tokenizer: "PreTrainedTokenizer",
    template: "Template",
    data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
    # build inputs with format `<bos> X1 Y1 <eos> <bos> X2 Y2 <eos>`
    # and labels with format `<ignore> ... <ignore> Y1 <eos> <ignore> ... <ignore> Y2 <eos>`
    model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}
    input_ids, labels = [], []
    for i in range(len(examples["prompt"])):
        if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) != 1:
            continue

        messages = examples["prompt"][i] + examples["response"][i]
        for source_ids, target_ids in template.encode_multiturn(
            tokenizer, messages, examples["system"][i], examples["tools"][i]
        ):
            if data_args.train_on_prompt:
                source_mask = source_ids
            elif len(input_ids) != 0 and template.efficient_eos:
                source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1)
            else:
                source_mask = [IGNORE_INDEX] * len(source_ids)

            input_ids += source_ids + target_ids
            labels += source_mask + target_ids

    if template.efficient_eos:
        input_ids += [tokenizer.eos_token_id]
        labels += [tokenizer.eos_token_id]

    total_length = len(input_ids)
    block_size = data_args.cutoff_len
    # we drop the small remainder, and if the total_length < block_size, we exclude this batch
    total_length = (total_length // block_size) * block_size
    # split by chunks of cutoff_len
    for i in range(0, total_length, block_size):
        if not all(label == IGNORE_INDEX for label in labels[i : i + block_size]):
            model_inputs["input_ids"].append(input_ids[i : i + block_size])
            model_inputs["attention_mask"].append([1] * block_size)
            model_inputs["labels"].append(labels[i : i + block_size])

    return model_inputs


def preprocess_unsupervised_dataset(
    examples: Dict[str, List[Any]],
    tokenizer: "PreTrainedTokenizer",
    template: "Template",
    data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
    # build inputs with format `<bos> X` and labels with format `Y <eos>`
    model_inputs = {"input_ids": [], "attention_mask": [], "labels": []}

    for i in range(len(examples["prompt"])):
        if len(examples["prompt"][i]) % 2 != 1:
            continue

        if len(examples["response"][i]) == 1:
            messages = examples["prompt"][i] + examples["response"][i]
        else:
            messages = examples["prompt"][i] + [{"role": Role.ASSISTANT.value, "content": ""}]

        input_ids, labels = template.encode_oneturn(
            tokenizer,
            messages,
            examples["system"][i],
            examples["tools"][i],
            data_args.cutoff_len,
            data_args.reserved_label_len,
        )

        if template.efficient_eos:
            labels += [tokenizer.eos_token_id]

        model_inputs["input_ids"].append(input_ids)
        model_inputs["attention_mask"].append([1] * len(input_ids))
        model_inputs["labels"].append(labels)

    return model_inputs


def preprocess_pairwise_dataset(
    examples: Dict[str, List[Any]],
    tokenizer: "PreTrainedTokenizer",
    template: "Template",
    data_args: "DataArguments",
) -> Dict[str, List[List[int]]]:
    # build input pairs with format `<bos> X`, `Y1 <eos>` and `Y2 <eos>`
    model_inputs = {"prompt_ids": [], "chosen_ids": [], "rejected_ids": []}
    for i in range(len(examples["prompt"])):
        if len(examples["prompt"][i]) % 2 != 1 or len(examples["response"][i]) < 2:
            continue

        chosen_messages = examples["prompt"][i] + [examples["response"][i][0]]
        rejected_messages = examples["prompt"][i] + [examples["response"][i][1]]
        prompt_ids, chosen_ids = template.encode_oneturn(
            tokenizer,
            chosen_messages,
            examples["system"][i],
            examples["tools"][i],
            data_args.cutoff_len,
            data_args.reserved_label_len,
        )
        _, rejected_ids = template.encode_oneturn(
            tokenizer,
            rejected_messages,
            examples["system"][i],
            examples["tools"][i],
            data_args.cutoff_len,
            data_args.reserved_label_len,
        )

        if template.efficient_eos:
            chosen_ids += [tokenizer.eos_token_id]
            rejected_ids += [tokenizer.eos_token_id]

        model_inputs["prompt_ids"].append(prompt_ids)
        model_inputs["chosen_ids"].append(chosen_ids)
        model_inputs["rejected_ids"].append(rejected_ids)

    return model_inputs


def print_supervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
    print("input_ids:\n{}".format(example["input_ids"]))
    print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))
    print("label_ids:\n{}".format(example["labels"]))
    print(
        "labels:\n{}".format(
            tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False)
        )
    )


def print_pairwise_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
    print("prompt_ids:\n{}".format(example["prompt_ids"]))
    print("prompt:\n{}".format(tokenizer.decode(example["prompt_ids"], skip_special_tokens=False)))
    print("chosen_ids:\n{}".format(example["chosen_ids"]))
    print("chosen:\n{}".format(tokenizer.decode(example["chosen_ids"], skip_special_tokens=False)))
    print("rejected_ids:\n{}".format(example["rejected_ids"]))
    print("rejected:\n{}".format(tokenizer.decode(example["rejected_ids"], skip_special_tokens=False)))


def print_unsupervised_dataset_example(example: Dict[str, List[int]], tokenizer: "PreTrainedTokenizer") -> None:
    print("input_ids:\n{}".format(example["input_ids"]))
    print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False)))


def get_preprocess_and_print_func(
    tokenizer: "PreTrainedTokenizer",
    template: "Template",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    stage: Literal["pt", "sft", "rm", "ppo"],
) -> Tuple[Callable, Callable]:
    if stage == "pt":
        preprocess_func = partial(preprocess_pretrain_dataset, tokenizer=tokenizer, data_args=data_args)
        print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer)
    elif stage == "sft" and not training_args.predict_with_generate:
        if data_args.packing:
            preprocess_func = partial(
                preprocess_packed_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args
            )
        else:
            preprocess_func = partial(
                preprocess_supervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args
            )

        print_function = partial(print_supervised_dataset_example, tokenizer=tokenizer)
    elif stage == "rm":
        preprocess_func = partial(
            preprocess_pairwise_dataset, tokenizer=tokenizer, template=template, data_args=data_args
        )
        print_function = partial(print_pairwise_dataset_example, tokenizer=tokenizer)
    else:
        preprocess_func = partial(
            preprocess_unsupervised_dataset, tokenizer=tokenizer, template=template, data_args=data_args
        )
        print_function = partial(print_unsupervised_dataset_example, tokenizer=tokenizer)

    return preprocess_func, print_function
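A toy illustration of the label masking performed by preprocess_supervised_dataset: prompt tokens are replaced by IGNORE_INDEX so the loss covers only the response. The token ids are arbitrary, and the -100 value is an assumption matching the usual transformers ignore index:

```python
IGNORE_INDEX = -100  # assumed to match llmtuner.extras.constants

source_ids = [11, 12, 13]  # tokenized prompt (arbitrary ids)
target_ids = [21, 22]      # tokenized response (arbitrary ids)

# Mirrors the default branch above: mask the prompt, keep the response.
input_ids = source_ids + target_ids
labels = [IGNORE_INDEX] * len(source_ids) + target_ids

print(input_ids)  # [11, 12, 13, 21, 22]
print(labels)     # [-100, -100, -100, 21, 22]
```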
LLaMA-Factory/build/lib/llmtuner/data/template.py
ADDED
@@ -0,0 +1,773 @@
1 |
+
from dataclasses import dataclass
|
2 |
+
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Tuple, Union
|
3 |
+
|
4 |
+
from ..extras.logging import get_logger
|
5 |
+
from .formatter import EmptyFormatter, FunctionFormatter, StringFormatter, ToolFormatter
|
6 |
+
from .utils import Role, infer_max_len
|
7 |
+
|
8 |
+
|
9 |
+
if TYPE_CHECKING:
|
10 |
+
from transformers import PreTrainedTokenizer
|
11 |
+
|
12 |
+
from .formatter import SLOTS, Formatter
|
13 |
+
|
14 |
+
|
15 |
+
logger = get_logger(__name__)
|
16 |
+
|
17 |
+
|
18 |
+
@dataclass
|
19 |
+
class Template:
|
20 |
+
format_user: "Formatter"
|
21 |
+
format_assistant: "Formatter"
|
22 |
+
format_system: "Formatter"
|
23 |
+
format_function: "Formatter"
|
24 |
+
format_observation: "Formatter"
|
25 |
+
format_tools: "Formatter"
|
26 |
+
format_separator: "Formatter"
|
27 |
+
default_system: str
|
28 |
+
stop_words: List[str]
|
29 |
+
efficient_eos: bool
|
30 |
+
replace_eos: bool
|
31 |
+
force_system: bool
|
32 |
+
|
33 |
+
def encode_oneturn(
|
34 |
+
self,
|
35 |
+
tokenizer: "PreTrainedTokenizer",
|
36 |
+
messages: List[Dict[str, str]],
|
37 |
+
system: Optional[str] = None,
|
38 |
+
tools: Optional[str] = None,
|
39 |
+
cutoff_len: int = 1_000_000,
|
40 |
+
reserved_label_len: int = 1,
|
41 |
+
) -> Tuple[List[int], List[int]]:
|
42 |
+
r"""
|
43 |
+
Returns a single pair of token ids representing prompt and response respectively.
|
44 |
+
"""
|
45 |
+
encoded_pairs = self._encode(tokenizer, messages, system, tools, cutoff_len, reserved_label_len)
|
46 |
+
prompt_ids = []
|
47 |
+
for query_ids, resp_ids in encoded_pairs[:-1]:
|
48 |
+
prompt_ids += query_ids + resp_ids
|
49 |
+
prompt_ids = prompt_ids + encoded_pairs[-1][0]
|
50 |
+
answer_ids = encoded_pairs[-1][1]
|
51 |
+
return prompt_ids, answer_ids
|
52 |
+
|
53 |
+
def encode_multiturn(
|
54 |
+
self,
|
55 |
+
tokenizer: "PreTrainedTokenizer",
|
56 |
+
messages: List[Dict[str, str]],
|
57 |
+
system: Optional[str] = None,
|
58 |
+
tools: Optional[str] = None,
|
59 |
+
cutoff_len: int = 1_000_000,
|
60 |
+
reserved_label_len: int = 1,
|
61 |
+
) -> Sequence[Tuple[List[int], List[int]]]:
|
62 |
+
r"""
|
63 |
+
Returns multiple pairs of token ids representing prompts and responses respectively.
|
64 |
+
"""
|
65 |
+
return self._encode(tokenizer, messages, system, tools, cutoff_len, reserved_label_len)
|
66 |
+
|
67 |
+
def _encode(
|
68 |
+
self,
|
69 |
+
tokenizer: "PreTrainedTokenizer",
|
70 |
+
messages: List[Dict[str, str]],
|
71 |
+
system: str,
|
72 |
+
tools: str,
|
73 |
+
cutoff_len: int,
|
74 |
+
reserved_label_len: int,
|
75 |
+
) -> Sequence[Tuple[List[int], List[int]]]:
|
76 |
+
r"""
|
77 |
+
Encodes formatted inputs to pairs of token ids.
|
78 |
+
Turn 0: system + query resp
|
79 |
+
Turn t: sep + query resp
|
80 |
+
"""
|
81 |
+
system = system or self.default_system
|
82 |
+
encoded_messages = []
|
83 |
+
for i, message in enumerate(messages):
|
84 |
+
elements = []
|
85 |
+
if i == 0 and (system or tools or self.force_system):
|
86 |
+
tool_text = self.format_tools.apply(content=tools)[0] if tools else ""
|
87 |
+
elements += self.format_system.apply(content=(system + tool_text))
|
88 |
+
elif i > 0 and i % 2 == 0:
|
89 |
+
elements += self.format_separator.apply()
|
90 |
+
|
91 |
+
if message["role"] == Role.USER.value:
|
92 |
+
elements += self.format_user.apply(content=message["content"], idx=str(i // 2))
|
93 |
+
elif message["role"] == Role.ASSISTANT.value:
|
94 |
+
elements += self.format_assistant.apply(content=message["content"])
|
95 |
+
elif message["role"] == Role.OBSERVATION.value:
|
96 |
+
elements += self.format_observation.apply(content=message["content"])
|
97 |
+
elif message["role"] == Role.FUNCTION.value:
|
98 |
+
elements += self.format_function.apply(content=message["content"])
|
99 |
+
else:
|
100 |
+
raise NotImplementedError("Unexpected role: {}".format(message["role"]))
|
101 |
+
|
102 |
+
encoded_messages.append(self._convert_elements_to_ids(tokenizer, elements))
|
103 |
+
|
104 |
+
return self._make_pairs(encoded_messages, cutoff_len, reserved_label_len)
|
105 |
+
|
106 |
+
def _convert_elements_to_ids(
|
107 |
+
self, tokenizer: "PreTrainedTokenizer", elements: List[Union[str, Dict[str, str]]]
|
108 |
+
) -> List[int]:
|
109 |
+
r"""
|
110 |
+
Converts elements to token ids.
|
111 |
+
"""
|
112 |
+
token_ids = []
|
113 |
+
for elem in elements:
|
114 |
+
if isinstance(elem, str):
|
115 |
+
if len(elem) != 0:
|
116 |
+
token_ids += tokenizer.encode(elem, add_special_tokens=False)
|
117 |
+
elif isinstance(elem, dict):
|
118 |
+
token_ids += [tokenizer.convert_tokens_to_ids(elem.get("token"))]
|
119 |
+
elif isinstance(elem, set):
|
120 |
+
if "bos_token" in elem and tokenizer.bos_token_id is not None:
|
121 |
+
token_ids += [tokenizer.bos_token_id]
|
122 |
+
elif "eos_token" in elem and tokenizer.eos_token_id is not None:
|
123 |
+
token_ids += [tokenizer.eos_token_id]
|
124 |
+
else:
|
125 |
+
raise ValueError("Input must be string, set[str] or dict[str, str], got {}".format(type(elem)))
|
126 |
+
|
127 |
+
return token_ids
|
128 |
+
|
129 |
+
def _make_pairs(
|
130 |
+
self,
|
131 |
+
encoded_messages: Sequence[List[int]],
|
132 |
+
cutoff_len: int,
|
133 |
+
reserved_label_len: int,
|
134 |
+
) -> Sequence[Tuple[List[int], List[int]]]:
|
135 |
+
encoded_pairs = []
|
136 |
+
total_length = 0
|
137 |
+
for i in range(0, len(encoded_messages), 2):
|
138 |
+
if total_length >= cutoff_len:
|
139 |
+
break
|
140 |
+
|
141 |
+
max_source_len, max_target_len = infer_max_len(
|
142 |
+
source_len=len(encoded_messages[i]),
|
143 |
+
target_len=len(encoded_messages[i + 1]),
|
144 |
+
max_len=(cutoff_len - total_length),
|
145 |
+
reserved_label_len=reserved_label_len,
|
146 |
+
)
|
147 |
+
source_ids = encoded_messages[i][:max_source_len]
|
148 |
+
target_ids = encoded_messages[i + 1][:max_target_len]
|
149 |
+
total_length += len(source_ids) + len(target_ids)
|
150 |
+
encoded_pairs.append((source_ids, target_ids))
|
151 |
+
|
152 |
+
return encoded_pairs
|
153 |
+
|
154 |
+
|
155 |
+
@dataclass
|
156 |
+
class Llama2Template(Template):
|
157 |
+
def _encode(
|
158 |
+
self,
|
159 |
+
tokenizer: "PreTrainedTokenizer",
|
160 |
+
messages: List[Dict[str, str]],
|
161 |
+
system: str,
|
162 |
+
tools: str,
|
163 |
+
cutoff_len: int,
|
164 |
+
reserved_label_len: int,
|
165 |
+
) -> Sequence[Tuple[List[int], List[int]]]:
|
166 |
+
r"""
|
167 |
+
Encodes formatted inputs to pairs of token ids.
|
168 |
+
Turn 0: system + query resp
|
169 |
+
Turn t: sep + query resp
|
170 |
+
"""
|
171 |
+
system = system or self.default_system
|
172 |
+
encoded_messages = []
|
173 |
+
for i, message in enumerate(messages):
|
174 |
+
elements = []
|
175 |
+
system_text = ""
|
176 |
+
if i == 0 and (system or tools or self.force_system):
|
177 |
+
tool_text = self.format_tools.apply(content=tools)[0] if tools else ""
|
178 |
+
system_text = self.format_system.apply(content=(system + tool_text))[0]
|
179 |
+
elif i > 0 and i % 2 == 0:
|
180 |
+
elements += self.format_separator.apply()
|
181 |
+
|
182 |
+
if message["role"] == Role.USER.value:
|
183 |
+
elements += self.format_user.apply(content=system_text + message["content"])
|
184 |
+
elif message["role"] == Role.ASSISTANT.value:
|
185 |
+
elements += self.format_assistant.apply(content=message["content"])
|
186 |
+
elif message["role"] == Role.OBSERVATION.value:
|
187 |
+
elements += self.format_observation.apply(content=message["content"])
|
188 |
+
elif message["role"] == Role.FUNCTION.value:
|
189 |
+
elements += self.format_function.apply(content=message["content"])
|
190 |
+
else:
|
191 |
+
raise NotImplementedError("Unexpected role: {}".format(message["role"]))
|
192 |
+
|
193 |
+
encoded_messages.append(self._convert_elements_to_ids(tokenizer, elements))
|
194 |
+
|
195 |
+
return self._make_pairs(encoded_messages, cutoff_len, reserved_label_len)
|
196 |
+
|
197 |
+
|
198 |
+
templates: Dict[str, Template] = {}
|
199 |
+
|
200 |
+
|
201 |
+
def _register_template(
|
202 |
+
name: str,
|
203 |
+
format_user: Optional["Formatter"] = None,
|
204 |
+
format_assistant: Optional["Formatter"] = None,
|
205 |
+
format_system: Optional["Formatter"] = None,
|
206 |
+
format_function: Optional["Formatter"] = None,
|
207 |
+
format_observation: Optional["Formatter"] = None,
|
208 |
+
format_tools: Optional["Formatter"] = None,
|
209 |
+
format_separator: Optional["Formatter"] = None,
|
210 |
+
default_system: str = "",
|
211 |
+
stop_words: List[str] = [],
|
212 |
+
efficient_eos: bool = False,
|
213 |
+
replace_eos: bool = False,
|
214 |
+
force_system: bool = False,
|
215 |
+
) -> None:
|
216 |
+
r"""
|
217 |
+
Registers a chat template.
|
218 |
+
|
219 |
+
To add the following chat template:
|
220 |
+
```
|
221 |
+
[HUMAN]:
|
222 |
+
user prompt here
|
223 |
+
[AI]:
|
224 |
+
model response here
|
225 |
+
|
226 |
+
[HUMAN]:
|
227 |
+
user prompt here
|
228 |
+
[AI]:
|
229 |
+
model response here
|
230 |
+
```
|
231 |
+
|
232 |
+
The corresponding code should be:
|
233 |
+
```
|
234 |
+
_register_template(
|
235 |
+
name="custom",
|
236 |
+
format_user=StringFormatter(slots=["[HUMAN]:\n{{content}}\n[AI]:\n"]),
|
237 |
+
format_separator=EmptyFormatter(slots=["\n\n"]),
|
238 |
+
efficient_eos=True,
|
239 |
+
)
|
240 |
+
```
|
241 |
+
"""
|
242 |
+
eos_slots = [] if efficient_eos else [{"eos_token"}]
|
243 |
+
template_class = Llama2Template if name.startswith("llama2") else Template
|
244 |
+
default_user_formatter = StringFormatter(slots=["{{content}}"])
|
245 |
+
default_assistant_formatter = StringFormatter(slots=["{{content}}"] + eos_slots)
|
246 |
+
default_function_formatter = FunctionFormatter(slots=["Action: {{name}}\nAction Input: {{arguments}}"] + eos_slots)
|
247 |
+
default_tool_formatter = ToolFormatter(tool_format="default")
|
248 |
+
default_separator_formatter = EmptyFormatter()
|
249 |
+
templates[name] = template_class(
|
250 |
+
format_user=format_user or default_user_formatter,
|
251 |
+
format_assistant=format_assistant or default_assistant_formatter,
|
252 |
+
format_system=format_system or default_user_formatter,
|
253 |
+
format_function=format_function or default_function_formatter,
|
254 |
+
format_observation=format_observation or format_user or default_user_formatter,
|
255 |
+
format_tools=format_tools or default_tool_formatter,
|
256 |
+
format_separator=format_separator or default_separator_formatter,
|
257 |
+
default_system=default_system,
|
258 |
+
stop_words=stop_words,
|
259 |
+
efficient_eos=efficient_eos,
|
260 |
+
replace_eos=replace_eos,
|
261 |
+
force_system=force_system,
|
262 |
+
)
|
263 |
+
|
264 |
+
|
265 |
+
def _add_or_replace_eos_token(tokenizer: "PreTrainedTokenizer", eos_token: str) -> None:
|
266 |
+
is_added = tokenizer.eos_token_id is None
|
267 |
+
num_added_tokens = tokenizer.add_special_tokens({"eos_token": eos_token})
|
268 |
+
|
269 |
+
if is_added:
|
270 |
+
logger.info("Add eos token: {}".format(tokenizer.eos_token))
|
271 |
+
else:
|
272 |
+
logger.info("Replace eos token: {}".format(tokenizer.eos_token))
|
273 |
+
|
274 |
+
if num_added_tokens > 0:
|
275 |
+
logger.warning("New tokens have been added, make sure `resize_vocab` is True.")
|
276 |
+
|
277 |
+
|
278 |
+
def _jinja_escape(content: str) -> str:
|
279 |
+
return content.replace("\n", r"\n").replace("'", r"\'")
|
280 |
+
|
281 |
+
|
282 |
+
def _convert_slots_to_jinja(slots: "SLOTS", tokenizer: "PreTrainedTokenizer", placeholder: str = "content") -> str:
|
283 |
+
slot_items = []
|
284 |
+
for slot in slots:
|
285 |
+
if isinstance(slot, str):
|
286 |
+
slot_pieces = slot.split("{{content}}")
|
287 |
+
if slot_pieces[0]:
|
288 |
+
slot_items.append("'" + _jinja_escape(slot_pieces[0]) + "'")
|
289 |
+
if len(slot_pieces) > 1:
|
290 |
+
slot_items.append(placeholder)
|
291 |
+
if slot_pieces[1]:
|
292 |
+
slot_items.append("'" + _jinja_escape(slot_pieces[1]) + "'")
|
293 |
+
elif isinstance(slot, set):
|
294 |
+
if "bos_token" in slot:
|
295 |
+
slot_items.append("'" + tokenizer.bos_token + "'")
|
296 |
+
elif "eos_token" in slot: # do not use {{ eos_token }} since it may be replaced
|
297 |
+
slot_items.append("'" + tokenizer.eos_token + "'")
|
298 |
+
elif isinstance(slot, dict):
|
299 |
+
raise ValueError("Dict is not supported.")
|
300 |
+
|
301 |
+
return " + ".join(slot_items)
|
302 |
+
|
303 |
+
|
304 |
+
def _get_jinja_template(template: "Template", tokenizer: "PreTrainedTokenizer") -> str:
|
305 |
+
jinja_template = ""
|
306 |
+
|
307 |
+
if template.default_system:
|
308 |
+
jinja_template += "{% set system_message = '" + _jinja_escape(template.default_system) + "' %}"
|
309 |
+
|
310 |
+
jinja_template += (
|
311 |
+
"{% if messages[0]['role'] == 'system' %}" "{% set system_message = messages[0]['content'] %}" "{% endif %}"
|
312 |
+
)
|
313 |
+
|
314 |
+
system_message = _convert_slots_to_jinja(template.format_system.apply(), tokenizer, placeholder="system_message")
|
315 |
+
if isinstance(template, Llama2Template):
|
316 |
+
pass
|
317 |
+
elif template.force_system:
|
318 |
+
jinja_template += "{{ " + system_message + " }}"
|
319 |
+
else:
|
320 |
+
jinja_template += "{% if system_message is defined %}{{ " + system_message + " }}{% endif %}"
|
321 |
+
|
322 |
+
jinja_template += "{% for message in messages %}"
|
323 |
+
jinja_template += "{% set content = message['content'] %}"
|
324 |
+
if isinstance(template, Llama2Template):
|
325 |
+
jinja_template += "{% if loop.index0 == 0 and system_message is defined %}"
|
326 |
+
jinja_template += "{% set content = " + system_message + " + message['content'] %}"
|
327 |
+
jinja_template += "{% endif %}"
|
328 |
+
jinja_template += "{% if message['role'] == 'user' %}"
|
329 |
+
user_message = _convert_slots_to_jinja(template.format_user.apply(), tokenizer)
|
330 |
+
jinja_template += "{{ " + user_message + " }}"
|
331 |
+
jinja_template += "{% elif message['role'] == 'assistant' %}"
|
332 |
+
assistant_message = _convert_slots_to_jinja(
|
333 |
+
template.format_assistant.apply() + template.format_separator.apply(), tokenizer
|
334 |
+
)
|
335 |
+
jinja_template += "{{ " + assistant_message + " }}"
|
336 |
+
jinja_template += "{% endif %}"
|
337 |
+
jinja_template += "{% endfor %}"
|
338 |
+
return jinja_template
|
339 |
+
|
340 |
+
|
341 |
+
def get_template_and_fix_tokenizer(
|
342 |
+
tokenizer: "PreTrainedTokenizer",
|
343 |
+
name: Optional[str] = None,
|
344 |
+
) -> Template:
|
345 |
+
if name is None:
|
346 |
+
template = templates["vanilla"] # placeholder
|
347 |
+
else:
|
348 |
+
template = templates.get(name, None)
|
349 |
+
if template is None:
|
350 |
+
raise ValueError("Template {} does not exist.".format(name))
|
351 |
+
|
352 |
+
stop_words = template.stop_words
|
353 |
+
if template.replace_eos:
|
354 |
+
if not stop_words:
|
355 |
+
raise ValueError("Stop words are required to replace the EOS token.")
|
356 |
+
|
357 |
+
_add_or_replace_eos_token(tokenizer, eos_token=stop_words[0])
|
358 |
+
stop_words = stop_words[1:]
|
359 |
+
|
360 |
+
if tokenizer.eos_token_id is None:
|
361 |
+
_add_or_replace_eos_token(tokenizer, eos_token="<|endoftext|>")
|
362 |
+
|
363 |
+
if tokenizer.pad_token_id is None:
|
364 |
+
tokenizer.pad_token = tokenizer.eos_token
|
365 |
+
logger.info("Add pad token: {}".format(tokenizer.pad_token))
|
366 |
+
|
367 |
+
if stop_words:
|
368 |
+
num_added_tokens = tokenizer.add_special_tokens(
|
369 |
+
dict(additional_special_tokens=stop_words), replace_additional_special_tokens=False
|
370 |
+
)
|
371 |
+
logger.info("Add {} to stop words.".format(",".join(stop_words)))
|
372 |
+
if num_added_tokens > 0:
|
373 |
+
logger.warning("New tokens have been added, make sure `resize_vocab` is True.")
|
374 |
+
|
375 |
+
try:
|
376 |
+
tokenizer.chat_template = _get_jinja_template(template, tokenizer)
|
377 |
+
except ValueError:
|
378 |
+
logger.info("Cannot add this chat template to tokenizer.")
|
379 |
+
|
380 |
+
return template
|
381 |
+
|
382 |
+
|
383 |
+
_register_template(
|
384 |
+
name="alpaca",
|
385 |
+
format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n\n### Response:\n"]),
|
386 |
+
format_separator=EmptyFormatter(slots=["\n\n"]),
|
387 |
+
default_system=(
|
388 |
+
"Below is an instruction that describes a task. " "Write a response that appropriately completes the request."
|
389 |
+
),
|
390 |
+
)
|
391 |
+
|
392 |
+
|
393 |
+
_register_template(
|
394 |
+
name="aquila",
|
395 |
+
format_user=StringFormatter(slots=["Human: {{content}}###Assistant:"]),
|
396 |
+
format_separator=EmptyFormatter(slots=["###"]),
|
397 |
+
default_system=(
|
398 |
+
"A chat between a curious human and an artificial intelligence assistant. "
|
399 |
+
"The assistant gives helpful, detailed, and polite answers to the human's questions."
|
400 |
+
),
|
401 |
+
stop_words=["</s>"],
|
402 |
+
efficient_eos=True,
|
403 |
+
)
|
404 |
+
|
405 |
+
|
406 |
+
_register_template(
|
407 |
+
name="atom",
|
408 |
+
format_user=StringFormatter(
|
409 |
+
slots=[{"bos_token"}, "Human: {{content}}\n", {"eos_token"}, {"bos_token"}, "Assistant:"]
|
410 |
+
),
|
411 |
+
format_assistant=StringFormatter(slots=["{{content}}\n", {"eos_token"}]),
|
412 |
+
)
|
413 |
+
|
414 |
+
|
415 |
+
_register_template(
|
416 |
+
name="baichuan",
|
417 |
+
format_user=StringFormatter(slots=["<reserved_102>{{content}}<reserved_103>"]),
|
418 |
+
efficient_eos=True,
|
419 |
+
)
|
420 |
+
|
421 |
+
|
422 |
+
_register_template(
|
423 |
+
name="baichuan2",
|
424 |
+
format_user=StringFormatter(slots=["<reserved_106>{{content}}<reserved_107>"]),
|
425 |
+
efficient_eos=True,
|
426 |
+
)
|
427 |
+
|
428 |
+
|
429 |
+
_register_template(
|
430 |
+
name="belle",
|
431 |
+
format_user=StringFormatter(slots=["Human: {{content}}\n\nBelle: "]),
|
432 |
+
format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
|
433 |
+
format_separator=EmptyFormatter(slots=["\n\n"]),
|
434 |
+
force_system=True,
|
435 |
+
)
|
436 |
+
|
437 |
+
|
438 |
+
_register_template(
|
439 |
+
name="bluelm",
|
440 |
+
format_user=StringFormatter(slots=[{"token": "[|Human|]:"}, "{{content}}", {"token": "[|AI|]:"}]),
|
441 |
+
)
|
442 |
+
|
443 |
+
|
444 |
+
_register_template(
|
445 |
+
name="chatglm2",
|
446 |
+
format_user=StringFormatter(slots=["[Round {{idx}}]\n\n问:{{content}}\n\n答:"]),
|
447 |
+
format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]),
|
448 |
+
format_separator=EmptyFormatter(slots=["\n\n"]),
|
449 |
+
efficient_eos=True,
|
450 |
+
force_system=True,
|
451 |
+
)
|
452 |
+
|
453 |
+
|
454 |
+
_register_template(
|
455 |
+
name="chatglm3",
|
456 |
+
format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]),
|
457 |
+
format_assistant=StringFormatter(slots=["\n", "{{content}}"]),
|
458 |
+
format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]),
|
459 |
+
format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]),
|
460 |
+
format_observation=StringFormatter(
|
461 |
+
slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]
|
462 |
+
),
|
463 |
+
stop_words=["<|user|>", "<|observation|>"],
|
464 |
+
efficient_eos=True,
|
465 |
+
force_system=True,
|
466 |
+
)
|
467 |
+
|
468 |
+
|
469 |
+
_register_template(
|
470 |
+
name="chatglm3_system",
|
471 |
+
format_user=StringFormatter(slots=[{"token": "<|user|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]),
|
472 |
+
format_assistant=StringFormatter(slots=["\n", "{{content}}"]),
|
473 |
+
format_system=StringFormatter(
|
474 |
+
slots=[{"token": "[gMASK]"}, {"token": "sop"}, {"token": "<|system|>"}, "\n", "{{content}}"]
|
475 |
+
),
|
476 |
+
format_function=FunctionFormatter(slots=["{{name}}\n{{arguments}}"]),
|
477 |
+
format_observation=StringFormatter(
|
478 |
+
slots=[{"token": "<|observation|>"}, "\n", "{{content}}", {"token": "<|assistant|>"}]
|
479 |
+
),
|
480 |
+
default_system=(
|
481 |
+
"You are ChatGLM3, a large language model trained by Zhipu.AI. "
|
482 |
+
"Follow the user's instructions carefully. Respond using markdown."
|
483 |
+
),
|
484 |
+
stop_words=["<|user|>", "<|observation|>"],
|
485 |
+
efficient_eos=True,
|
486 |
+
)
|
487 |
+
|
488 |
+
|
489 |
+
_register_template(
|
490 |
+
name="chatml",
|
491 |
+
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
492 |
+
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
493 |
+
format_separator=EmptyFormatter(slots=["\n"]),
|
494 |
+
stop_words=["<|im_end|>", "<|im_start|>"],
|
495 |
+
replace_eos=True,
|
496 |
+
)
|
497 |
+
|
498 |
+
|
499 |
+
_register_template(
|
500 |
+
name="chatml_de",
|
501 |
+
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
502 |
+
format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
503 |
+
format_separator=EmptyFormatter(slots=["\n"]),
|
504 |
+
default_system="Du bist ein freundlicher und hilfsbereiter KI-Assistent.",
|
505 |
+
stop_words=["<|im_end|>", "<|im_start|>"],
|
506 |
+
replace_eos=True,
|
507 |
+
)
|
508 |
+
|
509 |
+
|
510 |
+
_register_template(
|
511 |
+
name="codegeex2",
|
512 |
+
format_system=StringFormatter(slots=[{"token": "[gMASK]"}, {"token": "sop"}, "{{content}}"]),
|
513 |
+
force_system=True,
|
514 |
+
)
|
515 |
+
|
516 |
+
|
517 |
+
_register_template(
|
518 |
+
name="cpm",
|
519 |
+
format_user=StringFormatter(slots=["<用户>{{content}}<AI>"]),
|
520 |
+
format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
|
521 |
+
force_system=True,
|
522 |
+
)
|
523 |
+
|
524 |
+
|
525 |
+
_register_template(
|
526 |
+
name="deepseek",
|
527 |
+
format_user=StringFormatter(slots=["User: {{content}}\n\nAssistant:"]),
|
528 |
+
format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
|
529 |
+
force_system=True,
|
530 |
+
)
|
531 |
+
|
532 |
+
|
533 |
+
_register_template(
|
534 |
+
name="deepseekcoder",
|
535 |
+
format_user=StringFormatter(slots=["### Instruction:\n{{content}}\n### Response:"]),
|
536 |
+
format_assistant=StringFormatter(slots=["\n", "{{content}}"]),
|
537 |
+
format_separator=EmptyFormatter(slots=["\n<|EOT|>\n"]),
|
538 |
+
default_system=(
|
539 |
+
"You are an AI programming assistant, utilizing the Deepseek Coder model, "
|
540 |
+
"developed by Deepseek Company, and you only answer questions related to computer science. "
|
541 |
+
"For politically sensitive questions, security and privacy issues, "
|
542 |
+
"and other non-computer science questions, you will refuse to answer\n"
|
543 |
+
),
|
544 |
+
stop_words=["<|EOT|>"],
|
545 |
+
efficient_eos=True,
|
546 |
+
)
|
547 |
+
|
548 |
+
|
549 |
+
_register_template(
|
550 |
+
name="default",
|
551 |
+
format_user=StringFormatter(slots=["Human: {{content}}\nAssistant: "]),
|
552 |
+
format_system=StringFormatter(slots=["{{content}}\n"]),
|
553 |
+
format_separator=EmptyFormatter(slots=["\n"]),
|
554 |
+
)
|
555 |
+
|
556 |
+
|
557 |
+
_register_template(
|
558 |
+
name="falcon",
|
559 |
+
format_user=StringFormatter(slots=["User: {{content}}\nFalcon:"]),
|
560 |
+
format_separator=EmptyFormatter(slots=["\n"]),
|
561 |
+
efficient_eos=True,
|
562 |
+
)
|
563 |
+
|
564 |
+
|
565 |
+
_register_template(
|
566 |
+
name="gemma",
|
567 |
+
format_user=StringFormatter(slots=["<start_of_turn>user\n{{content}}<end_of_turn>\n<start_of_turn>model\n"]),
|
568 |
+
format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
|
569 |
+
format_separator=EmptyFormatter(slots=["<end_of_turn>\n"]),
|
570 |
+
efficient_eos=True,
|
571 |
+
force_system=True,
|
572 |
+
)
|
573 |
+
|
574 |
+
|
575 |
+
_register_template(
|
576 |
+
name="intern",
|
577 |
+
format_user=StringFormatter(slots=["<|User|>:{{content}}", {"token": "<eoh>"}, "\n<|Bot|>:"]),
|
578 |
+
format_separator=EmptyFormatter(slots=[{"token": "<eoa>"}, "\n"]),
|
579 |
+
stop_words=["<eoa>"],
|
580 |
+
efficient_eos=True,
|
581 |
+
)
|
582 |
+
|
583 |
+
|
584 |
+
_register_template(
|
585 |
+
name="intern2",
|
586 |
+
format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
|
587 |
+
format_system=StringFormatter(slots=[{"bos_token"}, "<|im_start|>system\n{{content}}<|im_end|>\n"]),
|
588 |
+
format_separator=EmptyFormatter(slots=["\n"]),
|
589 |
+
default_system=(
|
590 |
+
"You are an AI assistant whose name is InternLM (书生·浦语).\n"
|
591 |
+
"- InternLM (书生·浦语) is a conversational language model that is developed "
|
592 |
+
"by Shanghai AI Laboratory (上海人工智能实验室). It is designed to be helpful, honest, and harmless.\n"
|
593 |
+
"- InternLM (书生·浦语) can understand and communicate fluently in the language chosen "
|
594 |
+
"by the user such as English and 中文."
|
595 |
+
),
|
596 |
+
stop_words=["<|im_end|>"],
|
597 |
+
efficient_eos=True, # internlm2 tokenizer cannot set eos_token_id
|
598 |
+
)
|
599 |
+
|
600 |
+
|
601 |
+
_register_template(
|
602 |
+
name="llama2",
|
603 |
+
format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]),
|
604 |
+
format_system=StringFormatter(slots=["<<SYS>>\n{{content}}\n<</SYS>>\n\n"]),
|
605 |
+
default_system=(
|
606 |
+
"You are a helpful, respectful and honest assistant. "
|
607 |
+
"Always answer as helpfully as possible, while being safe. "
|
608 |
+
"Your answers should not include any harmful, unethical, "
|
+        "racist, sexist, toxic, dangerous, or illegal content. "
+        "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
+        "If a question does not make any sense, or is not factually coherent, "
+        "explain why instead of answering something not correct. "
+        "If you don't know the answer to a question, please don't share false information."
+    ),
+)
+
+
+_register_template(
+    name="llama2_zh",
+    format_user=StringFormatter(slots=[{"bos_token"}, "[INST] {{content}} [/INST]"]),
+    format_system=StringFormatter(slots=["<<SYS>>\n{{content}}\n<</SYS>>\n\n"]),
+    default_system="You are a helpful assistant. 你是一个乐于助人的助手。",
+)
+
+
+_register_template(
+    name="mistral",
+    format_user=StringFormatter(slots=["[INST] {{content}} [/INST]"]),
+    format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
+    force_system=True,
+)
+
+
+_register_template(
+    name="olmo",
+    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|assistant|>"]),
+    format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]),
+    format_system=StringFormatter(slots=[{"eos_token"}, "{{content}}"]),
+    force_system=True,
+)
+
+
+_register_template(
+    name="openchat",
+    format_user=StringFormatter(slots=["GPT4 Correct User: {{content}}", {"eos_token"}, "GPT4 Correct Assistant:"]),
+    format_assistant=StringFormatter(slots=["{{content}}", {"eos_token"}]),
+    format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
+    force_system=True,
+)
+
+
+_register_template(
+    name="orion",
+    format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: ", {"eos_token"}]),
+    format_system=StringFormatter(slots=[{"bos_token"}, "{{content}}"]),
+    force_system=True,
+)
+
+
+_register_template(
+    name="qwen",
+    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
+    format_system=StringFormatter(slots=["<|im_start|>system\n{{content}}<|im_end|>\n"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    default_system="You are a helpful assistant.",
+    stop_words=["<|im_end|>"],
+    replace_eos=True,
+)
+
+
+_register_template(
+    name="solar",
+    format_user=StringFormatter(slots=["### User:\n{{content}}\n\n### Assistant:\n"]),
+    format_system=StringFormatter(slots=["### System:\n{{content}}\n\n"]),
+    efficient_eos=True,
+)
+
+
+_register_template(
+    name="starchat",
+    format_user=StringFormatter(slots=["<|user|>\n{{content}}<|end|>\n<|assistant|>"]),
+    format_system=StringFormatter(slots=["<|system|>\n{{content}}<|end|>\n"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    stop_words=["<|end|>"],
+    replace_eos=True,
+    force_system=True,
+)
+
+
+_register_template(
+    name="vanilla",
+)
+
+
+_register_template(
+    name="vicuna",
+    format_user=StringFormatter(slots=["USER: {{content}} ASSISTANT:"]),
+    default_system=(
+        "A chat between a curious user and an artificial intelligence assistant. "
+        "The assistant gives helpful, detailed, and polite answers to the user's questions."
+    ),
+)
+
+
+_register_template(
+    name="xuanyuan",
+    format_user=StringFormatter(slots=["Human: {{content}} Assistant:"]),
+    default_system=(
+        "以下是用户和人工智能助手之间的对话。用户以Human开头,人工智能助手以Assistant开头,"
+        "会对人类提出的问题给出有帮助、高质量、详细和礼貌的回答,并且总是拒绝参与与不道德、"
+        "不安全、有争议、政治敏感等相关的话题、问题和指示。\n"
+    ),
+)
+
+
+_register_template(
+    name="xverse",
+    format_user=StringFormatter(slots=["Human: {{content}}\n\nAssistant: "]),
+)
+
+
+_register_template(
+    name="yayi",
+    format_user=StringFormatter(slots=[{"token": "<|Human|>"}, ":\n{{content}}\n\n", {"token": "<|YaYi|>"}, ":"]),
+    format_system=StringFormatter(slots=[{"token": "<|System|>"}, ":\n{{content}}\n\n"]),
+    format_separator=EmptyFormatter(slots=["\n\n"]),
+    default_system=(
+        "You are a helpful, respectful and honest assistant named YaYi "
+        "developed by Beijing Wenge Technology Co.,Ltd. "
+        "Always answer as helpfully as possible, while being safe. "
+        "Your answers should not include any harmful, unethical, "
+        "racist, sexist, toxic, dangerous, or illegal content. "
+        "Please ensure that your responses are socially unbiased and positive in nature.\n\n"
+        "If a question does not make any sense, or is not factually coherent, "
+        "explain why instead of answering something not correct. "
+        "If you don't know the answer to a question, please don't share false information."
+    ),
+    stop_words=["<|End|>"],
+)
+
+
+_register_template(
+    name="yi",
+    format_user=StringFormatter(slots=["<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    stop_words=["<|im_end|>"],
+    replace_eos=True,
+)
+
+
+_register_template(
+    name="yuan",
+    format_user=StringFormatter(slots=["{{content}}", {"token": "<sep>"}]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+    stop_words=["<eod>"],
+    replace_eos=True,
+)
+
+
+_register_template(
+    name="zephyr",
+    format_user=StringFormatter(slots=["<|user|>\n{{content}}", {"eos_token"}, "<|assistant|>"]),
+    format_assistant=StringFormatter(slots=["\n{{content}}", {"eos_token"}]),
+    format_system=StringFormatter(slots=["<|system|>\n{{content}}", {"eos_token"}]),
+    default_system="You are a friendly chatbot who always responds in the style of a pirate",
+)
+
+
+_register_template(
+    name="ziya",
+    format_user=StringFormatter(slots=["<human>:{{content}}\n<bot>:"]),
+    format_separator=EmptyFormatter(slots=["\n"]),
+)
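An aside, not part of the uploaded file: each _register_template call above only stores format strings; at inference time the template's formatters substitute the actual text into the {{content}} placeholder of every slot. A minimal Python sketch of that substitution for the ChatML-style "qwen" template, with plain str.replace as a hypothetical stand-in for llmtuner's StringFormatter:

# Hypothetical stand-in for StringFormatter (illustration only): render one
# turn of the "qwen" template registered above by filling its placeholders.
def render_qwen(system: str, user: str) -> str:
    format_system = "<|im_start|>system\n{{content}}<|im_end|>\n"
    format_user = "<|im_start|>user\n{{content}}<|im_end|>\n<|im_start|>assistant\n"
    return format_system.replace("{{content}}", system) + format_user.replace("{{content}}", user)

print(render_qwen("You are a helpful assistant.", "Hello!"))
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello!<|im_end|>
# <|im_start|>assistant

The template's declared stop word <|im_end|> then terminates generation; replace_eos=True registers it as the tokenizer's EOS token.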
LLaMA-Factory/build/lib/llmtuner/data/utils.py
ADDED
@@ -0,0 +1,94 @@
+import hashlib
+from enum import Enum, unique
+from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union
+
+from datasets import concatenate_datasets, interleave_datasets
+
+from ..extras.logging import get_logger
+
+
+if TYPE_CHECKING:
+    from datasets import Dataset, IterableDataset
+    from transformers import Seq2SeqTrainingArguments
+
+    from llmtuner.hparams import DataArguments
+
+
+logger = get_logger(__name__)
+
+
+@unique
+class Role(str, Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    FUNCTION = "function"
+    OBSERVATION = "observation"
+
+
+def checksum(data_files: List[str], file_sha1: Optional[str] = None) -> None:
+    if file_sha1 is None:
+        logger.warning("Checksum failed: missing SHA-1 hash value in dataset_info.json.")
+        return
+
+    if len(data_files) != 1:
+        logger.warning("Checksum failed: too many files.")
+        return
+
+    with open(data_files[0], "rb") as f:
+        sha1 = hashlib.sha1(f.read()).hexdigest()
+        if sha1 != file_sha1:
+            logger.warning("Checksum failed: mismatched SHA-1 hash value at {}.".format(data_files[0]))
+
+
+def infer_max_len(source_len: int, target_len: int, max_len: int, reserved_label_len: int) -> Tuple[int, int]:
+    max_target_len = int(max_len * (target_len / (source_len + target_len)))
+    max_target_len = max(max_target_len, reserved_label_len)
+    max_source_len = max_len - max_target_len
+    return max_source_len, max_target_len
+
+
+def merge_dataset(
+    all_datasets: List[Union["Dataset", "IterableDataset"]],
+    data_args: "DataArguments",
+    training_args: "Seq2SeqTrainingArguments",
+) -> Union["Dataset", "IterableDataset"]:
+    if len(all_datasets) == 1:
+        return all_datasets[0]
+    elif data_args.mix_strategy == "concat":
+        if data_args.streaming:
+            logger.warning("The samples between different datasets will not be mixed in streaming mode.")
+        return concatenate_datasets(all_datasets)
+    elif data_args.mix_strategy.startswith("interleave"):
+        if not data_args.streaming:
+            logger.warning("We recommend using `mix_strategy=concat` in non-streaming mode.")
+        return interleave_datasets(
+            datasets=all_datasets,
+            probabilities=data_args.interleave_probs,
+            seed=training_args.seed,
+            stopping_strategy="first_exhausted" if data_args.mix_strategy.endswith("under") else "all_exhausted",
+        )
+    else:
+        raise ValueError("Unknown mixing strategy.")
+
+
+def split_dataset(
+    dataset: Union["Dataset", "IterableDataset"], data_args: "DataArguments", training_args: "Seq2SeqTrainingArguments"
+) -> Dict[str, "Dataset"]:
+    if training_args.do_train:
+        if data_args.val_size > 1e-6:  # Split the dataset
+            if data_args.streaming:
+                val_set = dataset.take(int(data_args.val_size))
+                train_set = dataset.skip(int(data_args.val_size))
+                dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed)
+                return {"train_dataset": train_set, "eval_dataset": val_set}
+            else:
+                val_size = int(data_args.val_size) if data_args.val_size > 1 else data_args.val_size
+                dataset = dataset.train_test_split(test_size=val_size, seed=training_args.seed)
+                return {"train_dataset": dataset["train"], "eval_dataset": dataset["test"]}
+        else:
+            if data_args.streaming:
+                dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed)
+            return {"train_dataset": dataset}
+    else:  # do_eval or do_predict
+        return {"eval_dataset": dataset}
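A quick worked example, again not part of the diff, of the proportional length budgeting that infer_max_len implements: the max_len token budget is divided between prompt and response in proportion to their raw lengths, and the response is guaranteed at least reserved_label_len tokens.

# infer_max_len copied verbatim from the hunk above for a standalone demo.
def infer_max_len(source_len: int, target_len: int, max_len: int, reserved_label_len: int):
    max_target_len = int(max_len * (target_len / (source_len + target_len)))
    max_target_len = max(max_target_len, reserved_label_len)
    max_source_len = max_len - max_target_len
    return max_source_len, max_target_len

# A 600-token prompt and a 200-token response squeezed into 1024 tokens:
# the response keeps 200/800 of the budget (256 tokens), the prompt gets 768.
print(infer_max_len(600, 200, 1024, 16))  # (768, 256)

# A 4-token response would get int(1024 * 4/604) = 6 tokens, below the floor,
# so reserved_label_len raises it to 16 and the prompt receives 1008.
print(infer_max_len(600, 4, 1024, 16))  # (1008, 16)

Note also that split_dataset above reads val_size as an absolute sample count when it is greater than 1 and as a fraction of the dataset otherwise.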
LLaMA-Factory/build/lib/llmtuner/eval/__init__.py
ADDED
@@ -0,0 +1,4 @@
+from .evaluator import Evaluator
+
+
+__all__ = ["Evaluator"]