diff --git a/LLaMA-Efficient-Tuning/.gitattributes b/LLaMA-Efficient-Tuning/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..dfe0770424b2a19faf507a501ebfc23be8f54e7b --- /dev/null +++ b/LLaMA-Efficient-Tuning/.gitattributes @@ -0,0 +1,2 @@ +# Auto detect text files and perform LF normalization +* text=auto diff --git a/LLaMA-Efficient-Tuning/.gitignore b/LLaMA-Efficient-Tuning/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..68bc17f9ff2104a9d7b6777058bb4c343ca72609 --- /dev/null +++ b/LLaMA-Efficient-Tuning/.gitignore @@ -0,0 +1,160 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore +# and can be added to the global gitignore or merged into this file. For a more nuclear +# option (not recommended) you can uncomment the following to ignore the entire idea folder. +#.idea/ diff --git a/LLaMA-Efficient-Tuning/LICENSE b/LLaMA-Efficient-Tuning/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..b09cd7856d58590578ee1a4f3ad45d1310a97f87 --- /dev/null +++ b/LLaMA-Efficient-Tuning/LICENSE @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/LLaMA-Efficient-Tuning/README.md b/LLaMA-Efficient-Tuning/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9d1ea252d45a33c100f8a61ce986d8e1ecf0d343
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/README.md
@@ -0,0 +1,482 @@
+# LLaMA Efficient Tuning
+
+[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Efficient-Tuning?style=social)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/stargazers)
+[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Efficient-Tuning)](LICENSE)
+[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Efficient-Tuning)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/commits/main)
+[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/)
+[![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/)
+[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/pulls)
+[![Discord](https://dcbadge.vercel.app/api/server/7HGMsdxqJ?compact=true&style=flat)](https://discord.gg/7HGMsdxqJ)
+
+👋 Join our [WeChat](assets/wechat.jpg).
+
+\[ English | [中文](README_zh.md) \]
+
+## Changelog
+
+[23/09/10] Now we support **[FlashAttention](https://github.com/Dao-AILab/flash-attention)** for the LLaMA models. Try the `--flash_attn` argument to enable FlashAttention-2 if you are using RTX 4090, A100 or H100 GPUs (experimental feature).
+
+[23/08/18] Now we support **resuming training**. Upgrade `transformers` to `4.31.0` to enable this feature.
+
+[23/08/12] Now we support **RoPE scaling** to extend the context length of the LLaMA models. Try the `--rope_scaling linear` argument during training and the `--rope_scaling dynamic` argument at inference to extrapolate the position embeddings.
+
+[23/08/11] Now we support **[DPO training](https://arxiv.org/abs/2305.18290)** for instruction-tuned models. See [this example](#dpo-training) to train your models.
+
+[23/07/31] Now we support **dataset streaming**. Try the `--streaming` and `--max_steps 10000` arguments to load your dataset in streaming mode.
+
+[23/07/29] We released two instruction-tuned 13B models on Hugging Face. See these Hugging Face repos ([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft)) for details.
+
+[23/07/18] We developed an **all-in-one Web UI** for training, evaluation and inference. Try `train_web.py` to fine-tune models in your web browser. Thanks to [@KanadeSiina](https://github.com/KanadeSiina) and [@codemayq](https://github.com/codemayq) for their efforts in the development.
+
+[23/07/09] We released **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹, an easy-to-use package for efficiently editing the factual knowledge of large language models. Please follow [FastEdit](https://github.com/hiyouga/FastEdit) if you are interested.
+
+[23/06/29] We provide a **reproducible example** of training a chat model using instruction-following datasets; see [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft) for details.
+
+[23/06/22] We aligned the [demo API](src/api_demo.py) with the [OpenAI](https://platform.openai.com/docs/api-reference/chat) format so that you can plug the fine-tuned model into **arbitrary ChatGPT-based applications**.
+
+[23/06/03] Now we support quantized training and inference (aka **[QLoRA](https://github.com/artidoro/qlora)**).
Try the `--quantization_bit 4/8` argument to work with quantized models.
+
+## Supported Models
+
+| Model                                                     | Model size                  | Default module    | Template  |
+| --------------------------------------------------------- | --------------------------- | ----------------- | --------- |
+| [LLaMA](https://github.com/facebookresearch/llama)        | 7B/13B/33B/65B              | q_proj,v_proj     | -         |
+| [LLaMA-2](https://huggingface.co/meta-llama)              | 7B/13B/70B                  | q_proj,v_proj     | llama2    |
+| [BLOOM](https://huggingface.co/bigscience/bloom)          | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value   | -         |
+| [BLOOMZ](https://huggingface.co/bigscience/bloomz)        | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value   | -         |
+| [Falcon](https://huggingface.co/tiiuae/falcon-7b)         | 7B/40B                      | query_key_value   | -         |
+| [Baichuan](https://github.com/baichuan-inc/Baichuan-13B)  | 7B/13B                      | W_pack            | baichuan  |
+| [Baichuan2](https://github.com/baichuan-inc/Baichuan2)    | 7B/13B                      | W_pack            | baichuan2 |
+| [InternLM](https://github.com/InternLM/InternLM)          | 7B/20B                      | q_proj,v_proj     | intern    |
+| [Qwen](https://github.com/QwenLM/Qwen-7B)                 | 7B                          | c_attn            | chatml    |
+| [XVERSE](https://github.com/xverse-ai/XVERSE-13B)         | 13B                         | q_proj,v_proj     | xverse    |
+| [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B)          | 6B                          | query_key_value   | chatglm2  |
+| [Phi-1.5](https://huggingface.co/microsoft/phi-1_5)       | 1.5B                        | Wqkv              | -         |
+
+> [!NOTE]
+> **Default module** is used for the `--lora_target` argument. You can use `--lora_target all` to specify all the available modules.
+>
+> For the "base" models, the `--template` argument can be chosen from `default`, `alpaca`, `vicuna`, etc., but make sure to use the corresponding template for the "chat" models.
+
+## Supported Training Approaches
+
+| Approach               | Full-parameter     | Partial-parameter  | LoRA               | QLoRA              |
+| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ |
+| Pre-Training           | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| Supervised Fine-Tuning | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: |
+| Reward Modeling        |                    |                    | :white_check_mark: | :white_check_mark: |
+| PPO Training           |                    |                    | :white_check_mark: | :white_check_mark: |
+| DPO Training           | :white_check_mark: |                    | :white_check_mark: | :white_check_mark: |
+
+> [!NOTE]
+> Use the `--quantization_bit 4/8` argument to enable QLoRA.
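+For example, QLoRA fine-tuning is simply the corresponding LoRA command plus the quantization flag. Below is a minimal sketch based on the supervised fine-tuning example shown later in this README (the output path here is illustrative):
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+    --stage sft \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset alpaca_gpt4_en \
+    --template default \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --quantization_bit 4 \
+    --output_dir path_to_qlora_checkpoint \
+    --fp16
+```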
+
+## Provided Datasets
+
+- For pre-training:
+  - [Wiki Demo (en)](data/wiki_demo.txt)
+  - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb)
+  - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata)
+  - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220)
+  - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered)
+- For supervised fine-tuning:
+  - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca)
+  - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca)
+  - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+  - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+  - [Self-cognition (zh)](data/self_cognition.json)
+  - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection)
+  - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset)
+  - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN)
+  - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN)
+  - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN)
+  - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M)
+  - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M)
+  - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M)
+  - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima)
+  - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k)
+  - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT)
+  - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct)
+  - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M)
+  - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa)
+  - [UltraChat (en)](https://github.com/thunlp/UltraChat)
+  - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn)
+  - [Ad Gen (zh)](https://huggingface.co/datasets/HasturOfficial/adgen)
+- For reward modeling or DPO training:
+  - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf)
+  - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1)
+  - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM)
+
+Please refer to [data/README.md](data/README.md) for details.
+
+Some datasets require you to confirm access before use, so we recommend logging in to your Hugging Face account with the following commands.
+
+```bash
+pip install --upgrade huggingface_hub
+huggingface-cli login
+```
+
+## Requirements
+
+- Python 3.8+ and PyTorch 1.13.1+
+- 🤗Transformers, Datasets, Accelerate, PEFT and TRL
+- sentencepiece, protobuf and tiktoken
+- jieba, rouge-chinese and nltk (used for evaluation)
+- gradio and matplotlib (used in web_demo.py)
+- uvicorn, fastapi and sse-starlette (used in api_demo.py)
+
+And **powerful GPUs**!
+
+## Getting Started
+
+### Data Preparation (optional)
+
+Please refer to `data/example_dataset` for details about the format of dataset files. You can either use a single `.json` file or a [dataset loading script](https://huggingface.co/docs/datasets/dataset_script) with multiple files to create a custom dataset; a minimal example record is sketched below.
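+Here is a minimal sketch of one record in a custom `.json` dataset file, using the default column names (`instruction`, `input`, `output`) described in `data/README.md`; the file itself is a JSON array of such records, and the example content is purely illustrative:
+
+```json
+[
+  {
+    "instruction": "Translate the following sentence into French.",
+    "input": "The weather is nice today.",
+    "output": "Il fait beau aujourd'hui."
+  }
+]
+```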
+
+> [!NOTE]
+> Please update `data/dataset_info.json` to use your custom dataset. For the format of this file, please refer to `data/README.md`.
+
+### Dependency Installation (optional)
+
+```bash
+git clone https://github.com/hiyouga/LLaMA-Efficient-Tuning.git
+conda create -n llama_etuning python=3.10
+conda activate llama_etuning
+cd LLaMA-Efficient-Tuning
+pip install -r requirements.txt
+```
+
+If you want to enable quantized LoRA (QLoRA) on the Windows platform, you need to install a pre-built version of the `bitsandbytes` library, which supports CUDA 11.1 to 12.1.
+
+```bash
+pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl
+```
+
+### All-in-one Web UI
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_web.py
+```
+
+We **strongly recommend** the all-in-one Web UI for newcomers, since it can also generate training scripts automatically, even in an environment without a GPU.
+
+> [!WARNING]
+> Currently the web UI only supports training on **a single GPU**.
+
+### Train on a single GPU
+
+> [!IMPORTANT]
+> If you want to train models on multiple GPUs, please refer to [Distributed Training](#distributed-training).
+
+#### Pre-Training
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+    --stage pt \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset wiki_demo \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --output_dir path_to_pt_checkpoint \
+    --overwrite_cache \
+    --per_device_train_batch_size 4 \
+    --gradient_accumulation_steps 4 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --save_steps 1000 \
+    --learning_rate 5e-5 \
+    --num_train_epochs 3.0 \
+    --plot_loss \
+    --fp16
+```
+
+#### Supervised Fine-Tuning
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+    --stage sft \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset alpaca_gpt4_en \
+    --template default \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --output_dir path_to_sft_checkpoint \
+    --overwrite_cache \
+    --per_device_train_batch_size 4 \
+    --gradient_accumulation_steps 4 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --save_steps 1000 \
+    --learning_rate 5e-5 \
+    --num_train_epochs 3.0 \
+    --plot_loss \
+    --fp16
+```
+
+#### Reward Modeling
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+    --stage rm \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset comparison_gpt4_en \
+    --template default \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --resume_lora_training False \
+    --checkpoint_dir path_to_sft_checkpoint \
+    --output_dir path_to_rm_checkpoint \
+    --per_device_train_batch_size 2 \
+    --gradient_accumulation_steps 4 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --save_steps 1000 \
+    --learning_rate 1e-6 \
+    --num_train_epochs 1.0 \
+    --plot_loss \
+    --fp16
+```
+
+#### PPO Training
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+    --stage ppo \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset alpaca_gpt4_en \
+    --template default \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --resume_lora_training False \
+    --checkpoint_dir path_to_sft_checkpoint \
+    --reward_model path_to_rm_checkpoint \
+    --output_dir path_to_ppo_checkpoint \
+    --per_device_train_batch_size 2 \
+    --gradient_accumulation_steps 4 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --save_steps 1000 \
+    --learning_rate 1e-5 \
+    --num_train_epochs 1.0 \
+    --plot_loss \
+    --fp16
+```
+
+#### DPO Training
+
+```bash
+CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \
+    --stage dpo \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset comparison_gpt4_en \
+    --template default \
+    --finetuning_type lora \
+    --lora_target q_proj,v_proj \
+    --resume_lora_training False \
+    --checkpoint_dir path_to_sft_checkpoint \
+    --output_dir path_to_dpo_checkpoint \
+    --per_device_train_batch_size 2 \
+    --gradient_accumulation_steps 4 \
+    --lr_scheduler_type cosine \
+    --logging_steps 10 \
+    --save_steps 1000 \
+    --learning_rate 1e-5 \
+    --num_train_epochs 1.0 \
+    --plot_loss \
+    --fp16
+```
+
+### Distributed Training
+
+#### Use Hugging Face Accelerate
+
+```bash
+accelerate config # configure the environment
+accelerate launch src/train_bash.py # arguments (same as above)
+```
+
+**Example config for LoRA training**
+
+```yaml
+compute_environment: LOCAL_MACHINE
+distributed_type: MULTI_GPU
+downcast_bf16: 'no'
+gpu_ids: all
+machine_rank: 0
+main_training_function: main
+mixed_precision: fp16
+num_machines: 1
+num_processes: 4
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
+```
+
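+If you prefer not to run the interactive `accelerate config` step, you can save a config like the one above to a file and pass it to the launcher explicitly. A minimal sketch, assuming the file is saved as `accelerate_config.yaml` (the file name is arbitrary):
+
+```bash
+accelerate launch --config_file accelerate_config.yaml src/train_bash.py \
+    ... # arguments (same as above)
+```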
+
+#### Use DeepSpeed
+
+```bash
+deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
+    --deepspeed ds_config.json \
+    ... # arguments (same as above)
+```
+
+**Example config for full-parameter training with DeepSpeed ZeRO-2**
+
+```json
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "initial_scale_power": 16,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "zero_optimization": {
+    "stage": 2,
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "overlap_comm": false,
+    "contiguous_gradients": true
+  }
+}
+```
+
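+The `"auto"` values above are filled in by the 🤗 Transformers DeepSpeed integration from the matching command-line arguments (e.g. `--per_device_train_batch_size`, `--gradient_accumulation_steps`, `--fp16`), so they do not need to be repeated in the config. As a minimal sketch, full-parameter fine-tuning with this config might look like the following, assuming `--finetuning_type full` selects the full-parameter approach from the table above and the paths follow the earlier examples:
+
+```bash
+deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
+    --deepspeed ds_config.json \
+    --stage sft \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset alpaca_gpt4_en \
+    --template default \
+    --finetuning_type full \
+    --output_dir path_to_sft_full_checkpoint \
+    --per_device_train_batch_size 4 \
+    --gradient_accumulation_steps 4 \
+    --fp16
+```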
+ +### Export model + +```bash +python src/export_model.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint \ + --output_dir path_to_export +``` + +### API Demo + +```bash +python src/api_demo.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint +``` + +> [!NOTE] +> Visit `http://localhost:8000/docs` for API documentation. + +### CLI Demo + +```bash +python src/cli_demo.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint +``` + +### Web Demo + +```bash +python src/web_demo.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint +``` + +### Evaluation (BLEU and ROUGE_CHINESE) + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft \ + --model_name_or_path path_to_llama_model \ + --do_eval \ + --dataset alpaca_gpt4_en \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint \ + --output_dir path_to_eval_result \ + --per_device_eval_batch_size 8 \ + --max_samples 100 \ + --predict_with_generate +``` + +> [!NOTE] +> We recommend using `--per_device_eval_batch_size=1` and `--max_target_length 128` at 4/8-bit evaluation. + +### Predict + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft \ + --model_name_or_path path_to_llama_model \ + --do_predict \ + --dataset alpaca_gpt4_en \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint \ + --output_dir path_to_predict_result \ + --per_device_eval_batch_size 8 \ + --max_samples 100 \ + --predict_with_generate +``` + +## License + +This repository is licensed under the [Apache-2.0 License](LICENSE). + +Please follow the model licenses to use the corresponding model weights: + +- [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) +- [LLaMA-2](https://ai.meta.com/llama/license/) +- [BLOOM](https://huggingface.co/spaces/bigscience/license) +- [Falcon](LICENSE) +- [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) +- [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) +- [InternLM](https://github.com/InternLM/InternLM#open-source-license) +- [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE) +- [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) +- [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B/blob/main/MODEL_LICENSE) +- [Phi-1.5](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) + +## Citation + +If this work is helpful, please kindly cite as: + +```bibtex +@Misc{llama-efficient-tuning, + title = {LLaMA Efficient Tuning}, + author = {hiyouga}, + howpublished = {\url{https://github.com/hiyouga/LLaMA-Efficient-Tuning}}, + year = {2023} +} +``` + +## Acknowledgement + +This repo benefits from [PEFT](https://github.com/huggingface/peft), [QLoRA](https://github.com/artidoro/qlora), [FastChat](https://github.com/lm-sys/FastChat) and [OpenChatKit](https://github.com/togethercomputer/OpenChatKit). Thanks for their wonderful works. 
+ +## Star History + +![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Efficient-Tuning&type=Date) diff --git a/LLaMA-Efficient-Tuning/README_zh.md b/LLaMA-Efficient-Tuning/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..350e2ddf7d9af865add0903922214742839fe0d1 --- /dev/null +++ b/LLaMA-Efficient-Tuning/README_zh.md @@ -0,0 +1,481 @@ +# LLaMA Efficient Tuning + +[![GitHub Repo stars](https://img.shields.io/github/stars/hiyouga/LLaMA-Efficient-Tuning?style=social)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/stargazers) +[![GitHub Code License](https://img.shields.io/github/license/hiyouga/LLaMA-Efficient-Tuning)](LICENSE) +[![GitHub last commit](https://img.shields.io/github/last-commit/hiyouga/LLaMA-Efficient-Tuning)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/commits/main) +[![PyPI](https://img.shields.io/pypi/v/llmtuner)](https://pypi.org/project/llmtuner/) +[![Downloads](https://static.pepy.tech/badge/llmtuner)](https://pypi.org/project/llmtuner/) +[![GitHub pull request](https://img.shields.io/badge/PRs-welcome-blue)](https://github.com/hiyouga/LLaMA-Efficient-Tuning/pulls) +[![Discord](https://dcbadge.vercel.app/api/server/7HGMsdxqJ?compact=true&style=flat)](https://discord.gg/7HGMsdxqJ) + +👋 加入我们的[微信群](assets/wechat.jpg)。 + +\[ [English](README.md) | 中文 \] + +## 更新日志 + +[23/09/10] 现在我们支持了 LLaMA 模型的 **[FlashAttention](https://github.com/Dao-AILab/flash-attention)**。如果您使用的是 RTX4090、A100 或 H100 GPU,请使用 `--flash_attn` 参数以启用 FlashAttention-2(实验性功能)。 + +[23/08/18] 现在我们支持了**训练状态恢复**,请将 `transformers` 升级至 `4.31.0` 以启用此功能。 + +[23/08/12] 现在我们支持了 **RoPE 插值**来扩展 LLaMA 模型的上下文长度。请使用 `--rope_scaling linear` 参数训练模型或使用 `--rope_scaling dynamic` 参数评估模型。 + +[23/08/11] 现在我们支持了指令模型的 **[DPO 训练](https://arxiv.org/abs/2305.18290)**。详情请参阅[此示例](#dpo-训练)。 + +[23/07/31] 现在我们支持了**数据流式加载**。请尝试使用 `--streaming` 和 `--max_steps 10000` 参数来流式加载数据集。 + +[23/07/29] 我们在 Hugging Face 发布了两个 13B 指令微调模型。详细内容请查阅我们的 Hugging Face 项目([LLaMA-2](https://huggingface.co/hiyouga/Llama-2-Chinese-13b-chat) / [Baichuan](https://huggingface.co/hiyouga/Baichuan-13B-sft))。 + +[23/07/18] 我们开发了支持训练和测试的**浏览器一体化界面**。请尝试使用 `train_web.py` 在您的浏览器中微调模型。感谢 [@KanadeSiina](https://github.com/KanadeSiina) 和 [@codemayq](https://github.com/codemayq) 在该功能开发中付出的努力。 + +[23/07/09] 我们开源了 **[FastEdit](https://github.com/hiyouga/FastEdit)** ⚡🩹,一个简单易用的、能迅速编辑大模型事实记忆的工具包。如果您感兴趣请关注我们的 [FastEdit](https://github.com/hiyouga/FastEdit) 项目。 + +[23/06/29] 我们提供了一个**可复现的**指令模型微调示例,详细内容请查阅 [Baichuan-7B-sft](https://huggingface.co/hiyouga/Baichuan-7B-sft)。 + +[23/06/22] 我们对齐了[示例 API](src/api_demo.py) 与 [OpenAI API](https://platform.openai.com/docs/api-reference/chat) 的格式,您可以将微调模型接入**任意基于 ChatGPT 的应用**中。 + +[23/06/03] 现在我们实现了 4 比特的 LoRA 训练(也称 **[QLoRA](https://github.com/artidoro/qlora)**)。请尝试使用 `--quantization_bit 4` 参数进行 4 比特量化微调。 + +## 模型 + +| 模型名 | 模型大小 | 默认模块 | Template | +| -------------------------------------------------------- | --------------------------- | ----------------- | --------- | +| [LLaMA](https://github.com/facebookresearch/llama) | 7B/13B/33B/65B | q_proj,v_proj | - | +| [LLaMA-2](https://huggingface.co/meta-llama) | 7B/13B/70B | q_proj,v_proj | llama2 | +| [BLOOM](https://huggingface.co/bigscience/bloom) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - | +| [BLOOMZ](https://huggingface.co/bigscience/bloomz) | 560M/1.1B/1.7B/3B/7.1B/176B | query_key_value | - | +| [Falcon](https://huggingface.co/tiiuae/falcon-7b) | 7B/40B | query_key_value | - | +| 
[Baichuan](https://github.com/baichuan-inc/Baichuan-13B) | 7B/13B | W_pack | baichuan | +| [Baichuan2](https://github.com/baichuan-inc/Baichuan2) | 7B/13B | W_pack | baichuan2 | +| [InternLM](https://github.com/InternLM/InternLM) | 7B/20B | q_proj,v_proj | intern | +| [Qwen](https://github.com/QwenLM/Qwen-7B) | 7B | c_attn | chatml | +| [XVERSE](https://github.com/xverse-ai/XVERSE-13B) | 13B | q_proj,v_proj | xverse | +| [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B) | 6B | query_key_value | chatglm2 | +| [Phi-1.5](https://huggingface.co/microsoft/phi-1_5) | 1.5B | Wqkv | - | + +> [!NOTE] +> **默认模块**应作为 `--lora_target` 参数的默认值,可使用 `--lora_target all` 参数指定全部模块。 +> +> 对于所有“基座”(Base)模型,`--template` 参数可以是 `default`, `alpaca`, `vicuna` 等任意值。但“对话”(Chat)模型请务必使用对应的模板。 + +## 训练方法 + +| 方法 | 全参数训练 | 部分参数训练 | LoRA | QLoRA | +| ---------------------- | ------------------ | ------------------ | ------------------ | ------------------ | +| 预训练 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| 指令监督微调 | :white_check_mark: | :white_check_mark: | :white_check_mark: | :white_check_mark: | +| 奖励模型训练 | | | :white_check_mark: | :white_check_mark: | +| PPO 训练 | | | :white_check_mark: | :white_check_mark: | +| DPO 训练 | :white_check_mark: | | :white_check_mark: | :white_check_mark: | + +> [!NOTE] +> 请使用 `--quantization_bit 4/8` 参数来启用 QLoRA 训练。 + +## 数据集 + +- 用于预训练: + - [Wiki Demo (en)](data/wiki_demo.txt) + - [RefinedWeb (en)](https://huggingface.co/datasets/tiiuae/falcon-refinedweb) + - [StarCoder (en)](https://huggingface.co/datasets/bigcode/starcoderdata) + - [Wikipedia (en)](https://huggingface.co/datasets/olm/olm-wikipedia-20221220) + - [Wikipedia (zh)](https://huggingface.co/datasets/pleisto/wikipedia-cn-20230720-filtered) +- 用于指令监督微调: + - [Stanford Alpaca (en)](https://github.com/tatsu-lab/stanford_alpaca) + - [Stanford Alpaca (zh)](https://github.com/ymcui/Chinese-LLaMA-Alpaca) + - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) + - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1) + - [Self-cognition (zh)](data/self_cognition.json) + - [ShareGPT (zh)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT/tree/main/Chinese-instruction-collection) + - [Guanaco Dataset (multilingual)](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) + - [BELLE 2M (zh)](https://huggingface.co/datasets/BelleGroup/train_2M_CN) + - [BELLE 1M (zh)](https://huggingface.co/datasets/BelleGroup/train_1M_CN) + - [BELLE 0.5M (zh)](https://huggingface.co/datasets/BelleGroup/train_0.5M_CN) + - [BELLE Dialogue 0.4M (zh)](https://huggingface.co/datasets/BelleGroup/generated_chat_0.4M) + - [BELLE School Math 0.25M (zh)](https://huggingface.co/datasets/BelleGroup/school_math_0.25M) + - [BELLE Multiturn Chat 0.8M (zh)](https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M) + - [LIMA (en)](https://huggingface.co/datasets/GAIR/lima) + - [CodeAlpaca 20k (en)](https://huggingface.co/datasets/sahil2801/CodeAlpaca-20k) + - [Alpaca CoT (multilingual)](https://huggingface.co/datasets/QingyiSi/Alpaca-CoT) + - [MathInstruct (en)](https://huggingface.co/datasets/TIGER-Lab/MathInstruct) + - [Firefly 1.1M (zh)](https://huggingface.co/datasets/YeungNLP/firefly-train-1.1M) + - [Web QA (zh)](https://huggingface.co/datasets/suolyer/webqa) + - [UltraChat (en)](https://github.com/thunlp/UltraChat) + - [WebNovel (zh)](https://huggingface.co/datasets/zxbsmk/webnovel_cn) + - [Ad Gen 
(zh)](https://huggingface.co/datasets/HasturOfficial/adgen) +- 用于训练奖励模型或 DPO 训练: + - [HH-RLHF (en)](https://huggingface.co/datasets/Anthropic/hh-rlhf) + - [Open Assistant (multilingual)](https://huggingface.co/datasets/OpenAssistant/oasst1) + - [GPT-4 Generated Data (en&zh)](https://github.com/Instruction-Tuning-with-GPT-4/GPT-4-LLM) + +使用方法请参考 [data/README.md](data/README_zh.md) 文件。 + +部分数据集的使用需要确认,我们推荐使用下述命令登录您的 Hugging Face 账户。 + +```bash +pip install --upgrade huggingface_hub +huggingface-cli login +``` + +## 软件依赖 + +- Python 3.8+ 和 PyTorch 1.13.1+ +- 🤗Transformers, Datasets, Accelerate, PEFT 和 TRL +- sentencepiece, protobuf 和 tiktoken +- jieba, rouge-chinese 和 nltk (用于评估) +- gradio 和 matplotlib (用于网页端交互) +- uvicorn, fastapi 和 sse-starlette (用于 API) + +以及 **强而有力的 GPU**! + +## 如何使用 + +### 数据准备(可跳过) + +关于数据集文件的格式,请参考 `data/example_dataset` 文件夹的内容。构建自定义数据集时,既可以使用单个 `.json` 文件,也可以使用一个[数据加载脚本](https://huggingface.co/docs/datasets/dataset_script)和多个文件。 + +> [!NOTE] +> 使用自定义数据集时,请更新 `data/dataset_info.json` 文件,该文件的格式请参考 `data/README.md`。 + +### 环境搭建(可跳过) + +```bash +git clone https://github.com/hiyouga/LLaMA-Efficient-Tuning.git +conda create -n llama_etuning python=3.10 +conda activate llama_etuning +cd LLaMA-Efficient-Tuning +pip install -r requirements.txt +``` + +如果要在 Windows 平台上开启量化 LoRA(QLoRA),需要安装预编译的 `bitsandbytes` 库, 支持 CUDA 11.1 到 12.1. + +```bash +pip install https://github.com/jllllll/bitsandbytes-windows-webui/releases/download/wheels/bitsandbytes-0.39.1-py3-none-win_amd64.whl +``` + +### 浏览器一体化界面 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_web.py +``` + +我们**极力推荐**新手使用浏览器一体化界面,因为它还可以不依赖 GPU 环境自动生成在 GPU 上运行的命令行脚本。 + +> [!WARNING] +> 目前网页 UI 仅支持**单卡训练**。 + +### 单 GPU 训练 + +> [!IMPORTANT] +> 如果您使用多张 GPU 训练模型,请移步[多 GPU 分布式训练](#多-gpu-分布式训练)部分。 + +#### 预训练 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage pt \ + --model_name_or_path path_to_llama_model \ + --do_train \ + --dataset wiki_demo \ + --finetuning_type lora \ + --lora_target q_proj,v_proj \ + --output_dir path_to_pt_checkpoint \ + --overwrite_cache \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 4 \ + --lr_scheduler_type cosine \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate 5e-5 \ + --num_train_epochs 3.0 \ + --plot_loss \ + --fp16 +``` + +#### 指令监督微调 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft \ + --model_name_or_path path_to_llama_model \ + --do_train \ + --dataset alpaca_gpt4_zh \ + --template default \ + --finetuning_type lora \ + --lora_target q_proj,v_proj \ + --output_dir path_to_sft_checkpoint \ + --overwrite_cache \ + --per_device_train_batch_size 4 \ + --gradient_accumulation_steps 4 \ + --lr_scheduler_type cosine \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate 5e-5 \ + --num_train_epochs 3.0 \ + --plot_loss \ + --fp16 +``` + +#### 奖励模型训练 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage rm \ + --model_name_or_path path_to_llama_model \ + --do_train \ + --dataset comparison_gpt4_zh \ + --template default \ + --finetuning_type lora \ + --lora_target q_proj,v_proj \ + --resume_lora_training False \ + --checkpoint_dir path_to_sft_checkpoint \ + --output_dir path_to_rm_checkpoint \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 4 \ + --lr_scheduler_type cosine \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate 1e-6 \ + --num_train_epochs 1.0 \ + --plot_loss \ + --fp16 +``` + +#### PPO 训练 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py 
\ + --stage ppo \ + --model_name_or_path path_to_llama_model \ + --do_train \ + --dataset alpaca_gpt4_zh \ + --template default \ + --finetuning_type lora \ + --lora_target q_proj,v_proj \ + --resume_lora_training False \ + --checkpoint_dir path_to_sft_checkpoint \ + --reward_model path_to_rm_checkpoint \ + --output_dir path_to_ppo_checkpoint \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 4 \ + --lr_scheduler_type cosine \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate 1e-5 \ + --num_train_epochs 1.0 \ + --plot_loss +``` + +#### DPO 训练 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage dpo \ + --model_name_or_path path_to_llama_model \ + --do_train \ + --dataset comparison_gpt4_zh \ + --template default \ + --finetuning_type lora \ + --lora_target q_proj,v_proj \ + --resume_lora_training False \ + --checkpoint_dir path_to_sft_checkpoint \ + --output_dir path_to_dpo_checkpoint \ + --per_device_train_batch_size 2 \ + --gradient_accumulation_steps 4 \ + --lr_scheduler_type cosine \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate 1e-5 \ + --num_train_epochs 1.0 \ + --plot_loss \ + --fp16 +``` + +### 多 GPU 分布式训练 + +#### 使用 Huggingface Accelerate + +```bash +accelerate config # 首先配置分布式环境 +accelerate launch src/train_bash.py # 参数同上 +``` + +
**LoRA 训练的 Accelerate 配置示例**
+
+```yaml
+compute_environment: LOCAL_MACHINE
+distributed_type: MULTI_GPU
+downcast_bf16: 'no'
+gpu_ids: all
+machine_rank: 0
+main_training_function: main
+mixed_precision: fp16
+num_machines: 1
+num_processes: 4
+rdzv_backend: static
+same_network: true
+tpu_env: []
+tpu_use_cluster: false
+tpu_use_sudo: false
+use_cpu: false
+```
+
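+如果不想运行交互式的 `accelerate config`,也可以将上述配置保存为文件并显式传给启动器。以下是一个最简示例(假设文件保存为 `accelerate_config.yaml`,文件名可以任意指定):
+
+```bash
+accelerate launch --config_file accelerate_config.yaml src/train_bash.py \
+    ... # 参数同上
+```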
+
+#### 使用 DeepSpeed
+
+```bash
+deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
+    --deepspeed ds_config.json \
+    ... # 参数同上
+```
+
+**使用 DeepSpeed ZeRO-2 进行全参数训练的 DeepSpeed 配置示例**
+
+```json
+{
+  "train_batch_size": "auto",
+  "train_micro_batch_size_per_gpu": "auto",
+  "gradient_accumulation_steps": "auto",
+  "gradient_clipping": "auto",
+  "zero_allow_untested_optimizer": true,
+  "fp16": {
+    "enabled": "auto",
+    "loss_scale": 0,
+    "initial_scale_power": 16,
+    "loss_scale_window": 1000,
+    "hysteresis": 2,
+    "min_loss_scale": 1
+  },
+  "zero_optimization": {
+    "stage": 2,
+    "allgather_partitions": true,
+    "allgather_bucket_size": 5e8,
+    "reduce_scatter": true,
+    "reduce_bucket_size": 5e8,
+    "overlap_comm": false,
+    "contiguous_gradients": true
+  }
+}
+```
+
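+上述配置中的 `"auto"` 取值会由 🤗 Transformers 的 DeepSpeed 集成根据对应的命令行参数(如 `--per_device_train_batch_size`、`--gradient_accumulation_steps`、`--fp16`)自动填充,无需在配置文件中重复指定。以下是一个全参数微调的示例草稿(假设使用 `--finetuning_type full` 对应上表中的全参数训练,路径同前文示例):
+
+```bash
+deepspeed --num_gpus 8 --master_port=9901 src/train_bash.py \
+    --deepspeed ds_config.json \
+    --stage sft \
+    --model_name_or_path path_to_llama_model \
+    --do_train \
+    --dataset alpaca_gpt4_zh \
+    --template default \
+    --finetuning_type full \
+    --output_dir path_to_sft_full_checkpoint \
+    --per_device_train_batch_size 4 \
+    --gradient_accumulation_steps 4 \
+    --fp16
+```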
+ +### 导出微调后的模型 + +```bash +python src/export_model.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint \ + --output_dir path_to_export +``` + +### API 服务 + +```bash +python src/api_demo.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint +``` + +> [!NOTE] +> 关于 API 文档请见 `http://localhost:8000/docs`。 + +### 命令行测试 + +```bash +python src/cli_demo.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint +``` + +### 浏览器测试 + +```bash +python src/web_demo.py \ + --model_name_or_path path_to_llama_model \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint +``` + +### 指标评估(BLEU 分数和汉语 ROUGE 分数) + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft \ + --model_name_or_path path_to_llama_model \ + --do_eval \ + --dataset alpaca_gpt4_zh \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint \ + --output_dir path_to_eval_result \ + --per_device_eval_batch_size 8 \ + --max_samples 100 \ + --predict_with_generate +``` + +> [!NOTE] +> 我们建议在量化模型的评估中使用 `--per_device_eval_batch_size=1` 和 `--max_target_length 128`。 + +### 模型预测 + +```bash +CUDA_VISIBLE_DEVICES=0 python src/train_bash.py \ + --stage sft \ + --model_name_or_path path_to_llama_model \ + --do_predict \ + --dataset alpaca_gpt4_zh \ + --template default \ + --finetuning_type lora \ + --checkpoint_dir path_to_checkpoint \ + --output_dir path_to_predict_result \ + --per_device_eval_batch_size 8 \ + --max_samples 100 \ + --predict_with_generate +``` + +## 协议 + +本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源。 + +使用模型权重时,请遵循对应的模型协议: + +- [LLaMA](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) +- [LLaMA-2](https://ai.meta.com/llama/license/) +- [BLOOM](https://huggingface.co/spaces/bigscience/license) +- [Falcon](LICENSE) +- [Baichuan](https://huggingface.co/baichuan-inc/baichuan-7B/resolve/main/baichuan-7B%20%E6%A8%A1%E5%9E%8B%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) +- [Baichuan2](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base/resolve/main/Baichuan%202%E6%A8%A1%E5%9E%8B%E7%A4%BE%E5%8C%BA%E8%AE%B8%E5%8F%AF%E5%8D%8F%E8%AE%AE.pdf) +- [InternLM](https://github.com/InternLM/InternLM#open-source-license) +- [Qwen](https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/LICENSE) +- [XVERSE](https://github.com/xverse-ai/XVERSE-13B/blob/main/MODEL_LICENSE.pdf) +- [ChatGLM2](https://github.com/THUDM/ChatGLM2-6B/blob/main/MODEL_LICENSE) +- [Phi-1.5](https://huggingface.co/microsoft/phi-1_5/resolve/main/Research%20License.docx) + +## 引用 + +如果您觉得此项目有帮助,请考虑以下列格式引用 + +```bibtex +@Misc{llama-efficient-tuning, + title = {LLaMA Efficient Tuning}, + author = {hiyouga}, + howpublished = {\url{https://github.com/hiyouga/LLaMA-Efficient-Tuning}}, + year = {2023} +} +``` + +## 致谢 + +本项目受益于 [PEFT](https://github.com/huggingface/peft)、[QLoRA](https://github.com/artidoro/qlora)、[FastChat](https://github.com/lm-sys/FastChat) 和 [OpenChatKit](https://github.com/togethercomputer/OpenChatKit),感谢以上诸位作者的付出。 + +## Star History + +![Star History Chart](https://api.star-history.com/svg?repos=hiyouga/LLaMA-Efficient-Tuning&type=Date) diff --git a/LLaMA-Efficient-Tuning/data/README.md b/LLaMA-Efficient-Tuning/data/README.md new file mode 100644 index 0000000000000000000000000000000000000000..3be493b208c7746308b8af913accb0b24b8ebe1d --- 
/dev/null
+++ b/LLaMA-Efficient-Tuning/data/README.md
@@ -0,0 +1,32 @@
+If you are using a custom dataset, please provide your dataset definition in the following format in `dataset_info.json`.
+
+```json
+"dataset_name": {
+  "hf_hub_url": "the name of the dataset repository on the HuggingFace hub. (if specified, ignore below 3 arguments)",
+  "script_url": "the name of the directory containing a dataset loading script. (if specified, ignore below 2 arguments)",
+  "file_name": "the name of the dataset file in this directory. (required if above are not specified)",
+  "file_sha1": "the SHA-1 hash value of the dataset file. (optional)",
+  "ranking": "whether the examples contain ranked responses or not. (default: false)",
+  "columns": {
+    "prompt": "the name of the column in the dataset containing the prompts. (default: instruction)",
+    "query": "the name of the column in the dataset containing the queries. (default: input)",
+    "response": "the name of the column in the dataset containing the responses. (default: output)",
+    "history": "the name of the column in the dataset containing the history of chat. (default: None)"
+  }
+}
+```
+
+Here the `prompt` and `response` columns should contain non-empty values. The `query` column will be concatenated with the `prompt` column and used as input for the model. The `history` column should contain a list where each element is a string tuple representing a query-response pair.
+
+For datasets used in reward modeling or DPO training, the `response` column should be a string list, with the preferred answers appearing first, for example:
+
+```json
+{
+  "instruction": "Question",
+  "input": "",
+  "output": [
+    "Chosen answer",
+    "Rejected answer"
+  ]
+}
+```
diff --git a/LLaMA-Efficient-Tuning/data/README_zh.md b/LLaMA-Efficient-Tuning/data/README_zh.md
new file mode 100644
index 0000000000000000000000000000000000000000..a8f62ca21ae8864443d7a0bc7ea8e52d7c253b51
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/data/README_zh.md
@@ -0,0 +1,32 @@
+如果您使用自定义数据集,请务必在 `dataset_info.json` 文件中以如下格式提供您的数据集定义。
+
+```json
+"数据集名称": {
+  "hf_hub_url": "HuggingFace上的项目地址(若指定,则忽略下列三个参数)",
+  "script_url": "包含数据加载脚本的本地文件夹名称(若指定,则忽略下列两个参数)",
+  "file_name": "该目录下数据集文件的名称(若上述参数未指定,则此项必需)",
+  "file_sha1": "数据集文件的SHA-1哈希值(可选)",
+  "ranking": "数据集是否包含排序后的回答(默认:false)",
+  "columns": {
+    "prompt": "数据集代表提示词的表头名称(默认:instruction)",
+    "query": "数据集代表请求的表头名称(默认:input)",
+    "response": "数据集代表回答的表头名称(默认:output)",
+    "history": "数据集代表历史对话的表头名称(默认:None)"
+  }
+}
+```
+
+其中 `prompt` 和 `response` 列应当是非空的字符串。`query` 列的内容将会和 `prompt` 列拼接作为模型输入。`history` 列应当是一个列表,其中每个元素是一个字符串二元组,分别代表用户请求和模型答复。
+
+对于训练奖励模型或 DPO 训练的数据集,`response` 列应当是一个字符串列表,排在前面的代表更优的答案,例如:
+
+```json
+{
+  "instruction": "Question",
+  "input": "",
+  "output": [
+    "Chosen answer",
+    "Rejected answer"
+  ]
+}
+```
diff --git a/LLaMA-Efficient-Tuning/data/belle_multiturn/belle_multiturn.py b/LLaMA-Efficient-Tuning/data/belle_multiturn/belle_multiturn.py
new file mode 100644
index 0000000000000000000000000000000000000000..4426b480615d399c58c4e6bb9c483dff4a3b1cd3
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/data/belle_multiturn/belle_multiturn.py
@@ -0,0 +1,79 @@
+import json
+import datasets
+from typing import Any, Dict, List
+
+
+_DESCRIPTION = "BELLE multiturn chat dataset."
+
+_CITATION = """\
+@article{belle2023exploring,
+  title={Exploring the Impact of Instruction Data Scaling on Large Language Models: An Empirical Study on Real-World Use Cases},
+  author={Yunjie Ji, Yong Deng, Yan Gong, Yiping Peng, Qiang Niu, Lei Zhang, Baochang Ma, Xiangang Li},
+  journal={arXiv preprint arXiv:2303.14742},
+  year={2023}
+}
+"""
+
+_HOMEPAGE = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M"
+_LICENSE = "gpl-3.0"
+_URL = "https://huggingface.co/datasets/BelleGroup/multiturn_chat_0.8M/resolve/main/multiturn_chat_0.8M.json"
+
+
+class BelleMultiturn(datasets.GeneratorBasedBuilder):
+
+    VERSION = datasets.Version("0.0.0")
+
+    def _info(self) -> datasets.DatasetInfo:
+        features = datasets.Features({
+            "instruction": datasets.Value("string"),
+            "output": datasets.Value("string"),
+            "history": datasets.Sequence(datasets.Sequence(datasets.Value("string")))
+        })
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=features,
+            homepage=_HOMEPAGE,
+            license=_LICENSE,
+            citation=_CITATION
+        )
+
+    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+        file_path = dl_manager.download(_URL)
+        return [
+            datasets.SplitGenerator(
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepath": file_path
+                }
+            )
+        ]
+
+    def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat with history
+        with open(filepath, "r", encoding="utf-8") as f:
+            for key, row in enumerate(f):
+                data = json.loads(row)
+                prompt = data["instruction"].strip()  # the flattened dialogue, e.g. "Human:...Assistant:...Human:..."
+                response = data["output"].strip()  # the final model response
+
+                # split off the last human query; the offsets skip the "Human:" and "Assistant:" markers
+                assist_idx = prompt.rfind("Assistant:")
+                human_idx = prompt.rfind("Human:")
+                query = prompt[human_idx+6:assist_idx].strip()  # len("Human:") == 6
+                prompt = prompt[:human_idx].strip()
+                history = []
+
+                # walk backwards through the remaining turns, rebuilding the history as (query, response) pairs
+                while prompt.rfind("Assistant:") != -1:
+                    assist_idx = prompt.rfind("Assistant:")
+                    human_idx = prompt.rfind("Human:")
+                    if human_idx != -1:
+                        old_query = prompt[human_idx+6:assist_idx].strip()
+                        old_resp = prompt[assist_idx+10:].strip()  # len("Assistant:") == 10
+                        history.insert(0, (old_query, old_resp))
+                    else:
+                        break
+                    prompt = prompt[:human_idx].strip()
+
+                yield key, {
+                    "instruction": query,
+                    "output": response,
+                    "history": history
+                }
diff --git a/LLaMA-Efficient-Tuning/data/dataset_info.json b/LLaMA-Efficient-Tuning/data/dataset_info.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4346f1a3f9803780d36a0b8c7968dd8de9ceaa2
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/data/dataset_info.json
@@ -0,0 +1,193 @@
+{
+  "mydata": {
+    "file_name": "output.json",
+    "file_sha1": "123456789abc"
+  },
+  "example": {
+    "script_url": "example_dataset",
+    "columns": {
+      "prompt": "instruction",
+      "query": "input",
+      "response": "output",
+      "history": "history"
+    }
+  },
+  "guanaco": {
+    "hf_hub_url": "JosephusCheung/GuanacoDataset"
+  },
+  "belle_0.5m": {
+    "hf_hub_url": "BelleGroup/train_0.5M_CN"
+  },
+  "belle_1m": {
+    "hf_hub_url": "BelleGroup/train_1M_CN"
+  },
+  "belle_2m": {
+    "hf_hub_url": "BelleGroup/train_2M_CN"
+  },
+  "belle_dialog": {
+    "hf_hub_url": "BelleGroup/generated_chat_0.4M"
+  },
+  "belle_math": {
+    "hf_hub_url": "BelleGroup/school_math_0.25M"
+  },
+  "belle_multiturn": {
+    "script_url": "belle_multiturn",
+    "columns": {
+      "prompt": "instruction",
+      "query": "",
+      "response": "output",
+      "history": "history"
+    }
+  },
+  "codealpaca": {
+    "hf_hub_url": "sahil2801/CodeAlpaca-20k"
+  },
+  "alpaca_cot": {
+    "hf_hub_url": "QingyiSi/Alpaca-CoT"
+  },
+  "firefly": {
+    "hf_hub_url": "YeungNLP/firefly-train-1.1M",
+    "columns": {
"prompt": "input", + "query": "", + "response": "target", + "history": "" + } + }, + "mathinstruct": { + "hf_hub_url": "TIGER-Lab/MathInstruct", + "columns": { + "prompt": "instruction", + "query": "", + "response": "output", + "history": "" + } + }, + "webqa": { + "hf_hub_url": "suolyer/webqa", + "columns": { + "prompt": "input", + "query": "", + "response": "output", + "history": "" + } + }, + "ultra_chat": { + "script_url": "ultra_chat", + "columns": { + "prompt": "instruction", + "query": "", + "response": "output", + "history": "history" + } + }, + "novel_tokens512_50k": { + "hf_hub_url": "zxbsmk/webnovel_cn" + }, + "adgen": { + "hf_hub_url": "HasturOfficial/adgen", + "columns": { + "prompt": "content", + "query": "", + "response": "summary", + "history": "" + } + }, + "comparison_gpt4_en": { + "file_name": "comparison_gpt4_data_en.json", + "file_sha1": "96fa18313544e22444fe20eead7754b17da452ae", + "ranking": true + }, + "comparison_gpt4_zh": { + "file_name": "comparison_gpt4_data_zh.json", + "file_sha1": "515b18ed497199131ddcc1af950345c11dc5c7fd", + "ranking": true + }, + "hh_rlhf_en": { + "script_url": "hh_rlhf_en", + "columns": { + "prompt": "instruction", + "query": "", + "response": "output", + "history": "history" + }, + "ranking": true + }, + "oaast_rm": { + "file_name": "oaast_rm.json", + "file_sha1": "622d420e9b70003b210618253bd3d9d2891d86cb", + "columns": { + "prompt": "instruction", + "query": "input", + "response": "output", + "history": "history" + }, + "ranking": true + }, + "oaast_rm_zh": { + "file_name": "oaast_rm_zh.json", + "file_sha1": "1065af1f3784dd61be5e79713a35f427b713a232", + "columns": { + "prompt": "instruction", + "query": "input", + "response": "output", + "history": "history" + }, + "ranking": true + }, + "wiki_demo": { + "file_name": "wiki_demo.txt", + "file_sha1": "b2288edb05b233e5b35250fd4b308a5fa21fa66d", + "columns": { + "prompt": "text", + "query": "", + "response": "", + "history": "" + } + }, + "refinedweb": { + "hf_hub_url": "tiiuae/falcon-refinedweb", + "columns": { + "prompt": "content", + "query": "", + "response": "", + "history": "" + } + }, + "wikipedia_en": { + "hf_hub_url": "olm/olm-wikipedia-20221220", + "columns": { + "prompt": "text", + "query": "", + "response": "", + "history": "" + } + }, + "wikipedia_zh": { + "hf_hub_url": "pleisto/wikipedia-cn-20230720-filtered", + "columns": { + "prompt": "completion", + "query": "", + "response": "", + "history": "" + } + }, + "the_stack": { + "hf_hub_url": "bigcode/the-stack", + "columns": { + "prompt": "content", + "query": "", + "response": "", + "history": "" + } + }, + "starcoder": { + "hf_hub_url": "bigcode/starcoderdata", + "columns": { + "prompt": "content", + "query": "", + "response": "", + "history": "" + } + } +} diff --git a/LLaMA-Efficient-Tuning/data/example_dataset/example_dataset.py b/LLaMA-Efficient-Tuning/data/example_dataset/example_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..db3e9ffb000bb44e30b982f98413294b943af840 --- /dev/null +++ b/LLaMA-Efficient-Tuning/data/example_dataset/example_dataset.py @@ -0,0 +1,46 @@ +import json +import datasets +from typing import Any, Dict, List + + +_DESCRIPTION = "An example of dataset for LLaMA." 
+_CITATION = "" +_HOMEPAGE = "" +_LICENSE = "" +_URL = "examples.json" + + +class ExampleDataset(datasets.GeneratorBasedBuilder): + + VERSION = datasets.Version("0.0.0") + + def _info(self) -> datasets.DatasetInfo: + features = datasets.Features({ + "instruction": datasets.Value("string"), + "input": datasets.Value("string"), + "output": datasets.Value("string"), + "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))) + }) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION + ) + + def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: + file_path = dl_manager.download(_URL) + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "filepath": file_path + } + ) + ] + + def _generate_examples(self, filepath: str) -> Dict[int, Dict[str, Any]]: + example_dataset = json.load(open(filepath, "r", encoding="utf-8")) + for key, example in enumerate(example_dataset): + yield key, example diff --git a/LLaMA-Efficient-Tuning/data/example_dataset/examples.json b/LLaMA-Efficient-Tuning/data/example_dataset/examples.json new file mode 100644 index 0000000000000000000000000000000000000000..3b4205e8f106d2f9212b6d3cc932d3c895dcf662 --- /dev/null +++ b/LLaMA-Efficient-Tuning/data/example_dataset/examples.json @@ -0,0 +1,20 @@ +[ + { + "instruction": "听起来很不错。人工智能可能在哪些方面面临挑战呢?", + "input": "", + "output": "人工智能面临的挑战包括数据隐私、安全和道德方面的问题,以及影响就业机会的自动化等问题。", + "history": [ + ["你好,你能帮我解答一个问题吗?", "当然,请问有什么问题?"], + ["我想了解人工智能的未来发展方向,你有什么想法吗?", "人工智能在未来的发展方向可能包括更强大的机器学习算法,更先进的自然语言处理技术,以及更加智能的机器人。"] + ] + }, + { + "instruction": "好的,谢谢你!", + "input": "", + "output": "不客气,有其他需要帮忙的地方可以继续问我。", + "history": [ + ["你好,能告诉我今天天气怎么样吗?", "当然可以,请问您所在的城市是哪里?"], + ["我在纽约。", "纽约今天晴间多云,气温最高约26摄氏度,最低约18摄氏度,记得注意保暖喔。"] + ] + } +] diff --git a/LLaMA-Efficient-Tuning/data/hh_rlhf_en/hh_rlhf_en.py b/LLaMA-Efficient-Tuning/data/hh_rlhf_en/hh_rlhf_en.py new file mode 100644 index 0000000000000000000000000000000000000000..8d51e4c4b583229591b851b07678ffbe2a020110 --- /dev/null +++ b/LLaMA-Efficient-Tuning/data/hh_rlhf_en/hh_rlhf_en.py @@ -0,0 +1,97 @@ +import json +import datasets +from typing import Any, Dict, List + + +_DESCRIPTION = "Human preference data about helpfulness and harmlessness for ChatGLM." 
+_CITATION = "" +_HOMEPAGE = "https://huggingface.co/datasets/Anthropic/hh-rlhf" +_LICENSE = "mit" +_URL = "https://huggingface.co/datasets/Anthropic/hh-rlhf/resolve/main/" +_URLS = { + "train": [ + _URL + "harmless-base/train.jsonl.gz", + _URL + "helpful-base/train.jsonl.gz", + _URL + "helpful-online/train.jsonl.gz", + _URL + "helpful-rejection-sampled/train.jsonl.gz" + ], + "test": [ + _URL + "harmless-base/test.jsonl.gz", + _URL + "helpful-base/test.jsonl.gz", + _URL + "helpful-online/test.jsonl.gz", + _URL + "helpful-rejection-sampled/test.jsonl.gz" + ] +} + + +class HhRlhfEn(datasets.GeneratorBasedBuilder): + + VERSION = datasets.Version("0.0.0") + + def _info(self) -> datasets.DatasetInfo: + features = datasets.Features({ + "instruction": datasets.Value("string"), + "output": datasets.Sequence(datasets.Value("string")), + "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))) + }) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION + ) + + def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: + file_path = dl_manager.download_and_extract(_URLS) + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "filepaths": file_path["train"] + } + ), + datasets.SplitGenerator( + name=datasets.Split.TEST, + gen_kwargs={ + "filepaths": file_path["test"] + } + ) + ] + + def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM + key = 0 + for filepath in filepaths: + with open(filepath, "r", encoding="utf-8") as f: + for row in f: + data = json.loads(row) + chosen = data["chosen"] + rejected = data["rejected"] + + assist_idx = rejected.rfind("\n\nAssistant: ") + r_reject = rejected[assist_idx+13:].strip() + assist_idx = chosen.rfind("\n\nAssistant: ") + r_accept = chosen[assist_idx+13:].strip() + + human_idx = chosen.rfind("\n\nHuman: ") + query = chosen[human_idx+9:assist_idx].strip() + prompt = chosen[:human_idx] + history = [] + + while prompt.rfind("\n\nAssistant: ") != -1: + assist_idx = prompt.rfind("\n\nAssistant: ") + human_idx = prompt.rfind("\n\nHuman: ") + if human_idx != -1: + old_query = prompt[human_idx+9:assist_idx].strip() + old_resp = prompt[assist_idx+13:].strip() + history.insert(0, (old_query, old_resp)) + else: + break + prompt = prompt[:human_idx] + + yield key, { + "instruction": query, + "output": [r_accept, r_reject], + "history": history + } + key += 1 diff --git a/LLaMA-Efficient-Tuning/data/output.json b/LLaMA-Efficient-Tuning/data/output.json new file mode 100644 index 0000000000000000000000000000000000000000..8be9ff67494548951bc846ee9e959153d5d1eb1a --- /dev/null +++ b/LLaMA-Efficient-Tuning/data/output.json @@ -0,0 +1,1096 @@ +[{ + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請問存到150萬元,現在應該先存股還是先買預售屋?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "想請問我有看到有人在大量拋出,阿現在可以賣嗎?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在建築業的股票中,有沒有和其他產業不同的特點", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "哪幾家公司有最高的EPS?這些公司都值得投資嗎?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "外資在買,你會跟隨他們的操作嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",我該買誰?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近一期,季度稅後淨利是否保持在正數?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "股價很委屈,應該會觸底反彈?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我看到投信在買,這意味著現在可以考慮買入嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何量測的價值評估?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請給我可以賺價差又安穩的存股標的", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如果想投資可再生能源相關股票,您能給出幾個有潛力的選擇嗎?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "是否有任何新興市場的擴展計畫?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近1日,請問有哪些股票法人或主力是否大量買進?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "今天跌好多喔,怎麼辦?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "鈦業股票的市場表現如何?", + "output": "[0, 1, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "看到外資在大量買,是否值得效仿?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近1週大戶是否增持,而羊群是否減碼?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "金屬業的股票適合買入嗎", + "output": "[0, 1, 0, 1, 0, 1, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "老師今天有買,明天應該會漲吧", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "電子臨界加工業中的關鍵技術有哪些?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "紡織產業有哪些股票和標的?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "半導體產業最近的情況,適合大量買入嗎?", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "今天跌太慘了吧,我現在該怎麼辦", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我只有10~30萬的資金,有推薦的「投資組合」嗎?", + "output": "[0, 0, 1, 0, 0, 1, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "外資買意味著什麼?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "能否提供一些適合長期持有的健康科技股票建議,以達到資本穩定增長?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "到美國設廠是利多還是利空?現在還可以買嗎?", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我想長期投資,可以現在開始定期定額嗎?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在這三支股票中,哪支較適合進行當沖操作?", + "output": "[1, 1, 0, 1, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如果想投資台灣自行車產業,是否值得將資金分散投資於不同公司?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "船運業的未來前景如何", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "公股民營的公司裡面,那些公司比較適合投資?", + "output": "[0, 0, 1, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現在的價格是多少?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "家用電子產業是否會繼續興盛", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "總統大選前要買什麼股?", + "output": "[0, 0, 0, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "是否有主力資金在單日買超1,000張以上的股票清", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "這檔如何操作,老師可以指導一下嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近股票好難沖,可以給我沖的清單嗎?", + "output": "[0, 0, 1, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "目前的趨勢是不是值得考慮賣出?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "生技類股好像很危險欸,但我又想投資,有推薦的嗎", + "output": "[0, 1, 0, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "很會賺錢的公司有那些", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近一期,月營收較上個月有明顯的正成長?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我現在買了10張,我現在應該賣掉嗎?還是等到之後再說?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "3、6、9、10月都會有「投信作帳行情」嗎?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請幫我看看傳統製造業中有哪黨比較適合買入", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請給我值得長期投資的標的", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "碰到月線了,不會再跌了吧?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "已經漲了的股票,還能買嗎?", + "output": "[0, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "兩張股票,應該一張一張賣嗎?", + "output": "[1, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如果您想每個月定期投資穩定的科技類股,有哪些適合的選擇?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "今年有沒有開放普通股股東參與增資呢? 實在看不懂", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我有10萬想存股,要存哪幾隻?", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "跟著外資買賣超、或投信買賣超名單來操作,勝算比較高嗎?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "股票已經漲了一段時間,你認為現在還有追價的價值嗎?", + "output": "[0, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "鋁業股票有哪些值得關注的?", + "output": "[0, 1, 0, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "哪一些股票最近一期,年度ROA是否超過5%?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "對於目前的市場情況,你會建議繼續持有嗎?", + "output": "[1, 1, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "籌碼已經洗乾淨,你的分析是會漲嗎?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在能源價格波動的情況下,有哪些股票可能會受益,值得投資者關注?", + "output": "[1, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "哪一些股票最近一期,年度ROE是否超過8%?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "AI發展對金融股是利好還是利空?我該買還是賣?", + "output": "[1, 1, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我要怎麼找到下一隻來投資?", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",該選哪一支?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "自動車產業適合投資嗎", + "output": "[0, 1, 0, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "醫療器械產業的長期投資價值如何?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "擁有高ROE的科技公司股票是否都值得投資?", + "output": "[0, 0, 1, 1, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "建議繼續抱嗎", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "想追股票,可以嗎?追高好嗎?", + "output": "[0, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "常常看錯方向、做錯行情,該如何判斷多頭或空頭呢?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "想每個月拿出一點錢,買穩定的股票,可以買什麼?", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "股價淨值比是否大於等於0.5?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "到底在跌什麼呢", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在股市「大空頭」期間可以做空嗎?怎麼操作最好?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我喜歡本益比低的股票,有推薦的嗎", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "公司是否屬於太陽能產業?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何選出能持續上漲的「飆股」或「成長股」?", + "output": "[0, 0, 1, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "有營收高的優質股票嗎", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的適宜買入價位大約在哪個區間?你有預估嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "給我最近20日,前10大交易分點(20日)的總買超金額是否大於總賣超金額的清單", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "是否有任何政府或監管機構的合規要求?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現增後,已經弭補累計虧損,加上今年營運開始獲利,是否已經符合上市櫃申請條件", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近一期,季度營業淨利是否呈現出正數?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "股票外資買超第一名耶,可以搶上車嗎?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "EPS最高的公司有哪幾家?他們都適合投資嗎?", + "output": "[0, 0, 1, 1, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "近期有什麼影響因素值得關注?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "想要跟進飆股的漲勢,但要如何避免踩到地雷股呢?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在與其他股票的股價比較中是否具有優勢?", + "output": "[1, 1, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "給我五支買了就會漲的標的", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "投信今天在買,你覺得明天會有上漲趨勢嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "為甚麼一直跌", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "該賣了嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "已經漲幅不小了,還值得考慮買進嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請問這支股票還能買嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "投信正在買,是否值得跟進?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我當沖賠了很多錢,想要剁手,明天追回來得及嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在現在買入是不是已經太貴了?", + "output": "[0, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在中,您會傾向選擇哪一支股票?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在電子臨界加工業中,有哪些特殊的點?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我買來的已經漲10%了欸 要繼續追高嗎", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "存股是選股重要還是堅持定期定額比較重要?", + "output": "[0, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "為什麼有人說「別用融資買股票」?有適合的時機嗎?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近1日和5日,籌碼的集中度是否呈現正向趨勢?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在這三支股票中,哪一家公司的表現更受到青睞?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "有個疑問問什麼外資都不買進啊?營收都很好的說⋯⋯", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "ROE高的股票適合購買嗎", + "output": "[0, 0, 1, 1, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我套牢在100,現在該怎麼辦?等一陣子會漲嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "會漲回來嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "電子組裝業的大哥是誰", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "當金融股成為漲勢主流時,為什麼代表多頭即將結束?", + "output": "[0, 1, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "應該要長抱嗎 還是要趕快賣掉", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近的股票表現如何?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我想存股,遇到高價股是要10股10股的買嗎?還是有更好的標的?", + "output": "[0, 0, 1, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "很昂貴,我是否可以考慮買0050來替代?", + "output": "[1, 1, 1, 1, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我想存但他太貴了,我應該買0050來代替嗎?有半導體的ETF可以做到跟台積電類似的效果嗎?", + "output": "[1, 1, 1, 1, 0, 1, 1]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近20日,有哪間股票的短期股懂券商連續買超?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我應該等跌到30再進場嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "為什麼今天跌停?是因為什麼什麼原因嗎", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "AI概念股的狀況如何?該進行調整嗎", + "output": "[0, 1, 1, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "IC設計、IC組裝和IC銷售有哪些不同的地方", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近一期,季度每股盈餘是否超過1元?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "聽說高手在進場時都會先「試單」,怎麼試才對呢?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "外資在買,我應該共襄盛舉嗎?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "智慧家居產業未來的發展趨勢如何?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "買在100還有救嗎", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的未來投資潛力如何?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的股價是否會受到全球經濟的影響?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "高科技材料產業的成長前景如何?", + "output": "[0, 1, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我投資的跌幅太大了,還有可能回升嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "還好我壓對了,要不要再加碼?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "製藥業的ETF適合買入嗎", + "output": "[0, 1, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "外資在買,這是個反向操作的機會嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何判斷股市的「第五波末升段」何時會結束?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",投資哪個比較好?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "該如何選擇並決定標的?哪種選股方法勝算較高呢?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "哪些公司具有最高的每股盈利(EPS)?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我現在買生技概念股要買那支", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "貨運航運業面臨的挑戰和機遇是什麼?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現在還可以買股票嗎?", + "output": "[0, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "高速運算產業的前景如何", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "是否處於食品加工行業", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "投資會有什麼樣的風險?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "有哪些股票最近20日,前10大交易分點(每日)是否連續買超?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "投資是否會變得更加安全?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我買的技術指標不佳,該怎麼辦", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "電子元件製造業的龍頭企業是誰?", + "output": "[0, 1, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現在適合買入嗎?你看好它嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "IC設計,,。超級比一比。", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "你覺得這支股票目前值得買入嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "看什麼信號能夠判斷是否適合買入?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "碰到月線了,不會再跌了吧?!", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "IC設計、IC組裝和IC銷售的核心區別是什麼?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在美國設廠是利多還是利空?目前還適合買進嗎?", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "可再生能源產業的現況和前景如何?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "投資半導體產業鏈需要注意的風險有哪些?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現在能夠賣出嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如果您有10萬資金,能推薦幾支消費品股票來達到穩健的投資目標?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "自駕車概念股建議繼續持有嗎", + "output": "[0, 1, 1, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現金增資的股票應該避開嗎?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "可以幫我介紹能源業嗎", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我只有10萬元,該如何開始「投資」呢?", + "output": "[0, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "3D列印產業目前的發展狀況如何?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我該放空生技股嗎", + "output": "[0, 1, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的研發項目中,是否有望成為未來增長引擎?", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我現在有好幾張,現在我應該跳船嗎?還是要等到他觸底反彈", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我想要賣得漂亮,如何設定「移動式停利」與停損呢?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我應該投資半導體產業鏈嗎", + "output": "[0, 1, 0, 1, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "生物科技股票在目前經濟環境下的風險如何?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近20日,有哪些股票的短期最威券商連續買超?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我會斷頭嗎?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "XXX股票的本益比是否大於等於10?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的市場定位和競爭策略是?", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現金股利殖利率是否大於3%?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "20MA跟60MA交會了 我該怎麼做接下來的操作", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "AI概念股拿來存股的話,要存哪一檔?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "存股應該是要存嗎?是現在開始存還是等一陣子?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近月營收是否創下10個月以上的新高?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "能否給我一些ROE高的股票選項?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何挑出「隔天會漲停」的股票?", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",存股應該要選誰?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",哪一支股票表現較好?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我套好久了,應該差不多要發動了吧?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的股價走勢對購買行為有什麼影響?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在目前的市場環境下,您是否可以提供一些值得考慮的能源公司股票投資建議?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "買43參加除息大家覺得可以嗎?還是等除息後再買", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的股價會受到市場和投資者看法的影響嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",哪支比較適合當沖?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "大家心中的目標價是多少?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何縮小範圍,找出有機會拉出較大漲幅的個股?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "某產業之下有眾多類股,如何挑出漲幅相對大的股票?", + "output": "[0, 1, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": ",哪個存股起來表現比較好", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何簡單用毛利率、營業利益率、淨利率評估一家公司的股票,值不值得持有?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在中,你會選擇哪一支?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "如何判斷個股有主力在偷偷吃貨?或主力已在倒貨?", + "output": "[0, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "要賣這支股票了嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我的績效已經超乎我的預期了 要繼續留者嗎", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我只有10~30萬的資金,該如何建立「投資組合」呢?", + "output": "[0, 0, 1, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近一次的盈餘電話會議中,管理層有提到哪些關鍵訊息?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近2日,有哪些股票的中長線股懂券商是否持續買超?", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "Ai產業現在的狀況如何?", + "output": "[0, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "公佈營收了耶,可以繼續抱嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "現在建議繼續抱者嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請問大家集團裡哪些個股比較看好,其中哪隻最推薦?", + "output": "[1, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "在投資時,如何計算出一間公司的真正價值?", + "output": "[0, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "對於,你能指導一下該如何操作嗎?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "AI發展對是有利還是不利?我現在該買台積電還是該賣?", + "output": "[1, 1, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的未來發展趨勢如何?", + "output": "[1, 0, 0, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我該買, 還是呢?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "最近的興櫃指數走向如何?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "持有價是109 請問持抱到KD線往下在賣嗎?", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "是否有外資和投信連續2天進行買超的股票清單", + "output": "[0, 0, 1, 0, 1, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "是否有穩固的供應鏈?", + "output": "[1, 0, 0, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "股價下跌,你認為這是回調後的安全時機嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請給我最近3日成交量顯著增加的清單", + "output": "[0, 0, 1, 1, 1, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "我覺得的基本面強勁,但是的表現也很亮眼,我該買哪支?", + "output": "[1, 0, 0, 0, 0, 0, 1]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "8月營收噴到1940萬 比7月營收800萬跳增1倍 請問大家明天會噴漲停嗎?\n", + "output": "[1, 0, 1, 1, 0, 0, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "的最佳買入時機是什麼?你覺得它目前合適嗎?", + "output": "[1, 0, 1, 0, 0, 0, 0]" + }, + { + "instruction": 
"你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "請給我長期績優股", + "output": "[0, 0, 0, 0, 0, 1, 0]" + }, + { + "instruction": "你是一個股市問題分析專家,你現在需要進行思考,並且給予一個裝有1跟0的列表。\\\\n可以代表任何一家股票或公司的名稱。\\\\n第一個面向是「個股分析」,如果這個問題需要檢查、分析這檔股票的狀態,則設定為1,反之則為0\\\\n第二個面向是「產業分析」,如果這個問題需要特定產業的相關知識才能回答,則設定為1,反之則為0\\\\n第三個面向是「股市制度/交易名詞」,如果這個問題需要瞭解股市的專有名詞才能回答的話,則設定為1,反之則為0\\\\n第四個面向是「google」,如果這個問題需要透過搜尋引擎的協助比較好回答的話,則設定為1,反之則為0\\\\n第五個面向是「ranking(可比較)」,如果問這個問題的人背後想要透過一些客觀條件得到股票列表的話,則設定為1,反之則為0\\\\n第六個面向是「ranking(明牌)」,如果問這個問題的人背後想要透過非客觀資料獲得股票列表進行投資的話,則設定為1,反之則為0\\\\n第七個面向是「個股比較」,如果有兩檔股票名稱想要相互比較的話,則設定為1,反之則為0\n", + "input": "多隻電動車股票基本面相似,那麼您會選擇買入哪支?", + "output": "[0, 1, 0, 0, 0, 1, 0]" + } +] \ No newline at end of file diff --git a/LLaMA-Efficient-Tuning/data/ultra_chat/ultra_chat.py b/LLaMA-Efficient-Tuning/data/ultra_chat/ultra_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..dd29311c5dc1626c1ec90b0d69fa6e2909a89538 --- /dev/null +++ b/LLaMA-Efficient-Tuning/data/ultra_chat/ultra_chat.py @@ -0,0 +1,76 @@ +import json +import datasets +from typing import Any, Dict, List + + +_DESCRIPTION = "UltraChat: Large-scale, Informative, and Diverse Multi-round Dialogue Data." + +_CITATION = """\ +@misc{UltraChat, + author = {Ding, Ning and Chen, Yulin and Xu, Bokai and Hu, Shengding and Qin, Yujia and Liu, Zhiyuan and Sun, Maosong and Zhou, Bowen}, + title = {UltraChat: A Large-scale Auto-generated Multi-round Dialogue Data}, + year = {2023}, + publisher = {GitHub}, + journal = {GitHub repository}, + howpublished = {\\url{https://github.com/thunlp/ultrachat}}, +} +""" + +_HOMEPAGE = "https://huggingface.co/datasets/stingning/ultrachat" +_LICENSE = "cc-by-nc-4.0" +_BASE_DATA_URL = "https://huggingface.co/datasets/stingning/ultrachat/resolve/main/train_{idx}.jsonl" + + +class BelleMultiturn(datasets.GeneratorBasedBuilder): + + VERSION = datasets.Version("0.0.0") + + def _info(self) -> datasets.DatasetInfo: + features = datasets.Features({ + "instruction": datasets.Value("string"), + "output": datasets.Value("string"), + "history": datasets.Sequence(datasets.Sequence(datasets.Value("string"))) + }) + return datasets.DatasetInfo( + description=_DESCRIPTION, + features=features, + homepage=_HOMEPAGE, + license=_LICENSE, + citation=_CITATION + ) + + def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]: + file_paths = [dl_manager.download(_BASE_DATA_URL.format(idx=idx)) for idx in range(9)] # multiple shards + return [ + datasets.SplitGenerator( + name=datasets.Split.TRAIN, + gen_kwargs={ + "filepaths": file_paths + } + ) + ] + + def _generate_examples(self, filepaths: List[str]) -> Dict[int, Dict[str, Any]]: # generate multi-turn chat for ChatGLM + for filepath in filepaths: + with open(filepath, "r", encoding="utf-8") as f: + for row in f: + try: + data = json.loads(row) + except: + continue + key = data["id"] + content = data["data"] + if len(content) % 2 == 1: + content.pop(-1) + if len(content) < 2: + continue + + query = content[-2] + response = content[-1] + history = [[content[2*i], content[2*i+1]] for i in range(len(content) // 2 - 1)] + 
+ yield key, { + "instruction": query, + "output": response, + "history": history + } diff --git a/LLaMA-Efficient-Tuning/data/wiki_demo.txt b/LLaMA-Efficient-Tuning/data/wiki_demo.txt new file mode 100644 index 0000000000000000000000000000000000000000..4b6fd2b06ee9302e157d98690ea9dc133c84b273 --- /dev/null +++ b/LLaMA-Efficient-Tuning/data/wiki_demo.txt @@ -0,0 +1,50 @@ +Machine learning (ML) is a field devoted to understanding and building methods that let machines "learn" – that is, methods that leverage data to improve computer performance on some set of tasks. +Machine learning algorithms build a model based on sample data, known as training data, in order to make predictions or decisions without being explicitly programmed to do so. Machine learning algorithms are used in a wide variety of applications, such as in medicine, email filtering, speech recognition, agriculture, and computer vision, where it is difficult or unfeasible to develop conventional algorithms to perform the needed tasks. +A subset of machine learning is closely related to computational statistics, which focuses on making predictions using computers, but not all machine learning is statistical learning. The study of mathematical optimization delivers methods, theory and application domains to the field of machine learning. Data mining is a related field of study, focusing on exploratory data analysis through unsupervised learning. +Some implementations of machine learning use data and neural networks in a way that mimics the working of a biological brain. +In its application across business problems, machine learning is also referred to as predictive analytics. +Learning algorithms work on the basis that strategies, algorithms, and inferences that worked well in the past are likely to continue working well in the future. These inferences can sometimes be obvious, such as "since the sun rose every morning for the last 10,000 days, it will probably rise tomorrow morning as well". Other times, they can be more nuanced, such as "X% of families have geographically separate species with color variants, so there is a Y% chance that undiscovered black swans exist". +Machine learning programs can perform tasks without being explicitly programmed to do so. It involves computers learning from data provided so that they carry out certain tasks. For simple tasks assigned to computers, it is possible to program algorithms telling the machine how to execute all steps required to solve the problem at hand; on the computer's part, no learning is needed. For more advanced tasks, it can be challenging for a human to manually create the needed algorithms. In practice, it can turn out to be more effective to help the machine develop its own algorithm, rather than having human programmers specify every needed step. +The discipline of machine learning employs various approaches to teach computers to accomplish tasks where no fully satisfactory algorithm is available. In cases where vast numbers of potential answers exist, one approach is to label some of the correct answers as valid. This can then be used as training data for the computer to improve the algorithm(s) it uses to determine correct answers. For example, to train a system for the task of digital character recognition, the MNIST dataset of handwritten digits has often been used. +The term machine learning was coined in 1959 by Arthur Samuel, an IBM employee and pioneer in the field of computer gaming and artificial intelligence. 
The synonym self-teaching computers was also used in this time period. +By the early 1960s an experimental "learning machine" with punched tape memory, called Cybertron, had been developed by Raytheon Company to analyze sonar signals, electrocardiograms, and speech patterns using rudimentary reinforcement learning. It was repetitively "trained" by a human operator/teacher to recognize patterns and equipped with a "goof" button to cause it to re-evaluate incorrect decisions. A representative book on research into machine learning during the 1960s was Nilsson's book on Learning Machines, dealing mostly with machine learning for pattern classification. Interest related to pattern recognition continued into the 1970s, as described by Duda and Hart in 1973. In 1981 a report was given on using teaching strategies so that a neural network learns to recognize 40 characters (26 letters, 10 digits, and 4 special symbols) from a computer terminal. +Tom M. Mitchell provided a widely quoted, more formal definition of the algorithms studied in the machine learning field: "A computer program is said to learn from experience E with respect to some class of tasks T and performance measure P if its performance at tasks in T, as measured by P, improves with experience E." This definition of the tasks in which machine learning is concerned offers a fundamentally operational definition rather than defining the field in cognitive terms. This follows Alan Turing's proposal in his paper "Computing Machinery and Intelligence", in which the question "Can machines think?" is replaced with the question "Can machines do what we (as thinking entities) can do?". +Modern-day machine learning has two objectives, one is to classify data based on models which have been developed, the other purpose is to make predictions for future outcomes based on these models. A hypothetical algorithm specific to classifying data may use computer vision of moles coupled with supervised learning in order to train it to classify the cancerous moles. A machine learning algorithm for stock trading may inform the trader of future potential predictions. +As a scientific endeavor, machine learning grew out of the quest for artificial intelligence (AI). In the early days of AI as an academic discipline, some researchers were interested in having machines learn from data. They attempted to approach the problem with various symbolic methods, as well as what were then termed "neural networks"; these were mostly perceptrons and other models that were later found to be reinventions of the generalized linear models of statistics. Probabilistic reasoning was also employed, especially in automated medical diagnosis.: 488  +However, an increasing emphasis on the logical, knowledge-based approach caused a rift between AI and machine learning. Probabilistic systems were plagued by theoretical and practical problems of data acquisition and representation.: 488  By 1980, expert systems had come to dominate AI, and statistics was out of favor. Work on symbolic/knowledge-based learning did continue within AI, leading to inductive logic programming, but the more statistical line of research was now outside the field of AI proper, in pattern recognition and information retrieval.: 708–710, 755  Neural networks research had been abandoned by AI and computer science around the same time. This line, too, was continued outside the AI/CS field, as "connectionism", by researchers from other disciplines including Hopfield, Rumelhart, and Hinton. 
Their main success came in the mid-1980s with the reinvention of backpropagation.: 25  +Machine learning (ML), reorganized and recognized as its own field, started to flourish in the 1990s. The field changed its goal from achieving artificial intelligence to tackling solvable problems of a practical nature. It shifted focus away from the symbolic approaches it had inherited from AI, and toward methods and models borrowed from statistics, fuzzy logic, and probability theory. +Machine learning and data mining often employ the same methods and overlap significantly, but while machine learning focuses on prediction, based on known properties learned from the training data, data mining focuses on the discovery of (previously) unknown properties in the data (this is the analysis step of knowledge discovery in databases). Data mining uses many machine learning methods, but with different goals; on the other hand, machine learning also employs data mining methods as "unsupervised learning" or as a preprocessing step to improve learner accuracy. Much of the confusion between these two research communities (which do often have separate conferences and separate journals, ECML PKDD being a major exception) comes from the basic assumptions they work with: in machine learning, performance is usually evaluated with respect to the ability to reproduce known knowledge, while in knowledge discovery and data mining (KDD) the key task is the discovery of previously unknown knowledge. Evaluated with respect to known knowledge, an uninformed (unsupervised) method will easily be outperformed by other supervised methods, while in a typical KDD task, supervised methods cannot be used due to the unavailability of training data. +Machine learning also has intimate ties to optimization: many learning problems are formulated as minimization of some loss function on a training set of examples. Loss functions express the discrepancy between the predictions of the model being trained and the actual problem instances (for example, in classification, one wants to assign a label to instances, and models are trained to correctly predict the pre-assigned labels of a set of examples). +The difference between optimization and machine learning arises from the goal of generalization: while optimization algorithms can minimize the loss on a training set, machine learning is concerned with minimizing the loss on unseen samples. Characterizing the generalization of various learning algorithms is an active topic of current research, especially for deep learning algorithms. +Machine learning and statistics are closely related fields in terms of methods, but distinct in their principal goal: statistics draws population inferences from a sample, while machine learning finds generalizable predictive patterns. According to Michael I. Jordan, the ideas of machine learning, from methodological principles to theoretical tools, have had a long pre-history in statistics. He also suggested the term data science as a placeholder to call the overall field. +Leo Breiman distinguished two statistical modeling paradigms: data model and algorithmic model, wherein "algorithmic model" means more or less the machine learning algorithms like Random Forest. +Some statisticians have adopted methods from machine learning, leading to a combined field that they call statistical learning. 
+Analytical and computational techniques derived from deep-rooted physics of disordered systems can be extended to large-scale problems, including machine learning, e.g., to analyze the weight space of deep neural networks. Statistical physics is thus finding applications in the area of medical diagnostics. +A core objective of a learner is to generalize from its experience. Generalization in this context is the ability of a learning machine to perform accurately on new, unseen examples/tasks after having experienced a learning data set. The training examples come from some generally unknown probability distribution (considered representative of the space of occurrences) and the learner has to build a general model about this space that enables it to produce sufficiently accurate predictions in new cases. +The computational analysis of machine learning algorithms and their performance is a branch of theoretical computer science known as computational learning theory via the Probably Approximately Correct Learning (PAC) model. Because training sets are finite and the future is uncertain, learning theory usually does not yield guarantees of the performance of algorithms. Instead, probabilistic bounds on the performance are quite common. The bias–variance decomposition is one way to quantify generalization error. +For the best performance in the context of generalization, the complexity of the hypothesis should match the complexity of the function underlying the data. If the hypothesis is less complex than the function, then the model has under fitted the data. If the complexity of the model is increased in response, then the training error decreases. But if the hypothesis is too complex, then the model is subject to overfitting and generalization will be poorer. +In addition to performance bounds, learning theorists study the time complexity and feasibility of learning. In computational learning theory, a computation is considered feasible if it can be done in polynomial time. There are two kinds of time complexity results: Positive results show that a certain class of functions can be learned in polynomial time. Negative results show that certain classes cannot be learned in polynomial time. +Machine learning approaches are traditionally divided into three broad categories, which correspond to learning paradigms, depending on the nature of the "signal" or "feedback" available to the learning system: +Supervised learning: The computer is presented with example inputs and their desired outputs, given by a "teacher", and the goal is to learn a general rule that maps inputs to outputs. +Unsupervised learning: No labels are given to the learning algorithm, leaving it on its own to find structure in its input. Unsupervised learning can be a goal in itself (discovering hidden patterns in data) or a means towards an end (feature learning). +Reinforcement learning: A computer program interacts with a dynamic environment in which it must perform a certain goal (such as driving a vehicle or playing a game against an opponent). As it navigates its problem space, the program is provided feedback that's analogous to rewards, which it tries to maximize. Although each algorithm has advantages and limitations, no single algorithm works for all problems. +Supervised learning algorithms build a mathematical model of a set of data that contains both the inputs and the desired outputs. The data is known as training data, and consists of a set of training examples. 
Each training example has one or more inputs and the desired output, also known as a supervisory signal. In the mathematical model, each training example is represented by an array or vector, sometimes called a feature vector, and the training data is represented by a matrix. Through iterative optimization of an objective function, supervised learning algorithms learn a function that can be used to predict the output associated with new inputs. An optimal function will allow the algorithm to correctly determine the output for inputs that were not a part of the training data. An algorithm that improves the accuracy of its outputs or predictions over time is said to have learned to perform that task. +Types of supervised-learning algorithms include active learning, classification and regression. Classification algorithms are used when the outputs are restricted to a limited set of values, and regression algorithms are used when the outputs may have any numerical value within a range. As an example, for a classification algorithm that filters emails, the input would be an incoming email, and the output would be the name of the folder in which to file the email. +Similarity learning is an area of supervised machine learning closely related to regression and classification, but the goal is to learn from examples using a similarity function that measures how similar or related two objects are. It has applications in ranking, recommendation systems, visual identity tracking, face verification, and speaker verification. +Unsupervised learning algorithms take a set of data that contains only inputs, and find structure in the data, like grouping or clustering of data points. The algorithms, therefore, learn from test data that has not been labeled, classified or categorized. Instead of responding to feedback, unsupervised learning algorithms identify commonalities in the data and react based on the presence or absence of such commonalities in each new piece of data. A central application of unsupervised learning is in the field of density estimation in statistics, such as finding the probability density function. Though unsupervised learning encompasses other domains involving summarizing and explaining data features. Unsupervised learning algorithms streamlined the process of survey and graph large indel based haplotypes of a gene of interest from pan-genome. +Cluster analysis is the assignment of a set of observations into subsets (called clusters) so that observations within the same cluster are similar according to one or more predesignated criteria, while observations drawn from different clusters are dissimilar. Different clustering techniques make different assumptions on the structure of the data, often defined by some similarity metric and evaluated, for example, by internal compactness, or the similarity between members of the same cluster, and separation, the difference between clusters. Other methods are based on estimated density and graph connectivity. +Semi-supervised learning falls between unsupervised learning (without any labeled training data) and supervised learning (with completely labeled training data). Some of the training examples are missing training labels, yet many machine-learning researchers have found that unlabeled data, when used in conjunction with a small amount of labeled data, can produce a considerable improvement in learning accuracy. 
+In weakly supervised learning, the training labels are noisy, limited, or imprecise; however, these labels are often cheaper to obtain, resulting in larger effective training sets. +Reinforcement learning is an area of machine learning concerned with how software agents ought to take actions in an environment so as to maximize some notion of cumulative reward. Due to its generality, the field is studied in many other disciplines, such as game theory, control theory, operations research, information theory, simulation-based optimization, multi-agent systems, swarm intelligence, statistics and genetic algorithms. In machine learning, the environment is typically represented as a Markov decision process (MDP). Many reinforcements learning algorithms use dynamic programming techniques. Reinforcement learning algorithms do not assume knowledge of an exact mathematical model of the MDP and are used when exact models are infeasible. Reinforcement learning algorithms are used in autonomous vehicles or in learning to play a game against a human opponent. +Dimensionality reduction is a process of reducing the number of random variables under consideration by obtaining a set of principal variables. In other words, it is a process of reducing the dimension of the feature set, also called the "number of features". Most of the dimensionality reduction techniques can be considered as either feature elimination or extraction. One of the popular methods of dimensionality reduction is principal component analysis (PCA). PCA involves changing higher-dimensional data (e.g., 3D) to a smaller space (e.g., 2D). This results in a smaller dimension of data (2D instead of 3D), while keeping all original variables in the model without changing the data. The manifold hypothesis proposes that high-dimensional data sets lie along low-dimensional manifolds, and many dimensionality reduction techniques make this assumption, leading to the area of manifold learning and manifold regularization. +Although machine learning has been transformative in some fields, machine-learning programs often fail to deliver expected results. Reasons for this are numerous: lack of (suitable) data, lack of access to the data, data bias, privacy problems, badly chosen tasks and algorithms, wrong tools and people, lack of resources, and evaluation problems. +In 2018, a self-driving car from Uber failed to detect a pedestrian, who was killed after a collision. Attempts to use machine learning in healthcare with the IBM Watson system failed to deliver even after years of time and billions of dollars invested. +Machine learning has been used as a strategy to update the evidence related to a systematic review and increased reviewer burden related to the growth of biomedical literature. While it has improved with training sets, it has not yet developed sufficiently to reduce the workload burden without limiting the necessary sensitivity for the findings research themselves. +Machine learning approaches in particular can suffer from different data biases. A machine learning system trained specifically on current customers may not be able to predict the needs of new customer groups that are not represented in the training data. When trained on human-made data, machine learning is likely to pick up the constitutional and unconscious biases already present in society. Language models learned from data have been shown to contain human-like biases. 
Machine learning systems used for criminal risk assessment have been found to be biased against black people. In 2015, Google photos would often tag black people as gorillas, and in 2018 this still was not well resolved, but Google reportedly was still using the workaround to remove all gorillas from the training data, and thus was not able to recognize real gorillas at all. Similar issues with recognizing non-white people have been found in many other systems. In 2016, Microsoft tested a chatbot that learned from Twitter, and it quickly picked up racist and sexist language. Because of such challenges, the effective use of machine learning may take longer to be adopted in other domains. Concern for fairness in machine learning, that is, reducing bias in machine learning and propelling its use for human good is increasingly expressed by artificial intelligence scientists, including Fei-Fei Li, who reminds engineers that "There's nothing artificial about AI...It's inspired by people, it's created by people, and—most importantly—it impacts people. It is a powerful tool we are only just beginning to understand, and that is a profound responsibility." +Learners can also disappoint by "learning the wrong lesson". A toy example is that an image classifier trained only on pictures of brown horses and black cats might conclude that all brown patches are likely to be horses. A real-world example is that, unlike humans, current image classifiers often do not primarily make judgments from the spatial relationship between components of the picture, and they learn relationships between pixels that humans are oblivious to, but that still correlate with images of certain types of real objects. Modifying these patterns on a legitimate image can result in "adversarial" images that the system misclassifies. +Adversarial vulnerabilities can also result in nonlinear systems, or from non-pattern perturbations. Some systems are so brittle that changing a single adversarial pixel predictably induces misclassification.[citation needed] Machine learning models are often vulnerable to manipulation and/or evasion via adversarial machine learning. +Researchers have demonstrated how backdoors can be placed undetectably into classifying (e.g., for categories "spam" and well-visible "not spam" of posts) machine learning models which are often developed and/or trained by third parties. Parties can change the classification of any input, including in cases for which a type of data/software transparency is provided, possibly including white-box access. +Machine learning poses a host of ethical questions. Systems that are trained on datasets collected with biases may exhibit these biases upon use (algorithmic bias), thus digitizing cultural prejudices. For example, in 1988, the UK's Commission for Racial Equality found that St. George's Medical School had been using a computer program trained from data of previous admissions staff and this program had denied nearly 60 candidates who were found to be either women or had non-European sounding names. Using job hiring data from a firm with racist hiring policies may lead to a machine learning system duplicating the bias by scoring job applicants by similarity to previous successful applicants. Responsible collection of data and documentation of algorithmic rules used by a system thus is a critical part of machine learning. +AI can be well-equipped to make decisions in technical fields, which rely heavily on data and historical information. 
These decisions rely on the objectivity and logical reasoning. Because human languages contain biases, machines trained on language corpora will necessarily also learn these biases. +Other forms of ethical challenges, not related to personal biases, are seen in health care. There are concerns among health care professionals that these systems might not be designed in the public's interest but as income-generating machines. This is especially true in the United States where there is a long-standing ethical dilemma of improving health care, but also increase profits. For example, the algorithms could be designed to provide patients with unnecessary tests or medication in which the algorithm's proprietary owners hold stakes. There is potential for machine learning in health care to provide professionals an additional tool to diagnose, medicate, and plan recovery paths for patients, but this requires these biases to be mitigated. +Since the 2010s, advances in both machine learning algorithms and computer hardware have led to more efficient methods for training deep neural networks (a particular narrow subdomain of machine learning) that contain many layers of non-linear hidden units. By 2019, graphic processing units (GPUs), often with AI-specific enhancements, had displaced CPUs as the dominant method of training large-scale commercial cloud AI. OpenAI estimated the hardware computing used in the largest deep learning projects from AlexNet (2012) to AlphaZero (2017), and found a 300,000-fold increase in the amount of compute required, with a doubling-time trendline of 3.4 months. diff --git a/LLaMA-Efficient-Tuning/pyproject.toml b/LLaMA-Efficient-Tuning/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..638dd9c54fcbd5b70ee15947e45dc1a58dbfa458 --- /dev/null +++ b/LLaMA-Efficient-Tuning/pyproject.toml @@ -0,0 +1,3 @@ +[build-system] +requires = ["setuptools>=61.0"] +build-backend = "setuptools.build_meta" diff --git a/LLaMA-Efficient-Tuning/requirements.txt b/LLaMA-Efficient-Tuning/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..1d36fd3382b63302e738f79b9d10d60177fffa41 --- /dev/null +++ b/LLaMA-Efficient-Tuning/requirements.txt @@ -0,0 +1,19 @@ +torch>=1.13.1 +transformers>=4.30.0 +datasets>=2.12.0 +accelerate>=0.21.0 +peft>=0.4.0 +trl>=0.7.1 +scipy +sentencepiece +protobuf +tiktoken +jieba +rouge-chinese +nltk +gradio>=3.36.0 +uvicorn +pydantic==1.10.11 +fastapi==0.95.1 +sse-starlette +matplotlib diff --git a/LLaMA-Efficient-Tuning/setup.py b/LLaMA-Efficient-Tuning/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..930dabb2ae8421924722332f155fbbf0f439db19 --- /dev/null +++ b/LLaMA-Efficient-Tuning/setup.py @@ -0,0 +1,55 @@ +import os +import re +from setuptools import setup, find_packages + + +def get_version(): + with open(os.path.join("src", "llmtuner", "__init__.py"), "r", encoding="utf-8") as f: + file_content = f.read() + pattern = r"{0}\W*=\W*\"([^\"]+)\"".format("__version__") + version, = re.findall(pattern, file_content) + return version + + +def get_requires(): + with open("requirements.txt", "r", encoding="utf-8") as f: + file_content = f.read() + lines = [line.strip() for line in file_content.strip().split("\n") if not line.startswith("#")] + return lines + + +def main(): + + setup( + name="llmtuner", + version=get_version(), + author="hiyouga", + author_email="hiyouga" "@" "buaa.edu.cn", + description="Easy-to-use fine-tuning framework using PEFT", + 
long_description=open("README.md", "r", encoding="utf-8").read(),
+        long_description_content_type="text/markdown",
+        keywords=["LLaMA", "BLOOM", "Falcon", "LLM", "ChatGPT", "transformer", "pytorch", "deep learning"],
+        license="Apache 2.0 License",
+        url="https://github.com/hiyouga/LLaMA-Efficient-Tuning",
+        package_dir={"": "src"},
+        packages=find_packages("src"),
+        python_requires=">=3.8.0",
+        install_requires=get_requires(),
+        classifiers=[
+            "Development Status :: 3 - Alpha",
+            "Intended Audience :: Developers",
+            "Intended Audience :: Education",
+            "Intended Audience :: Science/Research",
+            "License :: OSI Approved :: Apache Software License",
+            "Operating System :: OS Independent",
+            "Programming Language :: Python :: 3",
+            "Programming Language :: Python :: 3.8",
+            "Programming Language :: Python :: 3.9",
+            "Programming Language :: Python :: 3.10",
+            "Topic :: Scientific/Engineering :: Artificial Intelligence",
+        ]
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/LLaMA-Efficient-Tuning/src/api_demo.py b/LLaMA-Efficient-Tuning/src/api_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..777f9dcff8311209e6fb8ae7273ba5e399e33a17
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/api_demo.py
@@ -0,0 +1,14 @@
+import uvicorn
+
+from llmtuner import ChatModel, create_app
+
+
+def main():
+    chat_model = ChatModel()
+    app = create_app(chat_model)
+    print("Visit http://localhost:8000/docs for the API documentation.")  # printed before uvicorn.run(), which blocks until the server stops
+    uvicorn.run(app, host="0.0.0.0", port=8000, workers=1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/LLaMA-Efficient-Tuning/src/cli_demo.py b/LLaMA-Efficient-Tuning/src/cli_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..05e8c8eb32f9cb4db102b925c900efcdd6e59cb1
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/cli_demo.py
@@ -0,0 +1,38 @@
+from llmtuner import ChatModel
+
+
+def main():
+    chat_model = ChatModel()
+    history = []
+    print("Welcome to the CLI application. Use `clear` to clear the history and `exit` to quit.")
+
+    while True:
+        try:
+            query = input("\nUser: ")
+        except UnicodeDecodeError:
+            print("Decoding error detected in the input, please set the terminal encoding to utf-8.")
+            continue
+        except Exception:
+            raise
+
+        if query.strip() == "exit":
+            break
+
+        if query.strip() == "clear":
+            history = []
+            print("History has been removed.")
+            continue
+
+        print("Assistant: ", end="", flush=True)
+
+        response = ""
+        for new_text in chat_model.stream_chat(query, history):
+            print(new_text, end="", flush=True)
+            response += new_text
+        print()
+
+        history = history + [(query, response)]
+
+
+if __name__ == "__main__":
+    main()
diff --git a/LLaMA-Efficient-Tuning/src/export_model.py b/LLaMA-Efficient-Tuning/src/export_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..4baeb2c3eb9423ace07a32bd019587596f6083cf
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/export_model.py
@@ -0,0 +1,9 @@
+from llmtuner import export_model
+
+
+def main():
+    export_model()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..320be69cc9df35fb28a254551f7a849a313ffa40
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/__init__.py
@@ -0,0 +1,9 @@
+# Level: api, webui > chat > tuner > dsets > extras, hparams
+
+from llmtuner.api import create_app
+from llmtuner.chat import ChatModel
+from llmtuner.tuner import export_model, 
run_exp +from llmtuner.webui import create_ui, create_web_demo + + +__version__ = "0.1.8" diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/api/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/api/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b3ce183a5a14b791b19e506dd1086a625da81965 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/api/__init__.py @@ -0,0 +1 @@ +from llmtuner.api.app import create_app diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/api/app.py b/LLaMA-Efficient-Tuning/src/llmtuner/api/app.py new file mode 100644 index 0000000000000000000000000000000000000000..41a7fe9a39df6ef8ff58ef70bde5c70f72ce7408 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/api/app.py @@ -0,0 +1,126 @@ +import uvicorn +from fastapi import FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from contextlib import asynccontextmanager +from sse_starlette import EventSourceResponse +from typing import List, Tuple + +from llmtuner.extras.misc import torch_gc +from llmtuner.chat import ChatModel +from llmtuner.api.protocol import ( + Role, + Finish, + ModelCard, + ModelList, + ChatMessage, + DeltaMessage, + ChatCompletionRequest, + ChatCompletionResponse, + ChatCompletionStreamResponse, + ChatCompletionResponseChoice, + ChatCompletionResponseStreamChoice, + ChatCompletionResponseUsage +) + + +@asynccontextmanager +async def lifespan(app: FastAPI): # collects GPU memory + yield + torch_gc() + + +def create_app(chat_model: ChatModel) -> FastAPI: + app = FastAPI(lifespan=lifespan) + + app.add_middleware( + CORSMiddleware, + allow_origins=["*"], + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], + ) + + @app.get("/v1/models", response_model=ModelList) + async def list_models(): + model_card = ModelCard(id="gpt-3.5-turbo") + return ModelList(data=[model_card]) + + @app.post("/v1/chat/completions", response_model=ChatCompletionResponse) + async def create_chat_completion(request: ChatCompletionRequest): + if len(request.messages) < 1 or request.messages[-1].role != Role.USER: + raise HTTPException(status_code=400, detail="Invalid request") + + query = request.messages[-1].content + prev_messages = request.messages[:-1] + if len(prev_messages) > 0 and prev_messages[0].role == Role.SYSTEM: + system = prev_messages.pop(0).content + else: + system = None + + history = [] + if len(prev_messages) % 2 == 0: + for i in range(0, len(prev_messages), 2): + if prev_messages[i].role == Role.USER and prev_messages[i+1].role == Role.ASSISTANT: + history.append([prev_messages[i].content, prev_messages[i+1].content]) + + if request.stream: + generate = predict(query, history, system, request) + return EventSourceResponse(generate, media_type="text/event-stream") + + response, (prompt_length, response_length) = chat_model.chat( + query, history, system, temperature=request.temperature, top_p=request.top_p, max_new_tokens=request.max_tokens + ) + + usage = ChatCompletionResponseUsage( + prompt_tokens=prompt_length, + completion_tokens=response_length, + total_tokens=prompt_length+response_length + ) + + choice_data = ChatCompletionResponseChoice( + index=0, + message=ChatMessage(role=Role.ASSISTANT, content=response), + finish_reason=Finish.STOP + ) + + return ChatCompletionResponse(model=request.model, choices=[choice_data], usage=usage) + + async def predict(query: str, history: List[Tuple[str, str]], system: str, request: ChatCompletionRequest): + choice_data = ChatCompletionResponseStreamChoice( + index=0, + 
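            # the first streamed chunk carries only the assistant role; the generated text follows in later deltas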
delta=DeltaMessage(role=Role.ASSISTANT), + finish_reason=None + ) + chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data]) + yield chunk.json(exclude_unset=True, ensure_ascii=False) + + for new_text in chat_model.stream_chat( + query, history, system, temperature=request.temperature, top_p=request.top_p, max_new_tokens=request.max_tokens + ): + if len(new_text) == 0: + continue + + choice_data = ChatCompletionResponseStreamChoice( + index=0, + delta=DeltaMessage(content=new_text), + finish_reason=None + ) + chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data]) + yield chunk.json(exclude_unset=True, ensure_ascii=False) + + choice_data = ChatCompletionResponseStreamChoice( + index=0, + delta=DeltaMessage(), + finish_reason=Finish.STOP + ) + chunk = ChatCompletionStreamResponse(model=request.model, choices=[choice_data]) + yield chunk.json(exclude_unset=True, ensure_ascii=False) + yield "[DONE]" + + return app + + +if __name__ == "__main__": + chat_model = ChatModel() + app = create_app(chat_model) + uvicorn.run(app, host="0.0.0.0", port=8000, workers=1) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/api/protocol.py b/LLaMA-Efficient-Tuning/src/llmtuner/api/protocol.py new file mode 100644 index 0000000000000000000000000000000000000000..cba0b6a6e60e34473897c41e74f0c2afe86e84e4 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/api/protocol.py @@ -0,0 +1,85 @@ +import time +from enum import Enum +from pydantic import BaseModel, Field +from typing import List, Optional + + +class Role(str, Enum): + USER = "user" + ASSISTANT = "assistant" + SYSTEM = "system" + + +class Finish(str, Enum): + STOP = "stop" + LENGTH = "length" + + +class ModelCard(BaseModel): + id: str + object: Optional[str] = "model" + created: Optional[int] = Field(default_factory=lambda: int(time.time())) + owned_by: Optional[str] = "owner" + root: Optional[str] = None + parent: Optional[str] = None + permission: Optional[list] = [] + + +class ModelList(BaseModel): + object: Optional[str] = "list" + data: Optional[List[ModelCard]] = [] + + +class ChatMessage(BaseModel): + role: Role + content: str + + +class DeltaMessage(BaseModel): + role: Optional[Role] = None + content: Optional[str] = None + + +class ChatCompletionRequest(BaseModel): + model: str + messages: List[ChatMessage] + temperature: Optional[float] = None + top_p: Optional[float] = None + n: Optional[int] = 1 + max_tokens: Optional[int] = None + stream: Optional[bool] = False + + +class ChatCompletionResponseChoice(BaseModel): + index: int + message: ChatMessage + finish_reason: Finish + + +class ChatCompletionResponseStreamChoice(BaseModel): + index: int + delta: DeltaMessage + finish_reason: Optional[Finish] = None + + +class ChatCompletionResponseUsage(BaseModel): + prompt_tokens: int + completion_tokens: int + total_tokens: int + + +class ChatCompletionResponse(BaseModel): + id: Optional[str] = "chatcmpl-default" + object: Optional[str] = "chat.completion" + created: Optional[int] = Field(default_factory=lambda: int(time.time())) + model: str + choices: List[ChatCompletionResponseChoice] + usage: ChatCompletionResponseUsage + + +class ChatCompletionStreamResponse(BaseModel): + id: Optional[str] = "chatcmpl-default" + object: Optional[str] = "chat.completion.chunk" + created: Optional[int] = Field(default_factory=lambda: int(time.time())) + model: str + choices: List[ChatCompletionResponseStreamChoice] diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/chat/__init__.py 
b/LLaMA-Efficient-Tuning/src/llmtuner/chat/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ba240d05f3b999a460a8e8cb01d4bba5adad24cf --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/chat/__init__.py @@ -0,0 +1 @@ +from llmtuner.chat.stream_chat import ChatModel diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/chat/stream_chat.py b/LLaMA-Efficient-Tuning/src/llmtuner/chat/stream_chat.py new file mode 100644 index 0000000000000000000000000000000000000000..af785dd30534049f99b46720ce3ad94ae959961f --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/chat/stream_chat.py @@ -0,0 +1,101 @@ +import torch +from typing import Any, Dict, Generator, List, Optional, Tuple +from threading import Thread +from transformers import GenerationConfig, TextIteratorStreamer + +from llmtuner.extras.misc import dispatch_model, get_logits_processor +from llmtuner.extras.template import get_template_and_fix_tokenizer +from llmtuner.tuner.core import get_infer_args, load_model_and_tokenizer + + +class ChatModel: + + def __init__(self, args: Optional[Dict[str, Any]] = None) -> None: + model_args, data_args, finetuning_args, self.generating_args = get_infer_args(args) + self.model, self.tokenizer = load_model_and_tokenizer(model_args, finetuning_args) + self.tokenizer.padding_side = "left" + self.model = dispatch_model(self.model) + self.template = get_template_and_fix_tokenizer(data_args.template, self.tokenizer) + self.system_prompt = data_args.system_prompt + + def process_args( + self, + query: str, + history: Optional[List[Tuple[str, str]]] = None, + system: Optional[str] = None, + **input_kwargs + ) -> Tuple[Dict[str, Any], int]: + system = system or self.system_prompt + + prompt, _ = self.template.encode_oneturn( + tokenizer=self.tokenizer, query=query, resp="", history=history, system=system + ) + input_ids = torch.tensor([prompt], device=self.model.device) + prompt_length = len(input_ids[0]) + + do_sample = input_kwargs.pop("do_sample", None) + temperature = input_kwargs.pop("temperature", None) + top_p = input_kwargs.pop("top_p", None) + top_k = input_kwargs.pop("top_k", None) + repetition_penalty = input_kwargs.pop("repetition_penalty", None) + max_length = input_kwargs.pop("max_length", None) + max_new_tokens = input_kwargs.pop("max_new_tokens", None) + + generating_args = self.generating_args.to_dict() + generating_args.update(dict( + do_sample=do_sample if do_sample is not None else generating_args["do_sample"], + temperature=temperature or generating_args["temperature"], + top_p=top_p or generating_args["top_p"], + top_k=top_k or generating_args["top_k"], + repetition_penalty=repetition_penalty or generating_args["repetition_penalty"], + eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids, + pad_token_id=self.tokenizer.pad_token_id + )) + + if max_length: + generating_args.pop("max_new_tokens", None) + generating_args["max_length"] = max_length + + if max_new_tokens: + generating_args.pop("max_length", None) + generating_args["max_new_tokens"] = max_new_tokens + + gen_kwargs = dict( + inputs=input_ids, + generation_config=GenerationConfig(**generating_args), + logits_processor=get_logits_processor() + ) + + return gen_kwargs, prompt_length + + @torch.inference_mode() + def chat( + self, + query: str, + history: Optional[List[Tuple[str, str]]] = None, + system: Optional[str] = None, + **input_kwargs + ) -> Tuple[str, Tuple[int, int]]: + gen_kwargs, prompt_length = self.process_args(query, history, system, **input_kwargs) + 
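        # generate() returns prompt and completion tokens in a single sequence; the reply is sliced off below using prompt_length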
generation_output = self.model.generate(**gen_kwargs) + outputs = generation_output.tolist()[0][prompt_length:] + response = self.tokenizer.decode(outputs, skip_special_tokens=True) + response_length = len(outputs) + return response, (prompt_length, response_length) + + @torch.inference_mode() + def stream_chat( + self, + query: str, + history: Optional[List[Tuple[str, str]]] = None, + system: Optional[str] = None, + **input_kwargs + ) -> Generator[str, None, None]: + gen_kwargs, _ = self.process_args(query, history, system, **input_kwargs) + streamer = TextIteratorStreamer(self.tokenizer, timeout=60.0, skip_prompt=True, skip_special_tokens=True) + gen_kwargs["streamer"] = streamer + + thread = Thread(target=self.model.generate, kwargs=gen_kwargs) + thread.start() + + yield from streamer diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/dsets/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cccbd74530776e878098de461952ad7a65e0f55c --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/__init__.py @@ -0,0 +1,3 @@ +from llmtuner.dsets.loader import get_dataset +from llmtuner.dsets.preprocess import preprocess_dataset +from llmtuner.dsets.utils import split_dataset diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/dsets/loader.py b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..08c35f2728ce54df80d856b275a167463a8f7f73 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/loader.py @@ -0,0 +1,92 @@ +import os +from typing import TYPE_CHECKING, List, Union + +from datasets import concatenate_datasets, interleave_datasets, load_dataset + +from llmtuner.dsets.utils import checksum, EXT2TYPE +from llmtuner.extras.logging import get_logger + +if TYPE_CHECKING: + from datasets import Dataset, IterableDataset + from llmtuner.hparams import ModelArguments, DataArguments + + +logger = get_logger(__name__) + + +def get_dataset( + model_args: "ModelArguments", + data_args: "DataArguments" +) -> Union["Dataset", "IterableDataset"]: + max_samples = data_args.max_samples + all_datasets: List[Union["Dataset", "IterableDataset"]] = [] # support multiple datasets + + for dataset_attr in data_args.dataset_list: + logger.info("Loading dataset {}...".format(dataset_attr)) + + if dataset_attr.load_from == "hf_hub": + data_path = dataset_attr.dataset_name + data_files = None + elif dataset_attr.load_from == "script": + data_path = os.path.join(data_args.dataset_dir, dataset_attr.dataset_name) + data_files = None + elif dataset_attr.load_from == "file": + data_path = None + data_files: List[str] = [] + + if os.path.isdir(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): # directory + for file_name in os.listdir(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): + data_files.append(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name, file_name)) + if data_path is None: + data_path = EXT2TYPE.get(file_name.split(".")[-1], None) + else: + assert data_path == EXT2TYPE.get(file_name.split(".")[-1], None), "file type does not match." + elif os.path.isfile(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)): # single file + data_files.append(os.path.join(data_args.dataset_dir, dataset_attr.dataset_name)) + data_path = EXT2TYPE.get(dataset_attr.dataset_name.split(".")[-1], None) + else: + raise ValueError("File not found.") + + assert data_path, "File extension must be txt, csv, json or jsonl." 
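+            # optional integrity check against the SHA-1 recorded for this dataset, if one is provided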
+ checksum(data_files, dataset_attr.dataset_sha1) + else: + raise NotImplementedError + + dataset = load_dataset( + data_path, + data_files=data_files, + split=data_args.split, + cache_dir=model_args.cache_dir, + streaming=data_args.streaming, + use_auth_token=True if model_args.use_auth_token else None + ) + + if max_samples is not None: # truncate the dataset + if data_args.streaming: # an IterableDataset has no len() and no select() + dataset = dataset.take(max_samples) + else: + dataset = dataset.select(range(min(len(dataset), max_samples))) + + for column_name in ["prompt", "query", "response", "history"]: # align datasets + if getattr(dataset_attr, column_name) and getattr(dataset_attr, column_name) != column_name: + dataset = dataset.rename_column(getattr(dataset_attr, column_name), column_name) + + if dataset_attr.system_prompt: # add system prompt + if data_args.streaming: + dataset = dataset.map(lambda _: {"system": dataset_attr.system_prompt}) + else: + dataset = dataset.add_column("system", [dataset_attr.system_prompt] * len(dataset)) + + all_datasets.append(dataset) + + if len(data_args.dataset_list) == 1: + return all_datasets[0] + elif data_args.mix_strategy == "concat": + if data_args.streaming: + logger.warning("The samples between different datasets will not be mixed in streaming mode.") + return concatenate_datasets(all_datasets) + elif data_args.mix_strategy.startswith("interleave"): + if not data_args.streaming: + logger.warning("We recommend using `mix_strategy=concat` in non-streaming mode.") + stopping_strategy = "first_exhausted" if data_args.mix_strategy.endswith("under") else "all_exhausted" + return interleave_datasets(all_datasets, data_args.interleave_probs, stopping_strategy=stopping_strategy) + else: + raise ValueError("Unknown mixing strategy.") diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/dsets/preprocess.py b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..320a54efa2d3902174d025552b7ffbe8e149d0b6 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/preprocess.py @@ -0,0 +1,201 @@ +import tiktoken +from typing import TYPE_CHECKING, Any, Dict, Generator, List, Literal, Union +from itertools import chain + +from llmtuner.extras.constants import IGNORE_INDEX +from llmtuner.extras.template import get_template_and_fix_tokenizer + +if TYPE_CHECKING: + from datasets import Dataset, IterableDataset + from transformers import Seq2SeqTrainingArguments + from transformers.tokenization_utils import PreTrainedTokenizer + from llmtuner.hparams import DataArguments + + +def preprocess_dataset( + dataset: Union["Dataset", "IterableDataset"], + tokenizer: "PreTrainedTokenizer", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + stage: Literal["pt", "sft", "rm", "ppo"] +) -> Union["Dataset", "IterableDataset"]: + column_names = list(next(iter(dataset)).keys()) + template = get_template_and_fix_tokenizer(data_args.template, tokenizer) + + def construct_example(examples: Dict[str, List[Any]]) -> Generator[Any, None, None]: + for i in range(len(examples["prompt"])): + query, response = examples["prompt"][i], examples["response"][i] + query = query + "\n" + examples["query"][i] if "query" in examples and examples["query"][i] else query + history = examples["history"][i] if "history" in examples else None + system = examples["system"][i] if "system" in examples else None + yield query, response, history, system + + def preprocess_pretrain_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]: + # build grouped texts with format `X1 X2 X3 ...` + if
isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): + kwargs = dict(allowed_special="all") # for tiktoken tokenizer (Qwen) + else: + kwargs = dict(add_special_tokens=True) + + if hasattr(tokenizer, "add_bos_token") and hasattr(tokenizer, "add_eos_token"): + setattr(tokenizer, "add_bos_token", True) # for LLaMA tokenizer + setattr(tokenizer, "add_eos_token", True) + + tokenized_examples = tokenizer(examples["prompt"], **kwargs) + concatenated_examples = {k: list(chain(*tokenized_examples[k])) for k in tokenized_examples.keys()} + total_length = len(concatenated_examples[list(concatenated_examples.keys())[0]]) + block_size = data_args.cutoff_len + # we drop the small remainder, and if the total_length < block_size, we exclude this batch + total_length = (total_length // block_size) * block_size + # split by chunks of cutoff_len + result = { + k: [t[i: i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + return result + + def preprocess_supervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]: + # build inputs with format `<bos> X Y <eos>` and labels with format `<ignore> ... <ignore> Y <eos>` + # for multiturn examples, we only mask the prompt part in each prompt-response pair. + model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} + + for query, response, history, system in construct_example(examples): + input_ids, labels = [], [] + + for turn_idx, (source_ids, target_ids) in enumerate(template.encode_multiturn( + tokenizer, query, response, history, system + )): + total_len = len(source_ids) + len(target_ids) + max_source_len = int(data_args.cutoff_len * (len(source_ids) / total_len)) + max_target_len = int(data_args.cutoff_len * (len(target_ids) / total_len)) + + if len(source_ids) > max_source_len: + source_ids = source_ids[:max_source_len] + if len(target_ids) > max_target_len: + target_ids = target_ids[:max_target_len] + + if turn_idx != 0 and template.efficient_eos: + source_mask = [tokenizer.eos_token_id] + [IGNORE_INDEX] * (len(source_ids) - 1) + else: + source_mask = [IGNORE_INDEX] * len(source_ids) + + input_ids += source_ids + target_ids + labels += source_mask + target_ids + + if template.efficient_eos: + input_ids += [tokenizer.eos_token_id] + labels += [tokenizer.eos_token_id] + + if len(input_ids) > data_args.cutoff_len: + input_ids = input_ids[:data_args.cutoff_len] + labels = labels[:data_args.cutoff_len] + + model_inputs["input_ids"].append(input_ids) + model_inputs["attention_mask"].append([1] * len(input_ids)) + model_inputs["labels"].append(labels) + + return model_inputs + + def preprocess_unsupervised_dataset(examples: Dict[str, List[Any]]) -> Dict[str, Any]: + # build inputs with format `<bos> X` and labels with format `Y <eos>` + model_inputs = {"input_ids": [], "attention_mask": [], "labels": []} + + for query, response, history, system in construct_example(examples): + input_ids, labels = template.encode_oneturn(tokenizer, query, response, history, system) + + if template.efficient_eos: + labels += [tokenizer.eos_token_id] + + if len(input_ids) > data_args.cutoff_len: + input_ids = input_ids[:data_args.cutoff_len] + if len(labels) > data_args.cutoff_len: + labels = labels[:data_args.cutoff_len] + + model_inputs["input_ids"].append(input_ids) + model_inputs["attention_mask"].append([1] * len(input_ids)) + model_inputs["labels"].append(labels) + + return model_inputs + + def preprocess_pairwise_dataset(examples): + # build input pairs with format `<bos> X`, `Y1 <eos>` and `Y2 <eos>` + model_inputs = {"prompt_ids": [],
"chosen_ids": [], "rejected_ids": []} + for query, response, history, system in construct_example(examples): + prompt_ids, chosen_ids = template.encode_oneturn(tokenizer, query, response[0], history, system) + _, rejected_ids = template.encode_oneturn(tokenizer, query, response[1], history, system) + + if template.efficient_eos: + chosen_ids += [tokenizer.eos_token_id] + rejected_ids += [tokenizer.eos_token_id] + + total_len = len(prompt_ids) + max(len(chosen_ids), len(rejected_ids)) + max_source_len = int(data_args.cutoff_len * (len(prompt_ids) / total_len)) + max_target_len = int(data_args.cutoff_len * (max(len(chosen_ids), len(rejected_ids)) / total_len)) + + if len(prompt_ids) > max_source_len: + prompt_ids = prompt_ids[:max_source_len] + if len(chosen_ids) > max_target_len: + chosen_ids = chosen_ids[:max_target_len] + if len(rejected_ids) > max_target_len: + rejected_ids = rejected_ids[:max_target_len] + + model_inputs["prompt_ids"].append(prompt_ids) + model_inputs["chosen_ids"].append(chosen_ids) + model_inputs["rejected_ids"].append(rejected_ids) + return model_inputs + + def print_supervised_dataset_example(example): + print("input_ids:\n{}".format(example["input_ids"])) + print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False))) + print("label_ids:\n{}".format(example["labels"])) + print("labels:\n{}".format( + tokenizer.decode(list(filter(lambda x: x != IGNORE_INDEX, example["labels"])), skip_special_tokens=False) + )) + + def print_pairwise_dataset_example(example): + print("prompt_ids:\n{}".format(example["prompt_ids"])) + print("prompt:\n{}".format(tokenizer.decode(example["prompt_ids"], skip_special_tokens=False))) + print("chosen_ids:\n{}".format(example["chosen_ids"])) + print("chosen:\n{}".format(tokenizer.decode(example["chosen_ids"], skip_special_tokens=False))) + print("rejected_ids:\n{}".format(example["rejected_ids"])) + print("rejected:\n{}".format(tokenizer.decode(example["rejected_ids"], skip_special_tokens=False))) + + def print_unsupervised_dataset_example(example): + print("input_ids:\n{}".format(example["input_ids"])) + print("inputs:\n{}".format(tokenizer.decode(example["input_ids"], skip_special_tokens=False))) + + if stage == "pt": + dataset = dataset.filter(lambda example: example["prompt"]) + preprocess_function = preprocess_pretrain_dataset + print_function = print_unsupervised_dataset_example + elif stage == "sft" and not training_args.predict_with_generate: + dataset = dataset.filter(lambda example: example["prompt"] and example["response"]) + preprocess_function = preprocess_supervised_dataset + print_function = print_supervised_dataset_example + elif stage == "rm": + dataset = dataset.filter(lambda example: example["prompt"] and len(example["response"]) > 1) + preprocess_function = preprocess_pairwise_dataset + print_function = print_pairwise_dataset_example + else: + dataset = dataset.filter(lambda example: example["prompt"]) + preprocess_function = preprocess_unsupervised_dataset + print_function = print_unsupervised_dataset_example + + with training_args.main_process_first(desc="dataset map pre-processing"): + kwargs = {} + if not data_args.streaming: + kwargs = dict( + num_proc=data_args.preprocessing_num_workers, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on dataset" + ) + + dataset = dataset.map( + preprocess_function, + batched=True, + remove_columns=column_names, + **kwargs + ) + + print_function(next(iter(dataset))) + return dataset diff --git 
a/LLaMA-Efficient-Tuning/src/llmtuner/dsets/utils.py b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf337014ab39f2ebfd7a0225480bd2fa79bdd327 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/dsets/utils.py @@ -0,0 +1,59 @@ +import hashlib +from typing import TYPE_CHECKING, Dict, List, Optional, Union + +from llmtuner.extras.logging import get_logger + +if TYPE_CHECKING: + from datasets import Dataset, IterableDataset + from transformers import TrainingArguments + from llmtuner.hparams import DataArguments + + +logger = get_logger(__name__) + + +EXT2TYPE = { + "csv": "csv", + "json": "json", + "jsonl": "json", + "txt": "text" +} + + +def checksum(data_files: List[str], file_sha1: Optional[str] = None) -> None: + if file_sha1 is None: + logger.warning("Checksum failed: missing SHA-1 hash value in dataset_info.json.") + return + + if len(data_files) != 1: + logger.warning("Checksum failed: too many files.") + return + + with open(data_files[0], "rb") as f: + sha1 = hashlib.sha1(f.read()).hexdigest() + if sha1 != file_sha1: + logger.warning("Checksum failed: mismatched SHA-1 hash value at {}.".format(data_files[0])) + + +def split_dataset( + dataset: Union["Dataset", "IterableDataset"], + data_args: "DataArguments", + training_args: "TrainingArguments" +) -> Dict[str, "Dataset"]: + if training_args.do_train: + if data_args.val_size > 1e-6: # Split the dataset + if data_args.streaming: + dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed) # shuffle first so take/skip split the shuffled stream + val_set = dataset.take(int(data_args.val_size)) + train_set = dataset.skip(int(data_args.val_size)) + return {"train_dataset": train_set, "eval_dataset": val_set} + else: + val_size = int(data_args.val_size) if data_args.val_size > 1 else data_args.val_size + dataset = dataset.train_test_split(test_size=val_size, seed=training_args.seed) + return {"train_dataset": dataset["train"], "eval_dataset": dataset["test"]} + else: + if data_args.streaming: + dataset = dataset.shuffle(buffer_size=data_args.buffer_size, seed=training_args.seed) + return {"train_dataset": dataset} + else: # do_eval or do_predict + return {"eval_dataset": dataset} diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/callbacks.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..beb13bfa5f6316d258be2881f4450cc45394ed8b --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/callbacks.py @@ -0,0 +1,150 @@ +import os +import json +import time +from typing import TYPE_CHECKING +from datetime import timedelta + +from transformers import TrainerCallback +from transformers.trainer_utils import has_length, PREFIX_CHECKPOINT_DIR + +from llmtuner.extras.constants import LOG_FILE_NAME +from llmtuner.extras.logging import get_logger + +if TYPE_CHECKING: + from transformers import TrainingArguments, TrainerState, TrainerControl + + +logger = get_logger(__name__) + + +class SavePeftModelCallback(TrainerCallback): + + def on_save(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called after a checkpoint save.
+ """ + if args.should_save: + output_dir = os.path.join(args.output_dir, "{}-{}".format(PREFIX_CHECKPOINT_DIR, state.global_step)) + model = kwargs.pop("model") + if getattr(model, "is_peft_model", False): + getattr(model, "pretrained_model").save_pretrained(output_dir) + + def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the end of training. + """ + if args.should_save: + model = kwargs.pop("model") + if getattr(model, "is_peft_model", False): + getattr(model, "pretrained_model").save_pretrained(args.output_dir) + + +class LogCallback(TrainerCallback): + + def __init__(self, runner=None): + self.runner = runner + self.in_training = False + self.start_time = time.time() + self.cur_steps = 0 + self.max_steps = 0 + self.elapsed_time = "" + self.remaining_time = "" + + def timing(self): + cur_time = time.time() + elapsed_time = cur_time - self.start_time + avg_time_per_step = elapsed_time / self.cur_steps if self.cur_steps != 0 else 0 + remaining_time = (self.max_steps - self.cur_steps) * avg_time_per_step + self.elapsed_time = str(timedelta(seconds=int(elapsed_time))) + self.remaining_time = str(timedelta(seconds=int(remaining_time))) + + def on_train_begin(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the beginning of training. + """ + if state.is_local_process_zero: + self.in_training = True + self.start_time = time.time() + self.max_steps = state.max_steps + if os.path.exists(os.path.join(args.output_dir, LOG_FILE_NAME)): + logger.warning("Previous log file in this folder will be deleted.") + os.remove(os.path.join(args.output_dir, LOG_FILE_NAME)) + + def on_train_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the end of training. + """ + if state.is_local_process_zero: + self.in_training = False + self.cur_steps = 0 + self.max_steps = 0 + + def on_substep_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the end of an substep during gradient accumulation. + """ + if state.is_local_process_zero and self.runner is not None and self.runner.aborted: + control.should_epoch_stop = True + control.should_training_stop = True + + def on_step_end(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called at the end of a training step. + """ + if state.is_local_process_zero: + self.cur_steps = state.global_step + self.timing() + if self.runner is not None and self.runner.aborted: + control.should_epoch_stop = True + control.should_training_stop = True + + def on_evaluate(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called after an evaluation phase. + """ + if state.is_local_process_zero and not self.in_training: + self.cur_steps = 0 + self.max_steps = 0 + + def on_predict(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", *other, **kwargs): + r""" + Event called after a successful prediction. + """ + if state.is_local_process_zero and not self.in_training: + self.cur_steps = 0 + self.max_steps = 0 + + def on_log(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs) -> None: + r""" + Event called after logging the last logs. 
+ """ + if not state.is_local_process_zero: + return + + logs = dict( + current_steps=self.cur_steps, + total_steps=self.max_steps, + loss=state.log_history[-1].get("loss", None), + eval_loss=state.log_history[-1].get("eval_loss", None), + predict_loss=state.log_history[-1].get("predict_loss", None), + reward=state.log_history[-1].get("reward", None), + learning_rate=state.log_history[-1].get("learning_rate", None), + epoch=state.log_history[-1].get("epoch", None), + percentage=round(self.cur_steps / self.max_steps * 100, 2) if self.max_steps != 0 else 100, + elapsed_time=self.elapsed_time, + remaining_time=self.remaining_time + ) + os.makedirs(args.output_dir, exist_ok=True) + with open(os.path.join(args.output_dir, "trainer_log.jsonl"), "a", encoding="utf-8") as f: + f.write(json.dumps(logs) + "\n") + + def on_prediction_step(self, args: "TrainingArguments", state: "TrainerState", control: "TrainerControl", **kwargs): + r""" + Event called after a prediction step. + """ + eval_dataloader = kwargs.pop("eval_dataloader", None) + if state.is_local_process_zero and has_length(eval_dataloader) and not self.in_training: + if self.max_steps == 0: + self.max_steps = len(eval_dataloader) + self.cur_steps += 1 + self.timing() diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/constants.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..0d1694b446a7ed5bfd879f7ff0a6284444c11c9e --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/constants.py @@ -0,0 +1,82 @@ +IGNORE_INDEX = -100 + +LOG_FILE_NAME = "trainer_log.jsonl" + +LAYERNORM_NAMES = ["norm", "ln_f", "ln_attn", "ln_mlp"] + +METHODS = ["full", "freeze", "lora"] + +TRAINING_STAGES = { + "Supervised Fine-Tuning": "sft", + "Reward Modeling": "rm", + "PPO": "ppo", + "DPO": "dpo", + "Pre-Training": "pt" +} + +SUPPORTED_MODELS = { + "LLaMA-7B": "huggyllama/llama-7b", + "LLaMA-13B": "huggyllama/llama-13b", + "LLaMA-30B": "huggyllama/llama-30b", + "LLaMA-65B": "huggyllama/llama-65b", + "LLaMA2-7B": "meta-llama/Llama-2-7b-hf", + "LLaMA2-13B": "meta-llama/Llama-2-13b-hf", + "LLaMA2-70B": "meta-llama/Llama-2-70b-hf", + "LLaMA2-7B-Chat": "meta-llama/Llama-2-7b-chat-hf", + "LLaMA2-13B-Chat": "meta-llama/Llama-2-13b-chat-hf", + "LLaMA2-70B-Chat": "meta-llama/Llama-2-70b-chat-hf", + "ChineseLLaMA2-7B": "ziqingyang/chinese-llama-2-7b", + "ChineseLLaMA2-13B": "ziqingyang/chinese-llama-2-13b", + "ChineseLLaMA2-7B-Chat": "ziqingyang/chinese-alpaca-2-7b", + "ChineseLLaMA2-13B-Chat": "ziqingyang/chinese-alpaca-2-13b", + "BLOOM-560M": "bigscience/bloom-560m", + "BLOOM-3B": "bigscience/bloom-3b", + "BLOOM-7B1": "bigscience/bloom-7b1", + "BLOOMZ-560M": "bigscience/bloomz-560m", + "BLOOMZ-3B": "bigscience/bloomz-3b", + "BLOOMZ-7B1-mt": "bigscience/bloomz-7b1-mt", + "Falcon-7B": "tiiuae/falcon-7b", + "Falcon-7B-Chat": "tiiuae/falcon-7b-instruct", + "Falcon-40B": "tiiuae/falcon-40b", + "Falcon-40B-Chat": "tiiuae/falcon-40b-instruct", + "Baichuan-7B": "baichuan-inc/Baichuan-7B", + "Baichuan-13B": "baichuan-inc/Baichuan-13B-Base", + "Baichuan-13B-Chat": "baichuan-inc/Baichuan-13B-Chat", + "Baichuan2-7B": "baichuan-inc/Baichuan2-7B-Base", + "Baichuan2-13B": "baichuan-inc/Baichuan2-13B-Base", + "Baichuan2-7B-Chat": "baichuan-inc/Baichuan2-7B-Chat", + "Baichuan2-13B-Chat": "baichuan-inc/Baichuan2-13B-Chat", + "InternLM-7B": "internlm/internlm-7b", + "InternLM-7B-Chat": "internlm/internlm-chat-7b", + "Qwen-7B": "Qwen/Qwen-7B", + "Qwen-7B-Chat": "Qwen/Qwen-7B-Chat", + "XVERSE-13B": 
"xverse/XVERSE-13B", + "XVERSE-13B-Chat": "xverse/XVERSE-13B-Chat", + "ChatGLM2-6B-Chat": "THUDM/chatglm2-6b" +} + +DEFAULT_MODULE = { + "LLaMA": "q_proj,v_proj", + "LLaMA2": "q_proj,v_proj", + "ChineseLLaMA2": "q_proj,v_proj", + "BLOOM": "query_key_value", + "BLOOMZ": "query_key_value", + "Falcon": "query_key_value", + "Baichuan": "W_pack", + "Baichuan2": "W_pack", + "InternLM": "q_proj,v_proj", + "Qwen": "c_attn", + "XVERSE": "q_proj,v_proj", + "ChatGLM2": "query_key_value" +} + +DEFAULT_TEMPLATE = { + "LLaMA2": "llama2", + "ChineseLLaMA2": "llama2_zh", + "Baichuan": "baichuan", + "Baichuan2": "baichuan2", + "InternLM": "intern", + "Qwen": "chatml", + "XVERSE": "xverse", + "ChatGLM2": "chatglm2" +} diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/logging.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f185e6d6e3714461865cb1db98a90fcc6659fe --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/logging.py @@ -0,0 +1,43 @@ +import sys +import logging + + +class LoggerHandler(logging.Handler): + + def __init__(self): + super().__init__() + self.log = "" + + def reset(self): + self.log = "" + + def emit(self, record): + if record.name == "httpx": + return + log_entry = self.format(record) + self.log += log_entry + self.log += "\n\n" + + +def reset_logging(): + r""" + Removes basic config of root logger + """ + root = logging.getLogger() + list(map(root.removeHandler, root.handlers)) + list(map(root.removeFilter, root.filters)) + + +def get_logger(name: str) -> logging.Logger: + formatter = logging.Formatter( + fmt="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S" + ) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(formatter) + + logger = logging.getLogger(name) + logger.setLevel(logging.INFO) + logger.addHandler(handler) + + return logger diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/misc.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..f9ee2bea16d4e32bad8eb58a82c1953494d975aa --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/misc.py @@ -0,0 +1,90 @@ +import gc +import torch +from typing import TYPE_CHECKING, Tuple +from transformers import InfNanRemoveLogitsProcessor, LogitsProcessorList + +if TYPE_CHECKING: + from transformers.modeling_utils import PreTrainedModel + + +class AverageMeter: + r""" + Computes and stores the average and current value. + """ + def __init__(self): + self.reset() + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count + + +def count_parameters(model: torch.nn.Module) -> Tuple[int, int]: + r""" + Returns the number of trainable parameters and number of all parameters in the model. 
+ """ + trainable_params, all_param = 0, 0 + for param in model.parameters(): + num_params = param.numel() + # if using DS Zero 3 and the weights are initialized empty + if num_params == 0 and hasattr(param, "ds_numel"): + num_params = param.ds_numel + + # Due to the design of 4bit linear layers from bitsandbytes, multiply the number of parameters by 2 + if param.__class__.__name__ == "Params4bit": + num_params = num_params * 2 + + all_param += num_params + if param.requires_grad: + trainable_params += num_params + + return trainable_params, all_param + + +def get_logits_processor() -> LogitsProcessorList: + logits_processor = LogitsProcessorList() + logits_processor.append(InfNanRemoveLogitsProcessor()) + return logits_processor + + +def torch_gc() -> None: + r""" + Collects GPU memory. + """ + gc.collect() + if torch.cuda.is_available(): + torch.cuda.empty_cache() + torch.cuda.ipc_collect() + + +def dispatch_model(model: "PreTrainedModel") -> "PreTrainedModel": + r""" + Dispatches a pre-trained model to GPUs with balanced memory. + Borrowed from: https://github.com/huggingface/transformers/blob/v4.31.0/src/transformers/modeling_utils.py#L2803 + """ + if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False): # do nothing + return model + + if torch.cuda.device_count() > 1: + from accelerate import dispatch_model + from accelerate.utils import infer_auto_device_map, get_balanced_memory + + if model._no_split_modules is None: + raise ValueError("The model class needs to implement the `_no_split_modules` attribute.") + + kwargs = {"dtype": model.dtype, "no_split_module_classes": model._no_split_modules} + max_memory = get_balanced_memory(model, **kwargs) + # Make sure tied weights are tied before creating the device map. 
+ model.tie_weights() + device_map = infer_auto_device_map(model, max_memory=max_memory, **kwargs) + return dispatch_model(model, device_map) + else: + return model.cuda() diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/patches/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/patches/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/patches/flash_llama.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/patches/flash_llama.py new file mode 100644 index 0000000000000000000000000000000000000000..1d6ee66dcf8ddeb3559963c3a7aed7de18456a15 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/patches/flash_llama.py @@ -0,0 +1,301 @@ +# coding=utf-8 +# Modified from: +# [1] https://huggingface.co/Birchlabs/flash_llama/blob/main/modeling_flash_llama.py +# [2] https://github.com/lm-sys/FastChat/blob/main/fastchat/train/llama2_flash_attn_monkey_patch.py +# [3] https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/blob/main/modeling_flash_llama.py +# [4] https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py +# With fix from Alex Birch: https://huggingface.co/togethercomputer/LLaMA-2-7B-32K/discussions/17 + +import torch +from typing import TYPE_CHECKING, Optional, Tuple +from transformers.utils import logging + +if TYPE_CHECKING: + from transformers.models.llama.configuration_llama import LlamaConfig + +try: + from flash_attn.flash_attn_interface import ( + flash_attn_kvpacked_func, + flash_attn_varlen_kvpacked_func + ) + from flash_attn.bert_padding import pad_input, unpad_input + print(">>>> FlashAttention installed") +except ImportError: + raise ImportError("Please install FlashAttention from https://github.com/Dao-AILab/flash-attention") + +try: + from flash_attn.layers.rotary import apply_rotary_emb_func + print(">>>> Flash RoPE installed") +except ImportError: + raise ImportError("Please install RoPE kernels from https://github.com/Dao-AILab/flash-attention") + + +logger = logging.get_logger(__name__) + + +class LlamaRMSNorm(torch.nn.Module): + + def __init__(self, hidden_size, eps=1e-6): + super().__init__() + self.weight = torch.nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + variance = hidden_states.pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + return (self.weight * hidden_states).to(input_dtype) # for fp32 weight + + +class FlashRotaryEmbedding(torch.nn.Module): + + def __init__( + self, + dim: int, + base=10000.0, + interleaved=False, + scale_base=None, + scaling_factor=1.0, + pos_idx_in_fp32=True, + device=None + ): + super().__init__() + self.dim = dim + self.base = float(base) + self.pos_idx_in_fp32 = pos_idx_in_fp32 + # Generate and save the inverse frequency buffer (non trainable) + inv_freq = self._compute_inv_freq(device) + self.register_buffer("inv_freq", inv_freq, persistent=False) + self.interleaved = interleaved + self.scale_base = scale_base + self.scaling_factor = scaling_factor + scale = ( + (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim) + if scale_base is not None else None + ) + self.register_buffer("scale", scale) + + self._seq_len_cached = 0 + self._cos_cached = None + self._sin_cached = None + self._cos_k_cached = None + 
self._sin_k_cached = None + + def _compute_inv_freq(self, device=None): + return 1 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)) + + def _update_cos_sin_cache(self, seqlen, device=None, dtype=None): + if ( + seqlen > self._seq_len_cached or self._cos_cached.device != device + or self._cos_cached.dtype != dtype + or (self.training and self._cos_cached.is_inference()) + ): + self._seq_len_cached = seqlen + if self.pos_idx_in_fp32: + t = torch.arange(seqlen, device=device, dtype=torch.float32) + t /= self.scaling_factor + if self.inv_freq.dtype != torch.float32: + inv_freq = self.inv_freq.to(torch.float32) + else: + inv_freq = self.inv_freq + else: + t = torch.arange(seqlen, device=device, dtype=self.inv_freq.dtype) + t /= self.scaling_factor + inv_freq = self.inv_freq + freqs = torch.outer(t, inv_freq) + if self.scale is None: + self._cos_cached = torch.cos(freqs).to(dtype) + self._sin_cached = torch.sin(freqs).to(dtype) + else: + power = ( + (torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device) - seqlen // 2) / self.scale_base + ) + scale = self.scale.to(device=power.device) ** power.unsqueeze(-1) + # We want the multiplication by scale to happen in fp32 + self._cos_cached = (torch.cos(freqs) * scale).to(dtype) + self._sin_cached = (torch.sin(freqs) * scale).to(dtype) + self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype) + self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype) + + def forward(self, q: torch.Tensor, k: torch.Tensor, seqlen_offset: int = 0) -> Tuple[torch.Tensor, torch.Tensor]: + r""" + q: (batch, seqlen, nheads, headdim) + k: (batch, seqlen, nheads, headdim) + seqlen_offset: can be used in generation where the qkv being passed in is only the last + token in the batch. + """ + self._update_cos_sin_cache(q.shape[1] + seqlen_offset, device=q.device, dtype=q.dtype) + if self.scale is None: + return apply_rotary_emb_func( + q, self._cos_cached[seqlen_offset:], self._sin_cached[seqlen_offset:], + self.interleaved, True # inplace=True + ), apply_rotary_emb_func( + k, self._cos_cached[seqlen_offset:], self._sin_cached[seqlen_offset:], + self.interleaved, True # inplace=True + ) + else: + assert False + + +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + r""" + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, slen, _, num_key_value_heads, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, :, :, None, :].expand(batch, slen, 2, num_key_value_heads, n_rep, head_dim) + return hidden_states.reshape(batch, slen, 2, num_key_value_heads * n_rep, head_dim) + + +class LlamaAttention(torch.nn.Module): + + def __init__(self, config: "LlamaConfig"): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + + self.q_proj = torch.nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) + self.k_proj = torch.nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.v_proj = torch.nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False) + self.o_proj = torch.nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) + + self.register_buffer( + "norm_factor", + torch.sqrt(torch.tensor(self.head_dim, dtype=torch.float32)).to(torch.get_default_dtype()), + persistent=False, + ) + + if self.config.rope_scaling is None: + scaling_factor = 1 + else: + scaling_type = self.config.rope_scaling["type"] + scaling_factor = self.config.rope_scaling["factor"] + assert scaling_type == "linear" + + self.rotary_emb = FlashRotaryEmbedding( + self.head_dim, base=10000, interleaved=False, scaling_factor=scaling_factor + ) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, h_size = hidden_states.size() + + has_layer_past = past_key_value is not None + + if has_layer_past: + past_kv = past_key_value[0] + past_len = past_key_value[1] + else: + past_len = 0 + + q = self.q_proj(hidden_states) + k = self.k_proj(hidden_states) + v = self.v_proj(hidden_states) + + q = q.view(bsz, q_len, self.num_heads, self.head_dim) + k = k.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + v = v.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + + q, k = self.rotary_emb(q, k, past_len) + + kv = torch.stack([k, v], 2) + kv = repeat_kv(kv, self.num_key_value_groups) + + # Cache QKV values + if has_layer_past: + new_len = past_len + q.size(1) + if new_len > past_kv.size(1): + past_kv = torch.cat( + [past_kv, torch.empty(bsz, 256, 2, kv.size(3), kv.size(4), dtype=kv.dtype, device=kv.device)], + dim=1 + ) + past_kv[:, past_len:new_len] = kv + kv = past_kv[:, :new_len] + else: + past_kv = kv + + past_key_value = (past_kv, past_len + q.size(1)) if use_cache else None + + if attention_mask is not None: + # varlen, ignore padding tokens, efficient for large batch with many paddings + logger.warning_once("Padded sequences are less efficient.") + + unpadded_kv, indices_k, cu_seqlens_k, max_seqlen_k = unpad_input(kv, attention_mask) + unpadded_q, indices_q, cu_seqlens_q, max_seqlen_q = unpad_input(q, attention_mask[:, -q.size(1):]) + attn_outputs = flash_attn_varlen_kvpacked_func( + unpadded_q, unpadded_kv, cu_seqlens_q, cu_seqlens_k, + max_seqlen_q, max_seqlen_k, + dropout_p=0.0, softmax_scale=1.0 / self.norm_factor, + causal=(not has_layer_past), return_attn_probs=output_attentions + ) + + attn_output = attn_outputs[0] if output_attentions else attn_outputs + attn_output = pad_input(attn_output, indices_q, bsz, q_len).reshape(bsz, q_len, h_size) + attn_weights = attn_outputs[2] if output_attentions else None + + else: + # no padding tokens, more efficient + attn_outputs = flash_attn_kvpacked_func( + q, kv, dropout_p=0.0, softmax_scale=1.0 / self.norm_factor, + causal=(not has_layer_past), return_attn_probs=output_attentions + ) + attn_output = attn_outputs[0] if
output_attentions else attn_outputs + attn_output = attn_output.reshape(bsz, q_len, h_size) + attn_weights = attn_outputs[2] if output_attentions else None + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +# Disable the transformation of the attention mask in LlamaModel as flash attention +# takes a boolean key_padding_mask. Fills in the past kv length for use in forward. +def _prepare_decoder_attention_mask( + self, attention_mask, input_shape, inputs_embeds, past_key_values_length +): + # [bsz, seq_len] + if past_key_values_length > 0 and attention_mask is not None: + attention_mask = torch.cat( + ( + torch.full( + (input_shape[0], past_key_values_length), + True, + dtype=attention_mask.dtype, + device=attention_mask.device + ), + attention_mask + ), + dim=-1 + ) + + if attention_mask is not None and torch.all(attention_mask): + return None # This uses the faster call when training with full samples + + return attention_mask diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/ploting.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/ploting.py new file mode 100644 index 0000000000000000000000000000000000000000..82530e4518b584231f3e81ddc0d233c3df2e5dab --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/ploting.py @@ -0,0 +1,52 @@ +import os +import math +import json +import matplotlib.pyplot as plt +from typing import List, Optional +from transformers.trainer import TRAINER_STATE_NAME + +from llmtuner.extras.logging import get_logger + + +logger = get_logger(__name__) + + +def smooth(scalars: List[float]) -> List[float]: + r""" + EMA implementation according to TensorBoard. + """ + last = scalars[0] + smoothed = list() + weight = 1.8 * (1 / (1 + math.exp(-0.05 * len(scalars))) - 0.5) # a sigmoid function + for next_val in scalars: + smoothed_val = last * weight + (1 - weight) * next_val + smoothed.append(smoothed_val) + last = smoothed_val + return smoothed + + +def plot_loss(save_dictionary: os.PathLike, keys: Optional[List[str]] = ["loss"]) -> None: + + with open(os.path.join(save_dictionary, TRAINER_STATE_NAME), "r", encoding="utf-8") as f: + data = json.load(f) + + for key in keys: + steps, metrics = [], [] + for i in range(len(data["log_history"])): + if key in data["log_history"][i]: + steps.append(data["log_history"][i]["step"]) + metrics.append(data["log_history"][i][key]) + + if len(metrics) == 0: + logger.warning(f"No metric {key} to plot.") + continue + + plt.figure() + plt.plot(steps, metrics, alpha=0.4, label="original") + plt.plot(steps, smooth(metrics), label="smoothed") + plt.title("training {} of {}".format(key, save_dictionary)) + plt.xlabel("step") + plt.ylabel(key) + plt.legend() + plt.savefig(os.path.join(save_dictionary, "training_{}.png".format(key)), format="png", dpi=100) + print("Figure saved:", os.path.join(save_dictionary, "training_{}.png".format(key))) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/save_and_load.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/save_and_load.py new file mode 100644 index 0000000000000000000000000000000000000000..6d819ce622680d3553c2df6b3410b55b8fe8c446 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/save_and_load.py @@ -0,0 +1,21 @@ +import os +import torch +from transformers.trainer import WEIGHTS_NAME + +from llmtuner.extras.logging import get_logger + + +logger = get_logger(__name__) + + +def load_valuehead_params(model: torch.nn.Module, checkpoint_dir: os.PathLike) -> bool: + 
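r""" + Loads the value head weights from `checkpoint_dir` and registers them on the model as buffers; returns False when the checkpoint contains no value head. + """ +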
vhead_file = os.path.join(checkpoint_dir, WEIGHTS_NAME) + if not os.path.exists(vhead_file): + logger.warning("Provided path ({}) does not contain valuehead weights.".format(checkpoint_dir)) + return False + vhead_params = torch.load(vhead_file, map_location="cpu") + model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False) + model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False) + model.register_buffer("default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False) + model.register_buffer("default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False) + return True diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/extras/template.py b/LLaMA-Efficient-Tuning/src/llmtuner/extras/template.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae77d5d608ba784f220f0f42d052325e33a81d4 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/extras/template.py @@ -0,0 +1,603 @@ +import tiktoken +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union + +from llmtuner.extras.logging import get_logger + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizer + + +logger = get_logger(__name__) + + +@dataclass +class Template: + + prefix: List[Union[str, Dict[str, str]]] + prompt: List[Union[str, Dict[str, str]]] + system: str + sep: List[Union[str, Dict[str, str]]] + stop_words: List[str] + use_history: bool + efficient_eos: bool + + def encode_oneturn( + self, + tokenizer: "PreTrainedTokenizer", + query: str, + resp: str, + history: Optional[List[Tuple[str, str]]] = None, + system: Optional[str] = None + ) -> Tuple[List[int], List[int]]: + r""" + Returns a single pair of token ids representing prompt and response respectively. + """ + system, history = self._format(query, resp, history, system) + encoded_pairs = self._encode(tokenizer, system, history) + prompt_ids = [] + for query_ids, resp_ids in encoded_pairs[:-1]: + prompt_ids = prompt_ids + query_ids + resp_ids + prompt_ids, answer_ids = prompt_ids + encoded_pairs[-1][0], encoded_pairs[-1][1] + return prompt_ids, answer_ids + + def encode_multiturn( + self, + tokenizer: "PreTrainedTokenizer", + query: str, + resp: str, + history: Optional[List[Tuple[str, str]]] = None, + system: Optional[str] = None + ) -> List[Tuple[List[int], List[int]]]: + r""" + Returns multiple pairs of token ids representing prompts and responses respectively. + """ + system, history = self._format(query, resp, history, system) + encoded_pairs = self._encode(tokenizer, system, history) + return encoded_pairs + + def _format( + self, + query: str, + resp: str, + history: Optional[List[Tuple[str, str]]] = None, + system: Optional[str] = None + ) -> Tuple[str, List[Tuple[str, str]]]: + r""" + Aligns inputs to the standard format. 
+ """ + system = system or self.system # use system if provided + history = history if (history and self.use_history) else [] + history = history + [(query, resp)] + return system, history + + def _get_special_ids( + self, + tokenizer: "PreTrainedTokenizer" + ) -> Tuple[List[int], List[int]]: + if tokenizer.bos_token_id is not None and getattr(tokenizer, "add_bos_token", True): + bos_ids = [tokenizer.bos_token_id] + else: # baichuan, qwen and gpt2 models have no bos token + bos_ids = [] + + if tokenizer.eos_token_id is None: + raise ValueError("EOS token is required.") + + if self.efficient_eos: # used in baichuan, qwen, chatglm, etc. + eos_ids = [] + else: + eos_ids = [tokenizer.eos_token_id] + + return bos_ids, eos_ids + + def _encode( + self, + tokenizer: "PreTrainedTokenizer", + system: str, + history: List[Tuple[str, str]] + ) -> List[Tuple[List[int], List[int]]]: + r""" + Encodes formatted inputs to pairs of token ids. + Turn 0: bos + prefix + sep + query resp + eos + Turn t: sep + bos + query resp + eos + """ + bos_ids, eos_ids = self._get_special_ids(tokenizer) + sep_ids = self._convert_inputs_to_ids(tokenizer, context=self.sep) + encoded_pairs = [] + for turn_idx, (query, resp) in enumerate(history): + if turn_idx == 0: + prefix_ids = self._convert_inputs_to_ids(tokenizer, context=self.prefix, system=system) + if len(prefix_ids) != 0: # has prefix + prefix_ids = bos_ids + prefix_ids + sep_ids + else: + prefix_ids = bos_ids + else: + prefix_ids = sep_ids + bos_ids + + query_ids = self._convert_inputs_to_ids(tokenizer, context=self.prompt, query=query, idx=str(turn_idx)) + resp_ids = self._convert_inputs_to_ids(tokenizer, context=[resp]) + encoded_pairs.append((prefix_ids + query_ids, resp_ids + eos_ids)) + return encoded_pairs + + def _convert_inputs_to_ids( + self, + tokenizer: "PreTrainedTokenizer", + context: List[Union[str, Dict[str, str]]], + system: Optional[str] = None, + query: Optional[str] = None, + idx: Optional[str] = None + ) -> List[int]: + r""" + Converts context to token ids. + """ + if isinstance(getattr(tokenizer, "tokenizer", None), tiktoken.Encoding): # for tiktoken tokenizer (Qwen) + kwargs = dict(allowed_special="all") + else: + kwargs = dict(add_special_tokens=False) + + token_ids = [] + for elem in context: + if isinstance(elem, str): + if len(elem) == 0: + continue + elem = elem.replace("{{system}}", system, 1) if system is not None else elem + elem = elem.replace("{{query}}", query, 1) if query is not None else elem + elem = elem.replace("{{idx}}", idx, 1) if idx is not None else elem + token_ids = token_ids + tokenizer.encode(elem, **kwargs) + elif isinstance(elem, dict): + token_ids = token_ids + [tokenizer.convert_tokens_to_ids(elem.get("token"))] + else: + raise ValueError("Input must be string or dict[str, str], got {}".format(type(elem))) + + return token_ids + + +@dataclass +class Llama2Template(Template): + + def _encode( + self, + tokenizer: "PreTrainedTokenizer", + system: str, + history: List[Tuple[str, str]] + ) -> List[Tuple[List[int], List[int]]]: + r""" + Encodes formatted inputs to pairs of token ids. 
+ Turn 0: bos + prefix + query resp + eos + Turn t: bos + query resp + eos + """ + bos_ids, eos_ids = self._get_special_ids(tokenizer) + encoded_pairs = [] + for turn_idx, (query, resp) in enumerate(history): + if turn_idx == 0: # llama2 template has no sep_ids + query = self.prefix[0].replace("{{system}}", system) + query + query_ids = self._convert_inputs_to_ids(tokenizer, context=self.prompt, query=query) + resp_ids = self._convert_inputs_to_ids(tokenizer, context=[resp]) + encoded_pairs.append((bos_ids + query_ids, resp_ids + eos_ids)) + return encoded_pairs + + +templates: Dict[str, Template] = {} + + +def register_template( + name: str, + prefix: List[Union[str, Dict[str, str]]], + prompt: List[Union[str, Dict[str, str]]], + system: str, + sep: List[Union[str, Dict[str, str]]], + stop_words: Optional[List[str]] = [], + use_history: Optional[bool] = True, + efficient_eos: Optional[bool] = False +) -> None: + template_class = Llama2Template if "llama2" in name else Template + templates[name] = template_class( + prefix=prefix, + prompt=prompt, + system=system, + sep=sep, + stop_words=stop_words, + use_history=use_history, + efficient_eos=efficient_eos + ) + + +def get_template_and_fix_tokenizer( + name: str, + tokenizer: "PreTrainedTokenizer" +) -> Template: + if tokenizer.eos_token_id is None: + tokenizer.eos_token = "<|endoftext|>" + logger.info("Add eos token: {}".format(tokenizer.eos_token)) + + if tokenizer.pad_token_id is None: + tokenizer.pad_token = tokenizer.eos_token + logger.info("Add pad token: {}".format(tokenizer.pad_token)) + + if name is None: + return None + + template = templates.get(name, None) + assert template is not None, "Template {} does not exist.".format(name) + tokenizer.add_special_tokens( + dict(additional_special_tokens=template.stop_words), + replace_additional_special_tokens=False + ) + return template + + +r""" +Supports language model inference without histories. +""" +register_template( + name="vanilla", + prefix=[], + prompt=[ + "{{query}}" + ], + system="", + sep=[], + use_history=False +) + + +r""" +Default template. +""" +register_template( + name="default", + prefix=[ + "{{system}}" + ], + prompt=[ + "Human: {{query}}\nAssistant: " + ], + system=( + "A chat between a curious user and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the user's questions." + ), + sep=[ + "\n" + ] +) + + +r""" +Supports: https://huggingface.co/meta-llama/Llama-2-7b-chat-hf + https://huggingface.co/meta-llama/Llama-2-13b-chat-hf + https://huggingface.co/meta-llama/Llama-2-70b-chat-hf +""" +register_template( + name="llama2", + prefix=[ + "<<SYS>>\n{{system}}\n<</SYS>>\n\n" + ], + prompt=[ + "[INST] {{query}} [/INST] " + ], + system=( + "You are a helpful, respectful and honest assistant. " + "Always answer as helpfully as possible, while being safe. " + "Your answers should not include any harmful, unethical, " + "racist, sexist, toxic, dangerous, or illegal content. " + "Please ensure that your responses are socially unbiased and positive in nature.\n\n" + "If a question does not make any sense, or is not factually coherent, " + "explain why instead of answering something not correct. " + "If you don't know the answer to a question, please don't share false information."
+ ), + sep=[] +) + + +r""" +Supports: https://github.com/ymcui/Chinese-LLaMA-Alpaca-2 + https://huggingface.co/ziqingyang/chinese-alpaca-2-7b +""" +register_template( + name="llama2_zh", + prefix=[ + "<<SYS>>\n{{system}}\n<</SYS>>\n\n" + ], + prompt=[ + "[INST] {{query}} [/INST] " + ], + system="You are a helpful assistant. 你是一个乐于助人的助手。", + sep=[] +) + + +r""" +Supports: https://huggingface.co/tatsu-lab/alpaca-7b-wdiff + https://github.com/ymcui/Chinese-LLaMA-Alpaca +""" +register_template( + name="alpaca", + prefix=[ + "{{system}}" + ], + prompt=[ + "### Instruction:\n{{query}}\n\n### Response:\n" + ], + system=( + "Below is an instruction that describes a task. " + "Write a response that appropriately completes the request." + ), + sep=[ + "\n\n" + ] +) + + +r""" +Supports: https://huggingface.co/lmsys/vicuna-7b-delta-v1.1 + https://huggingface.co/lmsys/vicuna-13b-delta-v1.1 +""" +register_template( + name="vicuna", + prefix=[ + "{{system}}" + ], + prompt=[ + "USER: {{query}} ASSISTANT: " + ], + system=( + "A chat between a curious user and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the user's questions." + ), + sep=[] +) + + +r""" +Supports: https://huggingface.co/BelleGroup/BELLE-LLaMA-EXT-13B +""" +register_template( + name="belle", + prefix=[ + "{{system}}" + ], + prompt=[ + "Human: {{query}}\n\nBelle: " + ], + system="", + sep=[ + "\n\n" + ] +) + + +r""" +Supports: https://github.com/CVI-SZU/Linly +""" +register_template( + name="linly", + prefix=[ + "{{system}}" + ], + prompt=[ + "User: {{query}}\nBot: " + ], + system="", + sep=[ + "\n" + ] +) + + +r""" +Supports: https://github.com/Neutralzz/BiLLa +""" +register_template( + name="billa", + prefix=[ + "{{system}}" + ], + prompt=[ + "Human: {{query}}\nAssistant: " + ], + system="", + sep=[ + "\n" + ] +) + + +r""" +Supports: https://huggingface.co/IDEA-CCNL/Ziya-LLaMA-13B-v1 +""" +register_template( + name="ziya", + prefix=[ + "{{system}}" + ], + prompt=[ + {"token": "<human>"}, + ":{{query}}\n", + {"token": "<bot>"}, + ":" + ], + system="", + sep=[ + "\n" + ] +) + + +r""" +Supports: https://huggingface.co/qhduan/aquilachat-7b +""" +register_template( + name="aquila", + prefix=[ + "{{system}}" + ], + prompt=[ + "Human: {{query}}###Assistant: " + ], + system=( + "A chat between a curious human and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the human's questions."
+ ), + sep=[ + "###" + ] +) + + +r""" +Supports: https://huggingface.co/internlm/internlm-chat-7b +""" +register_template( + name="intern", + prefix=[ + "{{system}}" + ], + prompt=[ + "<|User|>:{{query}}", + {"token": "<eoh>"}, + "\n<|Bot|>:" + ], + system="", + sep=[ + {"token": "<eoa>"}, + "\n" + ], + stop_words=[ + "<eoa>" + ], + efficient_eos=True +) + + +r""" +Supports: https://huggingface.co/baichuan-inc/Baichuan-13B-Chat +""" +register_template( + name="baichuan", + prefix=[ + "{{system}}" + ], + prompt=[ + {"token": "<reserved_102>"}, # user token + "{{query}}", + {"token": "<reserved_103>"} # assistant token + ], + system="", + sep=[], + efficient_eos=True +) + + +r""" +Supports: https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat + https://huggingface.co/baichuan-inc/Baichuan2-13B-Chat +""" +register_template( + name="baichuan2", + prefix=[ + "{{system}}" + ], + prompt=[ + {"token": "<reserved_106>"}, # user token + "{{query}}", + {"token": "<reserved_107>"} # assistant token + ], + system="", + sep=[], + efficient_eos=True +) + + +r""" +Supports: https://huggingface.co/HuggingFaceH4/starchat-alpha + https://huggingface.co/HuggingFaceH4/starchat-beta +""" +register_template( + name="starchat", + prefix=[ + {"token": "<|system|>"}, + "\n{{system}}", + ], + prompt=[ + {"token": "<|user|>"}, + "\n{{query}}", + {"token": "<|end|>"}, + "\n", + {"token": "<|assistant|>"} + ], + system="", + sep=[ + {"token": "<|end|>"}, + "\n" + ], + stop_words=[ + "<|end|>" + ], + efficient_eos=True +) + + +r""" +Supports: https://huggingface.co/Qwen/Qwen-7B-Chat +""" +register_template( + name="chatml", + prefix=[ + {"token": "<|im_start|>"}, + "system\n{{system}}" + ], + prompt=[ + {"token": "<|im_start|>"}, + "user\n{{query}}", + {"token": "<|im_end|>"}, + "\n", + {"token": "<|im_start|>"}, + "assistant\n" + ], + system="You are a helpful assistant.", + sep=[ + {"token": "<|im_end|>"}, + "\n" + ], + stop_words=[ + "<|im_end|>" + ], + efficient_eos=True +) + + +r""" +Supports: https://huggingface.co/THUDM/chatglm2-6b +""" +register_template( + name="chatglm2", + prefix=[ + {"token": "[gMASK]"}, + {"token": "sop"}, + "{{system}}" + ], + prompt=[ + "[Round {{idx}}]\n\n问:{{query}}\n\n答:" + ], + system="", + sep=[ + "\n\n" + ], + efficient_eos=True +) + + +r""" +Supports: https://huggingface.co/xverse/XVERSE-13B-Chat +""" +register_template( + name="xverse", + prefix=[ + "{{system}}" + ], + prompt=[ + "Human: {{query}}\n\nAssistant: " + ], + system="", + sep=[] +) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/hparams/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0fabfa33aa3959478b97013239aedbce8afad9ba --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/__init__.py @@ -0,0 +1,5 @@ +from .data_args import DataArguments +from .finetuning_args import FinetuningArguments +from .general_args import GeneralArguments +from .generating_args import GeneratingArguments +from .model_args import ModelArguments diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/hparams/data_args.py b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/data_args.py new file mode 100644 index 0000000000000000000000000000000000000000..02a603f027a8797738c30cca6c4176daa7d77b20 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/data_args.py @@ -0,0 +1,130 @@ +import os +import json +from typing import List, Literal, Optional +from dataclasses import dataclass, field + + +@dataclass +class DatasetAttr: + + load_from: str + dataset_name: Optional[str] = None + dataset_sha1: Optional[str] = None +
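# the remaining per-dataset attributes mirror entries in dataset_info.json; + # prompt/query/response/history are raw column names that loader.py renames to the standard schema +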
system_prompt: Optional[str] = None + ranking: Optional[bool] = False + prompt: Optional[str] = "instruction" + query: Optional[str] = "input" + response: Optional[str] = "output" + history: Optional[str] = None + + def __repr__(self) -> str: + return self.dataset_name + + +@dataclass +class DataArguments: + r""" + Arguments pertaining to what data we are going to input our model for training and evaluation. + """ + template: Optional[str] = field( + default=None, + metadata={"help": "Which template to use for constructing prompts in training and inference."} + ) + dataset: Optional[str] = field( + default="alpaca_en", + metadata={"help": "The name of provided dataset(s) to use. Use commas to separate multiple datasets."} + ) + dataset_dir: Optional[str] = field( + default="data", + metadata={"help": "The name of the folder containing datasets."} + ) + split: Optional[str] = field( + default="train", + metadata={"help": "Which dataset split to use for training and evaluation."} + ) + cutoff_len: Optional[int] = field( + default=1024, + metadata={"help": "The maximum length of the model inputs after tokenization."} + ) + streaming: Optional[bool] = field( + default=False, + metadata={"help": "Enable streaming mode."} + ) + buffer_size: Optional[int] = field( + default=16384, + metadata={"help": "Size of the buffer to randomly sample examples from in streaming mode."} + ) + mix_strategy: Optional[Literal["concat", "interleave_under", "interleave_over"]] = field( + default="concat", + metadata={"help": "Strategy to use in dataset mixing."} + ) + interleave_probs: Optional[str] = field( + default=None, + metadata={"help": "Probabilities to sample data from datasets. Use commas to separate multiple datasets."} + ) + overwrite_cache: Optional[bool] = field( + default=False, + metadata={"help": "Overwrite the cached training and evaluation sets."} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."} + ) + max_samples: Optional[int] = field( + default=None, + metadata={"help": "For debugging purposes, truncate the number of examples for each dataset."} + ) + eval_num_beams: Optional[int] = field( + default=None, + metadata={"help": "Number of beams to use for evaluation. This argument will be passed to `model.generate`"} + ) + ignore_pad_token_for_loss: Optional[bool] = field( + default=True, + metadata={"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."} + ) + system_prompt: Optional[str] = field( + default=None, + metadata={"help": "System prompt to add before the user query. Use `|` to separate multiple prompts in training."} + ) + val_size: Optional[float] = field( + default=0, + metadata={"help": "Size of the development set, should be an integer or a float in range `[0,1)`."} + ) + + def init_for_training(self): # support mixing multiple datasets + dataset_names = [ds.strip() for ds in self.dataset.split(",")] + with open(os.path.join(self.dataset_dir, "dataset_info.json"), "r") as f: + dataset_info = json.load(f) + + prompt_list = self.system_prompt.split("|") if self.system_prompt else [None] + prompt_list = prompt_list * (len(dataset_names) // len(prompt_list)) + assert len(prompt_list) == len(dataset_names), "Number of system prompts should be equal to datasets or 1." 
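+ # e.g. 4 datasets with system_prompt="A|B" yield ["A", "B", "A", "B"]; a single prompt (or None) is repeated for every dataset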
+
+        if self.interleave_probs is not None:
+            self.interleave_probs = [float(prob.strip()) for prob in self.interleave_probs.split(",")]
+
+        self.dataset_list: List[DatasetAttr] = []
+        for i, name in enumerate(dataset_names):
+            if name not in dataset_info:
+                raise ValueError("Undefined dataset {} in dataset_info.json.".format(name))
+
+            if "hf_hub_url" in dataset_info[name]:
+                dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"])
+            elif "script_url" in dataset_info[name]:
+                dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"])
+            else:
+                dataset_attr = DatasetAttr(
+                    "file",
+                    dataset_name=dataset_info[name]["file_name"],
+                    dataset_sha1=dataset_info[name].get("file_sha1", None)
+                )
+
+            if "columns" in dataset_info[name]:
+                dataset_attr.prompt = dataset_info[name]["columns"].get("prompt", None)
+                dataset_attr.query = dataset_info[name]["columns"].get("query", None)
+                dataset_attr.response = dataset_info[name]["columns"].get("response", None)
+                dataset_attr.history = dataset_info[name]["columns"].get("history", None)
+
+            dataset_attr.ranking = dataset_info[name].get("ranking", False)
+            dataset_attr.system_prompt = prompt_list[i]
+            self.dataset_list.append(dataset_attr)
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/hparams/finetuning_args.py b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/finetuning_args.py
new file mode 100644
index 0000000000000000000000000000000000000000..bda0adafcf5b003dfe798b4889938341376f47ca
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/finetuning_args.py
@@ -0,0 +1,98 @@
+import json
+from typing import Literal, Optional
+from dataclasses import asdict, dataclass, field
+
+
+@dataclass
+class FinetuningArguments:
+    r"""
+    Arguments pertaining to which fine-tuning techniques we are going to use.
+    """
+    finetuning_type: Optional[Literal["lora", "freeze", "full", "none"]] = field(
+        default="lora",
+        metadata={"help": "Which fine-tuning method to use."}
+    )
+    num_hidden_layers: Optional[int] = field(
+        default=32,
+        metadata={"help": "Number of decoder blocks in the model for partial-parameter (freeze) fine-tuning. \
+                  LLaMA choices: [\"32\", \"40\", \"60\", \"80\"], \
+                  LLaMA-2 choices: [\"32\", \"40\", \"80\"], \
+                  BLOOM choices: [\"24\", \"30\", \"70\"], \
+                  Falcon choices: [\"32\", \"60\"], \
+                  Baichuan choices: [\"32\", \"40\"], \
+                  Qwen choices: [\"32\"], \
+                  XVERSE choices: [\"40\"], \
+                  ChatGLM2 choices: [\"28\"]"}
+    )
+    num_layer_trainable: Optional[int] = field(
+        default=3,
+        metadata={"help": "Number of trainable layers for partial-parameter (freeze) fine-tuning."}
+    )
+    name_module_trainable: Optional[Literal["mlp", "self_attn", "self_attention"]] = field(
+        default="mlp",
+        metadata={"help": "Name of trainable modules for partial-parameter (freeze) fine-tuning. \
+                  LLaMA choices: [\"mlp\", \"self_attn\"], \
+                  BLOOM & Falcon & ChatGLM2 choices: [\"mlp\", \"self_attention\"], \
+                  Baichuan choices: [\"mlp\", \"self_attn\"], \
+                  Qwen choices: [\"mlp\", \"attn\"], \
+                  LLaMA-2, InternLM, XVERSE choices: the same as LLaMA."}
+    )
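+    # Example (illustrative values): `--finetuning_type freeze --num_layer_trainable 3
+    # --name_module_trainable mlp` trains only the MLP blocks of the last three layers.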
+    lora_rank: Optional[int] = field(
+        default=8,
+        metadata={"help": "The intrinsic dimension for LoRA fine-tuning."}
+    )
+    lora_alpha: Optional[float] = field(
+        default=32.0,
+        metadata={"help": "The scale factor for LoRA fine-tuning (similar to the learning rate)."}
+    )
+    lora_dropout: Optional[float] = field(
+        default=0.1,
+        metadata={"help": "Dropout rate for the LoRA fine-tuning."}
+    )
+    lora_target: Optional[str] = field(
+        default=None,
+        metadata={"help": "Name(s) of target modules to apply LoRA. Use commas to separate multiple modules. \
+                  LLaMA choices: [\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
+                  BLOOM & Falcon & ChatGLM2 choices: [\"query_key_value\", \"self_attention.dense\", \"mlp.dense\"], \
+                  Baichuan choices: [\"W_pack\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"], \
+                  Qwen choices: [\"c_attn\", \"attn.c_proj\", \"w1\", \"w2\", \"mlp.c_proj\"], \
+                  LLaMA-2, InternLM, XVERSE choices: the same as LLaMA."}
+    )
+    resume_lora_training: Optional[bool] = field(
+        default=True,
+        metadata={"help": "Whether to resume training from the last LoRA weights or create new weights after merging them."}
+    )
+    ppo_score_norm: Optional[bool] = field(
+        default=False,
+        metadata={"help": "Use score normalization in PPO training."}
+    )
+    dpo_beta: Optional[float] = field(
+        default=0.1,
+        metadata={"help": "The beta parameter for the DPO loss."}
+    )
+
+    def __post_init__(self):
+        if isinstance(self.lora_target, str):  # support custom target modules/layers of LoRA
+            self.lora_target = [target.strip() for target in self.lora_target.split(",")]
+
+        if self.num_layer_trainable > 0:  # fine-tuning the last n layers if num_layer_trainable > 0
+            trainable_layer_ids = [self.num_hidden_layers - k - 1 for k in range(self.num_layer_trainable)]
+        else:  # fine-tuning the first n layers if num_layer_trainable < 0
+            trainable_layer_ids = [k for k in range(-self.num_layer_trainable)]
+
+        self.trainable_layers = ["{:d}.{}".format(idx, self.name_module_trainable) for idx in trainable_layer_ids]
+
+        assert self.finetuning_type in ["lora", "freeze", "full", "none"], "Invalid fine-tuning method."
+
+    def save_to_json(self, json_path: str):
+        r"""Saves the content of this instance in JSON format inside `json_path`."""
+        json_string = json.dumps(asdict(self), indent=2, sort_keys=True) + "\n"
+        with open(json_path, "w", encoding="utf-8") as f:
+            f.write(json_string)
+
+    @classmethod
+    def load_from_json(cls, json_path: str):
+        r"""Creates an instance from the content of `json_path`."""
+        with open(json_path, "r", encoding="utf-8") as f:
+            text = f.read()
+        return cls(**json.loads(text))
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/hparams/general_args.py b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/general_args.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0c1a0deb40d87ae2f13d296112b07d312eb7b2c
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/general_args.py
@@ -0,0 +1,13 @@
+from typing import Literal, Optional
+from dataclasses import dataclass, field
+
+
+@dataclass
+class GeneralArguments:
+    r"""
+    Arguments pertaining to which stage we are going to perform.
+    """
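+    # The stages build on one another: `pt` (pretraining) -> `sft` -> `rm` (reward
+    # modeling) -> `ppo`, while `dpo` consumes ranked data directly instead of rm + ppo.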
+ """ + stage: Optional[Literal["pt", "sft", "rm", "ppo", "dpo"]] = field( + default="sft", + metadata={"help": "Which stage will be performed in training."} + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/hparams/generating_args.py b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/generating_args.py new file mode 100644 index 0000000000000000000000000000000000000000..f8b935fbc96cacf15365127ef333ba752226f336 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/generating_args.py @@ -0,0 +1,51 @@ +from typing import Any, Dict, Optional +from dataclasses import asdict, dataclass, field + + +@dataclass +class GeneratingArguments: + r""" + Arguments pertaining to specify the decoding parameters. + """ + do_sample: Optional[bool] = field( + default=True, + metadata={"help": "Whether or not to use sampling, use greedy decoding otherwise."} + ) + temperature: Optional[float] = field( + default=0.95, + metadata={"help": "The value used to modulate the next token probabilities."} + ) + top_p: Optional[float] = field( + default=0.7, + metadata={"help": "The smallest set of most probable tokens with probabilities that add up to top_p or higher are kept."} + ) + top_k: Optional[int] = field( + default=50, + metadata={"help": "The number of highest probability vocabulary tokens to keep for top-k filtering."} + ) + num_beams: Optional[int] = field( + default=1, + metadata={"help": "Number of beams for beam search. 1 means no beam search."} + ) + max_length: Optional[int] = field( + default=None, + metadata={"help": "The maximum length the generated tokens can have. It can be overridden by max_new_tokens."} + ) + max_new_tokens: Optional[int] = field( + default=512, + metadata={"help": "The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt."} + ) + repetition_penalty: Optional[float] = field( + default=1.0, + metadata={"help": "The parameter for repetition penalty. 1.0 means no penalty."} + ) + length_penalty: Optional[float] = field( + default=1.0, + metadata={"help": "Exponential penalty to the length that is used with beam-based generation."} + ) + + def to_dict(self) -> Dict[str, Any]: + args = asdict(self) + if args.get("max_new_tokens", None): + args.pop("max_length", None) + return args diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/hparams/model_args.py b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/model_args.py new file mode 100644 index 0000000000000000000000000000000000000000..0d7a3d52a224fc23ff64b3c259b4aadac0b2ddba --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/hparams/model_args.py @@ -0,0 +1,79 @@ +import torch +from typing import Literal, Optional +from dataclasses import dataclass, field + + +@dataclass +class ModelArguments: + r""" + Arguments pertaining to which model/config/tokenizer we are going to fine-tune. 
+ """ + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} + ) + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Where to store the pretrained models downloaded from huggingface.co."} + ) + use_fast_tokenizer: Optional[bool] = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} + ) + use_auth_token: Optional[bool] = field( + default=False, + metadata={"help": "Will use the token generated when running `huggingface-cli login`."} + ) + model_revision: Optional[str] = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} + ) + quantization_bit: Optional[int] = field( + default=None, + metadata={"help": "The number of bits to quantize the model."} + ) + quantization_type: Optional[Literal["fp4", "nf4"]] = field( + default="nf4", + metadata={"help": "Quantization data type to use in int4 training."} + ) + double_quantization: Optional[bool] = field( + default=True, + metadata={"help": "Whether to use double quantization in int4 training or not."} + ) + rope_scaling: Optional[Literal["linear", "dynamic"]] = field( + default=None, + metadata={"help": "Adopt scaled rotary positional embeddings."} + ) + flash_attn: Optional[bool] = field( + default=False, + metadata={"help": "Enable flash attention for faster training."} + ) + checkpoint_dir: Optional[str] = field( + default=None, + metadata={"help": "Path to the directory(s) containing the delta model checkpoints as well as the configurations."} + ) + reward_model: Optional[str] = field( + default=None, + metadata={"help": "Path to the directory containing the checkpoints of the reward model."} + ) + plot_loss: Optional[bool] = field( + default=False, + metadata={"help": "Whether to plot the training loss after fine-tuning or not."} + ) + hf_auth_token: Optional[str] = field( + default=None, + metadata={"help": "Auth token to log in with Hugging Face Hub."} + ) + + def __post_init__(self): + self.compute_dtype = None + self.model_max_length = None + + if self.checkpoint_dir is not None: # support merging multiple lora weights + self.checkpoint_dir = [cd.strip() for cd in self.checkpoint_dir.split(",")] + + if self.quantization_bit is not None: + assert self.quantization_bit in [4, 8], "We only accept 4-bit or 8-bit quantization." 
+
+        if self.use_auth_token and self.hf_auth_token is not None:
+            from huggingface_hub.hf_api import HfFolder  # lazy load
+            HfFolder.save_token(self.hf_auth_token)
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d5a83e4b8ee7a21c117353123d220371f2b3567
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/__init__.py
@@ -0,0 +1 @@
+from llmtuner.tuner.tune import export_model, run_exp
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd1c5cf072a875aa1ee1d25b0354b3b88af55297
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/__init__.py
@@ -0,0 +1,2 @@
+from llmtuner.tuner.core.parser import get_train_args, get_infer_args
+from llmtuner.tuner.core.loader import load_model_and_tokenizer
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/adapter.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a9e454e5312129dab662343d134c1ab02eefb8f
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/adapter.py
@@ -0,0 +1,101 @@
+import os
+import torch
+from typing import TYPE_CHECKING
+
+from peft import (
+    PeftModel,
+    TaskType,
+    LoraConfig,
+    get_peft_model
+)
+from peft.utils import CONFIG_NAME, WEIGHTS_NAME
+
+from llmtuner.extras.logging import get_logger
+from llmtuner.tuner.core.utils import find_all_linear_modules
+
+if TYPE_CHECKING:
+    from transformers.modeling_utils import PreTrainedModel
+    from llmtuner.hparams import ModelArguments, FinetuningArguments
+
+
+logger = get_logger(__name__)
+
+
+def init_adapter(
+    model: "PreTrainedModel",
+    model_args: "ModelArguments",
+    finetuning_args: "FinetuningArguments",
+    is_trainable: bool,
+    is_mergeable: bool
+) -> "PreTrainedModel":
+    r"""
+    Initializes the adapters.
+
+    Supports full-parameter, freeze and LoRA training.
+
+    Note that the trainable parameters must be cast to float32.
+    """
+
+    if finetuning_args.finetuning_type == "none" and is_trainable:
+        raise ValueError("You cannot use finetuning_type=none while training.")
+
+    if finetuning_args.finetuning_type == "full" and is_trainable:
+        logger.info("Fine-tuning method: Full")
+        model = model.float()
+
+    if finetuning_args.finetuning_type == "freeze":
+        logger.info("Fine-tuning method: Freeze")
+
+        for name, param in model.named_parameters():
+            if not any(trainable_layer in name for trainable_layer in finetuning_args.trainable_layers):
+                param.requires_grad_(False)
+            else:
+                param.data = param.data.to(torch.float32)
+
+    if finetuning_args.finetuning_type == "lora":
+        logger.info("Fine-tuning method: LoRA")
+        latest_checkpoint = None
+
+        if model_args.checkpoint_dir is not None:
+            assert os.path.exists(os.path.join(model_args.checkpoint_dir[0], WEIGHTS_NAME)), \
+                "Provided path ({}) does not contain a LoRA weight.".format(model_args.checkpoint_dir[0])
+            assert os.path.exists(os.path.join(model_args.checkpoint_dir[0], CONFIG_NAME)), \
+                "The given checkpoint may not be a LoRA checkpoint, please specify `--finetuning_type full/freeze` instead."
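+
+            # Example (hypothetical paths): with `--checkpoint_dir ckpt1,ckpt2` and
+            # `resume_lora_training=True`, ckpt1 is merged into the base weights below
+            # and training resumes from ckpt2; with `resume_lora_training=False`, both
+            # checkpoints are merged and a brand-new LoRA adapter is created. A
+            # quantized model always keeps the last checkpoint as the active adapter.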
+ + if (is_trainable and finetuning_args.resume_lora_training) or (not is_mergeable): # continually fine-tuning + checkpoints_to_merge, latest_checkpoint = model_args.checkpoint_dir[:-1], model_args.checkpoint_dir[-1] + else: + checkpoints_to_merge = model_args.checkpoint_dir + + for checkpoint in checkpoints_to_merge: + model = PeftModel.from_pretrained(model, checkpoint) + model = model.merge_and_unload() + + if len(checkpoints_to_merge) > 0: + logger.info("Merged {} model checkpoint(s).".format(len(checkpoints_to_merge))) + + if latest_checkpoint is not None: # resume lora training or quantized inference + model = PeftModel.from_pretrained(model, latest_checkpoint, is_trainable=is_trainable) + + if is_trainable and latest_checkpoint is None: # create new lora weights while training + if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": + target_modules = find_all_linear_modules(model, model_args.quantization_bit) + else: + target_modules = finetuning_args.lora_target + + lora_config = LoraConfig( + task_type=TaskType.CAUSAL_LM, + inference_mode=False, + r=finetuning_args.lora_rank, + lora_alpha=finetuning_args.lora_alpha, + lora_dropout=finetuning_args.lora_dropout, + target_modules=target_modules + ) + model = get_peft_model(model, lora_config) + if id(model.peft_config) != id(model.base_model.peft_config): # https://github.com/huggingface/peft/issues/923 + model.base_model.peft_config = model.peft_config + + if model_args.checkpoint_dir is not None: + logger.info("Loaded fine-tuned model from checkpoint(s): {}".format(",".join(model_args.checkpoint_dir))) + + return model diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/loader.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/loader.py new file mode 100644 index 0000000000000000000000000000000000000000..911b0c5d9efcad626c39856a99f3bd999d2ee71d --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/loader.py @@ -0,0 +1,225 @@ +import os +import math +import torch +from types import MethodType +from typing import TYPE_CHECKING, Literal, Optional, Tuple + +from transformers import ( + AutoConfig, + AutoModelForCausalLM, + AutoTokenizer, + BitsAndBytesConfig, + PretrainedConfig, + PreTrainedModel, + PreTrainedTokenizerBase +) +from transformers.utils import check_min_version +from transformers.utils.versions import require_version +from trl import AutoModelForCausalLMWithValueHead + +try: + from transformers.integrations import is_deepspeed_zero3_enabled +except ImportError: + from transformers.deepspeed import is_deepspeed_zero3_enabled + +from llmtuner.extras.logging import reset_logging, get_logger +from llmtuner.extras.misc import count_parameters +from llmtuner.extras.save_and_load import load_valuehead_params +from llmtuner.hparams import FinetuningArguments +from llmtuner.tuner.core.adapter import init_adapter +from llmtuner.tuner.core.utils import prepare_model_for_training + +if TYPE_CHECKING: + from transformers import PreTrainedTokenizer + from llmtuner.hparams import ModelArguments + + +logger = get_logger(__name__) + + +check_min_version("4.30.0") +require_version("datasets>=2.12.0", "To fix: pip install datasets>=2.12.0") +require_version("accelerate>=0.21.0", "To fix: pip install accelerate>=0.21.0") +require_version("peft>=0.4.0", "To fix: pip install peft>=0.4.0") +require_version("trl>=0.7.1", "To fix: pip install trl>=0.7.1") + + +def load_model_and_tokenizer( + model_args: "ModelArguments", + finetuning_args: "FinetuningArguments", + is_trainable: Optional[bool] 
= False, + stage: Optional[Literal["pt", "sft", "rm", "ppo"]] = "sft" +) -> Tuple[PreTrainedModel, "PreTrainedTokenizer"]: + r""" + Loads pretrained model and tokenizer. + + Support both training and inference. + """ + if (not is_trainable) and model_args.checkpoint_dir is None: + logger.warning("Checkpoint is not found at evaluation, load the original model.") + finetuning_args = FinetuningArguments(finetuning_type="none") + + config_kwargs = { + "trust_remote_code": True, + "cache_dir": model_args.cache_dir, + "revision": model_args.model_revision, + "use_auth_token": True if model_args.use_auth_token else None, + } + + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, + use_fast=model_args.use_fast_tokenizer, + padding_side="right", # training with left-padded tensors in fp16 precision may cause overflow + **config_kwargs + ) + + # Fix tokenizer (for ChatGLM2) + if "PreTrainedTokenizerBase" not in str(tokenizer._pad.__func__): + tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer) + + if finetuning_args.finetuning_type != "lora" and model_args.checkpoint_dir is not None: + model_to_load = model_args.checkpoint_dir[0] + else: + model_to_load = model_args.model_name_or_path + + config = AutoConfig.from_pretrained(model_to_load, **config_kwargs) + + # Fix config (for Qwen) + if hasattr(config, "fp16") and hasattr(config, "bf16"): + setattr(config, "fp16", model_args.compute_dtype == torch.float16) + setattr(config, "bf16", model_args.compute_dtype == torch.bfloat16) + + # Set RoPE scaling + if model_args.rope_scaling is not None: + if hasattr(config, "use_dynamic_ntk"): # for Qwen models + if is_trainable: + logger.warning("Qwen model does not support RoPE scaling in training.") + else: + setattr(config, "use_dynamic_ntk", True) + setattr(config, "use_logn_attn", True) + logger.info("Using dynamic NTK scaling.") + + elif hasattr(config, "rope_scaling"): # for LLaMA and Falcon models + require_version("transformers>=4.31.0", "RoPE scaling requires transformers>=4.31.0") + if is_trainable: + if model_args.rope_scaling == "dynamic": + assert not model_args.flash_attn, "Flash attention does not support dynamic rope scaling." + logger.warning( + "Dynamic NTK may not work well with fine-tuning. " + "See: https://github.com/huggingface/transformers/pull/24653" + ) + + current_max_length = getattr(config, "max_position_embeddings", None) + if current_max_length and model_args.model_max_length > current_max_length: + scaling_factor = float(math.ceil(model_args.model_max_length / current_max_length)) + else: + logger.warning("Input length is smaller than max length. 
Consider increasing the input length.")
+                    scaling_factor = 1.0
+            else:
+                scaling_factor = 2.0
+
+            setattr(config, "rope_scaling", {"type": model_args.rope_scaling, "factor": scaling_factor})
+            logger.info("Using {} scaling strategy and setting scaling factor to {}".format(
+                model_args.rope_scaling, scaling_factor
+            ))
+
+        else:
+            logger.warning("Current model does not support RoPE scaling.")
+
+    # Set flash attention
+    if model_args.flash_attn and getattr(config, "model_type", None) == "llama":
+        import transformers.models.llama.modeling_llama as LlamaModule
+        import llmtuner.extras.patches.flash_llama as FlashLlama
+        LlamaModule.LlamaRMSNorm = FlashLlama.LlamaRMSNorm
+        LlamaModule.LlamaAttention = FlashLlama.LlamaAttention
+        LlamaModule.LlamaModel._prepare_decoder_attention_mask = FlashLlama._prepare_decoder_attention_mask
+        if not hasattr(config, "num_key_value_heads"):  # for LLaMA-1 models
+            setattr(config, "num_key_value_heads", getattr(config, "num_attention_heads"))
+        if getattr(config, "pretraining_tp", 1) != 1:
+            setattr(config, "pretraining_tp", 1)
+
+    # Quantization configurations (using bitsandbytes library).
+    is_mergeable = True
+    if model_args.quantization_bit is not None:
+        if is_deepspeed_zero3_enabled():
+            raise ValueError("DeepSpeed ZeRO-3 is incompatible with quantization.")
+
+        if model_args.quantization_bit == 8:
+            require_version("bitsandbytes>=0.37.0", "To fix: pip install bitsandbytes>=0.37.0")
+            config_kwargs["load_in_8bit"] = True
+            config_kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)
+
+        elif model_args.quantization_bit == 4:
+            require_version("bitsandbytes>=0.39.0", "To fix: pip install bitsandbytes>=0.39.0")
+            config_kwargs["load_in_4bit"] = True
+            config_kwargs["quantization_config"] = BitsAndBytesConfig(
+                load_in_4bit=True,
+                bnb_4bit_compute_dtype=model_args.compute_dtype,
+                bnb_4bit_use_double_quant=model_args.double_quantization,
+                bnb_4bit_quant_type=model_args.quantization_type
+            )
+
+        is_mergeable = False
+        config_kwargs["device_map"] = {"": int(os.environ.get("LOCAL_RANK", "0"))} if is_trainable else "auto"
+        logger.info("Quantizing model to {} bit.".format(model_args.quantization_bit))
+
+    # Load and prepare pre-trained models (without valuehead).
+    model = AutoModelForCausalLM.from_pretrained(
+        model_to_load,
+        config=config,
+        torch_dtype=model_args.compute_dtype,
+        low_cpu_mem_usage=(not is_deepspeed_zero3_enabled()),
+        **config_kwargs
+    )
+
+    # Disable custom generate method (for Qwen)
+    if isinstance(model, PreTrainedModel) and "GenerationMixin" not in str(model.generate.__func__):
+        model.generate = MethodType(PreTrainedModel.generate, model)
+
+    # Fix LM head (for ChatGLM2)
+    if not hasattr(model, "lm_head") and hasattr(model, "transformer"):
+        setattr(model, "lm_head", model.transformer.output_layer)
+
+    # Register auto class to save the custom code files.
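+    # (For remote-code models such as Qwen or Baichuan, `auto_map` in the Hub config
+    # points at classes defined in custom .py files; registering the auto classes lets
+    # `save_pretrained` copy those files alongside the fine-tuned weights.)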
+ if isinstance(config, PretrainedConfig) and "AutoConfig" in getattr(config, "auto_map", {}): + config.__class__.register_for_auto_class() + if isinstance(model, PreTrainedModel) and "AutoModelForCausalLM" in getattr(config, "auto_map", {}): + model.__class__.register_for_auto_class() + if isinstance(tokenizer, PreTrainedTokenizerBase) and "AutoTokenizer" in tokenizer.init_kwargs.get("auto_map", {}): + tokenizer.__class__.register_for_auto_class() + + # Initialize adapters + model = prepare_model_for_training(model, finetuning_args.finetuning_type) if is_trainable else model + model = init_adapter(model, model_args, finetuning_args, is_trainable, is_mergeable) + model = model.train() if is_trainable else model.eval() + + # Prepare model with valuehead for RLHF + if stage == "rm" or stage == "ppo": + model = AutoModelForCausalLMWithValueHead.from_pretrained(model) + model._keys_to_ignore_on_save = None + reset_logging() + if stage == "rm" and model_args.checkpoint_dir is not None: # load valuehead weights to evaluate reward model + logger.warning("Only the last checkpoint containing valuehead will be loaded.") + if load_valuehead_params(model, model_args.checkpoint_dir[-1]): + model.v_head.load_state_dict({ + "summary.weight": getattr(model, "reward_head_weight"), + "summary.bias": getattr(model, "reward_head_bias") + }) + + if stage == "ppo": # load reward model + logger.info("Load reward model from {}".format(model_args.reward_model)) + if getattr(model, "is_peft_model", False): + model.pretrained_model.load_adapter(model_args.reward_model, "reward") + assert load_valuehead_params(model, model_args.reward_model), "Reward model is not correctly loaded." + + # Prepare model for inference + if not is_trainable: + model.requires_grad_(False) # fix all model params + model = model.to(model_args.compute_dtype) if model_args.quantization_bit is None else model + + trainable_params, all_param = count_parameters(model) + logger.info("trainable params: {:d} || all params: {:d} || trainable%: {:.4f}".format( + trainable_params, all_param, 100 * trainable_params / all_param + )) + + return model, tokenizer diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/parser.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ebcb78ac6b144f12bed3a5275190c027d04ef4 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/parser.py @@ -0,0 +1,262 @@ +import os +import sys +import torch +import datasets +import transformers +from typing import Any, Dict, Optional, Tuple +from transformers import HfArgumentParser, Seq2SeqTrainingArguments +from transformers.utils.versions import require_version +from transformers.trainer_utils import get_last_checkpoint + +try: + from transformers.utils import is_torch_bf16_gpu_available, is_torch_npu_available + is_bf16_available = is_torch_bf16_gpu_available() + is_npu_available = is_torch_npu_available() +except ImportError: + is_bf16_available = torch.cuda.is_bf16_supported() + is_npu_available = False + +from llmtuner.extras.logging import get_logger +from llmtuner.hparams import ( + ModelArguments, + DataArguments, + FinetuningArguments, + GeneratingArguments, + GeneralArguments +) + + +logger = get_logger(__name__) + + +def _parse_args(parser: HfArgumentParser, args: Optional[Dict[str, Any]] = None) -> Tuple[Any]: + if args is not None: + return parser.parse_dict(args) + elif len(sys.argv) == 2 and sys.argv[1].endswith(".yaml"): + return 
parser.parse_yaml_file(os.path.abspath(sys.argv[1]))
+    elif len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
+        return parser.parse_json_file(os.path.abspath(sys.argv[1]))
+    else:
+        return parser.parse_args_into_dataclasses()
+
+
+def parse_train_args(
+    args: Optional[Dict[str, Any]] = None
+) -> Tuple[
+    ModelArguments,
+    DataArguments,
+    Seq2SeqTrainingArguments,
+    FinetuningArguments,
+    GeneratingArguments,
+    GeneralArguments
+]:
+    parser = HfArgumentParser((
+        ModelArguments,
+        DataArguments,
+        Seq2SeqTrainingArguments,
+        FinetuningArguments,
+        GeneratingArguments,
+        GeneralArguments
+    ))
+    return _parse_args(parser, args)
+
+
+def parse_infer_args(
+    args: Optional[Dict[str, Any]] = None
+) -> Tuple[
+    ModelArguments,
+    DataArguments,
+    FinetuningArguments,
+    GeneratingArguments
+]:
+    parser = HfArgumentParser((
+        ModelArguments,
+        DataArguments,
+        FinetuningArguments,
+        GeneratingArguments
+    ))
+    return _parse_args(parser, args)
+
+
+def get_train_args(
+    args: Optional[Dict[str, Any]] = None
+) -> Tuple[
+    ModelArguments,
+    DataArguments,
+    Seq2SeqTrainingArguments,
+    FinetuningArguments,
+    GeneratingArguments,
+    GeneralArguments
+]:
+    model_args, data_args, training_args, finetuning_args, generating_args, general_args = parse_train_args(args)
+
+    # Setup logging
+    if training_args.should_log:
+        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
+        transformers.utils.logging.set_verbosity_info()
+
+    log_level = training_args.get_process_log_level()
+    datasets.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.set_verbosity(log_level)
+    transformers.utils.logging.enable_default_handler()
+    transformers.utils.logging.enable_explicit_format()
+
+    # Check arguments (do not check finetuning_args since it may be loaded from checkpoints)
+    data_args.init_for_training()
+
+    if general_args.stage != "pt" and data_args.template is None:
+        raise ValueError("Please specify which `template` to use.")
+
+    if general_args.stage != "sft" and training_args.predict_with_generate:
+        raise ValueError("`predict_with_generate` cannot be set to True except during SFT.")
+
+    if general_args.stage == "sft" and training_args.do_predict and not training_args.predict_with_generate:
+        raise ValueError("Please enable `predict_with_generate` to save model predictions.")
+
+    if general_args.stage in ["rm", "ppo"] and finetuning_args.finetuning_type != "lora":
+        raise ValueError("RM and PPO stages can only be performed with the LoRA method.")
+
+    if general_args.stage in ["rm", "ppo"] and training_args.resume_from_checkpoint is not None:
+        raise ValueError("RM and PPO stages do not support `resume_from_checkpoint`.")
+
+    if general_args.stage in ["ppo", "dpo"] and not training_args.do_train:
+        raise ValueError("PPO and DPO stages can only be performed during training.")
+
+    if general_args.stage in ["rm", "dpo"]:
+        for dataset_attr in data_args.dataset_list:
+            if not dataset_attr.ranking:
+                raise ValueError("Please use ranked datasets for reward modeling or DPO training.")
+
+    if general_args.stage == "ppo" and model_args.reward_model is None:
+        raise ValueError("Reward model is necessary for PPO training.")
+
+    if general_args.stage == "ppo" and training_args.deepspeed is not None:
+        raise ValueError("PPO training is incompatible with DeepSpeed, use Accelerate instead.")
+
+    if general_args.stage == "ppo" and data_args.streaming:
+        raise ValueError("Streaming mode does not support PPO training currently.")
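+
+    # A minimal launch that passes the checks above might look like the following
+    # (illustrative values; `train_bash.py` is the repository's CLI entry point):
+    #
+    #   python src/train_bash.py --stage sft --do_train \
+    #       --model_name_or_path meta-llama/Llama-2-7b-hf \
+    #       --dataset alpaca_en --template default \
+    #       --finetuning_type lora --lora_target q_proj,v_proj \
+    #       --output_dir output/sft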
+
+    if training_args.max_steps == -1 and data_args.streaming:
+        raise ValueError("Please specify `max_steps` in streaming mode.")
+
+    if data_args.val_size > 1e-6 and data_args.val_size < 1 and data_args.streaming:
+        raise ValueError("In streaming mode, `val_size` should be an integer number of examples rather than a ratio.")
+
+    if training_args.do_train and training_args.predict_with_generate:
+        raise ValueError("`predict_with_generate` cannot be set to True while training.")
+
+    if training_args.do_train and finetuning_args.finetuning_type == "lora" and finetuning_args.lora_target is None:
+        raise ValueError("Please specify `lora_target` in LoRA training.")
+
+    if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora":
+        raise ValueError("Quantization is only compatible with the LoRA method.")
+
+    if model_args.checkpoint_dir is not None:
+        if finetuning_args.finetuning_type != "lora" and len(model_args.checkpoint_dir) != 1:
+            raise ValueError("Only LoRA tuning accepts multiple checkpoints.")
+
+        if model_args.quantization_bit is not None:
+            if len(model_args.checkpoint_dir) != 1:
+                raise ValueError("A quantized model only accepts a single checkpoint. Merge them first.")
+
+            if not finetuning_args.resume_lora_training:
+                raise ValueError("A quantized model cannot create new LoRA weights. Merge them first.")
+
+    if model_args.quantization_bit is not None and (not training_args.do_train):
+        logger.warning("Evaluating model in 4/8-bit mode may cause lower scores.")
+
+    if training_args.do_train and (not training_args.fp16) and (not training_args.bf16):
+        logger.warning("We recommend enabling mixed precision training.")
+
+    # postprocess data_args
+    if data_args.max_samples is not None and data_args.streaming:
+        logger.warning("`max_samples` is incompatible with `streaming`. Disabling max_samples.")
+        data_args.max_samples = None
+
+    # postprocess training_args
+    if (
+        training_args.local_rank != -1
+        and training_args.ddp_find_unused_parameters is None
+        and finetuning_args.finetuning_type == "lora"
+    ):
+        logger.warning("`ddp_find_unused_parameters` needs to be set to False for LoRA in DDP training.")
+        training_args_dict = training_args.to_dict()
+        training_args_dict.update(dict(ddp_find_unused_parameters=False))
+        training_args = Seq2SeqTrainingArguments(**training_args_dict)
+
+    if (
+        training_args.resume_from_checkpoint is None
+        and training_args.do_train
+        and os.path.isdir(training_args.output_dir)
+        and not training_args.overwrite_output_dir
+    ):
+        require_version("transformers>=4.31.0", "Resuming training requires transformers>=4.31.0.")
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError("Output directory already exists and is not empty. Use `overwrite_output_dir`.")
+
+        if last_checkpoint is not None:
+            training_args_dict = training_args.to_dict()
+            training_args_dict.update(dict(resume_from_checkpoint=last_checkpoint))
+            training_args = Seq2SeqTrainingArguments(**training_args_dict)
+            logger.info(
+                "Resuming from checkpoint. Change `output_dir` or use `overwrite_output_dir` to avoid this behavior."
+ ) + + # postprocess model_args + if training_args.bf16: + if not is_bf16_available: + raise ValueError("Current device does not support bf16 training.") + model_args.compute_dtype = torch.bfloat16 + elif training_args.fp16: + model_args.compute_dtype = torch.float16 + else: + model_args.compute_dtype = torch.float32 + + model_args.model_max_length = data_args.cutoff_len + + # Log on each process the small summary: + logger.info("Process rank: {}, device: {}, n_gpu: {}\n distributed training: {}, compute dtype: {}".format( + training_args.local_rank, training_args.device, training_args.n_gpu, + bool(training_args.local_rank != -1), str(model_args.compute_dtype) + )) + logger.info(f"Training/evaluation parameters {training_args}") + + # Set seed before initializing model. + transformers.set_seed(training_args.seed) + + return model_args, data_args, training_args, finetuning_args, generating_args, general_args + + +def get_infer_args( + args: Optional[Dict[str, Any]] = None +) -> Tuple[ + ModelArguments, + DataArguments, + FinetuningArguments, + GeneratingArguments +]: + model_args, data_args, finetuning_args, generating_args = parse_infer_args(args) + + if data_args.template is None: + raise ValueError("Please specify which `template` to use.") + + if model_args.quantization_bit is not None and finetuning_args.finetuning_type != "lora": + raise ValueError("Quantization is only compatible with the LoRA method.") + + if model_args.checkpoint_dir is not None: + if finetuning_args.finetuning_type != "lora" and len(model_args.checkpoint_dir) != 1: + raise ValueError("Only LoRA tuning accepts multiple checkpoints.") + + if model_args.quantization_bit is not None and len(model_args.checkpoint_dir) != 1: + raise ValueError("Quantized model only accepts a single checkpoint. 
Merge them first.")
+
+    # auto-detect cuda capability
+    if is_npu_available:
+        model_args.compute_dtype = torch.float16
+    elif is_bf16_available:
+        model_args.compute_dtype = torch.bfloat16
+    else:
+        model_args.compute_dtype = torch.float16
+
+    return model_args, data_args, finetuning_args, generating_args
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/utils.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..74ff075f63c2e77809a32eee6506e9a7d94d3abd
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/core/utils.py
@@ -0,0 +1,72 @@
+import torch
+from typing import TYPE_CHECKING, List, Optional
+
+from llmtuner.extras.constants import LAYERNORM_NAMES
+
+if TYPE_CHECKING:
+    from transformers.modeling_utils import PreTrainedModel
+
+
+def find_all_linear_modules(
+    model: "PreTrainedModel",
+    quantization_bit: Optional[int] = None,
+    output_layer_name: Optional[str] = "lm_head"
+) -> List[str]:
+    if quantization_bit is not None:
+        import bitsandbytes as bnb
+        linear_cls = bnb.nn.Linear4bit if quantization_bit == 4 else bnb.nn.Linear8bitLt
+    else:
+        linear_cls = torch.nn.Linear
+
+    module_names = set()
+    for name, module in model.named_modules():
+        if output_layer_name not in name and isinstance(module, linear_cls):
+            module_names.add(name.split(".")[-1])
+
+    module_names.discard(output_layer_name)
+
+    return list(module_names)
+
+
+def prepare_model_for_training(
+    model: "PreTrainedModel",
+    finetuning_type: str,
+    output_layer_name: Optional[str] = "lm_head",
+    use_gradient_checkpointing: Optional[bool] = True,
+    layer_norm_names: Optional[List[str]] = LAYERNORM_NAMES
+) -> "PreTrainedModel":
+    r"""
+    Includes:
+        (1) cast the layernorm in fp32
+        (2) make output embedding layer require grads
+        (3) upcast the lm_head to fp32
+    Inspired by: https://github.com/huggingface/peft/blob/v0.2.0/src/peft/utils/other.py#L33
+    """
+    for name, param in model.named_parameters():
+        if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names):
+            param.data = param.data.to(torch.float32)
+
+    if use_gradient_checkpointing:
+        if hasattr(model, "enable_input_require_grads"):
+            model.enable_input_require_grads()
+        else:
+            def make_inputs_require_grad(module, input, output):
+                output.requires_grad_(True)
+            model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)
+
+        model.gradient_checkpointing_enable()
+        model.config.use_cache = False  # turn off when gradient checkpointing is enabled
+
+    if finetuning_type != "full" and hasattr(model, output_layer_name):
+        output_layer: torch.nn.Linear = getattr(model, output_layer_name)
+        input_dtype = output_layer.weight.dtype
+
+        class CastOutputToFloat(torch.nn.Sequential):
+
+            def forward(self, x: torch.Tensor) -> torch.Tensor:
+                return super().forward(x.to(input_dtype)).to(torch.float32)
+
+        setattr(model, output_layer_name, CastOutputToFloat(output_layer))
+
+    return model
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2b5cfb5867cde6c968bbf56ed30b3ce439539bb
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/__init__.py
@@ -0,0 +1 @@
+from llmtuner.tuner.dpo.workflow import run_dpo
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/collator.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/collator.py
new file
mode 100644 index 0000000000000000000000000000000000000000..5c862b4f89af2d5cf0c1e32c446c54f21a475b5d --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/collator.py @@ -0,0 +1,51 @@ +import torch +from dataclasses import dataclass +from typing import Any, Dict, List, Sequence, Tuple +from transformers import DataCollatorForSeq2Seq + + +@dataclass +class DPODataCollatorWithPadding(DataCollatorForSeq2Seq): + r""" + Data collator for pairwise data. + """ + + def _pad_labels(self, batch: torch.Tensor, positions: List[Tuple[int, int]]) -> torch.Tensor: + padded_labels = [] + for feature, (prompt_len, answer_len) in zip(batch, positions): + if self.tokenizer.padding_side == "left": + start, end = feature.size(0) - answer_len, feature.size(0) + else: + start, end = prompt_len, prompt_len + answer_len + padded_tensor = self.label_pad_token_id * torch.ones_like(feature) + padded_tensor[start:end] = feature[start:end] + padded_labels.append(padded_tensor) + return torch.stack(padded_labels, dim=0).contiguous() # in contiguous memory + + def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]: + r""" + Pads batched data to the longest sequence in the batch. + + We generate 2 * n examples where the first n examples represent chosen examples and + the last n examples represent rejected examples. + """ + concatenated_features = [] + label_positions = [] + for key in ("chosen_ids", "rejected_ids"): + for feature in features: + prompt_len, answer_len = len(feature["prompt_ids"]), len(feature[key]) + concatenated_features.append({ + "input_ids": feature["prompt_ids"] + feature[key], + "attention_mask": [1] * (prompt_len + answer_len) + }) + label_positions.append((prompt_len, answer_len)) + + batch = self.tokenizer.pad( + concatenated_features, + padding=self.padding, + max_length=self.max_length, + pad_to_multiple_of=self.pad_to_multiple_of, + return_tensors=self.return_tensors, + ) + batch["labels"] = self._pad_labels(batch["input_ids"], label_positions) + return batch diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/trainer.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..c1d2f0543b90bf3dd61bc23ea8b22c0ba8b9fc56 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/trainer.py @@ -0,0 +1,69 @@ +import torch +from collections import defaultdict +from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union +from transformers import BatchEncoding, Trainer +from trl import DPOTrainer +from trl.trainer.utils import disable_dropout_in_model + +from llmtuner.extras.constants import IGNORE_INDEX + +if TYPE_CHECKING: + from transformers import PreTrainedModel + + +class CustomDPOTrainer(DPOTrainer): + + def __init__( + self, + beta: float, + model: Union["PreTrainedModel", torch.nn.Module], + ref_model: Optional[Union["PreTrainedModel", torch.nn.Module]] = None, + disable_dropout: Optional[bool] = True, + **kwargs + ): + if disable_dropout: + disable_dropout_in_model(model) + if ref_model is not None: + disable_dropout_in_model(ref_model) + + self.is_encoder_decoder = model.config.is_encoder_decoder + self.ref_model = ref_model + self.use_dpo_data_collator = True # hack to avoid warning + self.label_pad_token_id = IGNORE_INDEX + self.padding_value = 0 + self.beta = beta + self._stored_metrics = defaultdict(lambda: defaultdict(list)) + + Trainer.__init__(self, model=model, **kwargs) + if not hasattr(self, "accelerator"): + raise AttributeError("Please update 
`transformers`.") + + if ref_model is not None: + if self.is_deepspeed_enabled: + self.ref_model, = self.accelerator._prepare_deepspeed(self.ref_model) + self.ref_model.eval() + else: + self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True) + + def concatenated_forward( + self, + model: Optional[torch.nn.Module] = None, + batch: Optional[Dict[str, torch.Tensor]] = None + ) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]: + batch_copied = BatchEncoding({k: v.detach().clone() for k, v in batch.items()}) # avoid error + + all_logits = model( + input_ids=batch_copied["input_ids"], + attention_mask=batch_copied["attention_mask"], + return_dict=True + ).logits.to(torch.float32) + + all_logps = self._get_batch_logps( + all_logits, + batch["labels"], + average_log_prob=False + ) + batch_size = batch["input_ids"].size(0) // 2 + chosen_logps, rejected_logps = all_logps.split(batch_size, dim=0) + chosen_logits, rejected_logits = all_logits.split(batch_size, dim=0) + return chosen_logps, rejected_logps, chosen_logits, rejected_logits diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/workflow.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..4abd3894f952b137d6636d250354b2763b570aec --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/dpo/workflow.py @@ -0,0 +1,59 @@ +# Inspired by: https://github.com/huggingface/trl/blob/main/examples/research_projects/stack_llama_2/scripts/dpo_llama2.py + +from copy import deepcopy +from peft import PeftModel +from typing import TYPE_CHECKING, Optional, List +from transformers import Seq2SeqTrainingArguments + +from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset +from llmtuner.extras.constants import IGNORE_INDEX +from llmtuner.extras.ploting import plot_loss +from llmtuner.tuner.core import load_model_and_tokenizer +from llmtuner.tuner.dpo.collator import DPODataCollatorWithPadding +from llmtuner.tuner.dpo.trainer import CustomDPOTrainer + +if TYPE_CHECKING: + from transformers import TrainerCallback + from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments + + +def run_dpo( + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + callbacks: Optional[List["TrainerCallback"]] = None +): + dataset = get_dataset(model_args, data_args) + model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, training_args.do_train, stage="sft") + dataset = preprocess_dataset(dataset, tokenizer, data_args, training_args, stage="rm") + data_collator = DPODataCollatorWithPadding( + tokenizer=tokenizer, + label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + ) + + training_args_dict = training_args.to_dict() + training_args_dict.update(dict(remove_unused_columns=False)) # important for pairwise dataset + training_args = Seq2SeqTrainingArguments(**training_args_dict) + + # Initialize our Trainer + trainer = CustomDPOTrainer( + beta=finetuning_args.dpo_beta, + model=model, + ref_model=deepcopy(model) if not isinstance(model, PeftModel) else None, + args=training_args, + tokenizer=tokenizer, + data_collator=data_collator, + callbacks=callbacks, + **split_dataset(dataset, data_args, training_args) + ) + + # Training + if training_args.do_train: + train_result = 
trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + trainer.log_metrics("train", train_result.metrics) + trainer.save_metrics("train", train_result.metrics) + trainer.save_state() + trainer.save_model() + if trainer.is_world_process_zero() and model_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..11519bab917c52ebcbb954d0de89e2c3aad5ff70 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/__init__.py @@ -0,0 +1 @@ +from llmtuner.tuner.ppo.workflow import run_ppo diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/trainer.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5ecb98c655e2b0f206eb596b1d81ccd2b40dfa --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/trainer.py @@ -0,0 +1,299 @@ +import os +import math +import torch +from tqdm import tqdm +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple + +from transformers import GenerationConfig, Trainer, TrainerState, TrainerControl +from transformers.trainer_utils import PREFIX_CHECKPOINT_DIR + +from trl import PPOTrainer +from trl.core import PPODecorators, logprobs_from_logits + +from llmtuner.extras.logging import get_logger +from llmtuner.extras.misc import AverageMeter, count_parameters, get_logits_processor +from llmtuner.tuner.ppo.utils import cast_layernorm_dtype, replace_model + +if TYPE_CHECKING: + from transformers import Seq2SeqTrainingArguments, TrainerCallback + from trl import AutoModelForCausalLMWithValueHead + from llmtuner.hparams import GeneratingArguments + + +logger = get_logger(__name__) + + +class CustomPPOTrainer(PPOTrainer, Trainer): + r""" + Inherits PPOTrainer. + """ + + def __init__( + self, + training_args: "Seq2SeqTrainingArguments", + generating_args: "GeneratingArguments", + callbacks: List["TrainerCallback"], + compute_dtype: torch.dtype, + **kwargs + ): + PPOTrainer.__init__(self, **kwargs) + if getattr(self.accelerator.state, "deepspeed_plugin", None) is not None: + raise ValueError("PPOTrainer is incompatible with DeepSpeed.") + + self.args = training_args + self.generating_args = generating_args + self.log_callback, self.save_callback = callbacks[0], callbacks[1] + self.compute_dtype = compute_dtype + self.state = TrainerState() + self.control = TrainerControl() + + def ppo_train(self) -> None: + r""" + Implements training loop for the PPO stage, like _inner_training_loop() in Huggingface's Trainer. 
+ """ + total_train_batch_size = ( + self.args.per_device_train_batch_size * self.args.gradient_accumulation_steps * self.args.world_size + ) + len_dataloader = len(self.dataloader) + num_examples = len(self.dataset) + num_train_epochs = self.args.num_train_epochs + max_steps = math.ceil(num_train_epochs * len_dataloader) + + self.state.max_steps = max_steps + self.state.num_train_epochs = num_train_epochs + self.state.is_local_process_zero = self.is_local_process_zero() + self.state.is_world_process_zero = self.is_world_process_zero() + + if self.is_world_process_zero(): + logger.info("***** Running training *****") + logger.info(f" Num examples = {num_examples}") + logger.info(f" Num Epochs = {num_train_epochs}") + logger.info(f" Instantaneous batch size per device = {self.args.per_device_train_batch_size}") + logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}") + logger.info(f" Gradient Accumulation steps = {self.args.gradient_accumulation_steps}") + logger.info(f" Total optimization steps = {max_steps}") + logger.info(f" Number of trainable parameters = {count_parameters(self.model)[0]}") + + # Keyword arguments for `model.generate` + generating_args = self.generating_args.to_dict() + generating_args.update(dict( + eos_token_id=[self.tokenizer.eos_token_id] + self.tokenizer.additional_special_tokens_ids, + pad_token_id=self.tokenizer.pad_token_id + )) + + unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model) + dataiter = iter(self.dataloader) + steps_trained = 0 + loss_meter = AverageMeter() + reward_meter = AverageMeter() + self.log_callback.on_train_begin(self.args, self.state, self.control) + + for step in tqdm(range(max_steps), disable=not self.is_local_process_zero()): + batch = next(dataiter) + steps_trained += 1 + + # Cast to inference mode + unwrapped_model.gradient_checkpointing_disable() + unwrapped_model.config.use_cache = True + self.model.eval() + + # Get inputs + queries, responses = self.get_inputs(batch, generating_args) + self.tokenizer.padding_side = "right" # change padding side + rewards = self.get_rewards(queries, responses, unwrapped_model) + + # Cast to training mode + unwrapped_model.gradient_checkpointing_enable() + unwrapped_model.config.use_cache = False + self.model.train() + + # Run PPO step + stats = self.step(queries, responses, rewards) + self.tokenizer.padding_side = "left" # restore padding side + loss_meter.update(stats["ppo/loss/total"], n=len(rewards)) + reward_meter.update(torch.stack(rewards).mean().item(), n=len(rewards)) + + self.state.global_step += 1 + self.log_callback.on_step_end(self.args, self.state, self.control) + + if self.is_local_process_zero() and (step+1) % self.args.logging_steps == 0: + logs = dict( + loss=round(loss_meter.avg, 4), + reward=round(reward_meter.avg, 4), + learning_rate=stats["ppo/learning_rate"], + epoch=round(step / len_dataloader, 2) + ) + tqdm.write(str(logs)) + logs["step"] = step + self.state.log_history.append(logs) + self.log_callback.on_log(self.args, self.state, self.control) + loss_meter.reset() + reward_meter.reset() + + if (step+1) % self.args.save_steps == 0: # save checkpoint + self.save_model(os.path.join( + self.args.output_dir, "{}-{}".format(PREFIX_CHECKPOINT_DIR, self.state.global_step) + )) + self.save_callback.on_save( + self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model) + ) + + if self.control.should_epoch_stop or self.control.should_training_stop: + break + + if 
steps_trained == len_dataloader: + dataiter = iter(self.dataloader) + steps_trained = 0 + + self.log_callback.on_train_end(self.args, self.state, self.control) + self.save_callback.on_train_end( + self.args, self.state, self.control, model=self.accelerator.unwrap_model(self.model) + ) + + @torch.no_grad() + def get_inputs( + self, + batch: Dict[str, torch.Tensor], + generating_args: Dict[str, Any] + ) -> Tuple[List[torch.Tensor], List[torch.Tensor]]: + r""" + Generates model's responses given queries. + """ + gen_kwargs = dict( + generation_config=GenerationConfig(**generating_args), + logits_processor=get_logits_processor(), + **batch + ) + + input_ids = batch["input_ids"] + self.model, layer_norm_params = cast_layernorm_dtype(self.model, self.compute_dtype) + unwrapped_model: "AutoModelForCausalLMWithValueHead" = self.accelerator.unwrap_model(self.model) + response: torch.Tensor = unwrapped_model.generate(**gen_kwargs) + self.model, _ = cast_layernorm_dtype(self.model, self.compute_dtype, layer_norm_params) + query, response = input_ids.detach().cpu(), response[:, input_ids.size(-1):].detach().cpu() + + queries, responses = [], [] + for i in range(len(query)): + query_length = (query[i] != self.tokenizer.pad_token_id).nonzero()[0] + response_index = (response[i] != self.tokenizer.pad_token_id).nonzero() + + if len(response_index) == 0: + response_length = 1 # allow empty response + elif self.tokenizer.pad_token_id == self.tokenizer.eos_token_id: + response_length = response_index[-1] + 2 # save the EOS token + else: + response_length = response_index[-1] + 1 + + queries.append(query[i, query_length:]) # remove padding from left + responses.append(response[i, :response_length]) # remove padding from right + + return queries, responses + + @torch.no_grad() + def get_rewards( + self, + queries: List[torch.Tensor], + responses: List[torch.Tensor], + unwrapped_model: "AutoModelForCausalLMWithValueHead" + ) -> List[torch.Tensor]: + r""" + Computes scores using given reward model. + """ + replace_model(unwrapped_model, target="reward") + batch = self.prepare_model_inputs(queries, responses) + + with torch.cuda.amp.autocast(dtype=self.compute_dtype): # support bf16 + _, _, values = self.model(**batch, output_hidden_states=True, return_dict=True) + + if values.size(0) != batch["input_ids"].size(0): # adapt to chatglm2 + values = torch.transpose(values, 0, 1) + + rewards = [] + for i in range(values.size(0)): + end_index = batch["attention_mask"][i].nonzero()[-1] # use the score on the EOS token + rewards.append(values[i, end_index].float().detach().cpu()) # use fp32 type + + replace_model(unwrapped_model, target="default") + return rewards + + @PPODecorators.empty_cuda_cache() + def batched_forward_pass( + self, + model: "AutoModelForCausalLMWithValueHead", + queries: torch.Tensor, + responses: torch.Tensor, + model_inputs: dict, + return_logits: Optional[bool] = False, + response_masks: Optional[torch.Tensor] = None + ): + r""" + Calculates model outputs in multiple batches. + + Subclass and override to inject custom behavior. 
+ """ + bs = len(queries) + fbs = self.config.mini_batch_size + all_logprobs = [] + all_logits = [] + all_masks = [] + all_values = [] + + for i in range(math.ceil(bs / fbs)): + input_kwargs = {key: value[i * fbs : (i + 1) * fbs] for key, value in model_inputs.items()} + query_batch = queries[i * fbs : (i + 1) * fbs] + response_batch = responses[i * fbs : (i + 1) * fbs] + if response_masks is not None: + response_masks_batch = response_masks[i * fbs : (i + 1) * fbs] + input_ids = input_kwargs["input_ids"] + attention_mask = input_kwargs["attention_mask"] + + with torch.cuda.amp.autocast(dtype=self.compute_dtype): # support bf16 + logits, _, values = model(**input_kwargs) + + if values.size(0) != input_ids.size(0): # adapt to chatglm2 + values = torch.transpose(values, 0, 1) + + logprobs = logprobs_from_logits(logits[:, :-1, :], input_ids[:, 1:]) + masks = torch.zeros_like(attention_mask) + masks[:, :-1] = attention_mask[:, 1:] + + for j in range(len(query_batch)): + start = len(query_batch[j]) - 1 + if attention_mask[j, 0] == 0: # offset left padding + start += attention_mask[j, :].nonzero()[0] + end = start + len(response_batch[j]) + + if response_masks is not None: + response_masks_batch = torch.cat( + (torch.zeros_like(query_batch[j]), response_masks_batch[j]) + )[1:] + + masks[j, :start] = 0 + masks[j, end:] = 0 + if response_masks is not None: + masks[j, start:end] = masks[j, start:end] * response_masks_batch[j][start:end] + + if return_logits: + all_logits.append(logits) + else: + del logits + + all_values.append(values) + all_logprobs.append(logprobs) + all_masks.append(masks) + + return ( + torch.cat(all_logprobs), + torch.cat(all_logits)[:, :-1] if return_logits else None, + torch.cat(all_values)[:, :-1], + torch.cat(all_masks)[:, :-1], + ) + + def save_model(self, output_dir: Optional[str] = None) -> None: + r""" + Saves model checkpoint. + + Subclass and override to inject custom behavior. 
+ """ + if self.args.should_save: + self._save(output_dir) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/utils.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..2257eeadc3b7998a4920449f1d7df49fe0c1e76a --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/utils.py @@ -0,0 +1,40 @@ +import torch +from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Tuple + +from llmtuner.extras.constants import LAYERNORM_NAMES + +if TYPE_CHECKING: + from trl import AutoModelForCausalLMWithValueHead + + +def replace_model(model: "AutoModelForCausalLMWithValueHead", target: Literal["default", "reward"]) -> None: + if target == "reward": # save default head temporarily + valuehead_state_dict = model.v_head.state_dict() + setattr(model, "default_head_weight", valuehead_state_dict["summary.weight"].detach().clone()) + setattr(model, "default_head_bias", valuehead_state_dict["summary.bias"].detach().clone()) + + model.pretrained_model.set_adapter(target) # set the LoRA adapter to be active + model.v_head.load_state_dict({ + "summary.weight": getattr(model, "{}_head_weight".format(target)), + "summary.bias": getattr(model, "{}_head_bias".format(target)) + }) + + +def cast_layernorm_dtype( + model: "AutoModelForCausalLMWithValueHead", + compute_dtype: torch.dtype, + layer_norm_params: Optional[Dict[str, torch.Tensor]] = None, + layer_norm_names: Optional[List[str]] = LAYERNORM_NAMES +) -> Tuple["AutoModelForCausalLMWithValueHead", Dict[str, torch.Tensor]]: + + layer_norm_state_dict = {} + + for name, param in model.named_parameters(): + if param.ndim == 1 and any(layer_norm_name in name for layer_norm_name in layer_norm_names): + if layer_norm_params is None: + layer_norm_state_dict[name] = param.data.detach().clone() # store float32 weights for stability + param.data = param.data.to(compute_dtype) + else: + param.data = layer_norm_params[name] # restore float32 weights + + return model, layer_norm_state_dict diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/workflow.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..be8fca5da7bc16b82fca824f1440fae92797c4e4 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/ppo/workflow.py @@ -0,0 +1,86 @@ +# Inspired by: https://github.com/lvwerra/trl/blob/main/examples/research_projects/stack_llama/scripts/rl_training.py + +import math +from trl import PPOConfig +from torch.optim import AdamW +from typing import TYPE_CHECKING, Optional, List +from transformers import DataCollatorWithPadding +from transformers.optimization import get_scheduler + +from llmtuner.dsets import get_dataset, preprocess_dataset +from llmtuner.extras.callbacks import SavePeftModelCallback +from llmtuner.extras.ploting import plot_loss +from llmtuner.tuner.core import load_model_and_tokenizer +from llmtuner.tuner.ppo.trainer import CustomPPOTrainer + +if TYPE_CHECKING: + from transformers import Seq2SeqTrainingArguments, TrainerCallback + from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments + + +def run_ppo( + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + callbacks: Optional[List["TrainerCallback"]] = None +): + dataset = get_dataset(model_args, data_args) + model, tokenizer = 
load_model_and_tokenizer(model_args, finetuning_args, training_args.do_train, stage="ppo") + dataset = preprocess_dataset(dataset, tokenizer, data_args, training_args, stage="ppo") + + tokenizer.padding_side = "left" # use left-padding in generation while using right-padding in training + data_collator = DataCollatorWithPadding(tokenizer=tokenizer) + + ppo_config = PPOConfig( + model_name=model_args.model_name_or_path, + learning_rate=training_args.learning_rate, + mini_batch_size=training_args.per_device_train_batch_size, + batch_size=training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps, + gradient_accumulation_steps=training_args.gradient_accumulation_steps, + ppo_epochs=1, + max_grad_norm=training_args.max_grad_norm, + seed=training_args.seed, + optimize_cuda_cache=True + ) + + if finetuning_args.ppo_score_norm: + ppo_config.use_score_scaling = True + ppo_config.use_score_norm = True + + optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=training_args.learning_rate) + total_train_batch_size = ( + training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size + ) + num_training_steps = training_args.num_train_epochs * math.ceil(len(dataset) / total_train_batch_size) + lr_scheduler = get_scheduler( + training_args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=training_args.get_warmup_steps(num_training_steps), + num_training_steps=num_training_steps + ) + + # Initialize our Trainer + ppo_trainer = CustomPPOTrainer( + training_args=training_args, + generating_args=generating_args, + callbacks=callbacks + [SavePeftModelCallback()], + compute_dtype=model_args.compute_dtype, + config=ppo_config, + model=model, + ref_model=None, + tokenizer=tokenizer, + dataset=dataset, + data_collator=data_collator, + optimizer=optimizer, + lr_scheduler=lr_scheduler + ) + + # Training + if training_args.do_train: + ppo_trainer.ppo_train() + ppo_trainer.save_model() + ppo_trainer.save_state() # must be called after save_model to have a folder + if ppo_trainer.is_world_process_zero() and model_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "reward"]) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/pt/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/pt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce509db66e33bf12797378e1668394f974f7bb6 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/pt/__init__.py @@ -0,0 +1 @@ +from llmtuner.tuner.pt.workflow import run_pt diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/pt/workflow.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/pt/workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..66d08de75b6594799065efb8d2e88834c5fa633a --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/pt/workflow.py @@ -0,0 +1,58 @@ +# Inspired by: https://github.com/huggingface/transformers/blob/v4.29.2/examples/pytorch/language-modeling/run_clm.py + +import math +from typing import TYPE_CHECKING, Optional, List +from transformers import DataCollatorForLanguageModeling, Trainer + +from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset +from llmtuner.extras.ploting import plot_loss +from llmtuner.tuner.core import load_model_and_tokenizer + +if TYPE_CHECKING: + from transformers import Seq2SeqTrainingArguments, TrainerCallback + from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments + + +def run_pt( + model_args: 
"ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + callbacks: Optional[List["TrainerCallback"]] = None +): + dataset = get_dataset(model_args, data_args) + model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, training_args.do_train, stage="pt") + dataset = preprocess_dataset(dataset, tokenizer, data_args, training_args, stage="pt") + data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False) + + # Initialize our Trainer + trainer = Trainer( + model=model, + args=training_args, + tokenizer=tokenizer, + data_collator=data_collator, + callbacks=callbacks, + **split_dataset(dataset, data_args, training_args) + ) + + # Training + if training_args.do_train: + train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + trainer.log_metrics("train", train_result.metrics) + trainer.save_metrics("train", train_result.metrics) + trainer.save_state() + trainer.save_model() + if trainer.is_world_process_zero() and model_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) + + # Evaluation + if training_args.do_eval: + metrics = trainer.evaluate(metric_key_prefix="eval") + try: + perplexity = math.exp(metrics["eval_loss"]) + except OverflowError: + perplexity = float("inf") + + metrics["perplexity"] = perplexity + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..54d3d94306bd3ef25cd319948495a8aaf0efe5e4 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/__init__.py @@ -0,0 +1 @@ +from llmtuner.tuner.rm.workflow import run_rm diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/collator.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/collator.py new file mode 100644 index 0000000000000000000000000000000000000000..161f003d0ac106a203bd04418676883d943a4da7 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/collator.py @@ -0,0 +1,27 @@ +import torch +from dataclasses import dataclass +from typing import Any, Dict, Sequence +from transformers import DataCollatorWithPadding + + +@dataclass +class PairwiseDataCollatorWithPadding(DataCollatorWithPadding): + r""" + Data collator for pairwise data. + """ + + def __call__(self, features: Sequence[Dict[str, Any]]) -> Dict[str, torch.Tensor]: + r""" + Pads batched data to the longest sequence in the batch. + + We generate 2 * n examples where the first n examples represent chosen examples and + the last n examples represent rejected examples. 
+ """ + features = [ + { + "input_ids": feature["prompt_ids"] + feature[key], + "attention_mask": [1] * (len(feature["prompt_ids"]) + len(feature[key])) + } + for key in ("chosen_ids", "rejected_ids") for feature in features + ] + return super().__call__(features) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/metric.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..db9c924304a11aaa1babce8c7820d49b3828a046 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/metric.py @@ -0,0 +1,7 @@ +import numpy as np +from typing import Dict, Sequence, Tuple, Union + + +def compute_accuracy(eval_preds: Sequence[Union[np.ndarray, Tuple[np.ndarray]]]) -> Dict[str, float]: + preds, _ = eval_preds + return {"accuracy": (preds[0] > preds[1]).sum() / len(preds[0])} diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/trainer.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..8050293776a2b52d9b151b9b855722dcda0c61f2 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/trainer.py @@ -0,0 +1,105 @@ +import os +import json +import torch +from typing import TYPE_CHECKING, Dict, List, Optional, Tuple, Union +from transformers import Trainer + +from llmtuner.extras.logging import get_logger + +if TYPE_CHECKING: + from transformers.trainer import PredictionOutput + from transformers.modeling_utils import PreTrainedModel + + +logger = get_logger(__name__) + + +class PairwiseTrainer(Trainer): + r""" + Inherits PeftTrainer to compute pairwise loss. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.can_return_loss = True # override property to return eval_loss + + def compute_loss( + self, + model: "PreTrainedModel", + inputs: Dict[str, torch.Tensor], + return_outputs: Optional[bool] = False + ) -> Union[torch.Tensor, Tuple[torch.Tensor, List[torch.Tensor]]]: + r""" + Computes pairwise loss. The first n examples are chosen and the last n examples are rejected. + + Subclass and override to inject custom behavior. + + Note that the first element will be removed from the output tuple. + See: https://github.com/huggingface/transformers/blob/v4.30.2/src/transformers/trainer.py#L3509 + """ + # Compute rewards + _, _, values = model(**inputs, output_hidden_states=True, return_dict=True) + if values.size(0) != inputs["input_ids"].size(0): # adapt to chatglm2 + values = torch.transpose(values, 0, 1) + + # Split the inputs and rewards into two parts, chosen and rejected + batch_size = inputs["input_ids"].size(0) // 2 + chosen_input_ids, rejected_input_ids = inputs["input_ids"][:batch_size], inputs["input_ids"][batch_size:] + chosen_attn_mask, rejected_attn_mask = ( + inputs["attention_mask"][:batch_size], inputs["attention_mask"][batch_size:] + ) + chosen_rewards, rejected_rewards = values[:batch_size], values[batch_size:] + chosen_scores, rejected_scores = [], [] + + # Compute pairwise loss. 
Only backprop through the tokens after the two sequences diverge and before the padding + # Inspired by: https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/reward_model.py + loss = 0 + for i in range(batch_size): + chosen_length = chosen_attn_mask[i].nonzero()[-1] + 1 + rejected_length = rejected_attn_mask[i].nonzero()[-1] + 1 + check_divergence = (chosen_input_ids[i] != rejected_input_ids[i]).nonzero() + + if len(check_divergence) == 0: + end_index = chosen_length + div_index = end_index - 1 + else: + end_index = max(chosen_length, rejected_length) + div_index = check_divergence[0] + + assert div_index > 0 + chosen_trunc_rewards = chosen_rewards[i, div_index:end_index] + rejected_trunc_rewards = rejected_rewards[i, div_index:end_index] + if return_outputs: # use the score on the EOS token for inference + chosen_scores.append(chosen_rewards[i, chosen_length-1]) + rejected_scores.append(rejected_rewards[i, rejected_length-1]) + loss += -torch.nn.functional.logsigmoid(chosen_trunc_rewards - rejected_trunc_rewards).mean() + + loss = loss / batch_size + if return_outputs: + chosen_scores, rejected_scores = torch.stack(chosen_scores), torch.stack(rejected_scores) + return loss, [loss, chosen_scores, rejected_scores] + + return loss + + def save_predictions( + self, + predict_results: "PredictionOutput" + ) -> None: + r""" + Saves model predictions to `output_dir`. + + A custom behavior that is not contained in Seq2SeqTrainer. + """ + if not self.is_world_process_zero(): + return + + output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl") + logger.info(f"Saving prediction results to {output_prediction_file}") + + chosen_scores, rejected_scores = predict_results.predictions + + with open(output_prediction_file, "w", encoding="utf-8") as writer: + res: List[str] = [] + for c_score, r_score in zip(chosen_scores, rejected_scores): + res.append(json.dumps({"chosen": round(float(c_score), 2), "rejected": round(float(r_score), 2)})) + writer.write("\n".join(res)) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/workflow.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..edc8e7c537410487bd9e11925b0d9301f731a291 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/rm/workflow.py @@ -0,0 +1,68 @@ +# Inspired by: +# https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/train_reward_model_gptj.py + +from typing import TYPE_CHECKING, Optional, List +from transformers import Seq2SeqTrainingArguments + +from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset +from llmtuner.extras.callbacks import SavePeftModelCallback +from llmtuner.extras.ploting import plot_loss +from llmtuner.tuner.core import load_model_and_tokenizer +from llmtuner.tuner.rm.metric import compute_accuracy +from llmtuner.tuner.rm.collator import PairwiseDataCollatorWithPadding +from llmtuner.tuner.rm.trainer import PairwiseTrainer + +if TYPE_CHECKING: + from transformers import TrainerCallback + from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments + + +def run_rm( + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + callbacks: Optional[List["TrainerCallback"]] = None +): + dataset = get_dataset(model_args, data_args) + model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, training_args.do_train, stage="rm") + dataset = 
preprocess_dataset(dataset, tokenizer, data_args, training_args, stage="rm") + data_collator = PairwiseDataCollatorWithPadding(tokenizer) + + training_args_dict = training_args.to_dict() + training_args_dict.update(dict(remove_unused_columns=False)) # important for pairwise dataset + training_args = Seq2SeqTrainingArguments(**training_args_dict) + + # Initialize our Trainer + trainer = PairwiseTrainer( + model=model, + args=training_args, + tokenizer=tokenizer, + data_collator=data_collator, + callbacks=callbacks + [SavePeftModelCallback()], + compute_metrics=compute_accuracy, + **split_dataset(dataset, data_args, training_args) + ) + + # Training + if training_args.do_train: + train_result = trainer.train() + trainer.log_metrics("train", train_result.metrics) + trainer.save_metrics("train", train_result.metrics) + trainer.save_state() + trainer.save_model() + if trainer.is_world_process_zero() and model_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) + + # Evaluation + if training_args.do_eval: + metrics = trainer.evaluate(metric_key_prefix="eval") + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + # Predict + if training_args.do_predict: + predict_results = trainer.predict(dataset, metric_key_prefix="predict") + trainer.log_metrics("predict", predict_results.metrics) + trainer.save_metrics("predict", predict_results.metrics) + trainer.save_predictions(predict_results) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..493dd1a7f0efcb18414495f6e73661112723fcc2 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/__init__.py @@ -0,0 +1 @@ +from llmtuner.tuner.sft.workflow import run_sft diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/metric.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/metric.py new file mode 100644 index 0000000000000000000000000000000000000000..812896eee8c20f1c12e19c179176811d8e23ebdb --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/metric.py @@ -0,0 +1,53 @@ +import numpy as np +from dataclasses import dataclass +from typing import TYPE_CHECKING, Dict, Sequence, Tuple, Union + +import jieba +from rouge_chinese import Rouge +from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction + +from llmtuner.extras.constants import IGNORE_INDEX + +if TYPE_CHECKING: + from transformers.tokenization_utils import PreTrainedTokenizer + + +@dataclass +class ComputeMetrics: + r""" + Wraps the tokenizer into metric functions, used in CustomSeq2SeqTrainer. + """ + + tokenizer: "PreTrainedTokenizer" + + def __call__(self, eval_preds: Sequence[Union[np.ndarray, Tuple[np.ndarray]]]) -> Dict[str, float]: + r""" + Uses the model predictions to compute metrics. 
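+ + Predictions and labels are segmented with jieba before ROUGE scoring, while BLEU-4 is computed at the character level with smoothing.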
+ """ + preds, labels = eval_preds + score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []} + + preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id) + labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id) + + decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True) + decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True) + + for pred, label in zip(decoded_preds, decoded_labels): + hypothesis = list(jieba.cut(pred)) + reference = list(jieba.cut(label)) + + if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0: + result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}} + else: + rouge = Rouge() + scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference)) + result = scores[0] + + for k, v in result.items(): + score_dict[k].append(round(v["f"] * 100, 4)) + + bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) + score_dict["bleu-4"].append(round(bleu_score * 100, 4)) + + return {k: float(np.mean(v)) for k, v in score_dict.items()} diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/trainer.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..4fafc76b576970d688469d559898ddf89143f6e1 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/trainer.py @@ -0,0 +1,101 @@ +import os +import json +import torch +import numpy as np +import torch.nn as nn +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from transformers import Seq2SeqTrainer + +from llmtuner.extras.constants import IGNORE_INDEX +from llmtuner.extras.logging import get_logger + +if TYPE_CHECKING: + from transformers.trainer import PredictionOutput + + +logger = get_logger(__name__) + + +class CustomSeq2SeqTrainer(Seq2SeqTrainer): + r""" + Inherits PeftTrainer to compute generative metrics such as BLEU and ROUGE. + """ + + def prediction_step( + self, + model: nn.Module, + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: + r""" + Removes the prompt part in the generated tokens. + + Subclass and override to inject custom behavior. + """ + if self.args.predict_with_generate: + assert self.tokenizer.padding_side == "left", "This method only accepts left-padded tensor." + assert self.tokenizer.pad_token_id is not None, "Pad token is required." 
+ prompt_len, label_len = inputs["input_ids"].size(-1), inputs["labels"].size(-1) + if prompt_len > label_len: + inputs["labels"] = self._pad_tensors_to_target_len(inputs["labels"], inputs["input_ids"]) + if label_len > prompt_len: + inputs["input_ids"] = self._pad_tensors_to_target_len(inputs["input_ids"], inputs["labels"]) + if "attention_mask" in inputs: + inputs["attention_mask"] = self._pad_tensors_to_target_len( + inputs["attention_mask"], inputs["labels"], pad_token_id=0 + ) + if "position_ids" in inputs: + inputs["position_ids"] = self._pad_tensors_to_target_len( + inputs["position_ids"], inputs["labels"], pad_token_id=0 + ) + + loss, generated_tokens, labels = super().prediction_step( + model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys + ) + if generated_tokens is not None and self.args.predict_with_generate: + generated_tokens[:, :max(prompt_len, label_len)] = self.tokenizer.pad_token_id + generated_tokens = generated_tokens.contiguous() + + return loss, generated_tokens, labels + + def _pad_tensors_to_target_len( + self, + src_tensor: torch.Tensor, + tgt_tensor: torch.Tensor, + pad_token_id: Optional[int] = None + ) -> torch.Tensor: + r""" + Pads the tensor to the same length as the target tensor. + """ + pad_token_id = pad_token_id if pad_token_id is not None else self.tokenizer.pad_token_id + padded_tensor = pad_token_id * torch.ones_like(tgt_tensor) + padded_tensor[:, -src_tensor.shape[-1]:] = src_tensor # adopt left-padding + return padded_tensor.contiguous() # in contiguous memory + + def save_predictions( + self, + predict_results: "PredictionOutput" + ) -> None: + r""" + Saves model predictions to `output_dir`. + + A custom behavior that is not contained in Seq2SeqTrainer. + """ + if not self.is_world_process_zero(): + return + + output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl") + logger.info(f"Saving prediction results to {output_prediction_file}") + + preds = np.where(predict_results.predictions != IGNORE_INDEX, predict_results.predictions, self.tokenizer.pad_token_id) + labels = np.where(predict_results.label_ids != IGNORE_INDEX, predict_results.label_ids, self.tokenizer.pad_token_id) + + decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True) + decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True, clean_up_tokenization_spaces=True) + + with open(output_prediction_file, "w", encoding="utf-8") as writer: + res: List[str] = [] + for pred, label in zip(decoded_preds, decoded_labels): + res.append(json.dumps({"label": label, "predict": pred}, ensure_ascii=False)) + writer.write("\n".join(res)) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/workflow.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/workflow.py new file mode 100644 index 0000000000000000000000000000000000000000..d45571d2f4e4531e00116d374374f58567f1d211 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/sft/workflow.py @@ -0,0 +1,89 @@ +# Inspired by: https://github.com/huggingface/transformers/blob/v4.29.2/examples/pytorch/summarization/run_summarization.py + +from typing import TYPE_CHECKING, Optional, List +from transformers import DataCollatorForSeq2Seq, Seq2SeqTrainingArguments + +from llmtuner.dsets import get_dataset, preprocess_dataset, split_dataset +from llmtuner.extras.constants import IGNORE_INDEX +from llmtuner.extras.misc import get_logits_processor +from llmtuner.extras.ploting import plot_loss +from llmtuner.tuner.core 
import load_model_and_tokenizer +from llmtuner.tuner.sft.metric import ComputeMetrics +from llmtuner.tuner.sft.trainer import CustomSeq2SeqTrainer + +if TYPE_CHECKING: + from transformers import TrainerCallback + from llmtuner.hparams import ModelArguments, DataArguments, FinetuningArguments, GeneratingArguments + + +def run_sft( + model_args: "ModelArguments", + data_args: "DataArguments", + training_args: "Seq2SeqTrainingArguments", + finetuning_args: "FinetuningArguments", + generating_args: "GeneratingArguments", + callbacks: Optional[List["TrainerCallback"]] = None +): + dataset = get_dataset(model_args, data_args) + model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args, training_args.do_train, stage="sft") + dataset = preprocess_dataset(dataset, tokenizer, data_args, training_args, stage="sft") + + if training_args.predict_with_generate: + tokenizer.padding_side = "left" # use left-padding in generation + + data_collator = DataCollatorForSeq2Seq( + tokenizer=tokenizer, + label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + ) + + # Override the decoding parameters of Seq2SeqTrainer + training_args_dict = training_args.to_dict() + training_args_dict.update(dict( + generation_max_length=training_args.generation_max_length or data_args.cutoff_len, + generation_num_beams=data_args.eval_num_beams or training_args.generation_num_beams + )) + training_args = Seq2SeqTrainingArguments(**training_args_dict) + + # Initialize our Trainer + trainer = CustomSeq2SeqTrainer( + model=model, + args=training_args, + tokenizer=tokenizer, + data_collator=data_collator, + callbacks=callbacks, + compute_metrics=ComputeMetrics(tokenizer) if training_args.predict_with_generate else None, + **split_dataset(dataset, data_args, training_args) + ) + + # Keyword arguments for `model.generate` + gen_kwargs = generating_args.to_dict() + gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids + gen_kwargs["pad_token_id"] = tokenizer.pad_token_id + gen_kwargs["logits_processor"] = get_logits_processor() + + # Training + if training_args.do_train: + train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint) + trainer.log_metrics("train", train_result.metrics) + trainer.save_metrics("train", train_result.metrics) + trainer.save_state() + trainer.save_model() + if trainer.is_world_process_zero() and model_args.plot_loss: + plot_loss(training_args.output_dir, keys=["loss", "eval_loss"]) + + # Evaluation + if training_args.do_eval: + metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs) + if training_args.predict_with_generate: # eval_loss will be wrong if predict_with_generate is enabled + metrics.pop("eval_loss", None) + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + # Predict + if training_args.do_predict: + predict_results = trainer.predict(dataset, metric_key_prefix="predict", **gen_kwargs) + if training_args.predict_with_generate: # predict_loss will be wrong if predict_with_generate is enabled + predict_results.metrics.pop("predict_loss", None) + trainer.log_metrics("predict", predict_results.metrics) + trainer.save_metrics("predict", predict_results.metrics) + trainer.save_predictions(predict_results) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/tuner/tune.py b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/tune.py new file mode 100644 index 0000000000000000000000000000000000000000..356122cf2bd4bc923d63996040007000bc76ac44 --- 
/dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/tuner/tune.py @@ -0,0 +1,49 @@ +from typing import TYPE_CHECKING, Any, Dict, List, Optional + +from llmtuner.extras.callbacks import LogCallback +from llmtuner.extras.logging import get_logger +from llmtuner.tuner.core import get_train_args, load_model_and_tokenizer +from llmtuner.tuner.pt import run_pt +from llmtuner.tuner.sft import run_sft +from llmtuner.tuner.rm import run_rm +from llmtuner.tuner.ppo import run_ppo +from llmtuner.tuner.dpo import run_dpo + +if TYPE_CHECKING: + from transformers import TrainerCallback + + +logger = get_logger(__name__) + + +def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: Optional[List["TrainerCallback"]] = None): + model_args, data_args, training_args, finetuning_args, generating_args, general_args = get_train_args(args) + callbacks = [LogCallback()] if callbacks is None else callbacks + + if general_args.stage == "pt": + run_pt(model_args, data_args, training_args, finetuning_args, callbacks) + elif general_args.stage == "sft": + run_sft(model_args, data_args, training_args, finetuning_args, generating_args, callbacks) + elif general_args.stage == "rm": + run_rm(model_args, data_args, training_args, finetuning_args, callbacks) + elif general_args.stage == "ppo": + run_ppo(model_args, data_args, training_args, finetuning_args, generating_args, callbacks) + elif general_args.stage == "dpo": + run_dpo(model_args, data_args, training_args, finetuning_args, callbacks) + else: + raise ValueError("Unknown task.") + + +def export_model(args: Optional[Dict[str, Any]] = None, max_shard_size: Optional[str] = "10GB"): + model_args, _, training_args, finetuning_args, _, _ = get_train_args(args) + model, tokenizer = load_model_and_tokenizer(model_args, finetuning_args) + tokenizer.padding_side = "left" # restore padding side + model.save_pretrained(training_args.output_dir, max_shard_size=max_shard_size) + try: + tokenizer.save_pretrained(training_args.output_dir) + except: + logger.warning("Cannot save tokenizer, please copy the files manually.") + + +if __name__ == "__main__": + run_exp() diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a27c7f6ea0d98ad23b3f5b239d678748f0d38f76 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/__init__.py @@ -0,0 +1 @@ +from llmtuner.webui.interface import create_ui, create_web_demo diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/chat.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/chat.py new file mode 100644 index 0000000000000000000000000000000000000000..1499c1718d589efa21b8348fd43cf820c6e93291 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/chat.py @@ -0,0 +1,97 @@ +import os +from typing import Any, Dict, List, Optional, Tuple + +from llmtuner.chat.stream_chat import ChatModel +from llmtuner.extras.misc import torch_gc +from llmtuner.hparams import GeneratingArguments +from llmtuner.webui.common import get_model_path, get_save_dir +from llmtuner.webui.locales import ALERTS + + +class WebChatModel(ChatModel): + + def __init__(self, args: Optional[Dict[str, Any]] = None, lazy_init: Optional[bool] = True) -> None: + if lazy_init: + self.model = None + self.tokenizer = None + self.generating_args = GeneratingArguments() + else: + super().__init__(args) + + def load_model( + self, + lang: str, + model_name: str, + checkpoints: List[str], + finetuning_type: str, + quantization_bit: str, + 
template: str, + system_prompt: str + ): + if self.model is not None: + yield ALERTS["err_exists"][lang] + return + + if not model_name: + yield ALERTS["err_no_model"][lang] + return + + model_name_or_path = get_model_path(model_name) + if not model_name_or_path: + yield ALERTS["err_no_path"][lang] + return + + if checkpoints: + checkpoint_dir = ",".join( + [os.path.join(get_save_dir(model_name), finetuning_type, checkpoint) for checkpoint in checkpoints] + ) + else: + checkpoint_dir = None + + yield ALERTS["info_loading"][lang] + args = dict( + model_name_or_path=model_name_or_path, + checkpoint_dir=checkpoint_dir, + finetuning_type=finetuning_type, + quantization_bit=int(quantization_bit) if quantization_bit and quantization_bit != "None" else None, + template=template, + system_prompt=system_prompt + ) + super().__init__(args) + + yield ALERTS["info_loaded"][lang] + + def unload_model(self, lang: str): + yield ALERTS["info_unloading"][lang] + self.model = None + self.tokenizer = None + torch_gc() + yield ALERTS["info_unloaded"][lang] + + def predict( + self, + chatbot: List[Tuple[str, str]], + query: str, + history: List[Tuple[str, str]], + system: str, + max_new_tokens: int, + top_p: float, + temperature: float + ): + chatbot.append([query, ""]) + response = "" + for new_text in self.stream_chat( + query, history, system, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature + ): + response += new_text + response = self.postprocess(response) + new_history = history + [(query, response)] + chatbot[-1] = [query, response] + yield chatbot, new_history + + def postprocess(self, response: str) -> str: + blocks = response.split("```") + for i, block in enumerate(blocks): + if i % 2 == 0: + blocks[i] = block.replace("<", "&lt;").replace(">", "&gt;") # escape HTML tags outside code blocks + return "```".join(blocks) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/common.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/common.py new file mode 100644 index 0000000000000000000000000000000000000000..95d7b613cb7bbdc3801078bb2ca2612fd1e7026a --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/common.py @@ -0,0 +1,87 @@ +import json +import os +from typing import Any, Dict, Optional + +import gradio as gr +from peft.utils import WEIGHTS_NAME as PEFT_WEIGHTS_NAME +from transformers.trainer import WEIGHTS_NAME, WEIGHTS_INDEX_NAME + +from llmtuner.extras.constants import DEFAULT_TEMPLATE, SUPPORTED_MODELS, TRAINING_STAGES + + +DEFAULT_CACHE_DIR = "cache" +DEFAULT_DATA_DIR = "data" +DEFAULT_SAVE_DIR = "saves" +USER_CONFIG = "user.config" +DATA_CONFIG = "dataset_info.json" + + +def get_save_dir(*args) -> os.PathLike: + return os.path.join(DEFAULT_SAVE_DIR, *args) + + +def get_config_path() -> os.PathLike: + return os.path.join(DEFAULT_CACHE_DIR, USER_CONFIG) + + +def load_config() -> Dict[str, Any]: + try: + with open(get_config_path(), "r", encoding="utf-8") as f: + return json.load(f) + except: + return {"lang": None, "last_model": None, "path_dict": {}, "cache_dir": None} + + +def save_config(lang: str, model_name: str, model_path: str) -> None: + os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True) + user_config = load_config() + user_config["lang"] = lang or user_config["lang"] + if model_name: + user_config["last_model"] = model_name + user_config["path_dict"][model_name] = model_path + with open(get_config_path(), "w", encoding="utf-8") as f: + json.dump(user_config, f, indent=2, ensure_ascii=False) + + +def get_model_path(model_name: str) -> str: + user_config = load_config() + return 
user_config["path_dict"].get(model_name, SUPPORTED_MODELS.get(model_name, "")) + + +def get_template(model_name: str) -> str: + if model_name.endswith("Chat") and model_name.split("-")[0] in DEFAULT_TEMPLATE: + return DEFAULT_TEMPLATE[model_name.split("-")[0]] + return "default" + + +def list_checkpoint(model_name: str, finetuning_type: str) -> Dict[str, Any]: + checkpoints = [] + save_dir = get_save_dir(model_name, finetuning_type) + if save_dir and os.path.isdir(save_dir): + for checkpoint in os.listdir(save_dir): + if ( + os.path.isdir(os.path.join(save_dir, checkpoint)) + and any([ + os.path.isfile(os.path.join(save_dir, checkpoint, name)) + for name in (WEIGHTS_NAME, WEIGHTS_INDEX_NAME, PEFT_WEIGHTS_NAME) + ]) + ): + checkpoints.append(checkpoint) + return gr.update(value=[], choices=checkpoints) + + +def load_dataset_info(dataset_dir: str) -> Dict[str, Any]: + try: + with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f: + return json.load(f) + except: + return {} + + +def list_dataset( + dataset_dir: Optional[str] = None, training_stage: Optional[str] = list(TRAINING_STAGES.keys())[0] +) -> Dict[str, Any]: + dataset_info = load_dataset_info(dataset_dir if dataset_dir is not None else DEFAULT_DATA_DIR) + ranking = TRAINING_STAGES[training_stage] in ["rm", "dpo"] + datasets = [k for k, v in dataset_info.items() if v.get("ranking", False) == ranking] + return gr.update(value=[], choices=datasets) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/__init__.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..32228b8e7b00f5bc7cda374f4527a1a868e6b8a2 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/__init__.py @@ -0,0 +1,6 @@ +from llmtuner.webui.components.top import create_top +from llmtuner.webui.components.train import create_train_tab +from llmtuner.webui.components.eval import create_eval_tab +from llmtuner.webui.components.infer import create_infer_tab +from llmtuner.webui.components.export import create_export_tab +from llmtuner.webui.components.chatbot import create_chat_box diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/chatbot.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/chatbot.py new file mode 100644 index 0000000000000000000000000000000000000000..928a568cbded91b9288b420a422137e5f1e412d9 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/chatbot.py @@ -0,0 +1,51 @@ +from typing import TYPE_CHECKING, Dict, Optional, Tuple + +import gradio as gr + +if TYPE_CHECKING: + from gradio.blocks import Block + from gradio.components import Component + from llmtuner.webui.chat import WebChatModel + + +def create_chat_box( + chat_model: "WebChatModel", + visible: Optional[bool] = False +) -> Tuple["Block", "Component", "Component", Dict[str, "Component"]]: + with gr.Box(visible=visible) as chat_box: + chatbot = gr.Chatbot() + + with gr.Row(): + with gr.Column(scale=4): + system = gr.Textbox(show_label=False) + query = gr.Textbox(show_label=False, lines=8) + submit_btn = gr.Button(variant="primary") + + with gr.Column(scale=1): + clear_btn = gr.Button() + max_new_tokens = gr.Slider(10, 2048, value=chat_model.generating_args.max_new_tokens, step=1) + top_p = gr.Slider(0.01, 1, value=chat_model.generating_args.top_p, step=0.01) + temperature = gr.Slider(0.01, 1.5, value=chat_model.generating_args.temperature, step=0.01) + + history = gr.State([]) + + submit_btn.click( + chat_model.predict, + 
[chatbot, query, history, system, max_new_tokens, top_p, temperature], + [chatbot, history], + show_progress=True + ).then( + lambda: gr.update(value=""), outputs=[query] + ) + + clear_btn.click(lambda: ([], []), outputs=[chatbot, history], show_progress=True) + + return chat_box, chatbot, history, dict( + system=system, + query=query, + submit_btn=submit_btn, + clear_btn=clear_btn, + max_new_tokens=max_new_tokens, + top_p=top_p, + temperature=temperature + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/data.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/data.py new file mode 100644 index 0000000000000000000000000000000000000000..af19cc41474fc12974c802b8626b9540dd7ee13d --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/data.py @@ -0,0 +1,21 @@ +import gradio as gr +from typing import TYPE_CHECKING, Tuple + +if TYPE_CHECKING: + from gradio.blocks import Block + from gradio.components import Component + + +def create_preview_box() -> Tuple["Block", "Component", "Component", "Component"]: + with gr.Box(visible=False, elem_classes="modal-box") as preview_box: + with gr.Row(): + preview_count = gr.Number(interactive=False) + + with gr.Row(): + preview_samples = gr.JSON(interactive=False) + + close_btn = gr.Button() + + close_btn.click(lambda: gr.update(visible=False), outputs=[preview_box], queue=False) + + return preview_box, preview_count, preview_samples, close_btn diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/eval.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/eval.py new file mode 100644 index 0000000000000000000000000000000000000000..d37fe74610d5395318cccfd91a9fd9a219c2abf8 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/eval.py @@ -0,0 +1,98 @@ +from typing import TYPE_CHECKING, Dict +import gradio as gr + +from llmtuner.webui.common import list_dataset, DEFAULT_DATA_DIR +from llmtuner.webui.components.data import create_preview_box +from llmtuner.webui.utils import can_preview, get_preview + +if TYPE_CHECKING: + from gradio.components import Component + from llmtuner.webui.runner import Runner + + +def create_eval_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict[str, "Component"]: + with gr.Row(): + dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2) + dataset = gr.Dropdown(multiselect=True, scale=4) + data_preview_btn = gr.Button(interactive=False, scale=1) + + preview_box, preview_count, preview_samples, close_btn = create_preview_box() + + dataset_dir.change(list_dataset, [dataset_dir], [dataset]) + dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn]) + data_preview_btn.click( + get_preview, + [dataset_dir, dataset], + [preview_count, preview_samples, preview_box], + queue=False + ) + + with gr.Row(): + cutoff_len = gr.Slider(value=1024, minimum=4, maximum=8192, step=1) + max_samples = gr.Textbox(value="100000") + batch_size = gr.Slider(value=8, minimum=1, maximum=512, step=1) + predict = gr.Checkbox(value=True) + + with gr.Row(): + max_new_tokens = gr.Slider(10, 2048, value=128, step=1) + top_p = gr.Slider(0.01, 1, value=0.7, step=0.01) + temperature = gr.Slider(0.01, 1.5, value=0.95, step=0.01) + + with gr.Row(): + cmd_preview_btn = gr.Button() + start_btn = gr.Button() + stop_btn = gr.Button() + + with gr.Row(): + process_bar = gr.Slider(visible=False, interactive=False) + + with gr.Box(): + output_box = gr.Markdown() + + input_components = [ + top_elems["lang"], + top_elems["model_name"], + top_elems["checkpoints"], + 
top_elems["finetuning_type"], + top_elems["quantization_bit"], + top_elems["template"], + top_elems["system_prompt"], + dataset_dir, + dataset, + cutoff_len, + max_samples, + batch_size, + predict, + max_new_tokens, + top_p, + temperature + ] + + output_components = [ + output_box, + process_bar + ] + + cmd_preview_btn.click(runner.preview_eval, input_components, output_components) + start_btn.click(runner.run_eval, input_components, output_components) + stop_btn.click(runner.set_abort, queue=False) + + return dict( + dataset_dir=dataset_dir, + dataset=dataset, + data_preview_btn=data_preview_btn, + preview_count=preview_count, + preview_samples=preview_samples, + close_btn=close_btn, + cutoff_len=cutoff_len, + max_samples=max_samples, + batch_size=batch_size, + predict=predict, + max_new_tokens=max_new_tokens, + top_p=top_p, + temperature=temperature, + cmd_preview_btn=cmd_preview_btn, + start_btn=start_btn, + stop_btn=stop_btn, + output_box=output_box + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/export.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/export.py new file mode 100644 index 0000000000000000000000000000000000000000..1b18fca0a1d9d5ce8c4a8d27adbeb63858121f2f --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/export.py @@ -0,0 +1,37 @@ +from typing import TYPE_CHECKING, Dict +import gradio as gr + +from llmtuner.webui.utils import save_model + +if TYPE_CHECKING: + from gradio.components import Component + + +def create_export_tab(top_elems: Dict[str, "Component"]) -> Dict[str, "Component"]: + with gr.Row(): + save_dir = gr.Textbox() + max_shard_size = gr.Slider(value=10, minimum=1, maximum=100) + + export_btn = gr.Button() + info_box = gr.Textbox(show_label=False, interactive=False) + + export_btn.click( + save_model, + [ + top_elems["lang"], + top_elems["model_name"], + top_elems["checkpoints"], + top_elems["finetuning_type"], + top_elems["template"], + max_shard_size, + save_dir + ], + [info_box] + ) + + return dict( + save_dir=save_dir, + max_shard_size=max_shard_size, + export_btn=export_btn, + info_box=info_box + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/infer.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/infer.py new file mode 100644 index 0000000000000000000000000000000000000000..489ccf2ed850bebf5974a32194d4e95a2ecc71ec --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/infer.py @@ -0,0 +1,51 @@ +from typing import TYPE_CHECKING, Dict + +import gradio as gr + +from llmtuner.webui.chat import WebChatModel +from llmtuner.webui.components.chatbot import create_chat_box + +if TYPE_CHECKING: + from gradio.components import Component + + +def create_infer_tab(top_elems: Dict[str, "Component"]) -> Dict[str, "Component"]: + with gr.Row(): + load_btn = gr.Button() + unload_btn = gr.Button() + + info_box = gr.Textbox(show_label=False, interactive=False) + + chat_model = WebChatModel(lazy_init=True) + chat_box, chatbot, history, chat_elems = create_chat_box(chat_model) + + load_btn.click( + chat_model.load_model, + [ + top_elems["lang"], + top_elems["model_name"], + top_elems["checkpoints"], + top_elems["finetuning_type"], + top_elems["quantization_bit"], + top_elems["template"], + top_elems["system_prompt"] + ], + [info_box] + ).then( + lambda: gr.update(visible=(chat_model.model is not None)), outputs=[chat_box] + ) + + unload_btn.click( + chat_model.unload_model, [top_elems["lang"]], [info_box] + ).then( + lambda: ([], []), outputs=[chatbot, history] + ).then( + 
lambda: gr.update(visible=(chat_model.model is not None)), outputs=[chat_box] + ) + + return dict( + info_box=info_box, + load_btn=load_btn, + unload_btn=unload_btn, + **chat_elems + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/top.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/top.py new file mode 100644 index 0000000000000000000000000000000000000000..62c1f9c963d9ecc5009b598edbe2b3b4fc65584d --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/top.py @@ -0,0 +1,66 @@ +from typing import TYPE_CHECKING, Dict + +import gradio as gr + +from llmtuner.extras.constants import METHODS, SUPPORTED_MODELS +from llmtuner.extras.template import templates +from llmtuner.webui.common import list_checkpoint, get_model_path, get_template, save_config +from llmtuner.webui.utils import can_quantize + +if TYPE_CHECKING: + from gradio.components import Component + + +def create_top() -> Dict[str, "Component"]: + available_models = list(SUPPORTED_MODELS.keys()) + ["Custom"] + + with gr.Row(): + lang = gr.Dropdown(choices=["en", "zh"], scale=1) + model_name = gr.Dropdown(choices=available_models, scale=3) + model_path = gr.Textbox(scale=3) + + with gr.Row(): + finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1) + checkpoints = gr.Dropdown(multiselect=True, scale=5) + refresh_btn = gr.Button(scale=1) + + with gr.Accordion(label="Advanced config", open=False) as advanced_tab: + with gr.Row(): + quantization_bit = gr.Dropdown(choices=["None", "8", "4"], value="None", scale=1) + template = gr.Dropdown(choices=list(templates.keys()), value="default", scale=1) + system_prompt = gr.Textbox(scale=2) + + lang.change(save_config, [lang, model_name, model_path]) + + model_name.change( + list_checkpoint, [model_name, finetuning_type], [checkpoints] + ).then( + get_model_path, [model_name], [model_path] + ).then( + get_template, [model_name], [template] + ) # do not save config since the below line will save + + model_path.change(save_config, [lang, model_name, model_path]) + + finetuning_type.change( + list_checkpoint, [model_name, finetuning_type], [checkpoints] + ).then( + can_quantize, [finetuning_type], [quantization_bit] + ) + + refresh_btn.click( + list_checkpoint, [model_name, finetuning_type], [checkpoints], queue=False + ) + + return dict( + lang=lang, + model_name=model_name, + model_path=model_path, + finetuning_type=finetuning_type, + checkpoints=checkpoints, + refresh_btn=refresh_btn, + advanced_tab=advanced_tab, + quantization_bit=quantization_bit, + template=template, + system_prompt=system_prompt + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/train.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/train.py new file mode 100644 index 0000000000000000000000000000000000000000..d12f8c9f88c99facf6ce0e46e907a1296d90615f --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/components/train.py @@ -0,0 +1,187 @@ +from typing import TYPE_CHECKING, Dict +from transformers.trainer_utils import SchedulerType + +import gradio as gr + +from llmtuner.extras.constants import TRAINING_STAGES +from llmtuner.webui.common import list_checkpoint, list_dataset, DEFAULT_DATA_DIR +from llmtuner.webui.components.data import create_preview_box +from llmtuner.webui.utils import can_preview, get_preview, gen_plot + +if TYPE_CHECKING: + from gradio.components import Component + from llmtuner.webui.runner import Runner + + +def create_train_tab(top_elems: Dict[str, "Component"], runner: "Runner") -> Dict[str, "Component"]: + 
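# assembles the training tab: dataset selection and preview, hyperparameters, LoRA/RLHF settings and run controls +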
with gr.Row(): + training_stage = gr.Dropdown( + choices=list(TRAINING_STAGES.keys()), value=list(TRAINING_STAGES.keys())[0], scale=2 + ) + dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2) + dataset = gr.Dropdown(multiselect=True, scale=4) + data_preview_btn = gr.Button(interactive=False, scale=1) + + preview_box, preview_count, preview_samples, close_btn = create_preview_box() + + training_stage.change(list_dataset, [dataset_dir, training_stage], [dataset]) + dataset_dir.change(list_dataset, [dataset_dir, training_stage], [dataset]) + dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn]) + data_preview_btn.click( + get_preview, + [dataset_dir, dataset], + [preview_count, preview_samples, preview_box], + queue=False + ) + + with gr.Row(): + cutoff_len = gr.Slider(value=1024, minimum=4, maximum=8192, step=1) + learning_rate = gr.Textbox(value="5e-5") + num_train_epochs = gr.Textbox(value="3.0") + max_samples = gr.Textbox(value="100000") + compute_type = gr.Radio(choices=["fp16", "bf16"], value="fp16") + + with gr.Row(): + batch_size = gr.Slider(value=4, minimum=1, maximum=512, step=1) + gradient_accumulation_steps = gr.Slider(value=4, minimum=1, maximum=512, step=1) + lr_scheduler_type = gr.Dropdown( + choices=[scheduler.value for scheduler in SchedulerType], value="cosine" + ) + max_grad_norm = gr.Textbox(value="1.0") + val_size = gr.Slider(value=0, minimum=0, maximum=1, step=0.001) + + with gr.Accordion(label="Advanced config", open=False) as advanced_tab: + with gr.Row(): + logging_steps = gr.Slider(value=5, minimum=5, maximum=1000, step=5) + save_steps = gr.Slider(value=100, minimum=10, maximum=5000, step=10) + warmup_steps = gr.Slider(value=0, minimum=0, maximum=5000, step=1) + flash_attn = gr.Checkbox(value=False) + rope_scaling = gr.Checkbox(value=False) + + with gr.Accordion(label="LoRA config", open=False) as lora_tab: + with gr.Row(): + lora_rank = gr.Slider(value=8, minimum=1, maximum=1024, step=1, scale=1) + lora_dropout = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=1) + lora_target = gr.Textbox(scale=2) + resume_lora_training = gr.Checkbox(value=True, scale=1) + + with gr.Accordion(label="RLHF config", open=False) as rlhf_tab: + with gr.Row(): + dpo_beta = gr.Slider(value=0.1, minimum=0, maximum=1, step=0.01, scale=2) + reward_model = gr.Dropdown(scale=2) + refresh_btn = gr.Button(scale=1) + + refresh_btn.click( + list_checkpoint, + [top_elems["model_name"], top_elems["finetuning_type"]], + [reward_model], + queue=False + ) + + with gr.Row(): + cmd_preview_btn = gr.Button() + start_btn = gr.Button() + stop_btn = gr.Button() + + with gr.Row(): + with gr.Column(scale=3): + with gr.Row(): + output_dir = gr.Textbox() + + with gr.Row(): + process_bar = gr.Slider(visible=False, interactive=False) + + with gr.Box(): + output_box = gr.Markdown() + + with gr.Column(scale=1): + loss_viewer = gr.Plot() + + input_components = [ + top_elems["lang"], + top_elems["model_name"], + top_elems["checkpoints"], + top_elems["finetuning_type"], + top_elems["quantization_bit"], + top_elems["template"], + top_elems["system_prompt"], + training_stage, + dataset_dir, + dataset, + cutoff_len, + learning_rate, + num_train_epochs, + max_samples, + compute_type, + batch_size, + gradient_accumulation_steps, + lr_scheduler_type, + max_grad_norm, + val_size, + logging_steps, + save_steps, + warmup_steps, + flash_attn, + rope_scaling, + lora_rank, + lora_dropout, + lora_target, + resume_lora_training, + dpo_beta, + reward_model, + output_dir + ] + + output_components = 
[ + output_box, + process_bar + ] + + cmd_preview_btn.click(runner.preview_train, input_components, output_components) + start_btn.click(runner.run_train, input_components, output_components) + stop_btn.click(runner.set_abort, queue=False) + + process_bar.change( + gen_plot, [top_elems["model_name"], top_elems["finetuning_type"], output_dir], loss_viewer, queue=False + ) + + return dict( + training_stage=training_stage, + dataset_dir=dataset_dir, + dataset=dataset, + data_preview_btn=data_preview_btn, + preview_count=preview_count, + preview_samples=preview_samples, + close_btn=close_btn, + cutoff_len=cutoff_len, + learning_rate=learning_rate, + num_train_epochs=num_train_epochs, + max_samples=max_samples, + compute_type=compute_type, + batch_size=batch_size, + gradient_accumulation_steps=gradient_accumulation_steps, + lr_scheduler_type=lr_scheduler_type, + max_grad_norm=max_grad_norm, + val_size=val_size, + advanced_tab=advanced_tab, + logging_steps=logging_steps, + save_steps=save_steps, + warmup_steps=warmup_steps, + flash_attn=flash_attn, + rope_scaling=rope_scaling, + lora_tab=lora_tab, + lora_rank=lora_rank, + lora_dropout=lora_dropout, + lora_target=lora_target, + resume_lora_training=resume_lora_training, + rlhf_tab=rlhf_tab, + dpo_beta=dpo_beta, + reward_model=reward_model, + refresh_btn=refresh_btn, + cmd_preview_btn=cmd_preview_btn, + start_btn=start_btn, + stop_btn=stop_btn, + output_dir=output_dir, + output_box=output_box, + loss_viewer=loss_viewer + ) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/css.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/css.py new file mode 100644 index 0000000000000000000000000000000000000000..5d370c1f8a15f294e236892594c88d2a5bd0f119 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/css.py @@ -0,0 +1,18 @@ +CSS = r""" +.modal-box { + position: fixed !important; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); /* center horizontally */ + max-width: 1000px; + max-height: 750px; + overflow-y: scroll !important; + background-color: var(--input-background-fill); + border: 2px solid black !important; + z-index: 1000; +} + +.dark .modal-box { + border: 2px solid white !important; +} +""" diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/interface.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/interface.py new file mode 100644 index 0000000000000000000000000000000000000000..3b351f633ed3bd054bd87a27362035b586e63c7a --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/interface.py @@ -0,0 +1,78 @@ +import gradio as gr +from transformers.utils.versions import require_version + +from llmtuner.webui.components import ( + create_top, + create_train_tab, + create_eval_tab, + create_infer_tab, + create_export_tab, + create_chat_box +) +from llmtuner.webui.chat import WebChatModel +from llmtuner.webui.css import CSS +from llmtuner.webui.manager import Manager +from llmtuner.webui.runner import Runner + + +require_version("gradio>=3.36.0", "To fix: pip install gradio>=3.36.0") + + +def create_ui() -> gr.Blocks: + runner = Runner() + + with gr.Blocks(title="Web Tuner", css=CSS) as demo: + top_elems = create_top() + + with gr.Tab("Train"): + train_elems = create_train_tab(top_elems, runner) + + with gr.Tab("Evaluate"): + eval_elems = create_eval_tab(top_elems, runner) + + with gr.Tab("Chat"): + infer_elems = create_infer_tab(top_elems) + + with gr.Tab("Export"): + export_elems = create_export_tab(top_elems) + + elem_list = [top_elems, train_elems, eval_elems, infer_elems, export_elems] + manager = Manager(elem_list) + + 
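# re-render all component labels in the selected language on page load and whenever it changes +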
demo.load( + manager.gen_label, + [top_elems["lang"]], + [elem for elems in elem_list for elem in elems.values()], + ) + + top_elems["lang"].change( + manager.gen_label, + [top_elems["lang"]], + [elem for elems in elem_list for elem in elems.values()], + queue=False + ) + + return demo + + +def create_web_demo() -> gr.Blocks: + chat_model = WebChatModel(lazy_init=False) + + with gr.Blocks(title="Web Demo", css=CSS) as demo: + lang = gr.Dropdown(choices=["en", "zh"], value="en") + + _, _, _, chat_elems = create_chat_box(chat_model, visible=True) + + manager = Manager([{"lang": lang}, chat_elems]) + + demo.load(manager.gen_label, [lang], [lang] + list(chat_elems.values())) + + lang.select(manager.gen_label, [lang], [lang] + list(chat_elems.values()), queue=False) + + return demo + + +if __name__ == "__main__": + demo = create_ui() + demo.queue() + demo.launch(server_name="0.0.0.0", server_port=7860, share=False, inbrowser=True) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/locales.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/locales.py new file mode 100644 index 0000000000000000000000000000000000000000..5ac3cd2e1f0e6082503aa42e64095689a2e4a208 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/locales.py @@ -0,0 +1,618 @@ +LOCALES = { + "lang": { + "en": { + "label": "Lang" + }, + "zh": { + "label": "语言" + } + }, + "model_name": { + "en": { + "label": "Model name" + }, + "zh": { + "label": "模型名称" + } + }, + "model_path": { + "en": { + "label": "Model path", + "info": "Path to pretrained model or model identifier from Hugging Face." + }, + "zh": { + "label": "模型路径", + "info": "本地模型的文件路径或 Hugging Face 的模型标识符。" + } + }, + "finetuning_type": { + "en": { + "label": "Finetuning method" + }, + "zh": { + "label": "微调方法" + } + }, + "checkpoints": { + "en": { + "label": "Checkpoints" + }, + "zh": { + "label": "模型断点" + } + }, + "refresh_btn": { + "en": { + "value": "Refresh checkpoints" + }, + "zh": { + "value": "刷新断点" + } + }, + "advanced_tab": { + "en": { + "label": "Advanced configurations" + }, + "zh": { + "label": "高级设置" + } + }, + "quantization_bit": { + "en": { + "label": "Quantization bit (optional)", + "info": "Enable 4/8-bit model quantization." + }, + "zh": { + "label": "量化等级(非必填)", + "info": "启用 4/8 比特模型量化。" + } + }, + "template": { + "en": { + "label": "Prompt template", + "info": "The template used in constructing prompts." + }, + "zh": { + "label": "提示模板", + "info": "构建提示词时使用的模板" + } + }, + "system_prompt": { + "en": { + "label": "System prompt (optional)", + "info": "A sequence used as the default system prompt." + }, + "zh": { + "label": "系统提示词(非必填)", + "info": "默认使用的系统提示词" + } + }, + "training_stage": { + "en": { + "label": "Stage", + "info": "The stage to perform in training." + }, + "zh": { + "label": "训练阶段", + "info": "目前采用的训练方式。" + } + }, + "dataset_dir": { + "en": { + "label": "Data dir", + "info": "Path of the data directory." + }, + "zh": { + "label": "数据路径", + "info": "数据文件夹的路径。" + } + }, + "dataset": { + "en": { + "label": "Dataset" + }, + "zh": { + "label": "数据集" + } + }, + "data_preview_btn": { + "en": { + "value": "Preview dataset" + }, + "zh": { + "value": "预览数据集" + } + }, + "preview_count": { + "en": { + "label": "Count" + }, + "zh": { + "label": "数量" + } + }, + "preview_samples": { + "en": { + "label": "Samples" + }, + "zh": { + "label": "样例" + } + }, + "close_btn": { + "en": { + "value": "Close" + }, + "zh": { + "value": "关闭" + } + }, + "cutoff_len": { + "en": { + "label": "Cutoff length", + "info": "Max tokens in input sequence." 
+        },
+        "zh": {
+            "label": "截断长度",
+            "info": "输入序列分词后的最大长度。"
+        }
+    },
+    "learning_rate": {
+        "en": {
+            "label": "Learning rate",
+            "info": "Initial learning rate for AdamW."
+        },
+        "zh": {
+            "label": "学习率",
+            "info": "AdamW 优化器的初始学习率。"
+        }
+    },
+    "num_train_epochs": {
+        "en": {
+            "label": "Epochs",
+            "info": "Total number of training epochs to perform."
+        },
+        "zh": {
+            "label": "训练轮数",
+            "info": "需要执行的训练总轮数。"
+        }
+    },
+    "max_samples": {
+        "en": {
+            "label": "Max samples",
+            "info": "Maximum samples per dataset."
+        },
+        "zh": {
+            "label": "最大样本数",
+            "info": "每个数据集最多使用的样本数。"
+        }
+    },
+    "compute_type": {
+        "en": {
+            "label": "Compute type",
+            "info": "Whether to use fp16 or bf16 mixed precision training."
+        },
+        "zh": {
+            "label": "计算类型",
+            "info": "是否启用 FP16 或 BF16 混合精度训练。"
+        }
+    },
+    "batch_size": {
+        "en": {
+            "label": "Batch size",
+            "info": "Number of samples to process per GPU."
+        },
+        "zh": {
+            "label": "批处理大小",
+            "info": "每块 GPU 上处理的样本数量。"
+        }
+    },
+    "gradient_accumulation_steps": {
+        "en": {
+            "label": "Gradient accumulation",
+            "info": "Number of gradient accumulation steps."
+        },
+        "zh": {
+            "label": "梯度累积",
+            "info": "梯度累积的步数。"
+        }
+    },
+    "lr_scheduler_type": {
+        "en": {
+            "label": "LR Scheduler",
+            "info": "Name of learning rate scheduler."
+        },
+        "zh": {
+            "label": "学习率调节器",
+            "info": "采用的学习率调节器名称。"
+        }
+    },
+    "max_grad_norm": {
+        "en": {
+            "label": "Maximum gradient norm",
+            "info": "Norm for gradient clipping."
+        },
+        "zh": {
+            "label": "最大梯度范数",
+            "info": "用于梯度裁剪的范数。"
+        }
+    },
+    "val_size": {
+        "en": {
+            "label": "Val size",
+            "info": "Proportion of data in the dev set."
+        },
+        "zh": {
+            "label": "验证集比例",
+            "info": "验证集占全部样本的百分比。"
+        }
+    },
+    "logging_steps": {
+        "en": {
+            "label": "Logging steps",
+            "info": "Number of steps between two logs."
+        },
+        "zh": {
+            "label": "日志间隔",
+            "info": "每两次日志输出间的更新步数。"
+        }
+    },
+    "save_steps": {
+        "en": {
+            "label": "Save steps",
+            "info": "Number of steps between two checkpoints."
+        },
+        "zh": {
+            "label": "保存间隔",
+            "info": "每两次断点保存间的更新步数。"
+        }
+    },
+    "warmup_steps": {
+        "en": {
+            "label": "Warmup steps",
+            "info": "Number of steps used for warmup."
+        },
+        "zh": {
+            "label": "预热步数",
+            "info": "学习率预热采用的步数。"
+        }
+    },
+    "flash_attn": {
+        "en": {
+            "label": "Use FlashAttention-2"
+        },
+        "zh": {
+            "label": "使用 FlashAttention-2"
+        }
+    },
+    "rope_scaling": {
+        "en": {
+            "label": "Use RoPE scaling"
+        },
+        "zh": {
+            "label": "使用 RoPE 插值"
+        }
+    },
+    "lora_tab": {
+        "en": {
+            "label": "LoRA configurations"
+        },
+        "zh": {
+            "label": "LoRA 参数设置"
+        }
+    },
+    "lora_rank": {
+        "en": {
+            "label": "LoRA rank",
+            "info": "The rank of LoRA matrices."
+        },
+        "zh": {
+            "label": "LoRA 秩",
+            "info": "LoRA 矩阵的秩。"
+        }
+    },
+    "lora_dropout": {
+        "en": {
+            "label": "LoRA Dropout",
+            "info": "Dropout ratio of LoRA weights."
+        },
+        "zh": {
+            "label": "LoRA 随机丢弃",
+            "info": "LoRA 权重随机丢弃的概率。"
+        }
+    },
+    "lora_target": {
+        "en": {
+            "label": "LoRA modules (optional)",
+            "info": "The name(s) of target modules to apply LoRA. Use commas to separate multiple modules."
+        },
+        "zh": {
+            "label": "LoRA 作用层(非必填)",
+            "info": "应用 LoRA 的线性层名称。使用英文逗号分隔多个名称。"
+        }
+    },
+    "resume_lora_training": {
+        "en": {
+            "label": "Resume LoRA training",
+            "info": "Whether to resume training from the last LoRA weights or create new LoRA weights."
+ }, + "zh": { + "label": "继续上次的训练", + "info": "接着上次的 LoRA 权重训练或创建一个新的 LoRA 权重。" + } + }, + "rlhf_tab": { + "en": { + "label": "RLHF configurations" + }, + "zh": { + "label": "RLHF 参数设置" + } + }, + "dpo_beta": { + "en": { + "label": "DPO beta", + "info": "Value of the beta parameter in the DPO loss." + }, + "zh": { + "label": "DPO beta 参数", + "info": "DPO 损失函数中 beta 超参数大小。" + } + }, + "reward_model": { + "en": { + "label": "Reward model", + "info": "Checkpoint of the reward model for PPO training." + }, + "zh": { + "label": "奖励模型", + "info": "PPO 训练中奖励模型的断点路径。" + } + }, + "cmd_preview_btn": { + "en": { + "value": "Preview command" + }, + "zh": { + "value": "预览命令" + } + }, + "start_btn": { + "en": { + "value": "Start" + }, + "zh": { + "value": "开始" + } + }, + "stop_btn": { + "en": { + "value": "Abort" + }, + "zh": { + "value": "中断" + } + }, + "output_dir": { + "en": { + "label": "Checkpoint name", + "info": "Directory to save checkpoint." + }, + "zh": { + "label": "断点名称", + "info": "保存模型断点的文件夹名称。" + } + }, + "output_box": { + "en": { + "value": "Ready." + }, + "zh": { + "value": "准备就绪。" + } + }, + "loss_viewer": { + "en": { + "label": "Loss" + }, + "zh": { + "label": "损失" + } + }, + "predict": { + "en": { + "label": "Save predictions" + }, + "zh": { + "label": "保存预测结果" + } + }, + "load_btn": { + "en": { + "value": "Load model" + }, + "zh": { + "value": "加载模型" + } + }, + "unload_btn": { + "en": { + "value": "Unload model" + }, + "zh": { + "value": "卸载模型" + } + }, + "info_box": { + "en": { + "value": "Model unloaded, please load a model first." + }, + "zh": { + "value": "模型未加载,请先加载模型。" + } + }, + "system": { + "en": { + "placeholder": "System prompt (optional)" + }, + "zh": { + "placeholder": "系统提示词(非必填)" + } + }, + "query": { + "en": { + "placeholder": "Input..." + }, + "zh": { + "placeholder": "输入..." + } + }, + "submit_btn": { + "en": { + "value": "Submit" + }, + "zh": { + "value": "提交" + } + }, + "clear_btn": { + "en": { + "value": "Clear history" + }, + "zh": { + "value": "清空历史" + } + }, + "max_length": { + "en": { + "label": "Maximum length" + }, + "zh": { + "label": "最大长度" + } + }, + "max_new_tokens": { + "en": { + "label": "Maximum new tokens" + }, + "zh": { + "label": "最大生成长度" + } + }, + "top_p": { + "en": { + "label": "Top-p" + }, + "zh": { + "label": "Top-p 采样值" + } + }, + "temperature": { + "en": { + "label": "Temperature" + }, + "zh": { + "label": "温度系数" + } + }, + "save_dir": { + "en": { + "label": "Export dir", + "info": "Directory to save exported model." + }, + "zh": { + "label": "导出目录", + "info": "保存导出模型的文件夹路径。" + } + }, + "max_shard_size": { + "en": { + "label": "Max shard size (GB)", + "info": "The maximum size for a model file." 
+        },
+        "zh": {
+            "label": "最大分块大小(GB)",
+            "info": "模型文件的最大大小。"
+        }
+    },
+    "export_btn": {
+        "en": {
+            "value": "Export"
+        },
+        "zh": {
+            "value": "开始导出"
+        }
+    }
+}
+
+
+ALERTS = {
+    "err_conflict": {
+        "en": "A process is already running, please abort it first.",
+        "zh": "任务已存在,请先中断训练。"
+    },
+    "err_exists": {
+        "en": "You have loaded a model, please unload it first.",
+        "zh": "模型已存在,请先卸载模型。"
+    },
+    "err_no_model": {
+        "en": "Please select a model.",
+        "zh": "请选择模型。"
+    },
+    "err_no_path": {
+        "en": "Model not found.",
+        "zh": "模型未找到。"
+    },
+    "err_no_dataset": {
+        "en": "Please choose a dataset.",
+        "zh": "请选择数据集。"
+    },
+    "err_no_checkpoint": {
+        "en": "Please select a checkpoint.",
+        "zh": "请选择断点。"
+    },
+    "err_no_save_dir": {
+        "en": "Please provide an export dir.",
+        "zh": "请填写导出目录。"
+    },
+    "err_failed": {
+        "en": "Failed.",
+        "zh": "训练出错。"
+    },
+    "info_aborting": {
+        "en": "Aborted, waiting for the thread to terminate...",
+        "zh": "训练中断,正在等待线程结束……"
+    },
+    "info_aborted": {
+        "en": "Ready.",
+        "zh": "准备就绪。"
+    },
+    "info_finished": {
+        "en": "Finished.",
+        "zh": "训练完毕。"
+    },
+    "info_loading": {
+        "en": "Loading model...",
+        "zh": "加载中……"
+    },
+    "info_unloading": {
+        "en": "Unloading model...",
+        "zh": "卸载中……"
+    },
+    "info_loaded": {
+        "en": "Model loaded, now you can chat with your model!",
+        "zh": "模型已加载,可以开始聊天了!"
+    },
+    "info_unloaded": {
+        "en": "Model unloaded.",
+        "zh": "模型已卸载。"
+    },
+    "info_exporting": {
+        "en": "Exporting model...",
+        "zh": "正在导出模型……"
+    },
+    "info_exported": {
+        "en": "Model exported.",
+        "zh": "模型导出完成。"
+    }
+}
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/manager.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..0593657f9ee870f9ad18b2884496efcc6186b7c2
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/manager.py
@@ -0,0 +1,46 @@
+import gradio as gr
+from gradio.components import Component
+from typing import Any, Dict, List
+
+from llmtuner.webui.common import get_model_path, list_dataset, load_config
+from llmtuner.webui.locales import LOCALES
+from llmtuner.webui.utils import get_time
+
+
+class Manager:
+
+    def __init__(self, elem_list: List[Dict[str, Component]]):
+        self.elem_list = elem_list
+
+    def gen_refresh(self, lang: str) -> Dict[str, Any]:
+        refresh_dict = {
+            "dataset": {"choices": list_dataset()["choices"]},
+            "output_dir": {"value": get_time()}
+        }
+
+        user_config = load_config()
+        if not lang:
+            if user_config.get("lang", None):
+                lang = user_config["lang"]
+            else:
+                lang = "en"
+
+        refresh_dict["lang"] = {"value": lang}
+
+        if user_config.get("last_model", None):
+            refresh_dict["model_name"] = {"value": user_config["last_model"]}
+            refresh_dict["model_path"] = {"value": get_model_path(user_config["last_model"])}
+
+        return refresh_dict
+
+    def gen_label(self, lang: str) -> Dict[Component, Dict[str, Any]]:  # cannot use TYPE_CHECKING
+        update_dict = {}
+        refresh_dict = self.gen_refresh(lang)
+
+        for elems in self.elem_list:
+            for name, component in elems.items():
+                update_dict[component] = gr.update(
+                    **LOCALES[name][refresh_dict["lang"]["value"]], **refresh_dict.get(name, {})
+                )
+
+        return update_dict
diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/runner.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/runner.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2945158bee29f6d3bf0ebcdbf037d30c29310e8
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/runner.py
@@ -0,0 +1,288 @@
+import gradio as gr
+import logging
+import os
+import
threading +import time +import transformers +from transformers.trainer import TRAINING_ARGS_NAME +from typing import Any, Dict, Generator, List, Tuple + +from llmtuner.extras.callbacks import LogCallback +from llmtuner.extras.constants import DEFAULT_MODULE, TRAINING_STAGES +from llmtuner.extras.logging import LoggerHandler +from llmtuner.extras.misc import torch_gc +from llmtuner.tuner import run_exp +from llmtuner.webui.common import get_model_path, get_save_dir, load_config +from llmtuner.webui.locales import ALERTS +from llmtuner.webui.utils import gen_cmd, get_eval_results, update_process_bar + + +class Runner: + + def __init__(self): + self.aborted = False + self.running = False + self.logger_handler = LoggerHandler() + self.logger_handler.setLevel(logging.INFO) + logging.root.addHandler(self.logger_handler) + transformers.logging.add_handler(self.logger_handler) + + def set_abort(self): + self.aborted = True + self.running = False + + def _initialize( + self, lang: str, model_name: str, dataset: List[str] + ) -> str: + if self.running: + return ALERTS["err_conflict"][lang] + + if not model_name: + return ALERTS["err_no_model"][lang] + + if not get_model_path(model_name): + return ALERTS["err_no_path"][lang] + + if len(dataset) == 0: + return ALERTS["err_no_dataset"][lang] + + self.aborted = False + self.logger_handler.reset() + self.trainer_callback = LogCallback(self) + return "" + + def _finalize( + self, lang: str, finish_info: str + ) -> str: + self.running = False + torch_gc() + if self.aborted: + return ALERTS["info_aborted"][lang] + else: + return finish_info + + def _parse_train_args( + self, + lang: str, + model_name: str, + checkpoints: List[str], + finetuning_type: str, + quantization_bit: str, + template: str, + system_prompt: str, + training_stage: str, + dataset_dir: str, + dataset: List[str], + cutoff_len: int, + learning_rate: str, + num_train_epochs: str, + max_samples: str, + compute_type: str, + batch_size: int, + gradient_accumulation_steps: int, + lr_scheduler_type: str, + max_grad_norm: str, + val_size: float, + logging_steps: int, + save_steps: int, + warmup_steps: int, + flash_attn: bool, + rope_scaling: bool, + lora_rank: int, + lora_dropout: float, + lora_target: str, + resume_lora_training: bool, + dpo_beta: float, + reward_model: str, + output_dir: str + ) -> Tuple[str, str, List[str], str, Dict[str, Any]]: + if checkpoints: + checkpoint_dir = ",".join( + [get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints] + ) + else: + checkpoint_dir = None + + output_dir = get_save_dir(model_name, finetuning_type, output_dir) + + user_config = load_config() + cache_dir = user_config.get("cache_dir", None) + + args = dict( + stage=TRAINING_STAGES[training_stage], + model_name_or_path=get_model_path(model_name), + do_train=True, + overwrite_cache=False, + cache_dir=cache_dir, + checkpoint_dir=checkpoint_dir, + finetuning_type=finetuning_type, + quantization_bit=int(quantization_bit) if quantization_bit in ["8", "4"] else None, + template=template, + system_prompt=system_prompt, + dataset_dir=dataset_dir, + dataset=",".join(dataset), + cutoff_len=cutoff_len, + learning_rate=float(learning_rate), + num_train_epochs=float(num_train_epochs), + max_samples=int(max_samples), + per_device_train_batch_size=batch_size, + gradient_accumulation_steps=gradient_accumulation_steps, + lr_scheduler_type=lr_scheduler_type, + max_grad_norm=float(max_grad_norm), + logging_steps=logging_steps, + save_steps=save_steps, + warmup_steps=warmup_steps, + 
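+            # The entries below are derived from UI state: the RoPE checkbox maps onto
+            # linear interpolation, lora_target falls back to the model family's default
+            # projection layers when left empty, and resume_lora_training is forced off
+            # for the rm/ppo/dpo stages so that a new adapter is created.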
flash_attn=flash_attn, + rope_scaling="linear" if rope_scaling else None, + lora_rank=lora_rank, + lora_dropout=lora_dropout, + lora_target=lora_target or DEFAULT_MODULE.get(model_name.split("-")[0], "q_proj,v_proj"), + resume_lora_training=( + False if TRAINING_STAGES[training_stage] in ["rm", "ppo", "dpo"] else resume_lora_training + ), + output_dir=output_dir + ) + args[compute_type] = True + + if args["stage"] == "ppo": + args["reward_model"] = reward_model + val_size = 0 + + if args["stage"] == "dpo": + args["dpo_beta"] = dpo_beta + + if val_size > 1e-6: + args["val_size"] = val_size + args["evaluation_strategy"] = "steps" + args["eval_steps"] = save_steps + args["load_best_model_at_end"] = True + + return lang, model_name, dataset, output_dir, args + + def _parse_eval_args( + self, + lang: str, + model_name: str, + checkpoints: List[str], + finetuning_type: str, + quantization_bit: str, + template: str, + system_prompt: str, + dataset_dir: str, + dataset: List[str], + cutoff_len: int, + max_samples: str, + batch_size: int, + predict: bool, + max_new_tokens: int, + top_p: float, + temperature: float + ) -> Tuple[str, str, List[str], str, Dict[str, Any]]: + if checkpoints: + checkpoint_dir = ",".join( + [get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints] + ) + output_dir = get_save_dir(model_name, finetuning_type, "eval_" + "_".join(checkpoints)) + else: + checkpoint_dir = None + output_dir = get_save_dir(model_name, finetuning_type, "eval_base") + + user_config = load_config() + cache_dir = user_config.get("cache_dir", None) + + args = dict( + stage="sft", + model_name_or_path=get_model_path(model_name), + do_eval=True, + overwrite_cache=False, + predict_with_generate=True, + cache_dir=cache_dir, + checkpoint_dir=checkpoint_dir, + finetuning_type=finetuning_type, + quantization_bit=int(quantization_bit) if quantization_bit in ["8", "4"] else None, + template=template, + system_prompt=system_prompt, + dataset_dir=dataset_dir, + dataset=",".join(dataset), + cutoff_len=cutoff_len, + max_samples=int(max_samples), + per_device_eval_batch_size=batch_size, + max_new_tokens=max_new_tokens, + top_p=top_p, + temperature=temperature, + output_dir=output_dir + ) + + if predict: + args.pop("do_eval", None) + args["do_predict"] = True + + return lang, model_name, dataset, output_dir, args + + def preview_train(self, *args) -> Generator[Tuple[str, Dict[str, Any]], None, None]: + lang, model_name, dataset, _, args = self._parse_train_args(*args) + error = self._initialize(lang, model_name, dataset) + if error: + yield error, gr.update(visible=False) + else: + yield gen_cmd(args), gr.update(visible=False) + + def preview_eval(self, *args) -> Generator[Tuple[str, Dict[str, Any]], None, None]: + lang, model_name, dataset, _, args = self._parse_eval_args(*args) + error = self._initialize(lang, model_name, dataset) + if error: + yield error, gr.update(visible=False) + else: + yield gen_cmd(args), gr.update(visible=False) + + def run_train(self, *args) -> Generator[Tuple[str, Dict[str, Any]], None, None]: + lang, model_name, dataset, output_dir, args = self._parse_train_args(*args) + error = self._initialize(lang, model_name, dataset) + if error: + yield error, gr.update(visible=False) + return + + self.running = True + run_kwargs = dict(args=args, callbacks=[self.trainer_callback]) + thread = threading.Thread(target=run_exp, kwargs=run_kwargs) + thread.start() + + while thread.is_alive(): + time.sleep(2) + if self.aborted: + yield ALERTS["info_aborting"][lang], 
gr.update(visible=False) + else: + yield self.logger_handler.log, update_process_bar(self.trainer_callback) + + if os.path.exists(os.path.join(output_dir, TRAINING_ARGS_NAME)): + finish_info = ALERTS["info_finished"][lang] + else: + finish_info = ALERTS["err_failed"][lang] + + yield self._finalize(lang, finish_info), gr.update(visible=False) + + def run_eval(self, *args) -> Generator[str, None, None]: + lang, model_name, dataset, output_dir, args = self._parse_eval_args(*args) + error = self._initialize(lang, model_name, dataset) + if error: + yield error, gr.update(visible=False) + return + + self.running = True + run_kwargs = dict(args=args, callbacks=[self.trainer_callback]) + thread = threading.Thread(target=run_exp, kwargs=run_kwargs) + thread.start() + + while thread.is_alive(): + time.sleep(2) + if self.aborted: + yield ALERTS["info_aborting"][lang], gr.update(visible=False) + else: + yield self.logger_handler.log, update_process_bar(self.trainer_callback) + + if os.path.exists(os.path.join(output_dir, "all_results.json")): + finish_info = get_eval_results(os.path.join(output_dir, "all_results.json")) + else: + finish_info = ALERTS["err_failed"][lang] + + yield self._finalize(lang, finish_info), gr.update(visible=False) diff --git a/LLaMA-Efficient-Tuning/src/llmtuner/webui/utils.py b/LLaMA-Efficient-Tuning/src/llmtuner/webui/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..52016378fba28ec6efa591685b0d727d2a687a59 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/llmtuner/webui/utils.py @@ -0,0 +1,159 @@ +import os +import json +import gradio as gr +import matplotlib.figure +import matplotlib.pyplot as plt +from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Tuple +from datetime import datetime + +from llmtuner.extras.ploting import smooth +from llmtuner.tuner import export_model +from llmtuner.webui.common import get_model_path, get_save_dir, DATA_CONFIG +from llmtuner.webui.locales import ALERTS + +if TYPE_CHECKING: + from llmtuner.extras.callbacks import LogCallback + + +def update_process_bar(callback: "LogCallback") -> Dict[str, Any]: + if not callback.max_steps: + return gr.update(visible=False) + + percentage = round(100 * callback.cur_steps / callback.max_steps, 0) if callback.max_steps != 0 else 100.0 + label = "Running {:d}/{:d}: {} < {}".format( + callback.cur_steps, + callback.max_steps, + callback.elapsed_time, + callback.remaining_time + ) + return gr.update(label=label, value=percentage, visible=True) + + +def get_time() -> str: + return datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + + +def can_preview(dataset_dir: str, dataset: list) -> Dict[str, Any]: + with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f: + dataset_info = json.load(f) + + if ( + len(dataset) > 0 + and "file_name" in dataset_info[dataset[0]] + and os.path.isfile(os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])) + ): + return gr.update(interactive=True) + else: + return gr.update(interactive=False) + + +def get_preview( + dataset_dir: str, dataset: list, start: Optional[int] = 0, end: Optional[int] = 2 +) -> Tuple[int, list, Dict[str, Any]]: + with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f: + dataset_info = json.load(f) + + data_file: str = dataset_info[dataset[0]]["file_name"] + with open(os.path.join(dataset_dir, data_file), "r", encoding="utf-8") as f: + if data_file.endswith(".json"): + data = json.load(f) + elif data_file.endswith(".jsonl"): + data = [json.loads(line) for line in 
f] + else: + data = [line for line in f] + return len(data), data[start:end], gr.update(visible=True) + + +def can_quantize(finetuning_type: str) -> Dict[str, Any]: + if finetuning_type != "lora": + return gr.update(value="None", interactive=False) + else: + return gr.update(interactive=True) + + +def gen_cmd(args: Dict[str, Any]) -> str: + if args.get("do_train", None): + args["plot_loss"] = True + cmd_lines = ["CUDA_VISIBLE_DEVICES=0 python src/train_bash.py "] + for k, v in args.items(): + if v is not None and v != "": + cmd_lines.append(" --{} {} ".format(k, str(v))) + cmd_text = "\\\n".join(cmd_lines) + cmd_text = "```bash\n{}\n```".format(cmd_text) + return cmd_text + + +def get_eval_results(path: os.PathLike) -> str: + with open(path, "r", encoding="utf-8") as f: + result = json.dumps(json.load(f), indent=4) + return "```json\n{}\n```\n".format(result) + + +def gen_plot(base_model: str, finetuning_type: str, output_dir: str) -> matplotlib.figure.Figure: + log_file = get_save_dir(base_model, finetuning_type, output_dir, "trainer_log.jsonl") + if not os.path.isfile(log_file): + return None + + plt.close("all") + fig = plt.figure() + ax = fig.add_subplot(111) + steps, losses = [], [] + with open(log_file, "r", encoding="utf-8") as f: + for line in f: + log_info = json.loads(line) + if log_info.get("loss", None): + steps.append(log_info["current_steps"]) + losses.append(log_info["loss"]) + + if len(losses) == 0: + return None + + ax.plot(steps, losses, alpha=0.4, label="original") + ax.plot(steps, smooth(losses), label="smoothed") + ax.legend() + ax.set_xlabel("step") + ax.set_ylabel("loss") + return fig + + +def save_model( + lang: str, + model_name: str, + checkpoints: List[str], + finetuning_type: str, + template: str, + max_shard_size: int, + save_dir: str +) -> Generator[str, None, None]: + if not model_name: + yield ALERTS["err_no_model"][lang] + return + + model_name_or_path = get_model_path(model_name) + if not model_name_or_path: + yield ALERTS["err_no_path"][lang] + return + + if not checkpoints: + yield ALERTS["err_no_checkpoint"][lang] + return + + checkpoint_dir = ",".join( + [get_save_dir(model_name, finetuning_type, ckpt) for ckpt in checkpoints] + ) + + if not save_dir: + yield ALERTS["err_no_save_dir"][lang] + return + + args = dict( + model_name_or_path=model_name_or_path, + checkpoint_dir=checkpoint_dir, + finetuning_type=finetuning_type, + template=template, + output_dir=save_dir + ) + + yield ALERTS["info_exporting"][lang] + export_model(args, max_shard_size="{}GB".format(max_shard_size)) + yield ALERTS["info_exported"][lang] diff --git a/LLaMA-Efficient-Tuning/src/train_bash.py b/LLaMA-Efficient-Tuning/src/train_bash.py new file mode 100644 index 0000000000000000000000000000000000000000..9ddd0586dde8e2c84b61d361ac42a44277ee9337 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/train_bash.py @@ -0,0 +1,14 @@ +from llmtuner import run_exp + + +def main(): + run_exp() + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/LLaMA-Efficient-Tuning/src/train_web.py b/LLaMA-Efficient-Tuning/src/train_web.py new file mode 100644 index 0000000000000000000000000000000000000000..38efd64d65c5d1a89af9a592c59a868a41b8dcff --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/train_web.py @@ -0,0 +1,11 @@ +from llmtuner import create_ui + + +def main(): + demo = create_ui() + demo.queue() + demo.launch(server_name="0.0.0.0", server_port=7860, share=False, inbrowser=True) + + +if __name__ == "__main__": + main() diff --git 
a/LLaMA-Efficient-Tuning/src/web_demo.py b/LLaMA-Efficient-Tuning/src/web_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..257536abcfd5368c208096fab0c9d83fcb254470 --- /dev/null +++ b/LLaMA-Efficient-Tuning/src/web_demo.py @@ -0,0 +1,11 @@ +from llmtuner import create_web_demo + + +def main(): + demo = create_web_demo() + demo.queue() + demo.launch(server_name="0.0.0.0", server_port=7860, share=False, inbrowser=True) + + +if __name__ == "__main__": + main() diff --git a/LLaMA-Efficient-Tuning/tests/cal_flops.py b/LLaMA-Efficient-Tuning/tests/cal_flops.py new file mode 100644 index 0000000000000000000000000000000000000000..58ca6cae24b948dfa361eea6458440ce10e878c3 --- /dev/null +++ b/LLaMA-Efficient-Tuning/tests/cal_flops.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# Calculates the flops of pre-trained models. +# Usage: python cal_flops.py --model_name_or_path path_to_model --batch_size 1 --seq_length 512 +# Inspired by: https://www.deepspeed.ai/tutorials/flops-profiler/ + +import fire +import torch +from typing import Optional +from deepspeed.accelerator import get_accelerator +from deepspeed.profiling.flops_profiler import get_model_profile + +from llmtuner import ChatModel + + +def calculate( + model_name_or_path: str, + batch_size: Optional[int] = 1, + seq_length: Optional[int] = 256, + flash_attn: Optional[bool] = False +): + with get_accelerator().device(0): + chat_model = ChatModel(dict( + model_name_or_path=model_name_or_path, + template="vanilla", + flash_attn=flash_attn + )) + fake_input = torch.ones((batch_size, seq_length), dtype=torch.long, device=chat_model.model.device) + input_dict = { + "input_ids": fake_input, + "labels": fake_input.clone() + } + flops, macs, params = get_model_profile( + chat_model.model, + kwargs=input_dict, + print_profile=True, + detailed=True + ) + print("FLOPS:", flops) + print("MACs:", macs) + print("Params:", params) + + +if __name__ == "__main__": + fire.Fire(calculate) diff --git a/LLaMA-Efficient-Tuning/tests/evaluate_zh.py b/LLaMA-Efficient-Tuning/tests/evaluate_zh.py new file mode 100644 index 0000000000000000000000000000000000000000..b079cf7d9116291e9574fe452880521e795db510 --- /dev/null +++ b/LLaMA-Efficient-Tuning/tests/evaluate_zh.py @@ -0,0 +1,133 @@ +# coding=utf-8 +# Evaluates fine-tuned models automatically. +# Usage: python evaluate_zh.py --evalset ceval/ceval-exam:law --split dev --output_file result.json +# --api_base http://localhost:8000/v1 --task_type choice --n_samples 100 +# dataset format: question (string), A (string), B (string), C (string), D (string), answer (Literal["A", "B", "C", "D"]) + + +import os +import fire +import json +import openai +from tqdm import tqdm +from typing import Literal, Optional +from datasets import load_dataset + + +def format_example_choice(examples): + model_inputs = {"query": [], "label": []} + task_template = "请从ABCD四个选项中选出正确的选项,仅输出选项序号。\n{question}\nA. {A}\nB. {B}\nC. {C}\nD. 
{D}\n答案:"
+    for i in range(len(examples["id"])):
+        query = task_template.format(
+            question=examples["question"][i],
+            A=examples["A"][i],
+            B=examples["B"][i],
+            C=examples["C"][i],
+            D=examples["D"][i]
+        )
+        label = examples["answer"][i]
+        model_inputs["query"].append(query)
+        model_inputs["label"].append(label)
+    return model_inputs
+
+
+def format_example_cloze(examples):
+    model_inputs = {"query": [], "label": []}
+    task_template = "请选择正确的答案填空,仅输出正确的选项。\n{question}\n选项:{A}\n{B}\n{C}\n{D}\n答案:"
+    for i in range(len(examples["id"])):
+        query = task_template.format(
+            question=examples["question"][i],
+            A=examples["A"][i],
+            B=examples["B"][i],
+            C=examples["C"][i],
+            D=examples["D"][i]
+        )
+        label = examples[examples["answer"][i]][i]
+        model_inputs["query"].append(query)
+        model_inputs["label"].append(label)
+    return model_inputs
+
+
+def format_example_openqa(examples):
+    model_inputs = {"query": [], "label": []}
+    task_template = "回答以下问题:{question}\n答案:"
+    for i in range(len(examples["id"])):
+        query = task_template.format(question=examples["question"][i])
+        label = examples[examples["answer"][i]][i]
+        model_inputs["query"].append(query)
+        model_inputs["label"].append(label)
+    return model_inputs
+
+
+TASK_DICT = {
+    "choice": format_example_choice,
+    "cloze": format_example_cloze,
+    "openqa": format_example_openqa
+}
+
+
+EXT2TYPE = {
+    "csv": "csv",
+    "json": "json",
+    "jsonl": "json"
+}
+
+
+def evaluate(
+    evalset: str,
+    api_base: str,
+    output_file: str,
+    split: Optional[str] = "val",
+    task_type: Optional[Literal["choice", "cloze", "openqa"]] = "choice",
+    n_samples: Optional[int] = 20
+):
+
+    openai.api_base = api_base
+    openai.api_key = "none"
+
+    if os.path.isfile(evalset):
+        dataset = load_dataset(EXT2TYPE[evalset.split(".")[-1]], data_files=evalset)["train"]
+    elif ":" in evalset:
+        evalset, subset = evalset.split(":")
+        dataset = load_dataset(evalset, subset, split=split)
+    else:
+        dataset = load_dataset(evalset, split=split)
+
+    n_samples = min(len(dataset), n_samples)
+
+    dataset = dataset.map(TASK_DICT[task_type], batched=True)
+    dataset = dataset.select(range(n_samples))
+
+    n_correct = 0
+    predictions = []
+    for example in tqdm(dataset):
+        query, label = example["query"], example["label"]
+        predict = openai.ChatCompletion.create(
+            model="default",
+            messages=[{"role": "user", "content": query}],
+            temperature=0.01,
+            top_p=0.01,
+            max_new_tokens=20
+        ).choices[0].message.content
+
+        if task_type == "choice" and predict[0].lower() == label[0].lower():
+            n_correct += 1
+        if task_type == "cloze" and label in [predict[:len(label)], predict[-len(label):]]:
+            n_correct += 1
+        if task_type == "openqa" and label in predict:
+            n_correct += 1
+
+        predictions.append({
+            "query": query,
+            "label": label,
+            "predict": predict
+        })
+
+    print("Result: {}/{}\nAccuracy: {:.2f}%".format(n_correct, n_samples, n_correct / n_samples * 100))
+
+    with open(output_file, "w", encoding="utf-8") as f:
+        json.dump(predictions, f, indent=2, ensure_ascii=False)
+
+
+if __name__ == "__main__":
+    fire.Fire(evaluate)
diff --git a/LLaMA-Efficient-Tuning/tests/llamafy_baichuan2.py b/LLaMA-Efficient-Tuning/tests/llamafy_baichuan2.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7b074287837e7f370df4a350d4c0989fd676bce
--- /dev/null
+++ b/LLaMA-Efficient-Tuning/tests/llamafy_baichuan2.py
@@ -0,0 +1,65 @@
+# coding=utf-8
+# Converts the Baichuan2-7B model into the same format as LLaMA2-7B.
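+# Baichuan packs the query, key and value projections into a single `W_pack` weight of
+# shape (3 * hidden_size, hidden_size); the script slices it into q_proj, k_proj and
+# v_proj at hidden_size (4096) boundaries, applies L2 normalization to the `lm_head`
+# weight, and re-shards the result according to the LLaMA-2 weight map.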
+# Usage: python llamafy_baichuan2.py --llama2_json llama2.index.json --input_dir input --output_dir output +# Inspired by: https://huggingface.co/fireballoon/baichuan-llama-7b/blob/main/convert_baichuan_to_llama.py +# Converted model: https://huggingface.co/hiyouga/Baichuan2-7B-Base-LLaMAfied + +import os +import fire +import json +import torch +from collections import OrderedDict + + +SHARD_A = "pytorch_model-00001-of-00002.bin" +SHARD_B = "pytorch_model-00002-of-00002.bin" + + +def llamafy_baichuan2( + llama2_json: str, + input_dir: str, + output_dir: str +): + baichuan2_state_dict = OrderedDict() + for filepath in os.listdir(input_dir): + if os.path.isfile(os.path.join(input_dir, filepath)) and filepath.endswith(".bin"): + shard_weight = torch.load(os.path.join(input_dir, filepath), map_location="cpu") + baichuan2_state_dict.update(shard_weight) + + llama2_state_dict = OrderedDict() + total_size = 0 + for key, value in baichuan2_state_dict.items(): + total_size += 2 * value.numel() # half precision + if "W_pack" in key: + llama2_state_dict[key.replace("W_pack", "q_proj")] = value[:4096, :] + llama2_state_dict[key.replace("W_pack", "k_proj")] = value[4096:2*4096, :] + llama2_state_dict[key.replace("W_pack", "v_proj")] = value[2*4096:, :] + elif "lm_head" in key: + llama2_state_dict[key] = torch.nn.functional.normalize(value) + else: + llama2_state_dict[key] = value + + with open(os.path.join(input_dir, llama2_json), "r", encoding="utf-8") as f: + llama2_index = json.load(f) + + merged_index = OrderedDict() + merged_index["metadata"] = {"total_size": total_size} + merged_index["weight_map"] = llama2_index["weight_map"] + + state_dict_a, state_dict_b = OrderedDict(), OrderedDict() + for key, value in llama2_state_dict.items(): + if merged_index["weight_map"][key] == SHARD_A: + state_dict_a[key] = value + else: + state_dict_b[key] = value + + os.makedirs(output_dir, exist_ok=True) + torch.save(state_dict_a, os.path.join(output_dir, SHARD_A)) + torch.save(state_dict_b, os.path.join(output_dir, SHARD_B)) + with open(os.path.join(output_dir, "pytorch_model.bin.index.json"), "w", encoding="utf-8") as f: + json.dump(merged_index, f, indent=2) + print("Completed!") + + +if __name__ == "__main__": + fire.Fire(llamafy_baichuan2) diff --git a/LLaMA-Efficient-Tuning/tests/modeling_baichuan.py b/LLaMA-Efficient-Tuning/tests/modeling_baichuan.py new file mode 100644 index 0000000000000000000000000000000000000000..326a9c58bf8e3424f413a5b02b5970ef32b12fb8 --- /dev/null +++ b/LLaMA-Efficient-Tuning/tests/modeling_baichuan.py @@ -0,0 +1,654 @@ +# Copyright (c) 2023, Baichuan Intelligent Technology. All rights reserved. 
+# Modified by hiyouga to support attention masks; the alibi implementation is largely borrowed from
+# https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py
+
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import CrossEntropyLoss
+from transformers import PreTrainedModel
+from transformers.activations import ACT2FN
+from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast
+from transformers.utils import logging
+
+from .configuration_baichuan import BaichuanConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+# Copied from transformers.models.bloom.modeling_bloom._make_causal_mask
+def _make_causal_mask(
+    input_ids_shape: torch.Size, device: torch.device, past_key_values_length: int
+) -> torch.BoolTensor:
+    """
+    Make causal mask used for self-attention.
+    """
+    batch_size, target_length = input_ids_shape
+    mask = torch.empty((target_length, target_length + past_key_values_length), dtype=torch.bool, device=device)
+    # ONNX doesn't support `torch.Tensor.triu` properly, thus we use this workaround
+    seq_ids = torch.arange(target_length, device=device)
+    mask[:, past_key_values_length:] = seq_ids[:, None] < seq_ids[None, :]
+
+    if past_key_values_length > 0:
+        mask[:, :past_key_values_length] = False
+
+    expanded_mask = mask[None, None, :, :].expand(batch_size, 1, target_length, target_length + past_key_values_length)
+    return expanded_mask
+
+
+# Copied from transformers.models.bloom.modeling_bloom._expand_mask
+def _expand_mask(mask: torch.Tensor, tgt_length: int) -> torch.BoolTensor:
+    """
+    Expands attention_mask from `[batch_size, src_length]` to `[batch_size, 1, tgt_length, src_length]`.
+    """
+    batch_size, src_length = mask.shape
+    tgt_length = tgt_length if tgt_length is not None else src_length
+
+    expanded_mask = ~(mask[:, None, None, :].to(torch.bool))
+    return expanded_mask.expand(batch_size, 1, tgt_length, src_length)
+
+
+# Copied from transformers.models.bloom.modeling_bloom.build_alibi_tensor
+def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
+    """
+    Link to paper: https://arxiv.org/abs/2108.12409 The alibi tensor is not causal as the original paper mentions; it
+    relies on the translation invariance of softmax for quick implementation: for a tensor l and a fixed value a,
+    `softmax(l + a) = softmax(l)`.
+
+    Args:
+        attention_mask (`torch.Tensor`):
+            Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
+        num_heads (`int`, *required*):
+            number of heads
+        dtype (`torch.dtype`):
+            dtype of the output tensor
+
+    Returns:
+        Tensor shaped (batch_size * num_heads, 1, max_seq_len).
+    """
+    batch_size, seq_length = attention_mask.shape
+    closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
+    base = torch.tensor(
+        2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
+    )
+    powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
+    slopes = torch.pow(base, powers)
+
+    if closest_power_of_2 != num_heads:
+        extra_base = torch.tensor(
+            2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
+        )
+        num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
+        extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
+        slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
+
+    # Note: alibi will be added to the attention bias that will be applied to the query, key product of attention
+    # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
+    # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
+    # => the query_length dimension will then be broadcasted correctly
+    arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
+    alibi = slopes[..., None] * arange_tensor
+    return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
+
+
+class RMSNorm(nn.Module):
+
+    def __init__(self, hidden_size, epsilon=1e-6):
+        super().__init__()
+        self.weight = nn.Parameter(torch.ones(hidden_size))
+        self.epsilon = epsilon
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        input_dtype = hidden_states.dtype
+        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+        hidden_states = hidden_states * torch.rsqrt(variance + self.epsilon)
+
+        return (self.weight * hidden_states).to(input_dtype)
+
+
+class MLP(nn.Module):
+
+    def __init__(
+        self,
+        hidden_size: int,
+        intermediate_size: int,
+        hidden_act: str,
+    ):
+        super().__init__()
+        self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+        self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
+        self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+        self.act_fn = ACT2FN[hidden_act]
+
+    def forward(self, x):
+        return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+class BaichuanAttention(nn.Module):
+
+    def __init__(self, config: "BaichuanConfig"):
+        super().__init__()
+        self.config = config
+        self.hidden_size = config.hidden_size
+        self.num_heads = config.num_attention_heads
+        self.head_dim = self.hidden_size // self.num_heads
+        self.max_position_embeddings = config.model_max_length
+
+        if (self.head_dim * self.num_heads) != self.hidden_size:
+            raise ValueError(
+                f"hidden_size {self.hidden_size} is not divisible by num_heads {self.num_heads}"
+            )
+
+        # Layer-wise attention scaling
+        self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
+        self.beta = 1.0
+
+        self.W_pack = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
+        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False)
+
+    def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+    def forward(
+        self,
+        hidden_states: torch.Tensor,
+        alibi: torch.Tensor,
+
attention_mask: torch.Tensor, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: bool = False, + use_cache: bool = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + + bsz, q_len, _ = hidden_states.size() + + proj = self.W_pack(hidden_states) # [batch_size, seq_length, 3 x hidden_size] + proj = proj.unflatten(-1, (3, self.hidden_size)).unsqueeze(0).transpose(0, -2).squeeze(-2) + query_states = proj[0].view(bsz, q_len, self.num_heads, self.head_dim) + key_states = proj[1].view(bsz, q_len, self.num_heads, self.head_dim) + value_states = proj[2].view(bsz, q_len, self.num_heads, self.head_dim) + + query_states = query_states.transpose(1, 2).reshape(bsz * self.num_heads, q_len, self.head_dim) + key_states = key_states.permute(0, 2, 3, 1).reshape(bsz * self.num_heads, self.head_dim, q_len) + value_states = value_states.transpose(1, 2).reshape(bsz * self.num_heads, q_len, self.head_dim) + + if past_key_value is not None: + # reuse k, v, self_attention + past_key, past_value = past_key_value + key_states = torch.cat([past_key, key_states], dim=2) + value_states = torch.cat([past_value, value_states], dim=1) + + _, _, kv_seq_len = key_states.shape + + past_key_value = (key_states, value_states) if use_cache else None + + # [batch_size * num_heads, q_length, kv_length] + # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11 + matmul_result = alibi.baddbmm( + batch1=query_states, + batch2=key_states, + beta=self.beta, + alpha=self.inv_norm_factor, + ) + + # change view to [batch_size, num_heads, q_length, kv_length] + attention_scores = matmul_result.view(bsz, self.num_heads, q_len, kv_seq_len) + + # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype + # [batch_size, num_heads, q_length, kv_length] + input_dtype = attention_scores.dtype + # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38` + if input_dtype == torch.float16: + attention_scores = attention_scores.to(torch.float) + attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min) + attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype) + + # change view [batch_size x num_heads, q_length, kv_length] + attention_probs_reshaped = attention_probs.view(bsz * self.num_heads, q_len, kv_seq_len) + + # matmul: [batch_size * num_heads, q_length, head_dim] + attn_output = torch.bmm(attention_probs_reshaped, value_states) + + attn_output = attn_output.view(bsz, self.num_heads, q_len, self.head_dim) + + attn_output = attn_output.transpose(1, 2).reshape(bsz, q_len, self.hidden_size) + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attention_probs = None + + return attn_output, attention_probs, past_key_value + + +class BaichuanLayer(nn.Module): + + def __init__(self, config: "BaichuanConfig"): + super().__init__() + self.hidden_size = config.hidden_size + self.self_attn = BaichuanAttention(config=config) + self.mlp = MLP( + hidden_size=self.hidden_size, + intermediate_size=config.intermediate_size, + hidden_act=config.hidden_act, + ) + self.input_layernorm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps) + self.post_attention_layernorm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + alibi: torch.Tensor, + attention_mask: torch.Tensor, + past_key_value: 
Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + alibi=alibi, + attention_mask=attention_mask, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + ) + hidden_states = residual + hidden_states + + # Fully Connected + residual = hidden_states + hidden_states = self.post_attention_layernorm(hidden_states) + hidden_states = self.mlp(hidden_states) + hidden_states = residual + hidden_states + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +class BaichuanPreTrainedModel(PreTrainedModel): + config_class = BaichuanConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["BaichuanLayer"] + _skip_keys_device_placement = "past_key_values" + _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, BaichuanModel): + module.gradient_checkpointing = value + + @staticmethod + def _convert_to_standard_cache( + past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: + """ + Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size, + num_heads, ...])) + """ + batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape + num_heads = batch_size_times_num_heads // batch_size + # key: [batch_size * num_heads, head_dim, seq_length] -> [batch_size, num_heads, head_dim, seq_length] + # value: [batch_size * num_heads, seq_length, head_dim] -> [batch_size, num_heads, seq_length, head_dim] + return tuple( + ( + layer_past[0].view(batch_size, num_heads, head_dim, seq_length), + layer_past[1].view(batch_size, num_heads, seq_length, head_dim), + ) + for layer_past in past_key_value + ) + + @staticmethod + def _convert_to_baichuan_cache( + past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]] + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]: + """ + Converts the cache to the format expected by Baichuan, i.e. 
to tuple(tuple([batch_size * num_heads, ...])) + """ + batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape + batch_size_times_num_heads = batch_size * num_heads + # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length] + # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim] + return tuple( + ( + layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length), + layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim), + ) + for layer_past in past_key_value + ) + + +class BaichuanModel(BaichuanPreTrainedModel): + + def __init__(self, config: "BaichuanConfig"): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + self.n_head = config.num_attention_heads + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList([BaichuanLayer(config) for _ in range(config.num_hidden_layers)]) + self.norm = RMSNorm(config.hidden_size, epsilon=config.rms_norm_eps) + + self.gradient_checkpointing = config.gradient_checkpointing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: + return build_alibi_tensor(attention_mask, num_heads, dtype) + + def _prepare_attn_mask( + self, attention_mask: torch.Tensor, input_shape: Tuple[int, int], past_key_values_length: int + ) -> torch.BoolTensor: + # create causal mask + # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] + combined_attention_mask = None + device = attention_mask.device + _, src_length = input_shape + + if src_length > 1: + combined_attention_mask = _make_causal_mask( + input_shape, device=device, past_key_values_length=past_key_values_length + ) + + # [batch_size, seq_length] -> [batch_size, 1, tgt_length, src_length] + expanded_attn_mask = _expand_mask(attention_mask, tgt_length=src_length) + combined_attention_mask = ( + expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask + ) + + return combined_attention_mask + + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot provide both input_ids and inputs_embeds simultaneously") + elif input_ids is not None: + batch_size, seq_length = input_ids.shape + elif inputs_embeds is not None: + batch_size, seq_length, _ = inputs_embeds.shape + else: + raise ValueError("You need to provide input_ids or inputs_embeds") + + 
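+        # The cache layout follows Bloom: keys are stored as
+        # [batch * num_heads, head_dim, kv_length] and values as
+        # [batch * num_heads, kv_length, head_dim], so the past length is read
+        # from the last dimension of the cached keys.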
+        seq_length_with_past = seq_length
+        past_key_values_length = 0
+        if past_key_values is not None:
+            past_key_values_length = past_key_values[0][0].shape[2]
+            seq_length_with_past = seq_length_with_past + past_key_values_length
+
+        if inputs_embeds is None:
+            inputs_embeds = self.embed_tokens(input_ids)
+
+        hidden_states = inputs_embeds
+
+        if attention_mask is None:
+            attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
+        else:
+            attention_mask = attention_mask.to(hidden_states.device)
+
+        if self.gradient_checkpointing and self.training:
+            if use_cache:
+                logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+                )
+                use_cache = False
+
+        # Compute alibi tensor: check build_alibi_tensor documentation
+        alibi = self.build_alibi_tensor(attention_mask, self.n_head, dtype=hidden_states.dtype)
+
+        causal_mask = self._prepare_attn_mask(
+            attention_mask,
+            input_shape=(batch_size, seq_length),
+            past_key_values_length=past_key_values_length,
+        )
+
+        # decoder layers
+        all_hidden_states = () if output_hidden_states else None
+        all_self_attns = () if output_attentions else None
+        next_decoder_cache = () if use_cache else None
+
+        for idx, decoder_layer in enumerate(self.layers):
+            if output_hidden_states:
+                all_hidden_states += (hidden_states,)
+
+            past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+            if self.gradient_checkpointing and self.training:
+
+                def create_custom_forward(module):
+                    def custom_forward(*inputs):
+                        # None for past_key_value
+                        return module(*inputs, output_attentions, None)
+
+                    return custom_forward
+
+                layer_outputs = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(decoder_layer),
+                    hidden_states,
+                    alibi,
+                    causal_mask,
+                    None,
+                )
+            else:
+                layer_outputs = decoder_layer(
+                    hidden_states,
+                    alibi=alibi,
+                    attention_mask=causal_mask,
+                    past_key_value=past_key_value,
+                    output_attentions=output_attentions,
+                    use_cache=use_cache,
+                )
+
+            hidden_states = layer_outputs[0]
+
+            if use_cache:
+                next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+            if output_attentions:
+                all_self_attns += (layer_outputs[1],)
+
+        hidden_states = self.norm(hidden_states)
+
+        # add hidden states from the last decoder layer
+        if output_hidden_states:
+            all_hidden_states += (hidden_states,)
+
+        next_cache = next_decoder_cache if use_cache else None
+
+        if not return_dict:
+            return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
+
+        return BaseModelOutputWithPast(
+            last_hidden_state=hidden_states,
+            past_key_values=next_cache,
+            hidden_states=all_hidden_states,
+            attentions=all_self_attns,
+        )
+
+
+class BaichuanForCausalLM(BaichuanPreTrainedModel):
+
+    def __init__(self, config):
+        super().__init__(config)
+        self.model = BaichuanModel(config)
+
+        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+        # Initialize weights and apply final processing
+        self.post_init()
+
+    def get_input_embeddings(self):
+        return self.model.embed_tokens
+
+    def set_input_embeddings(self, value):
+        self.model.embed_tokens = value
+
+    def get_output_embeddings(self):
+        return self.lm_head
+
+    def set_output_embeddings(self, new_embeddings):
+        self.lm_head = new_embeddings
+
+    def set_decoder(self, decoder):
+        self.model = decoder
+
+    def get_decoder(self):
+        return self.model
+
+    def forward(
+        self,
+        input_ids: torch.LongTensor = None,
+        attention_mask: Optional[torch.Tensor] = None,
+
past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + **kwargs + ) -> Union[Tuple, CausalLMOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, + input_ids: torch.LongTensor, + past_key_values: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + **kwargs + ) -> dict: + if past_key_values: + input_ids = input_ids[:, -1:] + + # the cache may be in the standard format (e.g. in contrastive search) + if past_key_values[0][0].shape[0] == input_ids.shape[0]: + past_key_values = self._convert_to_baichuan_cache(past_key_values) + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + def _reorder_cache( + self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + + Output shares the same memory storage as `past`. + """ + standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx)) + + # Get a copy of `beam_idx` on all the devices where we need those indices. 
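+        # Under model parallelism each layer's cache may live on a different device,
+        # so beam_idx is copied once per device before reordering the batch dimension.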
+ device_to_beam_idx = { + past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past + } + reordered_past = tuple( + ( + layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]), + layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]), + ) + for layer_past in standardized_past + ) + return self._convert_to_baichuan_cache(reordered_past) diff --git a/LLaMA-Efficient-Tuning/tests/quantize.py b/LLaMA-Efficient-Tuning/tests/quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..4be02f896f88ebe2d9b5096ba43ad0913965a6d5 --- /dev/null +++ b/LLaMA-Efficient-Tuning/tests/quantize.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# Quantizes fine-tuned models with AutoGPTQ (https://github.com/PanQiWei/AutoGPTQ). +# Usage: python quantize.py --input_dir path_to_llama_model --output_dir path_to_quant_model --data_file alpaca.json +# --max_length 1024 --max_samples 1024 +# dataset format: instruction (string), input (string), output (string), history (List[string]) + + +import fire +from datasets import load_dataset +from transformers import AutoTokenizer +from auto_gptq import AutoGPTQForCausalLM, BaseQuantizeConfig + + +def quantize(input_dir: str, output_dir: str, data_file: str, max_length: int, max_samples: int): + tokenizer = AutoTokenizer.from_pretrained(input_dir, use_fast=False, padding_side="left") + + def format_example(examples): + prefix=("A chat between a curious user and an artificial intelligence assistant. " + "The assistant gives helpful, detailed, and polite answers to the user's questions.") + texts = [] + for i in range(len(examples["instruction"])): + prompt = prefix + "\n" + if "history" in examples: + for user_query, bot_resp in examples["history"][i]: + prompt += "Human: {}\nAssistant: {}\n".format(user_query, bot_resp) + prompt += "Human: {}\nAssistant: {}".format( + examples["instruction"][i] + "\n" + examples["input"][i], examples["output"][i] + ) + texts.append(prompt) + return tokenizer(texts, truncation=True, max_length=max_length) + + dataset = load_dataset("json", data_files=data_file)["train"] + column_names = list(dataset.column_names) + dataset = dataset.select(range(min(len(dataset), max_samples))) + dataset = dataset.map(format_example, batched=True, remove_columns=column_names) + dataset = dataset.shuffle() + + quantize_config = BaseQuantizeConfig( + bits=4, + group_size=128, + desc_act=False + ) + + model = AutoGPTQForCausalLM.from_pretrained(input_dir, quantize_config, trust_remote_code=True) + model.quantize(dataset) + model.save_quantized(output_dir) + + +if __name__ == "__main__": + fire.Fire(quantize)
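
After quantization, the saved directory can be loaded back for inference. Below is a minimal sketch, assuming AutoGPTQ's standard `from_quantized` loading entry point; the paths reuse the placeholder names from the usage comment above, and the tokenizer is loaded from the original model directory since `quantize.py` only saves the quantized weights:

```python
# Minimal sketch: load a model produced by quantize.py and run a short generation.
from auto_gptq import AutoGPTQForCausalLM
from transformers import AutoTokenizer

# The tokenizer still lives next to the original (unquantized) weights.
tokenizer = AutoTokenizer.from_pretrained("path_to_llama_model", use_fast=False)
model = AutoGPTQForCausalLM.from_quantized("path_to_quant_model", device="cuda:0")

inputs = tokenizer("Human: Hello\nAssistant:", return_tensors="pt").to("cuda:0")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```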