diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..ac4aee55c0698300d21541d5395d452016585f7a --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright Zhengxiao Du + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/MODEL_LICENSE b/MODEL_LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..a43fae25a4146b784f761be6db87fbb18d783fa7 --- /dev/null +++ b/MODEL_LICENSE @@ -0,0 +1,33 @@ +The GLM-130B License + +1. Definitions + +“Licensor” means the GLM-130B Model Team that distributes its Software. + +“Software” means the GLM-130B model parameters made available under this license. + +2. License Grant + +Subject to the terms and conditions of this License, the Licensor hereby grants to you a non-exclusive, worldwide, non-transferable, non-sublicensable, revocable, royalty-free copyright license to use the Software solely for your non-commercial research purposes. 
+ +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +3. Restriction + +You will not use, copy, modify, merge, publish, distribute, reproduce, or create derivative works of the Software, in whole or in part, for any commercial, military, or illegal purposes. + +You will not use the Software for any act that may undermine China's national security and national unity, harm the public interest of society, or infringe upon the rights and interests of human beings. + +4. Disclaimer + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +5. Limitation of Liability + +EXCEPT TO THE EXTENT PROHIBITED BY APPLICABLE LAW, IN NO EVENT AND UNDER NO LEGAL THEORY, WHETHER BASED IN TORT, NEGLIGENCE, CONTRACT, LIABILITY, OR OTHERWISE WILL ANY LICENSOR BE LIABLE TO YOU FOR ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES, OR ANY OTHER COMMERCIAL LOSSES, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +6. Dispute Resolution + +This license shall be governed and construed in accordance with the laws of People’s Republic of China. Any dispute arising from or in connection with this License shall be submitted to Haidian District People's Court in Beijing. + +Note that the license is subject to update to a more comprehensive version. For any questions related to the license and copyright, please contact us at glm-130b@googlegroups.com. 
\ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000000000000000000000000000000000000..def8e04a1fb779c5990562dd1a8d1a0646230aee --- /dev/null +++ b/README.md @@ -0,0 +1,85 @@ +--- +language: +- zh +- en +tags: +- glm +- chatglm +- thudm +--- +# ChatGLM-6B +

+ 🌐 Blog • 💻 Github Repo • 🐦 Twitter • 📃 [GLM@ACL 22] [GitHub] • 📃 [GLM-130B@ICLR 23] [GitHub]
+

+ +

+ 👋 Join our Slack and WeChat +

+ +## 介绍 +ChatGLM-6B 是一个开源的、支持中英双语问答的对话语言模型,基于 [General Language Model (GLM)](https://github.com/THUDM/GLM) 架构,具有 62 亿参数。结合模型量化技术,用户可以在消费级的显卡上进行本地部署(INT4 量化级别下最低只需 6GB 显存)。ChatGLM-6B 使用了和 [ChatGLM](https://chatglm.cn) 相同的技术,针对中文问答和对话进行了优化。经过约 1T 标识符的中英双语训练,辅以监督微调、反馈自助、人类反馈强化学习等技术的加持,62 亿参数的 ChatGLM-6B 已经能生成相当符合人类偏好的回答。 + +ChatGLM-6B is an open bilingual language model based on [General Language Model (GLM)](https://github.com/THUDM/GLM) framework, with 6.2 billion parameters. With the quantization technique, users can deploy locally on consumer-grade graphics cards (only 6GB of GPU memory is required at the INT4 quantization level). ChatGLM-6B uses technology similar to ChatGPT, optimized for Chinese QA and dialogue. The model is trained for about 1T tokens of Chinese and English corpus, supplemented by supervised fine-tuning, feedback bootstrap, and reinforcement learning wit human feedback. With only about 6.2 billion parameters, the model is able to generate answers that are in line with human preference. + +## 软件依赖 + +```shell +pip install protobuf==3.20.0 transformers==4.27.1 icetk cpm_kernels +``` + +## 代码调用 + +可以通过如下代码调用 ChatGLM-6B 模型来生成对话: + +```ipython +>>> from transformers import AutoTokenizer, AutoModel +>>> tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +>>> model = AutoModel.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True).half().cuda() +>>> response, history = model.chat(tokenizer, "你好", history=[]) +>>> print(response) +你好👋!我是人工智能助手 ChatGLM-6B,很高兴见到你,欢迎问我任何问题。 +>>> response, history = model.chat(tokenizer, "晚上睡不着应该怎么办", history=history) +>>> print(response) +晚上睡不着可能会让你感到焦虑或不舒服,但以下是一些可以帮助你入睡的方法: + +1. 制定规律的睡眠时间表:保持规律的睡眠时间表可以帮助你建立健康的睡眠习惯,使你更容易入睡。尽量在每天的相同时间上床,并在同一时间起床。 +2. 创造一个舒适的睡眠环境:确保睡眠环境舒适,安静,黑暗且温度适宜。可以使用舒适的床上用品,并保持房间通风。 +3. 放松身心:在睡前做些放松的活动,例如泡个热水澡,听些轻柔的音乐,阅读一些有趣的书籍等,有助于缓解紧张和焦虑,使你更容易入睡。 +4. 避免饮用含有咖啡因的饮料:咖啡因是一种刺激性物质,会影响你的睡眠质量。尽量避免在睡前饮用含有咖啡因的饮料,例如咖啡,茶和可乐。 +5. 
避免在床上做与睡眠无关的事情:在床上做些与睡眠无关的事情,例如看电影,玩游戏或工作等,可能会干扰你的睡眠。 +6. 尝试呼吸技巧:深呼吸是一种放松技巧,可以帮助你缓解紧张和焦虑,使你更容易入睡。试着慢慢吸气,保持几秒钟,然后缓慢呼气。 + +如果这些方法无法帮助你入睡,你可以考虑咨询医生或睡眠专家,寻求进一步的建议。 +``` + +关于更多的使用说明,包括如何运行命令行和网页版本的 DEMO,以及使用模型量化以节省显存,请参考我们的 [Github Repo](https://github.com/THUDM/ChatGLM-6B)。 + +For more instructions, including how to run CLI and web demos, and model quantization, please refer to our [Github Repo](https://github.com/THUDM/ChatGLM-6B). + +## 协议 + +本仓库的代码依照 [Apache-2.0](LICENSE) 协议开源,ChatGLM-6B 模型的权重的使用则需要遵循 [Model License](MODEL_LICENSE)。 + +## 引用 + +如果你觉得我们的工作有帮助的话,请考虑引用下列论文: + +``` +@inproceedings{ + zeng2023glm-130b, + title={{GLM}-130B: An Open Bilingual Pre-trained Model}, + author={Aohan Zeng and Xiao Liu and Zhengxiao Du and Zihan Wang and Hanyu Lai and Ming Ding and Zhuoyi Yang and Yifan Xu and Wendi Zheng and Xiao Xia and Weng Lam Tam and Zixuan Ma and Yufei Xue and Jidong Zhai and Wenguang Chen and Zhiyuan Liu and Peng Zhang and Yuxiao Dong and Jie Tang}, + booktitle={The Eleventh International Conference on Learning Representations (ICLR)}, + year={2023}, + url={https://openreview.net/forum?id=-Aw0rrrPUF} +} +``` +``` +@inproceedings{du2022glm, + title={GLM: General Language Model Pretraining with Autoregressive Blank Infilling}, + author={Du, Zhengxiao and Qian, Yujie and Liu, Xiao and Ding, Ming and Qiu, Jiezhong and Yang, Zhilin and Tang, Jie}, + booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, + pages={320--335}, + year={2022} +} +``` \ No newline at end of file diff --git a/config.json b/config.json new file mode 100644 index 0000000000000000000000000000000000000000..2ab3caf549118f7baa491467f459eea52c555220 --- /dev/null +++ b/config.json @@ -0,0 +1,28 @@ +{ + "_name_or_path": "THUDM/chatglm-6b", + "architectures": [ + "ChatGLMModel" + ], + "auto_map": { + "AutoConfig": "configuration_chatglm.ChatGLMConfig", + "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", + 
class ChatGLMConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`~ChatGLMModel`].
    It is used to instantiate a ChatGLM model according to the specified arguments, defining the model
    architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
    the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture.

    Configuration objects inherit from [`PretrainedConfig`] and can be used
    to control the model outputs. Read the documentation from [`PretrainedConfig`]
    for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 150528):
            Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be
            represented by the `inputs_ids` passed when calling [`~ChatGLMModel`].
        hidden_size (`int`, *optional*, defaults to 4096):
            Dimension of the encoder layers and the pooler layer.
        num_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer encoder.
        layernorm_epsilon (`float`, *optional*, defaults to 1e-5):
            The epsilon used by the layer normalization layers.
        use_cache (`bool`, *optional*, defaults to `False`):
            Whether the model should return the last key/values attentions (not used by all models).
        bos_token_id (`int`, *optional*, defaults to 150004):
            Id of the beginning-of-sequence token.
        eos_token_id (`int`, *optional*, defaults to 150005):
            Id of the end-of-sequence token.
        mask_token_id (`int`, *optional*, defaults to 150000):
            Id of the `[MASK]` token.
        gmask_token_id (`int`, *optional*, defaults to 150001):
            Id of the `[gMASK]` token.
        pad_token_id (`int`, *optional*, defaults to 0):
            Id of the padding token.
        max_sequence_length (`int`, *optional*, defaults to 2048):
            The maximum sequence length that this model might ever be used with.
        inner_hidden_size (`int`, *optional*, defaults to 16384):
            Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        position_encoding_2d (`bool`, *optional*, defaults to `True`):
            Whether to use GLM's 2D positional encoding scheme instead of 1D rotary positions.
        quantization_bit (`int`, *optional*, defaults to 0):
            Weight-quantization bit width; 0 presumably means no quantization — confirm against the
            quantization code in modeling_chatglm.py.
        pre_seq_len (`int`, *optional*):
            Length of the learnable prefix used for P-Tuning v2 (`PrefixEncoder`); `None` disables it.
        prefix_projection (`bool`, *optional*, defaults to `False`):
            Whether the prefix embeddings are passed through a two-layer MLP (see `PrefixEncoder`).
    Example:

    ```python
    >>> from configuration_chatglm import ChatGLMConfig
    >>> from modeling_chatglm import ChatGLMModel

    >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration
    >>> configuration = ChatGLMConfig()

    >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration
    >>> model = ChatGLMModel(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```
"""
    model_type = "chatglm"

    def __init__(
        self,
        vocab_size=150528,
        hidden_size=4096,
        num_layers=28,
        num_attention_heads=32,
        layernorm_epsilon=1e-5,
        use_cache=False,
        bos_token_id=150004,
        eos_token_id=150005,
        mask_token_id=150000,
        gmask_token_id=150001,
        pad_token_id=0,
        max_sequence_length=2048,
        inner_hidden_size=16384,
        position_encoding_2d=True,
        quantization_bit=0,
        pre_seq_len=None,
        prefix_projection=False,
        **kwargs
    ):
        self.num_layers = num_layers
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.max_sequence_length = max_sequence_length
        self.layernorm_epsilon = layernorm_epsilon
        self.inner_hidden_size = inner_hidden_size
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.mask_token_id = mask_token_id
        self.gmask_token_id = gmask_token_id
        self.position_encoding_2d = position_encoding_2d
        self.quantization_bit = quantization_bit
        self.pre_seq_len = pre_seq_len
        self.prefix_projection = prefix_projection

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs
        )
class InvalidScoreLogitsProcessor(LogitsProcessor):
    """Sanitizes degenerate logits during generation.

    If any score is non-finite (NaN or +/-inf), every logit is zeroed
    in place and token id 5 is given a large positive score, so that
    softmax/sampling still yields a well-defined next token.
    """

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        # isfinite is False exactly for NaN and +/-inf, so this matches
        # "any NaN or any inf" in one pass.
        if not torch.isfinite(scores).all():
            scores.zero_()
            scores[..., 5] = 5e4
        return scores
class PrefixEncoder(torch.nn.Module):
    """Encodes prefix token ids into past key/value states (P-Tuning v2).

    Input shape: (batch_size, prefix_length)
    Output shape: (batch_size, prefix_length, 2 * num_layers * hidden_size)
    """

    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        # Flattened size of the key/value states produced per prefix position.
        kv_dim = config.num_layers * config.hidden_size * 2
        if self.prefix_projection:
            # Embed in hidden_size, then project up to the full key/value
            # dimension through a two-layer MLP.
            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(config.hidden_size, config.hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.hidden_size, kv_dim),
            )
        else:
            # Learn the flattened key/value states directly per prefix position.
            self.embedding = torch.nn.Embedding(config.pre_seq_len, kv_dim)

    def forward(self, prefix: torch.Tensor):
        embedded = self.embedding(prefix)
        return self.trans(embedded) if self.prefix_projection else embedded
/ (base ** (torch.arange(0, dim, 2).float() / dim)) + inv_freq = inv_freq.half() + self.learnable = learnable + if learnable: + self.inv_freq = torch.nn.Parameter(inv_freq) + self.max_seq_len_cached = None + else: + self.register_buffer('inv_freq', inv_freq) + self.max_seq_len_cached = None + self.cos_cached = None + self.sin_cached = None + self.precision = precision + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, + error_msgs): + pass + + def forward(self, x, seq_dim=1, seq_len=None): + if seq_len is None: + seq_len = x.shape[seq_dim] + if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached): + self.max_seq_len_cached = None if self.learnable else seq_len + t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + if self.precision == torch.bfloat16: + emb = emb.float() + + # [sx, 1 (b * np), hn] + cos_cached = emb.cos()[:, None, :] + sin_cached = emb.sin()[:, None, :] + if self.precision == torch.bfloat16: + cos_cached = cos_cached.bfloat16() + sin_cached = sin_cached.bfloat16() + if self.learnable: + return cos_cached, sin_cached + self.cos_cached, self.sin_cached = cos_cached, sin_cached + return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...] 
    def _apply(self, fn):
        # nn.Module._apply only visits parameters/buffers; mirror the
        # device/dtype move onto the plain-tensor cos/sin caches as well.
        if self.cos_cached is not None:
            self.cos_cached = fn(self.cos_cached)
        if self.sin_cached is not None:
            self.sin_cached = fn(self.sin_cached)
        return super()._apply(fn)


def rotate_half(x):
    # Split the last dimension in half and rotate: (x1, x2) -> (-x2, x1).
    x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=x1.ndim - 1)  # dim=-1 triggers a bug in earlier torch versions


@torch.jit.script
def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
    # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
    # Gather the cos/sin rows for each position via F.embedding, then rotate q/k.
    cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
        F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
    q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
    return q, k


def attention_fn(
        self,
        query_layer,
        key_layer,
        value_layer,
        attention_mask,
        hidden_size_per_partition,
        layer_id,
        layer_past=None,
        scaling_attention_score=True,
        use_cache=False,
):
    # Scaled dot-product attention with GLM's per-layer scaling trick:
    # queries are additionally divided by (layer_id + 1) before the matmul
    # and softmax is computed in float32 for fp16 stability.
    if layer_past is not None:
        # Append cached key/value states along the sequence (dim 0) axis.
        past_key, past_value = layer_past[0], layer_past[1]
        key_layer = torch.cat((past_key, key_layer), dim=0)
        value_layer = torch.cat((past_value, value_layer), dim=0)

    # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
    seq_len, b, nh, hidden_size = key_layer.shape

    if use_cache:
        present = (key_layer, value_layer)
    else:
        present = None

    query_key_layer_scaling_coeff = float(layer_id + 1)
    if scaling_attention_score:
        query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)

    # ===================================
    # Raw attention scores. [b, np, s, s]
    # ===================================

    # [b, np, sq, sk]
    output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))

    # [sq, b, np, hn] -> [sq, b * np, hn]
    query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
    # [sk, b, np, hn] -> [sk, b * np, hn]
    key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

    matmul_result = torch.zeros(
        1, 1, 1,
        dtype=query_layer.dtype,
        device=query_layer.device,
    )

    # With beta=0.0 the input tensor only supplies dtype/device: this is q @ k^T.
    matmul_result = torch.baddbmm(
        matmul_result,
        query_layer.transpose(0, 1),  # [b * np, sq, hn]
        key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
        beta=0.0,
        alpha=1.0,
    )

    # change view to [b, np, sq, sk]
    attention_scores = matmul_result.view(*output_size)

    if self.scale_mask_softmax:
        self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
    else:
        if not (attention_mask == 0).all():
            # if auto-regressive, skip
            attention_scores.masked_fill_(attention_mask, -10000.0)
        dtype = attention_scores.dtype
        # Undo the coeff division (softmax is invariant to it) and run in fp32.
        attention_scores = attention_scores.float()
        attention_scores = attention_scores * query_key_layer_scaling_coeff

        attention_probs = F.softmax(attention_scores, dim=-1)

        attention_probs = attention_probs.type(dtype)

    # =========================
    # Context layer. [sq, b, hp]
    # =========================

    # value_layer -> context layer.
    # [sk, b, np, hn] --> [b, np, sq, hn]

    # context layer shape: [b, np, sq, hn]
    output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))

    # change view [sk, b * np, hn]
    value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)

    # change view [b * np, sq, sk]
    attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)

    # matmul: [b * np, sq, hn]
    context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

    # change view [b, np, sq, hn]
    context_layer = context_layer.view(*output_size)

    # [b, np, sq, hn] --> [sq, b, np, hn]
    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

    # [sq, b, np, hn] --> [sq, b, hp]
    new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
    context_layer = context_layer.view(*new_context_layer_shape)

    outputs = (context_layer, present, attention_probs)

    return outputs


def default_init(cls, *args, **kwargs):
    # Plain constructor call; the non-empty counterpart of skip_init.
    return cls(*args, **kwargs)


class SelfAttention(torch.nn.Module):
    # Multi-head self-attention with rotary position embeddings. With 2D
    # position encoding each half of the head dimension gets its own rotary
    # table (absolute positions vs. block positions).
    def __init__(self, hidden_size, num_attention_heads,
                 layer_id, hidden_size_per_attention_head=None, bias=True,
                 params_dtype=torch.float, position_encoding_2d=True, empty_init=True):
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        super(SelfAttention, self).__init__()

        self.layer_id = layer_id
        self.hidden_size = hidden_size
        self.hidden_size_per_partition = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_attention_heads_per_partition = num_attention_heads
        self.position_encoding_2d = position_encoding_2d
        # Rotary dim is half the per-head size when 2D encoding splits each head.
        self.rotary_emb = RotaryEmbedding(
            self.hidden_size // (self.num_attention_heads * 2)
            if position_encoding_2d
            else self.hidden_size // self.num_attention_heads,
            base=10000,
            precision=torch.half,
            learnable=False,
        )

        self.scale_mask_softmax = None

        if hidden_size_per_attention_head is None:
            self.hidden_size_per_attention_head = hidden_size // num_attention_heads
        else:
            self.hidden_size_per_attention_head = hidden_size_per_attention_head

        self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head

        # Strided linear layer.
        self.query_key_value = init_method(
            torch.nn.Linear,
            hidden_size,
            3 * self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

        self.dense = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    @staticmethod
    def attention_mask_func(attention_scores, attention_mask):
        # In-place fill of masked positions with a large negative value.
        attention_scores.masked_fill_(attention_mask, -10000.0)
        return attention_scores

    def split_tensor_along_last_dim(self, tensor, num_partitions,
                                    contiguous_split_chunks=False):
        """Split a tensor along its last dimension.
        Arguments:
            tensor: input tensor.
            num_partitions: number of partitions to split the tensor
            contiguous_split_chunks: If True, make each chunk contiguous
                in memory.
        """
        # Get the size and dimension.
        last_dim = tensor.dim() - 1
        last_dim_size = tensor.size()[last_dim] // num_partitions
        # Split.
        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
        # Note: torch.split does not create contiguous tensors by default.
        if contiguous_split_chunks:
            return tuple(chunk.contiguous() for chunk in tensor_list)

        return tensor_list

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]
        """

        # [seq_len, batch, 3 * hidden_size]
        mixed_raw_layer = self.query_key_value(hidden_states)

        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
        new_tensor_shape = mixed_raw_layer.size()[:-1] + (
            self.num_attention_heads_per_partition,
            3 * self.hidden_size_per_attention_head,
        )
        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)

        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)

        if self.position_encoding_2d:
            # Two rotary streams: absolute positions on the first half of each
            # head, block positions on the second half.
            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
                position_ids[:, 1, :].transpose(0, 1).contiguous()
            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
        else:
            position_ids = position_ids.transpose(0, 1)
            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)

        # [seq_len, batch,
        # hidden_size]
        context_layer, present, attention_probs = attention_fn(
            self=self,
            query_layer=query_layer,
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask,
            hidden_size_per_partition=self.hidden_size_per_partition,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache
        )

        output = self.dense(context_layer)

        outputs = (output, present)

        if output_attentions:
            outputs += (attention_probs,)

        return outputs  # output, present, attention_probs


class GEGLU(torch.nn.Module):
    # Gated GELU: split the input in half, gate one half with gelu(other half).
    def __init__(self):
        super().__init__()
        self.activation_fn = F.gelu

    def forward(self, x):
        # dim=-1 breaks in jit for pt<1.10
        x1, x2 = x.chunk(2, dim=(x.ndim - 1))
        return x1 * self.activation_fn(x2)


class GLU(torch.nn.Module):
    # Feed-forward block: h -> inner (default 4h) -> activation -> h.
    def __init__(self, hidden_size, inner_hidden_size=None,
                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):
        super(GLU, self).__init__()
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        self.layer_id = layer_id
        self.activation_func = activation_func

        # Project to 4h.
        self.hidden_size = hidden_size
        if inner_hidden_size is None:
            inner_hidden_size = 4 * hidden_size
        self.inner_hidden_size = inner_hidden_size
        self.dense_h_to_4h = init_method(
            torch.nn.Linear,
            self.hidden_size,
            self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )
        # Project back to h.
        self.dense_4h_to_h = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            self.hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    def forward(self, hidden_states):
        """
        hidden_states: [seq_len, batch, hidden_size]
        """

        # [seq_len, batch, inner_hidden_size]
        intermediate_parallel = self.dense_h_to_4h(hidden_states)

        intermediate_parallel = self.activation_func(intermediate_parallel)

        output = self.dense_4h_to_h(intermediate_parallel)

        return output


class GLMBlock(torch.nn.Module):
    # One transformer layer: LN -> self-attention -> scaled residual -> LN ->
    # GLU feed-forward -> scaled residual.
    def __init__(
            self,
            hidden_size,
            num_attention_heads,
            layernorm_epsilon,
            layer_id,
            inner_hidden_size=None,
            hidden_size_per_attention_head=None,
            layernorm=LayerNorm,
            use_bias=True,
            params_dtype=torch.float,
            num_layers=28,
            position_encoding_2d=True,
            empty_init=True
    ):
        super(GLMBlock, self).__init__()
        # Set output layer initialization if not provided.

        self.layer_id = layer_id

        # Layernorm on the input data.
        self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)

        self.position_encoding_2d = position_encoding_2d

        # Self attention.
        self.attention = SelfAttention(
            hidden_size,
            num_attention_heads,
            layer_id,
            hidden_size_per_attention_head=hidden_size_per_attention_head,
            bias=use_bias,
            params_dtype=params_dtype,
            position_encoding_2d=self.position_encoding_2d,
            empty_init=empty_init
        )

        # Layernorm on the attention output (applied before the MLP).
+ self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) + + self.num_layers = num_layers + + # GLU + self.mlp = GLU( + hidden_size, + inner_hidden_size=inner_hidden_size, + bias=use_bias, + layer_id=layer_id, + params_dtype=params_dtype, + empty_init=empty_init + ) + + def forward( + self, + hidden_states: torch.Tensor, + position_ids, + attention_mask: torch.Tensor, + layer_id, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + """ + hidden_states: [seq_len, batch, hidden_size] + attention_mask: [(1, 1), seq_len, seq_len] + """ + + # Layer norm at the begining of the transformer layer. + # [seq_len, batch, hidden_size] + attention_input = self.input_layernorm(hidden_states) + + # Self attention. + attention_outputs = self.attention( + attention_input, + position_ids, + attention_mask=attention_mask, + layer_id=layer_id, + layer_past=layer_past, + use_cache=use_cache, + output_attentions=output_attentions + ) + + attention_output = attention_outputs[0] + + outputs = attention_outputs[1:] + + # Residual connection. + alpha = (2 * self.num_layers) ** 0.5 + hidden_states = attention_input * alpha + attention_output + + mlp_input = self.post_attention_layernorm(hidden_states) + + # MLP. + mlp_output = self.mlp(mlp_input) + + # Second residual connection. + output = mlp_input * alpha + mlp_output + + if use_cache: + outputs = (output,) + outputs + else: + outputs = (output,) + outputs[1:] + + return outputs # hidden_states, present, attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. 
+ """ + + is_parallelizable = False + supports_gradient_checkpointing = True + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLMBlock"] + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + def get_masks(self, input_ids, device): + batch_size, seq_length = input_ids.shape + context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids] + attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device) + attention_mask.tril_() + for i, context_length in enumerate(context_lengths): + attention_mask[i, :, :context_length] = 1 + attention_mask.unsqueeze_(1) + attention_mask = (attention_mask < 0.5).bool() + + return attention_mask + + def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None): + batch_size, seq_length = input_ids.shape + if use_gmasks is None: + use_gmasks = [False] * batch_size + context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids] + if self.position_encoding_2d: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + for i, context_length in enumerate(context_lengths): + position_ids[i, context_length:] = mask_positions[i] + block_position_ids = [torch.cat(( + torch.zeros(context_length, dtype=torch.long, device=device), + torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1 + )) for context_length in context_lengths] + block_position_ids = torch.stack(block_position_ids, dim=0) + position_ids = torch.stack((position_ids, block_position_ids), dim=1) + else: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + for i, context_length in enumerate(context_lengths): + if not use_gmasks[i]: + position_ids[context_length:] = mask_positions[i] + + return position_ids + 
    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the wrapped ChatGLMModel.
        if isinstance(module, ChatGLMModel):
            module.gradient_checkpointing = value


CHATGLM_6B_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.

    Parameters:
        config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CHATGLM_6B_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`ChatGLM6BTokenizer`].
            See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
    CHATGLM_6B_START_DOCSTRING,
)
class ChatGLMModel(ChatGLMPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the
    `is_decoder` argument of the configuration set to `True`.
    To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder`
    argument and `add_cross_attention` set to `True`; an
    `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        # recording parameters
        self.max_sequence_length = config.max_sequence_length
        self.hidden_size = config.hidden_size
        self.params_dtype = torch.half
        self.num_attention_heads = config.num_attention_heads
        self.vocab_size = config.vocab_size
        self.num_layers = config.num_layers
        self.layernorm_epsilon = config.layernorm_epsilon
        self.inner_hidden_size = config.inner_hidden_size
        self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
        self.position_encoding_2d = config.position_encoding_2d
        self.pre_seq_len = config.pre_seq_len
        self.prefix_projection = config.prefix_projection

        self.word_embeddings = init_method(
            torch.nn.Embedding,
            num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
            dtype=self.params_dtype
        )
        self.gradient_checkpointing = False

        def get_layer(layer_id):
            # One GLMBlock per layer, sharing the model-level hyper-parameters.
            return GLMBlock(
                self.hidden_size,
                self.num_attention_heads,
                self.layernorm_epsilon,
                layer_id,
                inner_hidden_size=self.inner_hidden_size,
                hidden_size_per_attention_head=self.hidden_size_per_attention_head,
                layernorm=LayerNorm,
                use_bias=True,
                params_dtype=self.params_dtype,
                position_encoding_2d=self.position_encoding_2d,
                empty_init=empty_init
            )

        self.layers = torch.nn.ModuleList(
            [get_layer(layer_id) for layer_id in range(self.num_layers)]
        )

        # Final layer norm before output.
        self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)

        if self.pre_seq_len is not None:
            # P-tuning v2: freeze the whole backbone and train only the
            # prefix encoder.
            for param in self.parameters():
                param.requires_grad = False
            self.prefix_tokens = torch.arange(self.pre_seq_len).long()
            self.prefix_encoder = PrefixEncoder(config)
            self.dropout = torch.nn.Dropout(0.1)

        # total_params = sum(p.numel() for p in self.parameters())
        # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))

    def get_input_embeddings(self):
        return self.word_embeddings

    def set_input_embeddings(self, new_embeddings: torch.Tensor):
        self.word_embeddings = new_embeddings

    def get_prompt(self, batch_size, device, dtype=torch.half):
        # Build per-layer prefix key/value pairs from the prefix encoder
        # (p-tuning v2); returns num_layers chunks, each
        # [2, pre_seq_len, batch, num_heads, head_dim].
        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
        past_key_values = past_key_values.view(
            batch_size,
            self.pre_seq_len,
            self.num_layers * 2,
            self.num_attention_heads,
            self.hidden_size // self.num_attention_heads
        )
        # seq_len, b, nh, hidden_size
        past_key_values = self.dropout(past_key_values)
        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
        # past_key_values = [(v[0], v[1]) for v in past_key_values]
        return past_key_values

    @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
            self,
            input_ids: Optional[torch.LongTensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
            inputs_embeds: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if past_key_values is None:
            if self.pre_seq_len is not None:
                # NOTE(review): assumes input_ids is not None here (and in the
                # attention_mask/position_ids branches below); an
                # inputs_embeds-only call would fail — confirm upstream intent.
                past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
                                                 dtype=inputs_embeds.dtype)
            else:
                past_key_values = tuple([None] * len(self.layers))

        if attention_mask is None:
            attention_mask = self.get_masks(
                input_ids,
                device=input_ids.device
            )

        if position_ids is None:
            # Locate the (g)MASK token of each sequence; gMASK takes priority.
            MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
            seqs = input_ids.tolist()

            mask_positions, use_gmasks = [], []
            for seq in seqs:
                mask_token = gMASK if gMASK in seq else MASK
                use_gmask = mask_token == gMASK
                mask_positions.append(seq.index(mask_token))
                use_gmasks.append(use_gmask)

            position_ids = self.get_position_ids(
                input_ids,
                mask_positions=mask_positions,
                device=input_ids.device,
                use_gmasks=use_gmasks
            )

        if self.pre_seq_len is not None and attention_mask is not None:
            # Prepend an all-visible (False) mask block for the prefix tokens.
            prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
                attention_mask.device)
            prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)

        # [seq_len, batch, hidden_size]
        hidden_states = inputs_embeds.transpose(0, 1)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        if attention_mask is None:
            attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
        else:
            attention_mask = attention_mask.to(hidden_states.device)

        for i, layer in enumerate(self.layers):

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_past = past_key_values[i]

            if self.gradient_checkpointing and self.training:
                layer_ret = torch.utils.checkpoint.checkpoint(
                    layer,
                    hidden_states,
                    position_ids,
                    attention_mask,
                    torch.tensor(i),
                    layer_past,
                    use_cache,
                    output_attentions
                )
            else:
                layer_ret = layer(
                    hidden_states,
                    position_ids=position_ids,
                    attention_mask=attention_mask,
                    layer_id=torch.tensor(i),
                    layer_past=layer_past,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            hidden_states = layer_ret[0]

            if use_cache:
                presents = presents + (layer_ret[1],)

            if output_attentions:
                # Attention probs sit at index 2 when a present is also returned.
                all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)

        # Final layer norm.
        hidden_states = self.final_layernorm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
    # ChatGLM-6B with a language-modeling head plus chat/streaming helpers.
    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init

        # self.hidden_size = config.hidden_size
        # self.params_dtype = torch.half
        # self.vocab_size = config.vocab_size
        self.max_sequence_length = config.max_sequence_length

        self.position_encoding_2d = config.position_encoding_2d

        self.transformer = ChatGLMModel(config, empty_init=empty_init)

        self.lm_head = init_method(
            nn.Linear,
            config.hidden_size,
            config.vocab_size,
            bias=False,
            dtype=torch.half
        )

        self.config = config

        self.quantized = False

        if self.config.quantization_bit:
            self.quantize(self.config.quantization_bit, empty_init=True)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def _update_model_kwargs_for_generation(
            self,
            outputs: ModelOutput,
            model_kwargs: Dict[str, Any],
            is_encoder_decoder: bool = False,
            standardize_cache_format: bool = False,
    ) -> Dict[str, Any]:
        # update past_key_values
        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
            outputs, standardize_cache_format=standardize_cache_format
        )

        # update attention mask
        if "attention_mask" in model_kwargs:
            attention_mask = model_kwargs["attention_mask"]
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                # Grow the bool mask by one key column, then by one query row
                # (visible everywhere) for the newly generated token.
                attention_mask = torch.cat(
                    [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
                new_attention_mask = attention_mask[:, :, -1:].clone()
                new_attention_mask[..., -1] = False
                model_kwargs["attention_mask"] = torch.cat(
                    [attention_mask, new_attention_mask], dim=2
                )

        # update position ids
        if "position_ids" in model_kwargs:
            position_ids = model_kwargs["position_ids"]
            new_position_id = position_ids[..., -1:].clone()
            # Only the block position (channel 1) advances during generation.
            new_position_id[:, 1, :] += 1
            model_kwargs["position_ids"] = torch.cat(
                [position_ids, new_position_id], dim=-1
            )

        return model_kwargs

    def prepare_inputs_for_generation(
            self,
            input_ids: torch.LongTensor,
            past: Optional[torch.Tensor] = None,
            past_key_values: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            **kwargs
    ) -> dict:
        batch_size, seq_length = input_ids.shape
        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
        seqs = input_ids.tolist()
        mask_positions, use_gmasks = [], []
        for seq in seqs:
            mask_token = gMASK if gMASK in seq else MASK
            use_gmask = mask_token == gMASK
            mask_positions.append(seq.index(mask_token))
            use_gmasks.append(use_gmask)

        # only last token for input_ids if past is not None
        if past is not None or past_key_values is not None:
            last_token = input_ids[:, -1].unsqueeze(-1)
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                attention_mask = attention_mask[:, :, -1:]
            else:
                attention_mask = None
            if position_ids is not None:
                position_ids = position_ids[..., -1:]
            else:
                context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
                if self.position_encoding_2d:
                    # [batch, 2, 1]: pinned mask position + current block position.
                    position_ids = torch.tensor(
                        [[mask_position, seq_length - context_length] for mask_position, context_length in
                         zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
                else:
                    position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
                                                device=input_ids.device).unsqueeze(-1)

            if past is None:
                past = past_key_values
            return {
                "input_ids": last_token,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }
        else:
            if attention_mask is not None and attention_mask.dtype != torch.bool:
                logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
                attention_mask = None
            if attention_mask is None:
                attention_mask = self.get_masks(
                    input_ids,
                    device=input_ids.device
                )
            if position_ids is None:
                position_ids = self.get_position_ids(
                    input_ids,
                    device=input_ids.device,
                    mask_positions=mask_positions,
                    use_gmasks=use_gmasks
                )

            return {
                "input_ids": input_ids,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }

    def forward(
            self,
            input_ids: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            labels: Optional[torch.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ):
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        # [sq, b, h] -> [b, sq, vocab]
        lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()

        loss = None
        if labels is not None:
            # Compute the LM loss in fp32 for numerical stability.
            lm_logits = lm_logits.to(torch.float32)

            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            lm_logits = lm_logits.to(hidden_states.dtype)
            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.

        Output shares the same memory storage as `past`.
        """
        # Cached keys/values are [seq, batch, ...]: batch/beam index is dim 1.
        return tuple(
            (
                layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)),
                layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)),
            )
            for layer_past in past
        )

    def process_response(self, response):
        # Normalize model output: trim whitespace, patch the training-time
        # placeholder, and convert ASCII punctuation adjacent to CJK text
        # into full-width equivalents.
        response = response.strip()
        response = response.replace("[[训练时间]]", "2023年")
        punkts = [
            [",", ","],
            ["!", "!"],
            [":", ":"],
            [";", ";"],
            ["\?", "?"],
        ]
        for item in punkts:
            response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response)
            response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response)
        return response

    @torch.no_grad()
    def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1,
             do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
        # Single-turn helper: build the multi-round prompt, generate, decode
        # only the newly generated tokens, and return (response, new history).
        if history is None:
            history = []
        if logits_processor is None:
            logits_processor = LogitsProcessorList()
        logits_processor.append(InvalidScoreLogitsProcessor())
        gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
                      "temperature": temperature, "logits_processor": logits_processor, **kwargs}
        if not history:
            prompt = query
        else:
            prompt = ""
            for i, (old_query, response) in enumerate(history):
                prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response)
            prompt += "[Round {}]\n问:{}\n答:".format(len(history), query)
        inputs = tokenizer([prompt], return_tensors="pt")
        inputs = inputs.to(self.device)
        outputs = self.generate(**inputs, **gen_kwargs)
        outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):]
        response = tokenizer.decode(outputs)
        response = self.process_response(response)
        history = history + [(query, response)]
        return response, history

    @torch.no_grad()
    def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048,
                    do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs):
        # NOTE(review): the visible source chunk is truncated mid-statement
        # here; the remainder of stream_chat continues on the following line.
        if
history is None: + history = [] + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(InvalidScoreLogitsProcessor()) + gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, "logits_processor": logits_processor, **kwargs} + if not history: + prompt = query + else: + prompt = "" + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + inputs = tokenizer([prompt], return_tensors="pt") + inputs = inputs.to(self.device) + for outputs in self.stream_generate(**inputs, **gen_kwargs): + outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] + response = tokenizer.decode(outputs) + response = self.process_response(response) + new_history = history + [(query, response)] + yield response, new_history + + @torch.no_grad() + def stream_generate( + self, + input_ids, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, + **kwargs, + ): + batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] + + if generation_config is None: + generation_config = self.generation_config + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) + bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None: + warnings.warn( + f"Using `max_length`'s default ({generation_config.max_length}) to control the generation 
length. " + "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" + " recommend using `max_new_tokens` to control the maximum length of the generation.", + UserWarning, + ) + elif generation_config.max_new_tokens is not None: + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + if not has_default_max_length: + logger.warn( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)", + UserWarning, + ) + + if input_ids_seq_length >= generation_config.max_length: + input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" + logger.warning( + f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing `max_new_tokens`." + ) + + # 2. 
Set generation parameters if not already defined + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + encoder_input_ids=input_ids, + prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, + logits_processor=logits_processor, + ) + + stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + logits_warper = self._get_logits_warper(generation_config) + + unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) + scores = None + while True: + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=False, + output_hidden_states=False, + ) + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + next_token_scores = logits_warper(input_ids, next_token_scores) + + # sample + probs = nn.functional.softmax(next_token_scores, dim=-1) + if generation_config.do_sample: + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_tokens = torch.argmax(probs, dim=-1) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long()) + + # stop when each sentence is finished, or if we exceed the maximum length + if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, 
scores): + break + yield input_ids + + def quantize(self, bits: int, empty_init=False, **kwargs): + if bits == 0: + return + + from .quantization import quantize + + if self.quantized: + logger.info("Already quantized.") + return self + + self.quantized = True + + self.config.quantization_bit = bits + + self.transformer = quantize(self.transformer, bits, empty_init=empty_init, **kwargs) + return self diff --git a/ptuning/.gitattributes b/ptuning/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..4edd5acb13dba6a9f44a206fa1b6a1789fbcc50a --- /dev/null +++ b/ptuning/.gitattributes @@ -0,0 +1 @@ +*.bin filter=lfs diff=lfs merge=lfs -text diff --git a/ptuning/AdvertiseGen/dev.json b/ptuning/AdvertiseGen/dev.json new file mode 100644 index 0000000000000000000000000000000000000000..d035bb02c899cb81c23cdf7f9f329c4ae3ddc75a --- /dev/null +++ b/ptuning/AdvertiseGen/dev.json @@ -0,0 +1,1070 @@ +{"content": "类型#上衣*材质#牛仔布*颜色#白色*风格#简约*图案#刺绣*衣样式#外套*衣款式#破洞", "summary": "简约而不简单的牛仔外套,白色的衣身十分百搭。衣身多处有做旧破洞设计,打破单调乏味,增加一丝造型看点。衣身后背处有趣味刺绣装饰,丰富层次感,彰显别样时尚。"} +{"content": "类型#裙*材质#针织*颜色#纯色*风格#复古*风格#文艺*风格#简约*图案#格子*图案#纯色*图案#复古*裙型#背带裙*裙长#连衣裙*裙领型#半高领", "summary": "这款BRAND针织两件套连衣裙,简约的纯色半高领针织上衣,修饰着颈部线,尽显优雅气质。同时搭配叠穿起一条背带式的复古格纹裙,整体散发着一股怀旧的时髦魅力,很是文艺范。"} +{"content": "类型#上衣*风格#嘻哈*图案#卡通*图案#印花*图案#撞色*衣样式#卫衣*衣款式#连帽", "summary": "嘻哈玩转童年,随时,没错,出街还是要靠卫衣来装酷哦!时尚个性的连帽设计,率性有范还防风保暖。还有胸前撞色的卡通印花设计,靓丽抢眼更富有趣味性,加上前幅大容量又时尚美观的袋鼠兜,简直就是孩子耍帅装酷必备的利器。"} +{"content": "类型#裤*风格#英伦*风格#简约", "summary": "裤子是简约大方的版型设计,带来一种极简主义风格而且不乏舒适优雅感,是衣橱必不可少的一件百搭单品。标志性的logo可以体现出一股子浓郁的英伦风情,轻而易举带来独一无二的体验。"} +{"content": "类型#裙*裙下摆#弧形*裙腰型#高腰*裙长#半身裙*裙款式#不规则*裙款式#收腰", "summary": "这款来自梵凯的半身裙富有十足的设计感,采用了别致的不规则设计,凸显出时尚前卫的格调,再搭配俏皮的高腰设计,收腰提臀的同时还勾勒出优美迷人的身材曲线,而且还帮你拉长腿部比例,释放出优雅娇俏的小女人味。并且独特的弧形下摆还富有流畅的线条美,一颦一动间展现出灵动柔美的气质。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*图案#线条*衣样式#衬衫*衣袖型#泡泡袖*衣款式#抽绳", "summary": "这件衬衫的款式非常的宽松,利落的线条可以很好的隐藏身材上的小缺点,穿在身上有着很好的显瘦效果。领口装饰了一个可爱的抽绳,漂亮的绳结展现出了十足的个性,配合时尚的泡泡袖型,尽显女性甜美可爱的气息。"} +{"content": 
"类型#裙*材质#蕾丝*风格#宫廷*图案#刺绣*图案#蕾丝*裙型#大裙摆*裙下摆#花边*裙袖型#泡泡袖", "summary": "宫廷风的甜美蕾丝设计,清醒的蕾丝拼缝处,刺绣定制的贝壳花边,增添了裙子的精致感觉。超大的裙摆,加上精细的小花边设计,上身后既带着仙气撩人又很有女人味。泡泡袖上的提花面料,在细节处增加了浪漫感,春日的仙女姐姐。浪漫蕾丝布满整个裙身,美丽明艳,气质超仙。"} +{"content": "类型#裤*版型#显瘦*颜色#黑色*风格#简约*裤长#九分裤", "summary": "个性化的九分裤型,穿着在身上,能够从视觉上拉长你的身体比例,让你看起来更加的有范。简约的黑色系列,极具时尚的韵味,充分凸显你专属的成熟韵味。修身的立体廓形,为你塑造修长的曲线。"} +{"content": "类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领", "summary": "文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。"} +{"content": "类型#裙*颜色#蓝色*风格#清新*图案#蝴蝶结", "summary": "裙身处采用立体蝴蝶结装饰辅以蓝色条带点缀,令衣身造型饱满富有层次的同时为其注入一丝甜美气息。将女孩清新娇俏的一面衬托而出。"} +{"content": "类型#裙*颜色#白色*风格#清新*图案#碎花*裙腰型#松紧腰*裙长#长裙*裙衣门襟#拉链*裙款式#拉链", "summary": "这条颜色素雅的长裙,以纯净的白色作为底色,辅以印在裙上的点点小碎花,勾勒出一幅生动优美的“风景图”,给人一种大自然的清新之感,好似吸收新鲜空气的那种舒畅感。腰间贴心地设计成松紧腰,将腰线很好地展现出来,十分纤巧,在裙子的侧边,有着一个隐形的拉链,能够让你穿脱自如。"} +{"content": "类型#裤*材质#羊毛*裤长#九分裤*裤口#微喇裤", "summary": "不同于一般的西服裤。这款小喇叭羊毛裤在样式上显得更加时髦优雅,特地采用微微的九分喇叭裤腿设计,视觉上将脚踝处显得更加纤细。并且特地甄选柔软的羊毛材质,就算直接贴肤穿着,也不会觉得寒冷,比较适合初秋穿噢。"} +{"content": "类型#上衣*风格#简约*衣门襟#拉链*衣款式#口袋*衣款式#拉链", "summary": "上衣与裤子的连体式设计从整体看起来十分的具有大牌的风范。简约,没有任何的其他装饰,把自己的独特尽情展现。上衣胸口两边设有两个加大口袋,更增添了层次感。衣襟了拉链,让穿脱更加的方便,轻轻一点,显得更加时尚。"} +{"content": "类型#上衣*版型#宽松*风格#英伦*风格#复古*图案#格子*图案#复古*图案#线条*衣样式#外套*衣样式#西装*衣领型#翻领", "summary": "这件西装外套选用了经久不衰的格纹元素,通过色彩的明暗对比,展现出丰富的视觉层次,又缔造了英伦风的复古气息。法式的大翻领,延长颈部线条,彰显出女性帅气干练的特殊魅力。宽松舒适的版型完美掩藏了身材的小秘密,给身体自由活动空间。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*颜色#纯色*风格#知性*风格#高贵*风格#性感*图案#纯色*图案#蕾丝*裙型#背带裙*裙型#包臀裙*裙型#鱼尾裙*裙长#连衣裙*裙袖型#喇叭袖", "summary": "蕾丝喇叭袖上衣,搭配鱼尾包臀背带裙,整体造型给人甜美可人的感觉。偏爱蕾丝的浪漫柔情,流露别致女人味。喇叭袖的设计凸显别样浪漫,透露隐约小性感。两件套连衣裙,平添视觉层次感。鱼尾的设计修身显瘦,喇叭袖时尚减龄,纯色设计更加凸显女性知性高贵的气质。"} +{"content": "类型#裙*风格#淑女*风格#清新*风格#性感*图案#碎花*图案#线条*裙型#大裙摆*裙下摆#荷叶边", "summary": "性感的挂脖领设计,展现出迷人的肩部线条,尽显女人的妩媚气息。清新的碎花点缀裙身,凸显出秀雅温柔的韵味,衬的人很是气质不俗。灵动的荷叶边装饰,让整件上衣多了一些柔美和俏皮。散开的大摆裙剪裁,修饰出身材的小缺陷,行走间尽显温婉的淑女气质。"} +{"content": "类型#裙*材质#网纱*图案#蝴蝶结*裙下摆#层叠*裙长#半身裙*裙衣门襟#系带", "summary": 
"层叠网纱,仙气飘飘,却不会过于膨胀。腰间的蝴蝶结系带,恰到好处的增添了柔美感。膝盖以下,长度刚刚好的半身裙,比起“一览无遗魅力尽显”,专注于“完美隐藏”"} +{"content": "类型#裙*版型#宽松*颜色#焦糖色*风格#简约*风格#ol*风格#职场*裙型#百褶*裙长#连衣裙*裙领型#翻领*裙款式#腰带*裙款式#衬衫式", "summary": "来自自制的连衣裙采用今年大热的焦糖色,就像巧克力一样,甜蜜又不腻人。腰带的贴心设计,让宽松的版型也能拥有s曲线。上身简约的衬衫式翻领,衬托小v脸,带来一股职场ol风,加以百褶下摆的点缀,一起述说无尽温柔。"} +{"content": "类型#裙*裙型#鱼尾裙*裙款式#收腰", "summary": "率性大方的裙身,加上经典百搭的收腰版型,轻松打造出了时尚大方感呢。更有着俏皮可爱的鱼尾裙摆以及靓丽打眼的鹅黄色裙身,尽显元气少女风范,减龄效果特别好还特别的有青春活力气息,适合各个年龄阶段的女生们穿着。"} +{"content": "类型#上衣*颜色#红色*风格#青春*衣样式#外套*衣长#短款*衣款式#口袋", "summary": "这款外套对于个子矮小的妹纸来说就是福音了,短款穿在身上搭配起起来,立马就能变成大长腿,把整体身长比例拉长,呈现出黄金比例效果。鲜艳活泼的红色,穿在身上,视觉上给人呈现出青春的活力,元气满满的少女,还能衬托出肌肤的白皙,拥有一整天的好气色。大大的口袋,既可以作为装饰,出门携带东西也是非常的方便,还能增加整体的层次感。"} +{"content": "类型#上衣*材质#牛仔布*颜色#浅蓝色*颜色#深蓝色*风格#休闲*风格#潮*衣样式#外套*衣款式#拼接*衣款式#口袋*衣款式#纽扣", "summary": "BRAND牛仔外套,浅蓝色的衣身和深蓝色形成拼接的设计,充满了潮流的时尚感,翻折的领口造型,衬托在颈部肌肤,能修饰脸型。领口下有单排金属的纽扣门襟,开合很方便,很实用可以保暖。两侧有翻盖的口袋和斜插的口袋,在视觉上很有层次感。看起来很休闲。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*图案#蝴蝶结*图案#蕾丝*裙下摆#花边*裙下摆#压褶*裙长#半身裙*裙袖长#长袖*裙领型#立领*裙款式#拼接*裙款式#钉珠", "summary": "成熟韵味和优雅气质并存的时尚两件套。上衣立领系蝴蝶结造型,俏皮优雅。喇叭长袖拼接压褶蕾丝花边,气质减龄。高腰包臀半身裙,修身效果特别好,收腹展示曼妙的身材曲线。两侧手工钉珠装饰,时髦立体,视觉拉长腿型,整体上身彰显成熟女人魅力。显瘦百搭。"} +{"content": "类型#裙*材质#蕾丝*图案#刺绣*图案#蕾丝*裙衣门襟#拉链*裙款式#拉链*裙款式#吊带*裙款式#收腰", "summary": "蕾丝吊带显露出精致的锁骨,让颈部显得更加修长。腰部采用款腰围收腰的方式,小蛮腰更诱人。裙摆上大朵刺绣花朵,非常逼真,仿佛真正的花朵撒在裙子上摇曳生姿。背后贴心的珍珠扣,美观的同时又避免了生活中忘记拉拉链的尴尬情况,精致不失优雅。"} +{"content": "类型#裙*版型#显瘦*图案#线条*裙下摆#花边*裙腰型#松紧腰*裙长#连衣裙", "summary": "连衣裙采用了松紧腰的设计,凸显出腰部纤细的线条,再加上过膝的长度,可以遮掩掉大腿上的小肉肉,更加显瘦,走路飘逸十足。采用了圆形领口的设计,修饰颈部线条,衣身上加了层次感分明的荷叶花边作为装饰,颇显甜美气质。"} +{"content": "类型#上衣*图案#字母*图案#文字*图案#印花*衣样式#外套*衣领型#圆领*衣长#中长款*衣袖长#长袖", "summary": "圆领款式设计的这一件长袖中长款的外套最大的设计亮点在于衣身上面的印花字母的设计哦,印花字母这样的款式的设计使得整一件外套看起来的感觉是很不错的呐,既显得个性又是很时髦的哟。"} +{"content": "类型#上衣*版型#宽松*风格#街头*风格#休闲*风格#青春*图案#印花*衣样式#卫衣*衣款式#连帽", "summary": "赋予活力标签的连帽连帽卫衣,是穿的出的舒适感,看得见的休闲风。这款卫衣在版式延续了经典的宽松廓形,让身体无拘无束的同时,更显放肆的青春减龄感。前中的人头印花点缀,个性而鲜明,轻松打造活跃于街头的潮酷风采,倍显时尚洒脱范儿。"} +{"content": "类型#裙*颜色#黑白*图案#条纹*图案#线条*裙下摆#荷叶边*裙下摆#压褶*裙长#连衣裙*裙领型#一字领*裙衣门襟#拉链*裙款式#口袋*裙款式#拉链*裙款式#吊带*裙款式#抽褶", "summary": 
"集甜美的少女感和简洁风格为一体的连衣裙,胸前延伸一圈的压褶荷叶边设计,增加了立体层次感,让黑白条纹呈现出水波般荡漾。明线外缝,凸出褶皱的线条,形成对比收边。两侧斜插口袋方便,背后拉链拉和顺滑,吊带一字肩型设计,贴合肩部的织带,可根据身形伸缩长短,非常具有实穿性。"} +{"content": "类型#裤*颜色#黑色*风格#简约*图案#条纹", "summary": "传承动感简约气质的条纹衣身,结合包边圆领和半开襟设计,造型显得活力有范,又不失男孩子的时尚帅气。胸前单侧小口袋点缀,让男宝宝帅气加倍。搭配纯黑色的底裤,整体显得层次十足,视觉也十分有美感,男宝宝穿起来独特魅力尽显。"} +{"content": "类型#裙*材质#雪纺*风格#休闲*裙下摆#花边*裙长#半身裙*裙领型#v领*裙款式#拼接*裙款式#吊带", "summary": "优美而动感的上衣。采用半透的雪纺材质工艺,深黑色系给您以非常魅惑的穿着体验,内里需要搭配深黑色的吊带。花边v字领口连襟拼接,举手投足更加优雅迷人,适合搭配各种半身裙和休闲长裤。"} +{"content": "类型#上衣*版型#宽松*材质#针织*材质#混纺*材质#纤维*风格#运动*风格#休闲*衣样式#卫衣*衣袖长#长袖*衣袖型#落肩袖*衣款式#拼接*衣款式#抽绳*衣款式#抽褶*衣款式#连帽*衣款式#罗纹", "summary": "柔和的纤维混纺针织面料,手感舒适且回弹性强不易褶皱,肌理感强,布面干爽抗起球性能好。休闲运动感连帽卫衣设计,加以流苏感织带抽绳点缀,品质感尽显。宽松舒适落肩拼接长袖,袖口罗纹窄口处理,打造立闲适运动廓形。"} +{"content": "类型#裙*颜色#宝蓝色*风格#复古*图案#复古*裙下摆#开叉*裙腰型#高腰*裙衣长#短款", "summary": "闭眼入的一款裙子,选用了光色感饱满的面料,营造出轻盈欢快的愉悦感。宝蓝与复古绿,轻熟风不二之色。侧开叉的设计,行走起来步履之间尽显女性柔美。高腰的版型设计,拉长身形比例,搭配任何短款上衣都会让你高挑吸睛~"} +{"content": "类型#上衣*版型#宽松*材质#棉*风格#简约*风格#潮*衣样式#衬衫*衣领型#v领*衣款式#拼接*衣款式#荷叶边", "summary": "这款来自massimodutti的衬衫,精选高品质棉质混纤,轻薄质地,吸湿透气,结实耐穿。整体的版型简约大方,在宽松的廓形下感受随性的时尚格调。v领领口的设计,简约之中展现干练硬朗的气场,潮味十足。袖口处双层荷叶边的拼接,低调吸睛,富有层次感。"} +{"content": "类型#上衣*材质#针织*衣样式#毛衣*衣袖长#短袖*衣袖长#长袖*衣袖型#落肩袖", "summary": "长袖的基础设计,活动舒适自在。微微落肩袖的设计,上身更修饰身形。这款毛衣有两个款式,一件是套头毛衣的款,斜下摆的设计。又让整体更具特色了一些;另一件是短袖针织连衣裙的款式。"} +{"content": "类型#上衣*版型#宽松*颜色#卡其色*风格#复古*图案#复古*衣样式#风衣*衣款式#腰带", "summary": "的风衣,没有之一,灵感来源于复古的欧洲军装,肩章排扣和腰带这些细节设计就能展现,然后搭配长款版型,上身自带气场。而且整体采用宽松直筒版型,穿着舒适不显臃肿,还能起到修饰身形的作用。而配色采用经典又时髦的卡其色,可搭性颇高,轻松穿出独特气场。"} +{"content": "类型#裙*颜色#白色*风格#清新*图案#刺绣*裙下摆#花边*裙长#连衣裙*裙领型#v领*裙款式#抽褶", "summary": "简单大气纯白色连衣裙,是开春季节最美好的穿搭单品。简单的小v领点缀领部,加以独特的花边绣花点缀,满满的清新活力悠然散发。加以纯粹的白色选料,上身亲肤透气,自带自然的褶皱肌理。同时,中长款式,修饰好身材,十分美腻。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#青春*风格#清新*图案#条纹*图案#线条*图案#撞色*衣样式#衬衫*衣领型#翻领*衣款式#腰带", "summary": "非常清新百搭的一款衬衫,蓝白撞色的条纹设计青春又减龄。经典的翻领造型暗藏小心机,领型是故意往后穿的设计,上身自然随性,不经意间展露迷人肩颈线条。下摆的开叉细节也是一大亮点,搭配腰间配置的腰带装饰,形成自然收紧的伞状效果,修饰腰身更加显瘦。腰带是可拆卸的设计,采用本布包扣装饰更显精致细节。将腰带去除时就是比较宽松的bf风款式,自带慵懒随意的感觉。"} +{"content": "类型#裤*版型#宽松*颜色#红色*风格#复古*风格#文艺*图案#复古*裤款式#木耳边", "summary": 
"轻盈有垂感的面料,上身舒适亲肤,甩腿的裤摆设计,行走画风干净利落。复古文艺的木耳边元素,细碎褶束腰,凸显腰身。显瘦高腰的宽松裤腿能够整体拉长腰身,使得摩登BRAND少女气质凸显。红色米色温柔色系,百搭时尚。"} +{"content": "类型#裙*材质#蚕丝*风格#性感*图案#印花*裙下摆#垂坠*裙领型#v领*裙款式#拼接*裙款式#飘带", "summary": "精选优质飘逸桑蚕丝面料,质感垂顺柔软,手感舒适细腻;优雅时尚的v领拼接飘带领口设计,展现完美颈部曲线,性感迷人;透视印花罩衫款式设计,给你更多的搭配选择。"} +{"content": "类型#裙*版型#显瘦*颜色#白色*颜色#黑色*图案#线条*裙型#a字*裙腰型#高腰*裙款式#不规则", "summary": "这款裙子采用黑色的颜色打底,裙身上装饰着白色的线条以及扣子装饰,丰富视觉上的变化。另外整体上a字裙裙型搭配高腰的设计,修身效果出众,还有着不规则的裙摆,展现出十足的设计感。"} +{"content": "类型#裙*材质#针织*颜色#黑色*风格#休闲*风格#性感*裙下摆#荷叶边*裙长#连衣裙*裙款式#拼接", "summary": "这款经典的黑色连衣裙,整体采用针织和冰丝两种材料拼接而成,使裙子在休闲中又透着些许法式优雅感。领口采用v形设计,修饰脸型,同时凸出性感又不过分的气质。肩部的荷叶边拼接,显得飘逸灵动,衬托出了女性活泼浪漫的魅力。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#蚕丝*图案#线条*衣样式#衬衫*衣领型#翻领*衣袖型#喇叭袖", "summary": "气质翻领设计,衬托出脖子优雅的线条,桑蚕丝面料,适合敏感肌穿着,穿着清凉透气。很显气质的一件衬衫,完美符合亚洲女性身材,扬长避短,版型很正。修身塑形效果尤为出众,小喇叭袖更是十分具有艺术感,小翻领挺括更显气质,怎么搭配都好看,系上丝巾更有女人味。宽松的版型更能衬托女性的娇小,慵懒和帅气的完美结合。"} +{"content": "类型#裤*材质#牛仔布*风格#街头*风格#复古*风格#文艺*风格#简约*风格#休闲*风格#潮*图案#复古*裤型#直筒裤*裤款式#拉链", "summary": "牛仔裤本身多添了复古文艺,简约直筒休闲款式,修饰腿型尽显挺拔俊朗,出街搭配更有范。时尚猫须磨白做旧处理,复古文艺更具街头潮流风尚。金属拉链暗门襟设计,推拉顺滑更富有质感。"} +{"content": "类型#上衣*版型#h*材质#棉麻*风格#简约*风格#清新*图案#字母*图案#文字*图案#刺绣*衣样式#西装*衣领型#翻领*衣款式#口袋", "summary": "BRAND棉麻西装采用别致的翻领设计,立体修饰脖型,挺括有精神,彰显优雅与品味。两侧翻盖口袋设计,可方便插手的同时,还可放置随身携带的物品,美观又实用。后背清新的字母刺绣,穿着利落精干,彰显女性优雅英气。简约的格调,舒适的h版型,打破对身体的束缚,让你穿得更轻松自在。"} +{"content": "类型#上衣*颜色#纯色*风格#简约*图案#条纹*图案#纯色*衣样式#外套*衣款式#口袋*衣款式#对称", "summary": "来自巴拉巴拉的女童长款外套,设计师采用直筒式衣袖裁剪,并在袖口加饰有纯色条纹,在打破了整体的单一性的同时,还增添了一丝简约时尚气息。再加上对称的斜插口袋,既能给予娇嫩双手温暖,同时还可放置孩子的随身物品,暖心又很实用呢。"} +{"content": "类型#裙*版型#显瘦*风格#淑女*风格#清新*图案#格子*裙下摆#荷叶边*裙长#连衣裙*裙款式#腰带", "summary": "这款连衣裙采用清新的格子元素,干净大方,视觉上给人十分舒服的体验。甜美的荷叶边从上延伸到下面,给人丰富的层次感,十分的淑女,一抹浪漫的气息油然而生,女神气质爆表。腰带的设计既起到装饰作用又显瘦。"} +{"content": "类型#裙*版型#宽松*风格#复古*图案#复古*裙袖型#灯笼袖", "summary": "袖子有灯笼袖的既视感,中世纪的复古韵味轻松展现,版型宽松舒适,上身贴合身材,不会显胖。超级百搭,秋季单穿,搭配裙子裤子都ok!冬天也能做打底,外搭毛呢大衣,气质满满。"} +{"content": "类型#裤*材质#雪纺*风格#性感*图案#线条*裤长#连体裤", "summary": "非常少女感的连体裤,做工非常的细致,面料是雪纺的面料,手感非常好,上身舒适,非常的亲肤。一字领加上吊带的设计,展现了迷人的锁骨,突出了颈部线条,非常的性感。腰间的系带飘逸灵动。荷叶边的袖口浪漫梦幻,非常的有女人味,优雅十足。"} +{"content": 
"类型#上衣*版型#显瘦*颜色#白色*颜色#纯色*图案#纯色*图案#碎花*衣样式#衬衫*衣袖长#长袖*衣款式#收腰", "summary": "推荐这款来自时尚服装品牌tedbaker的长袖衬衫。这款衬衫采用纯色色调搭载小碎花的点缀设计,同时修身版型,穿起来又显得大气时尚,给人十分成熟的感觉,胸前白色扣子设计更显时尚的气息,束紧的袖口设计是的活动更加自然。排扣设计更有容易穿脱。修身版型设计,收腰,勾勒出纤细的腰线。适合25岁到30岁的年轻男性穿着,彰显气质。"} +{"content": "类型#裤*图案#线条*图案#撞色*裤长#连体裤*裤款式#拉链", "summary": "率性的一定是喜欢拥有利落线条的服装,不重复不拖沓,这件连体裤便具有这样干练的气质。宽大松垮的廓型给身体自由舒展的空间,慵懒又个性。拉链从领口直通到裤脚,双头的拉链可以从裤脚拉开,解决了去的问题。酷酷的高领设计,利落的窄袖,外穿内搭都合适。撞色的处理,更加精致、牢固。"} +{"content": "类型#裙*风格#文艺*风格#知性*风格#潮*裙长#连衣裙*裙衣门襟#系带", "summary": "唯美时髦的连衣裙,充满了文艺的气息,呈现出知性洒脱的风采,随时随地都能显得曼妙别致。配以绚丽精美的系带设计,呈现出了新潮时尚的风采,优雅大气,给人以眼前一亮的视觉惊喜。一抹个性的系带,在领口处精心设计。展现出了潇洒知性的魔力,时刻都能够带来新潮的视觉美感,靓丽十足。"} +{"content": "类型#上衣*材质#棉*材质#斜纹*风格#文艺*衣样式#风衣", "summary": "喜欢这样的风衣,简单又带点文艺气的,在春天的时候一穿,很难不让人为她着迷。用了有挺括度和质感很好的面料,有点儿斜纹的样子,手感摸起来有棉的样子,灰常的实在。"} +{"content": "类型#裙*版型#显瘦*材质#蚕丝*风格#欧美*风格#复古*风格#潮*图案#条纹*图案#格子*图案#复古*裙领型#polo领", "summary": "气质的格子设计,复古时尚,polo领的设计,简洁大方时尚,越简单的版型,亮点就越让人惊艳,上身效果很好,款式很时尚有范,面料质感很高档。真丝材质摸起来手感顺滑,很有高级感。欧美范大气经典,修身遮挡腹部赘肉,让你穿上充满自信的魅力。条纹是经久不过时的潮流元素,尽显女性时尚气质。"} +{"content": "类型#上衣*风格#韩版*衣样式#衬衫", "summary": "这一款衬衫手工磨边的设计,做工精湛特别考究,精致的韩版设计,符合女性的身材曲线,自然衬托纤美妙身姿。时尚的双贴袋装饰,立体时尚美观实用。精挑细选的天丝棉布料,丝滑垂坠亲肤细腻。"} +{"content": "类型#上衣*风格#复古*风格#知性*图案#格子*图案#复古*图案#线条*衣样式#衬衫*衣领型#v领*衣袖型#灯笼袖*衣款式#抽绳", "summary": "轻薄舒适的衬衫,质地飘逸,上身后有种轻纱般的朦胧美感,让人爱不释手。淡雅的格纹做点缀,彰显复古的时尚韵味,很好的衬托出白皙的肌肤。流畅的v领,修饰纤细的颈部线条,展现知性的都市风情。袖口处有抽绳收紧的处理,呈现出微蓬的灯笼袖质感,包容手臂的线条。下摆处开衩,体贴又温柔,减少束缚感。"} +{"content": "类型#裤*版型#显瘦*颜色#红色*风格#性感*图案#线条*裤长#连体裤*裤型#直筒裤*裤腰型#高腰", "summary": "连体裤采用了优质面料,精心剪裁而成,修身的版型,让身材凹凸有致,轻松塑造黄金比例。亮眼的红色系设计,凸显出你张扬的个性,同时让你具有出挑的魅力。精美的吊带领设计,修饰颈部线条,还可以防止滑落,美观又兼具实用性。v字领口,让裸露的锁骨更显性感,个性又时尚。高腰线直筒裤型,让身材更显高挑,无论是单穿还是作为内搭,都是不错的选择。"} +{"content": "类型#裙*版型#宽松*颜色#粉色*裙型#百褶*裙领型#圆领*裙袖型#泡泡袖", "summary": "的这款礼服裙,采用丝质面料,以营造亲肤柔软的穿着体验。泡泡袖娃娃圆领的版型,可爱俏皮。衣身以粉色色彩简洁,视觉丰富,展现乖巧甜美的气息,举手投足间,尽显温婉活泼气质。下摆的大百褶设计,宽松无束缚感的同时,还展现孩童的公主风范。"} +{"content": "类型#上衣*版型#显瘦*颜色#黑色*风格#青春*图案#刺绣*衣样式#针织衫*衣样式#开衫*衣领型#圆领*衣款式#钉珠", "summary": 
"本款的针织衫图案真的太美啦!精美刺绣加手工钉珠,图案俏皮可爱。五角星、流行都是一个个珠子串上去的,纯人工,很费时的,阳光下blingbling,如同黑夜里的星空,多彩欢乐又别致。黑色基础款开衫,百搭的圆领,上身减龄显瘦,总而言之就是一款很美的开衫。"} +{"content": "类型#上衣*风格#潮*风格#性感*图案#刺绣*衣样式#雪纺衫*衣款式#露肩*衣款式#不规则", "summary": "这件非常精美时尚的雪纺衫,最最别样灵动的设计亮点和设计的曼妙之处就在于。在整体领口的设计上搭配了别样灵动的一字的露肩的搭配,足以轻轻松松的打造了整体的性感优雅的气质,独特灵动的绣花搭配不规则的下摆,完美的彰显了时尚潮流感。"} +{"content": "类型#裤*版型#宽松*材质#棉*材质#牛仔布*材质#水洗*材质#混纺*图案#线条*裤长#长裤*裤长#连体裤*裤型#阔腿裤*裤款式#抽褶", "summary": "这款BRAND的阔腿连体长裤,精选质量上乘的含棉混纺牛仔布料裁剪缝制而成,经过特殊水洗工艺处理更加柔软亲肤,混纺布料相比纯棉布料更具有弹性不易褶皱变形。阔腿裤的版型宽松自在不挑身材容易驾驭。背带连体裤款式百搭无疑是减龄单品。背后对称贴袋设计修饰臀部线条丰富整体层次感。"} +{"content": "类型#上衣*颜色#粉色*图案#刺绣*衣样式#卫衣*衣款式#露肩*衣款式#荷叶边", "summary": "一款刺绣荷叶边露肩粉色卫衣,粉色的设计,甜美而又俏皮,满足了每个女孩心中的少女心。荷叶边的设计,更添几分灵动感。花草刺绣的设计十分的别致,笼罩着浪漫的气息,更显精致的生活品味。"} +{"content": "类型#裤*图案#字母*图案#文字*裤腰型#松紧腰*裤口#小脚", "summary": "舒适的松紧裤腰,穿脱十分方便。而且一点也不会勒着孩子的肚子,很受宝妈们青睐。优雅的松紧裤脚收口设计,既可以防止裤腿灌风,又可以展现出酷帅的气质。炫酷的字母印图装饰,挡不住的时尚感,塑造出活力四射的时尚男孩形象。"} +{"content": "类型#裙*风格#淑女*风格#简约*风格#知性*风格#高贵*裙领型#v领*裙衣门襟#双排扣*裙款式#绑带", "summary": "这款适合在明媚动人的温暖季节,张扬你淑女的迷人风情。简单优雅的合身版型,精致细腻的v字领口,凸显柔美颈部,领口的绑带搭配金属装饰的设计,简约中增添细节感,诠释高雅大气。包裙的设计,彰显女性高贵大方,配上经典的双排扣,自然流露知性典雅的气质。"} +{"content": "类型#裤*风格#复古*风格#简约*风格#休闲*图案#字母*图案#文字*图案#复古*图案#线条*图案#撞色*裤型#直筒裤*裤款式#抽褶", "summary": "裤子整体风格样式简约休闲,直筒版型穿起来更添笔直挺拔。裤面褶皱工艺理念渗透,营造复古做旧的质感,穿起来典雅绅士。裤脚自然微微收束,修饰腿部线条更添高挑帅气。腰头撞色字母点缀,协调色调更有范。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*图案#线条*裤长#九分裤*裤款式#破洞", "summary": "这么一款修身的牛仔裤版型设计,上身遮肉显瘦的同时更显百搭耐看,腰部的裁剪大方迷人。收腰的版型做工巧妙的勾勒出纤细的腰身线条,裤身上的破洞做工精致巧妙,凸显时尚感。而利落干净的九分裤裁剪,视觉上尽显腿部修长。"} +{"content": "类型#裤*版型#宽松*材质#棉*风格#简约*风格#休闲*裤款式#口袋", "summary": "这款宽松休闲裤精选优质纯棉面料,以能够藏肉的宽松剪裁打板,成品在着用感舒适的同时修饰你的身形。此外,简约版型的基础上配以多口袋的细节设计,吸睛度和时髦度满满。更值得一提的是,它简约的配色,能让你成为更好的自己。是一款春夏季节里休闲场合的实用单品。"} +{"content": "类型#裤*材质#羊毛", "summary": "精纺羊毛面料加入了少量的尼龙,性能得到了改良和发挥既有羊毛的透气亲肤,也有尼龙的抗皱抗起球这种羊毛面料轻薄柔和,做成裤子太合适,穿着舒适又好打理"} +{"content": "类型#裤*材质#羊毛", "summary": "底衫是以美利奴羊毛精制而成。布料极度舒适,排汗力绝佳并且具有天然的透气效果。这款女性短袖底衫是依女性身型量身打造,精致的内嵌式v领设计,收边比男性款式更加精巧。其他特点包括内嵌式袖筒和绷缝线织法,确保衣料不会造成擦伤,同时减少因连身和背包肩带造成的磨擦。"} +{"content": "类型#上衣*版型#显瘦*衣样式#卫衣", "summary": 
"设计师把整个风格定义为,一件卫衣承包了整个秋季。经典又百搭的卫衣绝对是秋季首选。不仅舒适还让人觉得温馨又阳光。最重要的是,它不仅减龄还非常显瘦。"} +{"content": "类型#裙*风格#性感*图案#印花*裙长#连衣裙*裙领型#v领*裙衣门襟#系带", "summary": "超长的睡袍系带款式的连衣裙,柔软的材质彰显轻柔的优雅。性感的v领以及围裹式的版型,搭配热带的印花,营造出性感种带有慵懒的度假风情。"} +{"content": "类型#上衣*风格#文艺*风格#青春*风格#潮*图案#撞色*衣样式#衬衫*衣领型#翻领*衣款式#拼接*衣款式#口袋", "summary": "充满文艺风格的衬衫,在经典的同时,又拥有活泼的设计元素。在右上方点缀了撞色的贴标,不但拥有了立体感,显得青春率性,而且也迎合了当下时尚潮流。采用翻领的领口设计,还可以凸显出干练的男性气质,尽显挺拔身姿。拼接了多个立体的大口袋,还可以凸显出男孩的率性魅力。"} +{"content": "类型#裙*材质#网纱*风格#性感*裙型#网纱裙*裙腰型#高腰*裙款式#钉珠", "summary": "轻盈的网纱裙上绣上树叶和花朵,别致的造型更显甜美,没有大面积的运用,适当留白更有艺术气息,钉珠的加入又为它增加了奢华感,立体精致。大面积的网纱为你营造出浪漫气息,朦胧的的小性感呼之欲出,胸部和裙摆都配上内衬,贴心防走光。高腰线的运用,凸显挺拔,过膝长度更女神范,优雅大气。"} +{"content": "类型#裤*版型#显瘦*颜色#黑色*风格#复古*风格#简约*风格#职场*图案#条纹*图案#复古*图案#线条*裤腰型#高腰", "summary": "经典的纯黑色调,最为基础百搭不易出错,融合入线条干净利落的版型设计之中,衬托出干练气势的职场风范,更有视觉上的显瘦效果。复古的条纹元素的加入,为单调的正装之中增添丝丝时髦气息,配合高腰的设计,提高腰线巧妙的纵向拉长腿部比例。简约的窄腿裤,避免了软塌没精神,上身更加精致有气势。"} +{"content": "类型#上衣*颜色#姜黄色*风格#休闲*衣样式#风衣*衣样式#外套*衣样式#打底衫*衣长#短款*衣袖型#收口*衣款式#螺纹", "summary": "利落有型的短款风衣外套,颜色采用了衬托肤色的姜黄色,内搭简洁打底衫,休闲随意,彰显青春活力。衣身做了机器人图案装饰,童趣十足,美观大方,给略显单的衣身了几分情趣,穿出孩子独有的青春活力。衣身四周螺纹的收口,松紧度好,服帖舒适,防风保暖。"} +{"content": "类型#裙*材质#棉*材质#牛仔布*材质#水洗*风格#复古*风格#潮*图案#复古*裙下摆#开叉*裙款式#拼接*裙款式#不对称", "summary": "精选进口纯棉牛仔面料经过特殊的水洗做旧工艺,弥漫着别样复古的味道,解构式拼接腰头设计极具前卫感,加上不对称裙摆和前后开叉剪裁打破沉闷的造型,都更加有当下潮流个性。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*颜色#黑色*图案#条纹*裙型#鱼尾裙*裙下摆#荷叶边*裙款式#螺纹", "summary": "本品版型宽松,造型独特时尚。竖坑条设计,拉伸黑色条纹,视觉显瘦感好,凸显你的优雅好身材。浪漫荷叶边设计,裙摆蹁跹优雅,螺纹搭配鱼尾,美观又蓬松,给身体预留出足够的空间,穿着的同时,可以尽情展现自己。"} +{"content": "类型#上衣*颜色#纯色*颜色#黑白*风格#文艺*风格#高贵*图案#纯色*衣样式#棒球服*衣袖型#灯笼袖*衣款式#收腰", "summary": "这款连衣裙选用黑白两种色彩设计,简单纯色设计带有精致高贵的气质。两件套设计设计可以让下面的纱裙带有朦胧隐约的美感。七分灯笼袖设计与棒球服版型结合,凸显出混搭的别致风格演绎专属的美感。收腰设计可以展现出你的腰身,文艺范下摆张扬你的气质。"} +{"content": "类型#裙*材质#牛仔布*材质#水洗*裙型#牛仔裙*裙款式#破洞", "summary": "束脚的牛仔裤在现在也是非常少见的,现在大部分的人都穿过破洞的乞丐裤。因此束脚的别样成为我们这款商品的。大腿的水洗效果是为了冲破本是牛仔的规格,腰间的卡扣是这款牛仔裤和其他牛仔裤的别样之处。小腿的束脚更是为略带微风的夏日带来保暖的我效果。"} +{"content": "类型#裙*材质#牛仔布*材质#水洗*裙型#牛仔裙*裙款式#破洞", "summary": 
"这款牛仔做了特殊的水洗工艺,呈现出的效果有反穿感,但是不会突兀,可以说是相当有自己个性的裤子啦。跟日常见到中规中矩的牛仔裤不一样,裤脚部分的折叠呈现的内浅效果。让裤子的层次感更丰富,破洞效果随性自然。用本身自带的水洗色变化,打造出裤子的与众不同。牛仔裤什么的,只要你喜欢,属于闭眼入不会后悔系列。耐穿耐用,起来也非常方便,省心省力,一句,喜欢不要错过哦~"} +{"content": "类型#裙*材质#牛仔布*材质#水洗*裙型#牛仔裙*裙款式#破洞", "summary": "这款时尚牛仔裤选用优质的牛仔面料制作而成,质地厚实,纹路清晰,经水洗磨白破洞工艺处理后,增强了设计效果,上身穿着尽现不羁潮感;而半月型前袋,加入牛仔风格小口袋,裁剪贴合手型,穿着舒适更美观。"} +{"content": "类型#裙*风格#清新*风格#性感*图案#碎花*图案#线条*裙长#连衣裙*裙款式#勾花镂空*裙款式#收腰", "summary": "这款连衣裙采用了丰富的碎花图案,一眼就给人清新明媚的视觉体验,而且还衬托出了女性优雅温柔的一面。收腰版型,更加凸显纤细小蛮腰。领口的v形镂空设计,不仅在视觉上拉长了颈部线条,还体现了几分不过分的性感气质。"} +{"content": "类型#裙*颜色#黑色*风格#潮*裙款式#腰带", "summary": "以个性张扬的黑色为主打,配以大胆前卫的装饰瞬间提高了裙子的整体形象。炫酷的腰带点缀与纤细柔软的之上,一股潮流酷感迸发而出。加之舒适合体的版型设计,诠释出女子婀娜多姿的身姿,亦带来几许高级时髦格调。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*材质#牛仔布*图案#线条*图案#刺绣*裤型#直筒裤*裤款式#破洞*裤款式#亮片*裤口#毛边", "summary": "这款短袖牛仔裤两件套,上衣凭着经典圆领设计,轻松显露脖颈线条。修身显瘦的宽松直筒版型,百搭上身显气质。衣面和裤面上点缀的亮片猫咪刺绣,耐看显气质,瞬间提升衣品。破洞和毛边细节,兼具个性与质感,大大丰富上身时尚度。"} +{"content": "类型#裙*颜色#粉红色*图案#条纹*图案#印花*裙长#连衣裙", "summary": "这款粉红色条纹连衣裙精美大方,充满青春活力气息,十分唯美大气,尽显女性俏丽活泼感。且配以可爱亮眼的印花设计,更显女性甜美气息。"} +{"content": "类型#裙*材质#蕾丝*图案#蝴蝶结*图案#撞色*图案#蕾丝*裙下摆#花边*裙长#连衣裙*裙领型#圆领*裙袖型#荷叶袖*裙款式#拼接", "summary": "一款比较甜美精致的连衣裙,裙身多处拼接了撞色蕾丝花边,丰富了视觉层次,带来了几分甜美浪漫气息,激起满满的少女心,上身甜美减龄。小圆领领型,领口还有蝴蝶结点缀,温婉含蓄,增加了细节看点。多层荷叶袖袖型,更显精致甜美,提升了裙子的设计感。"} +{"content": "类型#上衣*颜色#黄色*风格#简约*图案#印花*衣样式#卫衣*衣门襟#系带*衣款式#抽绳*衣款式#连帽", "summary": "如今卫衣已经成功逆袭身为自带时髦属性的利器,年轻又减龄。靓丽的黄色调在清冷的季节带来一丝暖意,胸前简约的英文字母印花点缀,丰富层次感,让伴随着我们。连帽抽绳系带的版型,为我们带来了无限的青春活力。"} +{"content": "类型#裙*颜色#黑色*颜色#墨绿色*风格#复古*风格#ol*风格#职场*图案#复古*裙长#半身裙*裙款式#拼接", "summary": "衣身以复古的墨绿色为主体,拼接内敛的黑色,既时髦又不乏庄重的气质感。非常适合职场ol一族,与半身裙组合在一起,或者作为大衣的内搭,都不失为吸睛的组合。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#蚕丝*图案#印花*衣样式#衬衫", "summary": "来自BRAND,美元印花真丝衬衫。采用真丝材质打造的衣身,穿着亲肤透气。宽松版型穿着舒适显瘦,毫无束缚感。衣身饰有美元印花图案装饰,视觉效果强烈。后背配有彩色的大号标签装饰,辨识。"} +{"content": "类型#裙*颜色#蓝色*风格#淑女*风格#性感*裙长#连衣裙*裙领型#一字领*裙袖型#喇叭袖", "summary": "不管是什么季节连衣裙都是那一抹优雅的存在,就像newlook家的这款连衣裙,深沉的蓝色调,上身不仅衬托出肌肤的白皙。搭配着性感的一字肩,又展现出性感不失优雅的淑女气息。而袖口出精致的喇叭袖,灵动的样子,举手投足间散发出浪漫格调。"} +{"content": "类型#裙*材质#网纱*材质#雪纺*风格#复古*图案#手绘*图案#复古*图案#线条", "summary": 
"轻盈透气的双层雪纺面料,手感柔软仙气满满。复古的的双层网纱领口,修饰颈脖线条,勾勒柔美脸型。斜开门襟结合开合,新颖独特穿脱便捷。裙身手兰花设计浪漫吸睛,优雅女人味十足。过膝的版型微露脚踝,端庄大方。"} +{"content": "类型#裙*风格#简约*裙袖长#七分袖*裙领型#圆领*裙款式#拼接", "summary": "典雅圆领设计展露净白脖颈,简约七分袖设计将女性优雅气质完美呈现,裙摆拼接细腻轻纱彰显浪漫唯美气质,表面细致纹理彰显自然纯粹的美感。"} +{"content": "类型#上衣*版型#宽松*版型#h*风格#休闲*图案#撞色*衣样式#外套", "summary": "丰盈厚实的毛呢面料,内里做了加棉加厚处理,提升外套的防风保暖性,让你的春秋季节温暖倍增。撞色的羊羔毛领,带来冲击感的视觉,并为颈部添加柔软与温暖。加长的衣身,h版型宽松不挑身材,慵懒休闲风打造。"} +{"content": "类型#裙*材质#网纱*材质#蕾丝*风格#高贵*风格#潮*风格#性感*图案#蝴蝶结*图案#刺绣*图案#蕾丝*裙型#百褶*裙型#抹胸裙*裙款式#勾花镂空*裙款式#腰带", "summary": "镂空蕾丝结合精美的绣花,奢侈华丽加一点点的小性感,结合里面抹胸的内衬,增加了优雅不失潮流。丝绸的腰带以蝴蝶结的形式,装饰在腰间,凸显腰部纤细。百褶纱网的裙子,呈现出朦胧神秘感,更是增加母女们高贵优雅的气质。"} +{"content": "类型#裙*图案#印花*裙长#长裙*裙长#连衣裙", "summary": "这条优雅的长裙,简直是春夏季节,每个爱美女生衣柜里不可或缺的单品,连衣裙设计,省却了整体搭配造型的苦恼,穿在身上尽显仙女气质,细节之处精美绝伦,印花设计配合饱满的色彩,让你立刻成为人群中的焦点。不管是旅游还是出街拍照又或者是春游踏青,穿上它都是妥妥的完美。"} +{"content": "类型#上衣*风格#潮*风格#中国风*图案#刺绣*衣样式#外套", "summary": "这款BRAND的刺绣外套,制作工艺非常的精致立体,并采用了如丝滑般的面料,凸显满满的高级质感,穿着也倍增清爽舒适度;侧面衣袖并设计了logo标志,增添了酷炫个性,可谓在中国风中注入潮流,彰显独特韵味,超有态度款式,出街尽显国潮男子。"} +{"content": "类型#裤*风格#青春*风格#性感", "summary": "这一款弹力窄腿版形的速干休闲裤,设计亮点在于裤子的紧身版形设计。这样的表现手法使得整休裤子看起来非常性感,既充满时尚感又彰显青春动感的气息,动感个性。"} +{"content": "类型#上衣*材质#牛仔布*颜色#灰色*图案#刺绣*衣样式#衬衫*衣款式#口袋", "summary": "如果只是单纯色的灰色未免过于单调,因此,b在胸前加入牛仔衬衫口袋设计,不仅能让衣身更加立体还能提升时尚指数和色彩饱满度。并在口袋上增加小黑熊刺绣,让可爱的孩子穿着萌趣感立显。袖口、领口和下摆的平滑衍缝减少缝边对宝贝肌肤的摩擦感,保护娇嫩肌肤穿着舒适加倍。"} +{"content": "类型#裤*材质#牛仔布*风格#复古*风格#休闲*图案#复古*裤长#长裤*裤款式#拼接", "summary": "休闲范十足的牛仔长裤,贴布点缀背面矩形贴袋,与做旧裤身营造的复古感觉相契合,裤脚两端巧妙拼接,使裤身造型更为立体饱满,并展现不失率性的休闲少年感。裤脚的设计很有小心机,可以放下,也可以卷起来,怎么穿都很潮。"} +{"content": "类型#上衣*颜色#白色*风格#休闲*图案#刺绣*衣样式#衬衫*衣袖长#长袖*衣款式#纽扣", "summary": "一款以优雅白色为主打色调的简洁衬衫,以休闲长袖的版型赋予穿搭者舒适的体验。简洁的衣身设计,搭配大方的纽扣门襟,自带满满的青春活力,日常穿搭显得尤为便捷。同时,精致的绣花点缀衣身,烘托出小女生的精致与美腻,倍显少女感。"} +{"content": "类型#裤*材质#水洗*图案#印花*裤款式#破洞", "summary": "要人,不做!个性磨破破洞,时尚印花设计,街拍潮人必备。舒适耐磨不变形不掉色,走线工整,水洗工艺。年轻,要“裤”!"} +{"content": "类型#裙*材质#羊毛*材质#针织*风格#通勤*风格#淑女*裙型#包臀裙*裙长#半身裙", "summary": "此款羊毛针织半裙女采用半身裙设计,半身包臀勾勒出曲线与美腿,展现轻熟优雅的淑女气质。轻松搞定日常通勤打扮,很好的修饰了身材比例,魅力大方诱惑人心。精选优质羊毛面料,穿着舒适自然。"} +{"content": "类型#上衣*版型#显瘦*风格#清新*图案#印花*衣样式#衬衫*衣领型#翻领*衣款式#纽扣", "summary": 
"更容易引起关注率的印花元素,添加在衬衫上,穿着既不单调乏味又能很好凸显自我。经典的翻领,凸显气质优雅大方,别致的鹅黄色印花,洋气十足,浪漫更显小清新,还好藏肉显瘦,纽扣的位子添加粗黑条点缀,提升整体的吸睛力,穿着上身更利落更时髦。"} +{"content": "类型#上衣*材质#棉*颜色#白色*风格#简约*风格#休闲*风格#清新*图案#拼色*衣样式#卫衣", "summary": "来自BRAND的这款卫衣,采用柔软透气的纯棉面料,营造出舒适的穿着体验。简约的衣身,白色调,清新干净,融入了拼色的按扣装饰,既可以系上扣子休闲舒适的穿着,也可以将扣子解开,露出诱人的香肩,打造多变得穿着,让你不做平庸女。"} +{"content": "类型#上衣*版型#显瘦*颜色#黑色*图案#字母*图案#文字*图案#印花*图案#撞色*衣样式#卫衣*衣领型#高领*衣袖型#罗纹袖口*衣袖型#收口*衣款式#拼接*衣款式#螺纹", "summary": "精选100%绒卫衣面料,舒适透气。捏省设计+罗纹收口塑造蝙蝠廓形,还起到点缀衣身的作用,底摆螺纹收口腰位勾勒出纤细线头,让廓形版型也显瘦。小高领设计凸显气质,弹力螺纹拼接穿脱方便。原创字母印花有一点点叛逆的,凸显自由追求。字母和黑色衣身大胆撞色,时尚感十足。"} +{"content": "类型#裙*风格#性感*裙下摆#荷叶边*裙领型#v领*裙款式#抽褶", "summary": "袖子的荷叶边与领口,很有层次感,尽显甜美优雅的气息。立体褶皱的荷叶边裙摆更显得浪漫与梦幻,v字领口露出锁骨线,尽显性感的情调。"} +{"content": "类型#上衣*版型#显瘦*材质#针织*风格#复古*图案#格子*图案#复古*衣样式#衬衫*衣款式#拼接", "summary": "显型男格调的一款修身衬衫,采用了前后幅拼接的设计款,在肩部通过精致的车缝线连接,呈现出不同与众的独特品位。前幅有致密格纹提花大气装饰,显出些许复古气息,个性的弧形下摆,以及针织贴袋的装饰点缀,带来俊朗帅气的穿衣风格。后幅为平纹肌理,平实而质朴,增添稳重气质。"} +{"content": "类型#上衣*材质#针织*颜色#浅蓝色*风格#清新*衣样式#开衫*衣领型#v领*衣款式#纽扣", "summary": "长款针织开衫为这个春夏而打造,半透明的薄款设计在即能满足搭配,又能略微的保暖,也能作为空调的穿着。清新淡雅的浅蓝色衣身有着湖水般的清澈,非常干净美好。v领的领口设计穿着更加舒适,前短后长的开叉下摆更显良好比例的身型。同色纽扣浑然一体。"} +{"content": "类型#上衣*版型#显瘦*版型#立体剪裁*颜色#白色*颜色#黑白*风格#复古*图案#复古*图案#波点*图案#线条*图案#印花*衣样式#衬衫*衣长#短款", "summary": "这款衬衫复古风的黑白波点印花款,非常的时髦百搭!小短款的设计,小个子女生也能轻松驾驭。修身的立体剪裁,对身材包容性更大,上身遮肉显瘦。复古别致的小尖角领面,采用白色的线条包边,系上更显甜美浪漫的法式风。衣身的隐藏式小口袋,和袖口,设计简洁大气,出街实力吸睛!"} +{"content": "类型#裙*版型#宽松*材质#牛仔布*颜色#黑色*风格#复古*风格#简约*风格#休闲*图案#复古*裙型#牛仔裙*裙款式#抽绳", "summary": "这款来自ehe的牛仔裤,采用黑色棉感牛仔制成,表层泛着淡淡的白点,复古静谧感十足。宽松的裤型,休闲自在,结合松紧式的抽绳腰头设计,上身毫无束缚感,舒适有型。简约的设计搭上利落的剪裁,适合日常多种造型。"} +{"content": "类型#裤*风格#简约*风格#ol*裤型#阔腿裤", "summary": "阔腿裤是人手必备一条的百搭单品,这款阔腿裤整体造型简约大方,简单百搭的同时时髦又自然,带了气质的ol感。垂感极佳的麻纱面料光滑亲肤,走起来飘逸显气质,气场自动释放。"} +{"content": "类型#上衣*版型#显瘦*风格#韩版*图案#字母*图案#文字*图案#印花*衣样式#卫衣*衣领型#圆领*衣门襟#套头", "summary": "这一款卫衣字母印花,青春活力自然减龄,精致裁剪加上韩版的设计,显瘦的同时拉长身型比例,将整个身材比例拉伸,凸显曲线。套头设计,方便穿脱气质利落。简洁圆领,修饰脖颈落落大方。"} +{"content": "类型#裤*版型#宽松*风格#简约*风格#休闲*图案#菱形*裤型#直筒裤*裤型#阔腿裤", "summary": 
"简约大方,时尚休闲,宽松直筒阔腿裤的版型设计简约却不简单,使得行走间自带清风,随性洒脱的魅力令人无法抵挡,同时,彰显出英姿飒爽的女王范儿。结合立体感的菱形提花面料,使得这条阔腿裤富有肌理感,低调而不失奢华地诠释着精致魅力。"} +{"content": "类型#上衣*材质#蕾丝*图案#刺绣*图案#蕾丝*衣样式#衬衫*衣款式#勾花镂空", "summary": "这款衬衣镂空的蕾丝花边领口,装点着梦幻般的唯美视觉感受;衣袖处相同面料的蕾丝装饰,充满了统一感;姿态各异的纯白花朵,以刺绣的工艺,仿佛朵朵鲜花绽放在衣摆上,优雅动人。"} +{"content": "类型#裙*颜色#紫色*颜色#纯色*颜色#粉色*风格#淑女*风格#简约*图案#纯色*裙型#鱼尾裙*裙款式#拼接*裙款式#抽褶", "summary": "纯色的过膝裙,有粉色、紫色和棕色三种款式可供选择,全面大幅度的纯色底色能很好地铺陈出一种别样的简约而质朴的视觉美感。而裙子的拼接式褶皱鱼尾裙下摆打破了传统的视觉比例,凸显出优雅的淑女之美。"} +{"content": "类型#裙*颜色#纯色*风格#简约*风格#性感*图案#纯色*裙型#直筒裙*裙款式#腰带", "summary": "这款BRAND的吊带衫。纯色的衣身,简约又不失大气,还很百搭。直筒的版型,巧妙的挡住赘肉,视觉上更显苗条。腰带的添加,勾勒出女性曼妙的身姿。多了一丝性感的调调。"} +{"content": "类型#上衣*版型#宽松*风格#英伦*风格#休闲*图案#线条*衣样式#风衣*衣款式#口袋", "summary": "风衣大多为英伦复古风,这款继承了基础版型的风衣,设计成宽松的廓型样式,版型上更偏休闲风格。大气驳领剪裁加上两侧的斜插口袋设计,帅气个性具有独特韵味搭配。两袖袖口的搭扣样式,使其具有修饰手臂线条的作用,并且防止冷风灌入。较长的款式剪裁,能够很好的遮住身型的不完美,打造修长曲线美感。"} +{"content": "类型#上衣*版型#宽松*风格#简约*风格#休闲*衣样式#卫衣*衣领型#一字领*衣袖型#喇叭袖*衣款式#吊带", "summary": "blank的这款一字领卫衣设计贴心简约。一字领展露玲珑锁骨,花边堆褶的衣领围绕在肩部,凸显女性气质。吊带设计轻巧精致,还免除了衣领滑落的可能,增强自信。双层喇叭袖设计甜美可爱,减龄又俏皮。宽松版型适合多种身材,穿着舒适休闲。"} +{"content": "类型#裙*材质#牛仔布*材质#水洗*颜色#浅蓝色*风格#休闲*图案#线条*裙型#牛仔裙*裙腰型#高腰", "summary": "别小看基础款牛仔裤的魅力,这款水洗牛仔裤,遵循一贯极简的设计,却更符合大众的口味,可见其亲和力之高。猫抓痕的经典设计,增添几分不浮夸的潮范感。结合浅蓝色的水洗牛仔设计,更是凸显干净休闲的feel。中高腰的版型,展现完美腿部线条。"} +{"content": "类型#裤*颜色#纯色*图案#条纹*图案#纯色*裤长#九分裤*裤型#哈伦裤*裤款式#口袋*裤腰型#高腰", "summary": "高腰的款式设计时尚大气,选用纯色的版型,搭配哈伦裤的样式,简洁利落中不失腔调。九分裤的设计风格,微微露出脚裸不得不分,帅气感爆棚。双侧的口袋和细腻的条纹搭配,呈现出饱满的层次感,唯美大方。"} +{"content": "类型#上衣*版型#宽松*颜色#粉色*颜色#绿色*图案#印花*衣样式#卫衣*衣袖型#落肩袖*衣款式#抽绳*衣款式#连帽", "summary": "一眼看上去就能吸引住眼光的一款卫衣,荧光绿色在人群中显得特别的显眼,搭配着身前同样很鲜艳的贴布印花,上身满满的个性,粉色的抽绳也与整体卫衣的风格融合起来。整体是不挑人的宽松落肩袖版型,经典的连帽设计穿起来很有活力感。"} +{"content": "类型#裙*材质#牛仔布*材质#水洗*颜色#白色*风格#街头*风格#运动*图案#条纹*裙型#牛仔裙*裙款式#拼接*裙款式#破洞*裙款式#抽绳", "summary": "水洗做旧工艺加上裤身上的磨破破洞元素,营造出饱满的街头气息,同时裤腰加宽裤袢的设计。辅以白色抽绳,以及裤侧的白色条纹拼接,代入轻快灵动的减龄运动款型,区别于传统的牛仔设计。"} +{"content": "类型#裙*颜色#白色*颜色#粉色*风格#性感*图案#线条*裙型#直筒裙*裙长#连衣裙*裙领型#v领", "summary": "对于一些粉色控的小仙女来说,这款粉色连衣裙绝对是不能错过的存在:大气的直筒版型不挑身材,而且也能让你的身形看着更加的高挑。白色的包边处理不会影响整体淡雅的气质韵味,而且也让衣身的线条更显清爽,交叉的v领剪裁能让你秀出精致的锁骨,无意中也添了几分性感的气息了。"} 
+{"content": "类型#裙*材质#蕾丝*风格#青春*图案#蕾丝*裙下摆#花边*裙款式#拼接*裙款式#吊带", "summary": "灵动的蕾丝元素给人很青春甜美的感觉,巧妙的将女性的柔美优雅尽情展现。而这款吊带大方的利用蕾丝花边拼接,增添一丝女人味,加上细腻的提花设计,萦绕裙身犹如精灵般动人,瞬间让整个人变得柔和起来,尽显不拘一格的时尚格调。"} +{"content": "类型#上衣*版型#宽松*风格#性感*衣样式#衬衫*衣袖型#灯笼袖*衣款式#荷叶边", "summary": "洋气的荷叶边设计的一款衬衫上衣,不管你是选择单穿,还是选择搭配穿着,都是非常的百搭时尚,还富有个性感。荷叶边的领口设计,修饰脸型,增添服饰的层次,显露甜美感的穿着。灯笼袖的袖口设计,轻松遮挡了腰部的赘肉,显露穿着的个性与独特感。宽松的衣型轮廓,不显身材,不挑人穿,好驾驭。"} +{"content": "类型#上衣*材质#棉麻*颜色#白色*颜色#黑色*风格#文艺*图案#卡通*图案#波点*图案#印花*衣样式#外套*衣款式#抽褶*衣款式#连帽", "summary": "自带吸睛技能的外套,醒目清爽的白色调,加上装饰的连帽,以及一身黑色的波点印花,轻松营造出趣味可爱的卡通波点狗形象,让宝宝上身简直不要萌活泼;双层棉麻面料的褶皱独特性,使整体看起来恬静文艺,而又有着透气亲肤的特性,赋予宝宝舒适自在的体验。"} +{"content": "类型#裤*版型#宽松*材质#牛仔布*材质#水洗*风格#复古*风格#休闲*图案#复古*裤长#长裤*裤型#直筒裤*裤款式#拼接*裤款式#破洞*裤款式#不规则*裤腰型#高腰", "summary": "BRAND带来的这款长裤,后幅采用解构式双腰头设计,加以小心机的高腰处理,能够有效提高腰线;前后拼接结合水洗磨白工艺,带来富有层次感的复古牛仔视效;宽松的直筒裤型,对身材有良好的包容性,打造休闲随性的穿着视效;加以裤身不规则的破洞设计,尽显叛逆不羁的个性。"} +{"content": "类型#上衣*风格#简约*风格#知性*图案#条纹*图案#线条*衣样式#衬衫*衣领型#一字领*衣款式#勾花镂空", "summary": "sitiselected这款条纹一字领衬衫,简约的一字领设计,尽显优雅知性。镂空排扣袖设计,修饰手臂的额线条。下摆两侧开衩裁剪,方便穿着,提升整衣的细节感。"} +{"content": "类型#上衣*颜色#黑色*风格#复古*图案#复古*图案#线条*衣样式#衬衫*衣款式#纽扣", "summary": "这是一款时尚感十足的小上衣,它采用了衬衫的设计款式,具有美观且百搭的穿着效果。袖口处采用纽扣设计,看上去十分大气优雅。它采用了精致的v型领口设计,能够凸显出女性独特的颈部柔美线条。然后再其配合黑色的贴布图案,十分个性且新颖。贴布上有着金属环的装饰设计,无形之中就能够增添许多复古的腔调,尽显你的时髦与大气。"} +{"content": "类型#裤*版型#显瘦*风格#简约*裤款式#口袋*裤口#卷边", "summary": "很简约时髦的一款纸袋裤,简约的版型设计,上身修身时髦,穿出优雅的气质。多口袋设计,增添时髦气息,更方便实用。裤脚卷边装饰,更独特时尚。"} +{"content": "类型#裙*风格#淑女*风格#复古*风格#清新*图案#复古*裙长#连衣裙*裙款式#抽褶*裙款式#收腰", "summary": "一款汉元素的连衣裙让我们的心瞬间沉静下来,它清新温婉的色系仿佛让时光都了,上身真的很显文静淑女。复古的一片式交领设计轻便利落,而收腰的版型又增加了层次感,更加好穿显高。蓬松的下摆自然褶皱,更显气质绰约。"} +{"content": "类型#裙*材质#雪纺*裙型#a字*裙长#连衣裙*裙领型#v领*裙衣门襟#系带", "summary": "暗红色的调调,可以完美的胜过所有妩媚的颜色;布满风琴褶的雪纺料,深v系带的a字连衣裙,不如为这件带有法式田园的风格的裙子,挑选几件富有现代感单品来搭配,比如踩上一双芭蕾舞平底鞋,背上天鹅绒,来平衡单品的法式田园风的气质。"} +{"content": "类型#上衣*版型#宽松*风格#简约*风格#性感*衣样式#开衫*衣样式#毛衣*衣领型#v领", "summary": "百搭糖果色开衫毛衣,时尚的大v领,彰显性感,简约百搭宽松,这样的设计也很显肤白,质地端庄大气,却可以衬托出你与众不同的气质来,适合花开的季节穿。"} +{"content": "类型#上衣*版型#宽松*颜色#纯色*风格#简约*风格#性感*图案#纯色*图案#创意*图案#线条*衣样式#开衫*衣领型#v领*衣款式#勾花镂空*衣款式#纽扣", "summary": 
"宽松的开衫版型,采用了单排纽扣,给人一种简约随性的气息。精致的v字领口,衬托颈部的线条,又增加了性感干练的魅力。纯色的衣身,采用了勾花镂空的设计,富余创意性的美感,灵动又欢脱。袖口微收,能够贴合腕部的线条。"} +{"content": "类型#上衣*版型#显瘦*颜色#粉色*颜色#深蓝色*风格#运动*风格#青春*图案#条纹*衣样式#外套*衣领型#立领*衣袖型#喇叭袖", "summary": "出自品牌fiveplus的这款棒球外套,充满运动风情。衣身制作采用光泽细腻,触感柔滑的纤材质,搭配流畅的剪裁手法,打造出立体修身的立领喇叭袖造型。既能巧妙凸显出白皙肌肤与精致五官,同时还能用飘逸柔美的袖管中和衣型的中性气息,烘托出娇俏少女韵味。深蓝色调与袖管两侧粉色的条纹装饰结合,演绎出了沉静而不失甜美的观感,能助你诠释出活力青春范儿。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*颜色#浅蓝色*风格#简约*风格#休闲*风格#清新*图案#印花*衣样式#polo", "summary": "时尚简约款polo衫,宽松休闲版型以清新浅蓝色为设计,舒缓炎热夏季带来的感,给视觉带来清凉舒爽。撞色系印花点缀简约衣身,给衣身增添亮点时尚。宽松衣身,简单修身,凸显男性时尚魅力。"} +{"content": "类型#上衣*版型#宽松*颜色#黑色*风格#简约*衣样式#风衣*衣样式#外套*衣领型#翻领*衣门襟#双排扣*衣款式#纽扣", "summary": "一款宽松版型设计的风衣外套,直筒宽松的衣型轮廓,不显身材的设计,不挑人穿的穿着,无论你是高个子,还是小个子,都可以轻松的驾驭。翻领的领口设计,修饰脸型的轮廓,显脸小的视觉效果。黑色的双排扣纽扣点缀,为简约版型的衣身,增加了层次感,让上身穿着没有单调感,显露满满的个性魅力。"} +{"content": "类型#上衣*材质#牛仔布*风格#清新*图案#创意*衣样式#衬衫*衣款式#破洞", "summary": "一直很喜欢这个风格,衬衫款又不会太过俏皮的像小女生,适合更多年龄层穿着。个性破洞的效果,打破单调,不像一般那样硬朗,更生趣。清新的牛仔蓝,整体的风格清爽干净。前短后长的下摆设计,造型时尚有趣,创意感强,并增添了整衣的层次效果,即便简单一搭,就能让自己很有品位的感觉。"} +{"content": "类型#裙*风格#运动*风格#性感*裙型#包臀裙*裙型#一步裙*裙长#短裙*裙衣门襟#拉链*裙款式#拉链", "summary": "来自品牌BRAND的这款一步短裙,设计师采用包臀的设计让这条原本运动学院风的单品多了一份性感,并且可展示出穿着者的好身材。其次,还在背面设计了一个小拉链,这样的装饰设计,不会让裙子显得单调,而且更加的有设计感。"} +{"content": "类型#上衣*版型#h*颜色#白色*风格#复古*图案#复古*衣样式#毛衣", "summary": "市面上很少见到肌理感如此顺畅的毛衣,从领口开始一直下摆,让整件毛衣的条理感清晰的好似一行诗,随性的故意做长袖口是为了让你可以把袖口告别呆板表情。h版型身材包容性高,适合外穿也适合内搭,三种颜色各有各的性格,白色安静温柔,深绿一如既往的内敛,则复古显白,总之总有颜色适合你。"} +{"content": "类型#裙*版型#显瘦*风格#休闲*风格#潮*裙衣门襟#拉链*裙款式#拉链*裙款式#收腰", "summary": "立体修身的版型,符合人体工学,藏肉显瘦,修饰美腿,拒绝臃肿,适合大多数体型,更好的修饰身形,根据人体美学打造属于你的黄金比例,隐形的品质拉链,经久耐用,收腰裙头尽显品质。适合潮流爱美女性在休闲的场合穿着。"} +{"content": "类型#裙*版型#显瘦*材质#绸缎*风格#复古*风格#知性*图案#格子*图案#复古*图案#线条*图案#撞色*裙腰型#高腰*裙款式#拼接*裙款式#木耳边", "summary": "优雅经典的撞色格纹,复古而知性的韵味搭配柔美木耳边,灵动中展现令人窒息的浪漫。修身显瘦的版型搭配绸缎腰部拼接,收腰身的同时大大提升了气场,魅力挡不住。高腰线拉升腿部线条,打造小蛮腰不费力,恰到好处的裙长衬显双腿纤长高挑。"} +{"content": "类型#上衣*材质#牛仔布*材质#水洗*风格#简约*风格#休闲*衣样式#衬衫*衣领型#翻领", "summary": "想用简单的单品或者搭配穿出时尚感,牛仔绝对是首要选择。而这个春季刮起的大热复古风,牛仔衬衫是一大热门单品,无论是想穿出帅气,还是休闲的酷感,牛仔衬衫都可以使你成为抢镜的焦点。江南布衣休闲衬衣经典的小翻领,搭载水洗的视觉效果,简约休闲又百搭。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙衣长#短款", 
"summary": "蕾丝面料哒,女人味达到造ji的位置连衣短款裙,优雅时尚范满满,名媛气质仙女时尚风格满满裙身细节部分也是real感人,不低俗,竟然还过分的仙袖子处的若隐若现,撩人到无懈可击polo领,有点禁欲,总体搭配出的效果翻倍!短款剪裁"} +{"content": "类型#裙*材质#棉*材质#羊毛*材质#混纺*颜色#黑色*风格#通勤*风格#休闲*风格#潮*裙型#包臀裙*裙腰型#自然腰*裙长#半身裙", "summary": "这是一款设计很别具一格的半裙,是都市女性日常通勤或者休闲时必备的单品。精选混纺的羊毛和棉料面料,有型更有范。沉稳大气的黑色配上包臀半裙的廓形,具有良好的包容性,迷人又俏丽。舒适的自然腰穿脱方便,新潮又时髦。"} +{"content": "类型#裙*风格#复古*图案#格子*图案#复古*图案#线条*裙型#鱼尾裙*裙腰型#高腰*裙长#半身裙", "summary": "看到这款别致的棕色系格纹裙,让人忍不住想穿上它,展现自己的复古气质。高腰的版型和立体的剪裁,让这款半身裙勾勒出你婀娜多姿的线条。优雅的鱼尾裙摆,穿上以后更是平添了几分灵动飘逸感,打造自信满满的你。"} +{"content": "类型#裙*版型#宽松*风格#文艺*风格#清新*图案#环保*裙长#连衣裙", "summary": "这一款连衣裙采用了天然环保的苎麻面料,拥有良好的亲肤性和透气性,穿着舒适,带来亲近自然的自由感觉。宽松的廓形设计有种度假的韵味,不挑身材不挑人,可以轻松驾驭。裙身上的花果图案装饰配色文艺清新,营造淡雅的气韵。"} +{"content": "类型#裙*风格#淑女*风格#潮*图案#撞色*裙下摆#开叉*裙长#连衣裙*裙款式#拼接*裙款式#口袋*裙款式#连帽", "summary": "连衣裙采用了拼接的撞色设计,在视觉上带来层次感。时髦的连帽款式上身彰显活力十足,引领时尚潮流趋势,胸前的口袋设计更显立体,再加上下摆处的开衩设计,露出拼接的网裙摆,带来新颖的设计感,上身彰显甜美的可爱淑女气质。"} +{"content": "类型#裙*版型#显瘦*图案#线条*裙型#背带裙*裙领型#v领", "summary": "v字的领型是在背带裙的设计当中经常会出现的领型,其中一个重要原因就是v字领型的修身效果跟背带裙个性相搭,用在背带裙的设计上十分应景。而且v字的领型线条比较利落,它有着视觉上面的拉长感,让女孩子轻松穿出显高显瘦的效果。"} +{"content": "类型#裤*裤长#连体裤*裤型#连衣裤", "summary": "这款连衣裤,柔软面料亲和宝贝每一寸肌肤,同时不起球,的开档开合设计,能够方便更换尿布;同时加以品质暗扣,可以带给宝贝零摩擦的舒适体验,穿着更加自在。"} +{"content": "类型#裤*颜色#迷彩色*风格#街头*风格#潮*风格#摇滚*图案#字母*图案#条纹*图案#文字*图案#迷彩*图案#音乐*图案#线条*图案#刺绣*裤款式#拼接*裤腰型#松紧腰", "summary": "裤身两侧的条纹拼接,为整体增添潮流元素,精致的字母刺绣搭配,起到很好的点缀作用,让整体看起来美观又大方。裤身迷彩拼接,十分有街头摇滚风的味道,可以尽情展现属于你的风格;松紧裤型,美观而实用修饰出笔直而修长的腿部线条。"} +{"content": "类型#上衣*颜色#黑色*颜色#粉色*图案#线条*衣样式#针织衫*衣领型#圆领*衣款式#露背", "summary": "一件前后风格截然不同而有和谐共处的针织衫。温柔明媚的少女粉色,让人忍不住想捧在手心里好好呵护;大圆领设计,修饰脖颈线条;露背设计,搭配黑色弹力织带,交叉之中隐约露肤,时髦独特,尽显优雅气质;"} +{"content": "类型#裙*材质#蕾丝*颜色#纯色*图案#纯色*图案#线条*图案#蕾丝*裙下摆#层叠*裙下摆#花边*裙衣门襟#拉链*裙款式#勾花镂空*裙款式#拉链*裙款式#吊带", "summary": "超美的蕾丝裙,优雅的花边领口,贴合肌肤,修饰柔美颈部线条,精致动人。袖口的蕾丝花边彰显几分俏皮感。背部隐形水滴拉链,精巧大气,拉合顺畅,方便穿脱。肩部镂空设计,不但精致,而且很清凉哦。流畅的叠层裙摆,丰富层次感。一体吊带内搭,避免走光。纯色的裙装,有种很圣洁的气质。"} +{"content": "类型#裙*版型#宽松*材质#牛仔布*颜色#浅蓝色*风格#街头*风格#休闲*裙型#牛仔裙*裙下摆#毛边*裙款式#不规则", "summary": 
"休闲的牛仔裙裤无论在什么时候都不会过时,永远会给人一种时尚前卫的感觉。不规则的毛边裤口,彰显年轻新潮流和不一样的时尚品味,还搭配出满满的街头风格。它宽松舒适的版型可以修饰你笔直的大长腿,牛仔布设计穿起来不仅舒适又有质感。浅蓝色色调,看起来非常彰显年轻活力。"} +{"content": "类型#上衣*版型#显瘦*图案#条纹*衣样式#衬衫*衣款式#露肩*衣款式#抽褶", "summary": "BRAND以蓝白细条纹打造的这款上衣,通过竖条纹的运用结合相对修身的剪裁,带来较为显瘦且好穿的单品。比较特别的是,设计师为这款衬衫做了露肩的处理效果,变化感鲜明。此外,衬衫通体褶皱的处理,显得非常别致且出彩。"} +{"content": "类型#裙*材质#网纱*裙型#蛋糕*裙型#抹胸裙*裙长#连衣裙*裙款式#亮片", "summary": "这款连衣裙第一眼就美得让人窒息,在温柔的网纱织面上,点缀了炫目晶莹的亮片元素,看起来层次丰富而梦幻,流露出的朦胧感特别美妙,颇具华丽隆重的贵族气息。甜美的抹胸式设计更加有女人味,可以尽情展现女生的曼妙身姿。三层蛋糕裙摆仙气满满,着每一位有着少女心的girl,简单一件就能让你秒变小公举。"} +{"content": "类型#裙*材质#棉*颜色#纯色*图案#格子*图案#纯色*图案#拼色*裙长#半身裙*裙款式#抽褶*裙款式#收腰", "summary": "松紧收腰设计,拼色格纹极具度假情调,纯棉面料具有极好的透气性与亲肤感,下摆褶皱处理少女感十足,纯色极简百搭,日常搭配小脚都很好看。"} +{"content": "类型#裤*版型#显瘦*版型#h*材质#蕾丝*风格#ol*风格#潮*图案#蝴蝶结*图案#蕾丝*裤款式#绑带*裤腰型#高腰*裤口#微喇裤", "summary": "优雅气质的花边领口设计,凸显服装的时尚新潮。时尚喇叭袖口搭配绑带蝴蝶结,蕾丝裙摆设计,穿着飘逸大方,彰显女神范。高腰设计,拉长腿部比例,a字裙摆,遮肉显瘦,有范优雅显气质,谁都能hold住的实穿款。"} +{"content": "类型#裙*风格#潮*风格#性感*裙型#a字*裙型#鱼尾裙*裙长#连衣裙*裙领型#立领*裙款式#钉珠*裙款式#木耳边", "summary": "a字型轮廓的一条连衣裙,不显身材的设计,还不挑人穿,无论你是高个子,还是小个子,都可以轻松的驾驭,让你轻松展现魅惑的女人味。木耳花边的设计,显露穿着甜美感,立领的领口,修饰脸型,显脸小的视觉效果。鱼尾的裙摆,是个性感的设计。钉珠的点缀,增添服装的层次,与潮流感。"} +{"content": "类型#裙*版型#显瘦*材质#牛仔布*图案#线条*裙型#牛仔裙*裙下摆#弧形*裙腰型#高腰*裙长#短裙*裙款式#拼接", "summary": "显瘦牛仔短裙,穿着美观又时尚。优雅又大方的高腰设计,穿着非常的美观气质,勾勒出纤细的腰部曲线。多裁片拼接的造型,别致时尚又个性,增添线条美感。前短后长的版型,优雅的圆弧裙摆,美观气质又优雅。"} +{"content": "类型#上衣*颜色#迷彩色*风格#复古*风格#潮*图案#复古*图案#迷彩*图案#刺绣*衣样式#衬衫*衣样式#卫衣*衣款式#口袋", "summary": "采用经典美式复古版型,定制的刺绣臂章,彰显品质,方便实用迷彩大口袋设计,增大大提高了储物空间,采用标准版型裁剪工艺,能够容纳多种不同身材。随意搭配卫衣或者衬衫,非常适宜春季各类都市潮流装扮。"} +{"content": "类型#裤*版型#显瘦*版型#立体剪裁*颜色#深蓝色*风格#复古*图案#花色*图案#碎花*图案#复古*图案#线条*图案#撞色*裤型#阔腿裤*裤款式#拼接*裤腰型#高腰", "summary": "阔腿裤的设计垂感较好的a字版型,很显瘦呢!复古的深蓝色颜色十分得衬托肤色,深浅不一的小碎花点缀着吊带裤设计,让你看起来更显魅力。拼接撞色花色罗纹领口设计,也带有几分女人味。高腰的立体剪裁更是修饰腰部的线条感,凹凸有致的好身材表露无遗。"} +{"content": "类型#裙*版型#显瘦*材质#针织*风格#复古*风格#简约*风格#休闲*风格#清新*风格#职场*图案#条纹*图案#复古*裙下摆#花边*裙长#短裙*裙长#长裙*裙领型#半高领", "summary": "花边半高领唤醒了18世纪的浪漫主义风格,凸显气质,焕发复古优雅魅力。细带设计,沿袭宫廷式的至美元素。竖条纹弹力修身,显瘦纹理,贴合女性身材曲线,简约优雅。搭配优雅职场短裙,清新休闲长裙,打造不一样的气质。秋冬内搭不可或缺的打底针织。"} +{"content": "类型#裤*风格#复古*风格#性感*图案#复古*图案#印花*图案#撞色*裤长#九分裤*裤款式#木耳边*裤款式#抽褶", 
"summary": "领口和袖口的木耳褶皱元素穿上之后凸显女人的性感魅力,这样的设计元素你一定不会拒绝。领口的多层蝴蝶系带,有着少女的减龄可爱,不会显得太过老气。九分的喇叭袖设计迎合了几年来的复古风潮,而且可以修饰胳膊上的赘肉。微微廓形的裙摆不会闷汗束缚,无论是外穿还是内搭都很好看。热烈的撞色印花元素很适合度假穿着。"} +{"content": "类型#裙*材质#棉*颜色#红色*裙长#半身裙*裙袖长#长袖*裙款式#抽绳*裙款式#连帽", "summary": "红色斜襟抽绳,气场十足,回头率高,长袖连帽,适合早春时节。可以搭配半裙也可单穿,显得活力十足有气质,格调也随之上升,棉质材质,穿着很舒适,手感丝滑有韧性,还在等什么,回家吧。"} +{"content": "类型#裙*版型#宽松*颜色#墨绿色*风格#清新*图案#印花*裙型#直筒裙*裙长#连衣裙*裙款式#口袋*裙款式#纽扣", "summary": "十分秀气的一件气质连衣裙,宽松的直筒裙型一点也不挑身材,墨绿色的基调中给到了清新的小印花装饰,加上小巧的口袋和纽扣装饰,更是大方又端庄实穿。"} +{"content": "类型#裙*版型#宽松*材质#雪纺*风格#复古*风格#简约*图案#复古*图案#波点*裙款式#拼接", "summary": "复古俏皮的波点图案装点裙身,色彩亮丽吸睛,洋溢着浪漫甜美气息,拼接雪纺袖子设计,碰撞出别样个性优雅感。简约流畅宽松版型,结合a摆廓形,更好的包容和修饰身材曲线。"} +{"content": "类型#上衣*颜色#纯色*图案#纯色*衣样式#外套*衣领型#立领*衣门襟#拉链*衣款式#口袋*衣款式#拉链", "summary": "这款外套采用立体的版型,搭配立领的设计,让外套挺括有型,穿起来更显精神。而光面的设计,让外套表面的色泽会根据的变化而产生变化,彰显出低调的奢华感。同时也让外套穿起来温暖舒适,兼具了保暖性和美观性。拉链胸袋的装饰,让纯色的衣身不显单调,丰富了视觉效果,让衣身两侧的口袋点缀,让你的双手能够随时取暖。"} +{"content": "类型#上衣*材质#棉*材质#纤维*颜色#黑色*图案#线条*图案#撞色*衣样式#针织衫*衣领型#翻领*衣门襟#拉链*衣款式#拉链*衣款式#罗纹", "summary": "BRAND这款针织衫,采用手感细腻的棉和聚酯纤维面料精心制作而成,轻薄且透气性好,给人带来柔软亲肤的穿着体验。设计师以简明的线条勾勒撞色罗纹翻领版型,修饰脸部;并将黑色作为主底色,彰显型男的低调轻熟韵味;门襟拉链的设计,增加了层次,兼具美观性和实用性。"} +{"content": "类型#裤*裤长#连体裤*裤型#阔腿裤*裤款式#拉链", "summary": "连体裤的版型设计,在一定的条件上可以很好的拉长,女人整体的身材比例。应用的翻领展示出了干练的一面,采用的拉链设计,让你可以随心所欲的变换风格。阔腿的样式,双腿比例更加的完美,女神非你莫属。"} +{"content": "类型#裙*风格#高贵*图案#渐变*裙型#蛋糕*裙型#蓬蓬裙*裙型#公主裙", "summary": "甜美风格的小公主裙,在外形上融入了渐变的色彩,使得裙子的立体效果增强,突出宝贝的高贵之气。裙子在设计时融合蛋糕裙与洛丽塔的双重风格。在风格的混搭之间,营造出来更多的浪漫之感。蓬蓬的小裙摆,强烈的视觉效果,展示出来宝贝更多的活力,让小公主如同天鹅一般神秘迷人。"} +{"content": "类型#裙*材质#雪纺*风格#清新*风格#性感*图案#印花*裙长#半身裙*裙衣长#中长款", "summary": "一款中长款的雪纺半身裙,有种精致的花朵印花点缀,很清新的感觉,甜美减龄又充满浪漫的气息。腰部是松紧的设计,穿着舒适有度,更加轻松自在。双层的裙摆,微微透视的效果,层次清晰,裙摆侧边分叉,更加的性感诱惑。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*材质#雪纺*颜色#黑色*图案#蝴蝶结*裤型#直筒裤*裤型#背带裤*裤腰型#高腰", "summary": "穿惯挺括的牛仔裤腿裤,在这个春夏不妨试试这款垂坠柔滑的雪纺背带裤。小有厚度能抵挡早春的料峭。黑色直筒裤型,修饰腿型显瘦自在。撞色感的亮银色圆环,暗色调的裤子也变得明快起来,背带穿过扣眼,半扎起蝴蝶结就能随性的调节背带长短。高腰设计,优化身材比例,有秒变大长腿的技能。"} +{"content": "类型#上衣*材质#蕾丝*颜色#杏色*图案#蕾丝*衣样式#衬衫*衣领型#v领*衣领型#翻领*衣门襟#一粒扣*衣款式#拼接", "summary": 
"让人一见倾心的粉杏色衬衫,采用了挺括的梭织面料,穿着透气舒适,自在又随性。翻领与v领的完美结合,既能勾勒出迷人的天鹅颈,又能衬托出娇俏的小脸。一粒扣开合,方便穿脱的同时又能起到装饰的作用。拼接的七分蕾丝袖,在婉转中展现一分利落,后幅收褶设计,让美妙得以展现。"} +{"content": "类型#裤*材质#牛仔布*颜色#浅色*裤款式#口袋", "summary": "新款女裤的时尚设计,结合了大众的审美,并注入了别有风格的设计。浅色磨白的图案选择,展现出女生天生具有的恬静优雅的气质。男生看到女生第一眼看到的是腿,这条休闲裤在腿部的设计通过微喇的版型,掩盖了腿型缺陷。还视觉上拉长小腿比例,塑造大长腿。后口袋的盾形设计,将焦点汇聚到臀线中间,打造立体臀型。时尚的裤脚做了三条杠的设计,让这条牛仔裤不显单调。"} +{"content": "类型#上衣*材质#蚕丝*风格#休闲*衣样式#衬衫*衣样式#西装*衣领型#立领*衣领型#v领", "summary": "假如不能将矜贵的面料做现代风格的设计表达,那么有如此的传统桑蚕丝面料,光华就永远只在过去时。如般下落的小翻折领,的态度和,呈现出的v字领,正如远古神话的“”传统的的肩线,下落式样的肩袖处理,与改良式西装立领的,休闲懒,又好像穿了的老衬衫一般调皮整个廓形为的三角形,适宜搭配哈伦裤或者阔腿裤。"} +{"content": "类型#裙*风格#知性*风格#青春*图案#蝴蝶结*裙下摆#开叉*裙长#连衣裙*裙领型#v领*裙衣门襟#系带", "summary": "这款连衣裙走的是青春时尚的风格路线,尽显出女性的精致细腻的一面,衬托出女性的端庄知性的一面。采用了v领的领口设计,十分利索干净。搭配腰部的蝴蝶结系带,突显出新鲜有趣的一面。裙摆的两侧开叉剪裁,更是体现出你的华丽优雅的气质。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*衣样式#衬衫*衣门襟#套头", "summary": "这款衬衫的看似普通实则颇有几分个性,宽松的版型也一样显瘦遮肉,是属于套头式的版型设计,大大的v字形领口,修饰脸型,前短后长的下摆设计,背后更是设有开叉的元素,这样看起来不会很单调。"} +{"content": "类型#上衣*材质#网纱*衣样式#卫衣*衣样式#外套", "summary": "此款做了黑、白两色,使用质量偏好的网纱和网眼;网纱飘逸动感,一般会搭配一些卫衣罗马面料,会中和掉网纱的仙;此款将网纱和网眼结合,既不会显得过于轻浮,也不会过于硬朗,吸人眼球,给人视觉冲击。偏薄,透风,可以用作春秋的薄外套。"} +{"content": "类型#裙*版型#显瘦*图案#条纹*图案#印花*裙型#一步裙*裙腰型#高腰*裙长#半身裙*裙款式#不规则", "summary": "造型感十足的一款半身裙,经典的过膝长度,尽显矜持优雅。时髦的高腰设计,采用了松紧设计,显瘦又方便穿脱。抢眼的条纹印花,更是充满时尚活力;裙摆处做了不规则的设计,让你的每一步都摇曳生姿,展现与众不同的自己。"} +{"content": "类型#上衣*版型#宽松*风格#简约*风格#ol*风格#休闲*衣样式#衬衫", "summary": "简约到极致的一款长款的宽松结构衬衫,几乎可以做连衣裙穿着,简约中带着些甜美,洒脱里又包含率性。很干净的白,很轻柔的面料,有些薄透的视觉感。很好搭配的款式,休闲还有ol风格。"} +{"content": "类型#裙*颜色#纯色*风格#简约*图案#纯色*裙长#连衣裙*裙袖长#短袖*裙领型#立领*裙款式#抽绳*裙款式#抽褶", "summary": "BRAND这款别致大方的纯色连衣裙,时尚又气质的立领设计,穿着美观又气质,更显立体大方美。腰间的抽绳设计,大方的褶皱处理,勾勒纤细的腰部曲线。大方的短袖造型,简约又方便,修饰优美的手臂曲线,时尚又大方。"} +{"content": "类型#裙*颜色#白色*图案#碎花*图案#印花*裙下摆#荷叶边*裙袖型#喇叭袖*裙款式#拼接", "summary": "深绿色的底色上点缀白色的碎花印花,仿佛绽放在草地上的花朵,为了迎接春天到来。v型的领口既修饰脸型又能露出精致的锁骨。喇叭袖的半袖甜美活泼,还能衬托手臂的纤细。荷叶边的裙摆拼接俏皮减龄。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*颜色#黑白*图案#波点*衣样式#衬衫", "summary": "波点一直都是复古风的标配元素,尤其是黑白波点的搭配特别的耐看,看似简简单单,穿上身却特别气质。直身的宽松衬衣版型,不挑身材,即使受力也不会觉得紧绷,舒适感上佳。面料的垂性很好,上身利落显瘦,很显精神。"} +{"content": 
"类型#上衣*版型#显瘦*材质#西装面料*颜色#纯色*风格#休闲*图案#纯色*衣样式#风衣", "summary": "这款西装裤选用了柔软细腻的西装面料,具有平整透气的上身效果,垂感非常好。整体光洁的布料,缝制有起皱效果,视觉效果上具有很好的显瘦作用。九分裤直筒的版型,休闲慵懒,带有一丝优雅气质,外搭一件纯色的风衣,还能引来不少异性的回头率~"} +{"content": "类型#上衣*版型#宽松*颜色#白色*风格#复古*风格#简约*图案#复古*图案#波点*图案#线条*衣样式#衬衫*衣款式#抽褶", "summary": "宽松的波点衬衣,散发着浓浓的法式复古风情,系带领的设计显得十分灵动。修饰颈部线条,肩部褶皱设计个性简约,修饰形体。波点面料经典简约,低调的咖色十分的特别雅致。白色的波点一扫沉闷的感觉,白色底色干净利落简约。"} +{"content": "类型#裙*材质#牛仔布*图案#蝴蝶结*裙型#牛仔裙*裙长#连衣裙", "summary": "颜值很高的一款单鞋,中性的帅气与优雅的女人味并存。大小刚好的蝴蝶结装饰在简洁的一字带上,将柔美的气息注入其中。不管是搭配牛仔或连衣裙上身都很显气质。"} +{"content": "类型#裙*风格#复古*风格#文艺*图案#复古*图案#刺绣*裙型#百褶*裙款式#拼接*裙款式#钉珠", "summary": "这款丝绵上衣上有着精致的花朵刺绣装饰着,v型的领口设计看上去别致又显档次,裙摆与袖口处都是百褶样式的,穿在身上格外地飘逸吸睛。复古的钉珠与金线拼接,细节处的设计满满都是品味,让你轻松演绎复古的文艺范儿。"} +{"content": "类型#上衣*颜色#酒红色*风格#性感*图案#线条*衣样式#衬衫*衣领型#v领*衣款式#抽褶*衣款式#荷叶边", "summary": "这是一款女人味十足的衬衫,线条流畅的v领,修饰颈部曲线,展现性感锁骨。腰部收紧设计,做了层层褶皱,更具层次设计感,同时提高了腰线,形成了自然的荷叶边下摆,优雅十足。而颜色更是选择了神秘而魅惑的酒红色,将女性魅力完全展现。"} +{"content": "类型#裤*版型#宽松*材质#纤维*风格#运动*图案#格子", "summary": "采用的聚酯纤维面料,具有柔软细滑触感和透气散热特性,让你在行走运动的时候舒适。宽松的版型,不会影响肢体自由,让伸展灵活自在。格纹的加入,轻盈立体,具有强的层次感。侧边的织带,柔和裤子的硬朗,突显柔情。"} +{"content": "类型#上衣*版型#宽松*风格#休闲*衣样式#卫衣*衣领型#圆领*衣长#短款*衣袖型#落肩袖", "summary": "以短款宽松直筒廓形剪裁的运动衫,带有卫衣风格,上身舒适自由。休闲圆领设计,上身舒适百搭。加上慵懒落肩袖型,带来轻松休闲范儿,正面配有光亮印字装饰,精致美观而不失设计感。"} +{"content": "类型#上衣*版型#显瘦*材质#牛仔布*风格#青春*图案#字母*图案#文字*图案#印花*图案#撞色*衣样式#卫衣", "summary": "这款卫衣展现出了青春原有的活泼与生机感。非常醒目的撞色设计,搭配上立体字母印花,带来了非常年轻的学院气息。而在搭配上也非常的简单,一条牛仔裤一双小白鞋,就可以青春故事了。整体是修身的款型,能够凸显出好的身材来,彰显十足品味。"} +{"content": "类型#裙*风格#复古*图案#复古*图案#线条*裙下摆#垂坠*裙领型#立领*裙款式#腰带*裙款式#螺纹*裙款式#亮片*裙款式#木耳边", "summary": "螺纹立领的设计,复古韵味十足,衬托脖颈修长,使你赚足回头率。以串珠与亮片钉制成的花朵图案,点缀在前襟,光影重重散发出璀璨的光芒,巧妙的吸引眼球,衬托出端庄时髦的气场。腰间搭配一条双层的pu腰带,合身的裁剪设计,束出纤细的小蛮腰。蜿蜒的木耳边点缀在裙边,在垂顺流畅的线条里添一些立体挺括的视觉效果,只需微风拂过,更显蹁跹的步伐。"} +{"content": "类型#上衣*版型#宽松*颜色#纯色*图案#纯色*衣样式#衬衫*衣领型#立领", "summary": "非常素雅的纯色衬衫,宽松版型,穿在身上的,有一种不可比拟的仙气。立领设计,打造立体效果,增加层次感,袖子可以翻起来穿,装点你的成熟气质更加显得干练。"} +{"content": "类型#上衣*版型#显瘦*风格#知性*图案#线条*衣样式#衬衫*衣领型#圆领", "summary": "气质干练知性的优雅气质,这款衬衫连衣裙当仁不让,精巧的小圆领,气质有型的衬托出柔美的,而合体直筒的裙身廓形,彰显出利落之感,同时能够藏起多余的肉肉。搭配直身线条的提花纹理,显瘦的视觉感更是妥妥的。"} +{"content": 
"类型#上衣*颜色#黑色*颜色#灰色*颜色#浅灰色*风格#复古*风格#简约*图案#复古*图案#线条*衣样式#西装*衣领型#西装领*衣门襟#双排扣", "summary": "两款小众的浅灰和珊瑚灰色调,有别于传统黑色西装的沉闷老气,在正式而严肃的干练商务感中凸显一丝轻盈从容的自在风范,却同时保留了大气利落的女性魅力。简约的版型设计,线条干净利落,完美表达极简主义,上身提升精气神更有气场。精致的西装领型,更显独特气质。复古的双排扣,增添丝丝文静书卷气质。"} +{"content": "类型#裙*颜色#纯色*风格#淑女*风格#简约*图案#纯色*图案#刺绣", "summary": "名副其实的淑女裙,缤纷的彩绣图案赋予其几丝民族风情,精致且令人惊艳。绣花灵活而生动的装饰上半身,如百花盛放般绚烂,叫人欣赏不够。裙摆则以纯色来演绎,进行简约的碰撞,视觉感浪漫唯美。"} +{"content": "类型#上衣*风格#通勤*风格#日系*风格#复古*图案#碎花*图案#复古*图案#线条*衣样式#衬衫*衣领型#polo领*衣领型#翻领", "summary": "这款衬衫采用极具通勤风的polo领设计,甜美的碎花点缀,就是一份精美的艺术品,偏日系风的颜色,天然纯洁无瑕,让肌肤自由呼吸,自然的色彩和翻领的设计为我们的衣身整体增不少,绝对是人群中的焦点。立体廓形简洁干净,复古的花纹图案,衬衫前后线条张弛有度。"} +{"content": "类型#上衣*风格#街头*风格#运动*风格#清新*衣样式#外套*衣领型#立领*衣款式#松紧带", "summary": "有别于传统的防晒衣,这款外套兼具实用性和潮酷感。帅气的立领,内置隐形衣帽,可根据造型需要随心变换。衣身简洁清新的英文字母大大丰富细节看点,同时带出街头玩趣感。可调节性直筒衣袖,兼具实用功能性与高街时髦感。衣摆处收弹力橡筋,轻松勾勒出活力运动风尚。"} +{"content": "类型#裙*版型#显瘦*风格#文艺*风格#知性*风格#清新*图案#线条*裙款式#腰带", "summary": "这款长款的裙装,可以是小清新的纯情旖旎,可以是闲适悠然的知性文艺。搭配一条腰带也可以拉长身材比例,长度也刚好包裹住大腿中相对较粗的地方,露出修长大腿,展现优雅迷人的气质。符合人体,勾勒身体线条,上身显瘦,显精神!"} +{"content": "类型#裤*版型#显瘦*材质#混纺*裤长#短裤*裤款式#流苏", "summary": "双色纱线掺入金丝混纺而成的粗花呢面料,无论是从颜色还是质感上看,都透露出满满的优雅气息。腰部以及裤脚处浓密的流苏装点,配合裤身双排珍珠扣,为小香风的短裤更添精细美好的小细节。腰腹部做了收省的版型,自然贴合腰部曲线,平腹显瘦的效果显著,配合逐渐变宽的裤脚,衬得双腿更为纤细,上身轻松穿出大长腿即视感。"} +{"content": "类型#裤*材质#丝绒*颜色#白色*颜色#粉色*风格#运动*风格#休闲*风格#清新*图案#条纹*图案#线条*裤长#九分裤*裤长#长裤*裤款式#拼接", "summary": "粉色丝绒面料制成的一条运动风格的休闲长裤,不过是九分的裤长设计,上身穿着可以露出纤细的脚踝,而且帮助你拉长腿部线条。因为是丝绒,而且用了清新少女的粉色,散发出好看的光泽感。侧面是白色的竖条纹进行拼接,丰富整体层次感,张扬出穿搭的青春活力。"} +{"content": "类型#裙*图案#蝴蝶结*裙款式#不规则", "summary": "度假味道浓浓的不规则上衣,马上让人想夏威夷,肩头的蝴蝶结俏皮可爱,两件套的设计弥补了细节单调的缺憾。单穿搭配打底裤,随风流动的飘逸裙摆,自然流露出女人味。"} +{"content": "类型#裙*图案#蝴蝶结*裙款式#不规则", "summary": "上半身的大蝴蝶结设计,为你的可爱加分,蝴蝶结的可随意性,使你有更多的搭配方式,不再一种;3d立体的剪裁方式,保证裙子贴合每个人的曲线,凸显你的小蛮腰;下裙的不规则剪裁,使你不缺乏灵动个性十足。"} +{"content": "类型#裙*风格#性感*图案#波点*裙长#连衣裙*裙衣长#中长款*裙款式#收腰", "summary": "这款连衣裙本身就有十足的独特风韵。腰部收腰的版型设计,完美修饰比例,打造性感的女性风采。独特的波点花纹设计,将衣身的独特魅力演绎的淋漓尽致。中长款的适度衣身,给与足够的舒适度。下摆的廓型设计,自然的流露了一种公主版的古典优雅气质。给您十足的穿衣享受。"} +{"content": "类型#裙*颜色#深蓝色*风格#简约*图案#植物*图案#线条*图案#印花*裙腰型#中腰*裙领型#圆领*裙款式#拼接*裙款式#抽褶", "summary": 
"精致的植物印花设计,以深蓝色为底,再加上红白的植物印花布满裙身,上身效果非常好,突显出女性的知性美。时尚简约的圆领,修饰纤细柔美的颈部线条,同时衬托的脸型更显娇小,彰显自信优雅的女性气质。舒适的中腰款式,恰到好处的褶皱处理,和不漏痕迹的拼接,呈现出曼妙窈窕的女性身姿。"} +{"content": "类型#上衣*材质#丝绒*风格#复古*风格#运动*图案#复古*图案#撞色*衣样式#外套*衣款式#拼接", "summary": "非常运动的一款丝绒拼接外套,无论是可竖可翻的领口,还是蓝白的撞色,都轻松的打造出青春活力的运动风尚。不仅带来出挑的视觉效果,还可以为你增添时尚的氛围。尤其是拼接的丝绒面料,不仅仅是崇尚复古的情调,更容易彰显出穿衣者的高尚格调,在运动中彰显格调的非凡。"} +{"content": "类型#裙*版型#显瘦*风格#潮*风格#性感*图案#条纹*裙长#连衣裙*裙衣长#短款", "summary": "这款连衣裙短款设计,加上修身的款式显瘦显腿长。红白蓝的配色经典时尚,配合条纹更衬潮流气息,腰间结饰装饰,精致俏皮,小小露腰更添小性感。"} +{"content": "类型#上衣*版型#宽松*材质#蕾丝*颜色#白色*风格#性感*图案#蕾丝*衣样式#衬衫*衣袖型#落肩袖*衣款式#拼接", "summary": "以白色为主体色调的衬衣本身看起来就充斥着几分温油又淡雅的味道,更能轻松带出实穿性与。偏宽松的版型设计配合落肩袖型的设计可以修饰肩部曲线,而且还能勾勒出慵懒随性范儿,对于微胖的小仙女也是敲友好的选择。蕾丝拼接的袖子设计可以增加灵动韵味,也能轻松勾勒出性感的女人味精致。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#蕾丝*颜色#粉色*图案#线条*图案#蕾丝*裙袖型#泡泡袖*裙款式#勾花镂空*裙款式#收腰", "summary": "充满小女人风韵的粉色打造而成的裙身带着一些纯洁甜美的感觉扑面而来。全身上精致的蕾丝镂空提花,又一种美轮美奂的艺术感,也散发出了十分柔美的感觉。宽松的泡泡袖口若隐若现之间显露出白皙的肌肤,同时也修饰了手臂的线条,更显纤细。腰间的松紧收腰轻松显瘦,让裙摆呈现出一种仙气满满的感觉,婆娑摇曳之间展现雅致风范。"} +{"content": "类型#裙*风格#淑女*风格#文艺*图案#植物*图案#线条*图案#印花*裙下摆#荷叶边", "summary": "轻薄柔软的面料辅以植物花卉印花元素,碰撞出时髦与文艺气息。甜美的荷叶边俏皮可爱又不失淑女韵味,露腰的版型刚好秀位置,释放优美的线条,体现女性优雅魅力。"} +{"content": "类型#上衣*材质#棉*风格#复古*风格#文艺*风格#清新*图案#复古*图案#刺绣*衣样式#衬衫*衣领型#圆领*衣长#短款*衣袖型#落肩袖*衣袖型#灯笼袖", "summary": "这款衬衣选用纯棉的面料手感柔软,上身穿着透气亲肤。精致的圆领,勾勒锁骨线条美,突显女性柔美的气质。两侧肩部花朵刺绣点缀,散发出清新文艺气息。落肩灯笼袖,藏肉又不失个性,举手投足之间,尽显女性甜美优雅的复古气质。衣身短款的设计,在视觉上提高腰线,优化身材的比例,让你轻松穿出曼妙身姿。"} +{"content": "类型#上衣*颜色#白色*风格#淑女*图案#线条*衣样式#衬衫*衣样式#马甲*衣袖长#无袖*衣款式#绑带*衣款式#抽绳*衣款式#木耳边*衣款式#荷叶边", "summary": "以清浅的白色调来演绎的一款无袖马甲衬衫,袖口处以垂直至下摆的荷叶边点缀而成,修饰整体繁复精致的细节感,同时增添甜美淑女的韵味。领口处采用抽绳绑带的设计,可自主选择收紧,进而营造成甜美细碎的木耳边廓形,也可自然平铺修饰脖颈线条,设计贴心实用。"} +{"content": "类型#裙*版型#显瘦*颜色#蓝色*风格#文艺*风格#清新*风格#性感*裙长#连衣裙*裙款式#勾花镂空*裙款式#松紧带*裙款式#收腰", "summary": "这款连衣裙采用纯净的蓝色调,集清新与文艺为一体,让你轻松穿出别致风情。落肩的设计,显瘦之余更将性感撩人的气质展现出来。松紧带收腰设计,轻松勾勒出你曼妙的身姿让人眼前一亮。衣身胸口及裙摆,做了镂空的设计,别致又新颖,炒鸡美腻噢。"} +{"content": "类型#裙*版型#宽松*风格#文艺*裙下摆#压褶*裙长#连衣裙", "summary": "将娇甜可人的粉,与洋气热烈的大红完美碰撞,便将连衣裙突兀的视觉冲击感带出来了,一下子就捕获了别人的聚焦视线;前襟特意打造的压褶细节感很强,配合清雅脱俗的提花装点,瞬间将潜在的女性柔情通通展现;富有包容性的宽松廓形,让身材稍胖的妹纸,也能穿出苗条的感觉,行走间文艺气质十足。"} +{"content": 
"类型#上衣*版型#显瘦*颜色#纯色*风格#文艺*风格#简约*图案#纯色*图案#撞色*衣样式#衬衫*衣领型#小立领*衣袖长#长袖*衣款式#纽扣", "summary": "这款衬衣区别于一般的纯色长袖衬衫,这款衬衫采用撞色压线设计,使得衬衫简约而不失优雅。领口采用了经典的小立领设计,搭配撞线点缀时尚简约又十分抓人眼球,袖口处的纽扣点缀,给人一种别致的文艺气质。衬衣下摆巧妙的贴合体型并且修饰腰腹部,穿着舒适又显瘦。"} +{"content": "类型#裤*版型#显瘦*材质#网纱*风格#潮*裤长#七分裤*裤款式#纱网", "summary": "以优雅的的两件套设计,拥有丰富的感,打造时髦新潮的款式。飘逸的系带网纱设计,散发仙气十足的气息,塑造仙女的气质。修身显瘦的七分裤设计,凸显出傲人的身材,整体设计别具摩登感。"} +{"content": "类型#裤*版型#显瘦*颜色#红色*颜色#纯色*颜色#酒红色*风格#复古*图案#纯色*图案#复古*裤口#微喇裤", "summary": "基底色进行设计,少了大红色的张扬,偏暗的色调更加内敛、大气,尽显洋气的复古气息。衣身立体式的裁剪,呈现原始的流畅线条美;裤身正中的折痕,避免纯色的单一,与微喇叭的裤脚相呼应,上身带来显高显瘦的视觉效果。"} +{"content": "类型#裙*材质#针织*风格#简约*图案#线条*图案#印花*裙下摆#开叉*裙长#连衣裙*裙领型#圆领*裙款式#拼接", "summary": "它是针织上衣与印花裙拼接组合的连衣裙。圆领的设计简约大方,修饰漂亮脸型,凸显优美颈部线条,显优雅温婉气质。裙子采用可调节式肩带设计,高矮胖瘦身型都可轻松驾驭,开叉下摆让行走更轻松自如。"} +{"content": "类型#上衣*图案#线条*衣样式#西装*衣领型#翻领*衣款式#抽褶*衣款式#收腰", "summary": "为优雅女性量身打造的收腰连衣裙,融合了经典的西装翻领,颇有些毫不费力的法式BRAND。收腰扣带装饰强调曼妙的腰肢曲线,也在无形中拉长腿部线条,衣身的褶皱肌理是点睛之笔,给行走间灵动的裙摆带来更多张力动感。"} +{"content": "类型#上衣*版型#宽松*颜色#卡其色*风格#休闲*图案#撞色*衣样式#棒球服*衣领型#圆领", "summary": "拥有对美好生活的愿景注入设计的品牌徐洛淇,此番汲取棒球服的设计理念始终贯穿整体衣身;通过经典圆领款式,结合利落的宽松版型,进一步提升整体的休闲随性氛围;透过撞色的圆圈标志缀于胸前,完全吻合棒球服的底蕴;搭配一条卡其色的休闲哈伦裤,轻松在上驰骋。"} +{"content": "类型#裤*图案#线条*裤型#直筒裤*裤款式#口袋*裤款式#纽扣", "summary": "采用腰部的纽扣造型设计,让我们穿脱更加的方便。直筒裤的剪裁设计,更好的修饰腿部线条感。衣身两侧的口袋,更方便我们收纳物品。选用的面料舒适。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*颜色#纯色*图案#纯色*裤型#直筒裤*裤型#阔腿裤*裤腰型#高腰", "summary": "这款阔腿裤做了一个纯色的设计,平时搭配起来更加的百搭,不会挑剔任何风格,都可以穿出不错的质感。宽松的直筒裤版型,对于身材的包容性,穿着起来也会更加的舒适。显得腿部笔直的同时也更加显瘦,高腰的设计更能提高腰线的位置,更加显腿长。"} +{"content": "类型#上衣*颜色#纯色*图案#纯色*图案#蝴蝶结*衣样式#衬衫*衣门襟#拉链*衣门襟#一粒扣*衣款式#拉链*衣款式#飘带", "summary": "甄选质感细腻真丝面料的这件衬衫,虽是纯色的配色,但其融入蝴蝶结飘带之后的它,整体气质可是提升了不少。而后又以皎洁珍珠点缀蝴蝶结,如此更体现出优雅和轻奢,轻松告别普通款的沉闷。而无论是一粒扣袖口还是后领拉链设计,都是为了让衬衫更方便穿脱一些。"} +{"content": "类型#裤*版型#宽松*颜色#黑色*图案#字母*图案#文字*裤长#九分裤*裤款式#拼接*裤款式#口袋*裤款式#抽绳*裤腰型#松紧腰*裤口#小脚", "summary": "黑色的九分运动裤,宽松的版型,让腿部活动自如,毫无束缚感。九分裤的设计搭配束脚。上身更显挺括感,松紧腰搭配抽绳的设计穿脱方便,两侧的口袋。零钱小物,实用方便,裤腿的个性拼接字母丰富了,裤子的设计与层次感使之不再单调。"} +{"content": "类型#上衣*版型#显瘦*颜色#红色*风格#复古*风格#简约*风格#青春*图案#复古*衣样式#外套*衣款式#绑带", "summary": 
"袖口绑带元素和大红色的外套合并在一起穿上它又是一个活脱脱的时髦!营造出帅气又甜美的时髦气息。这款外套是春秋的款哦~适合现在的天气穿,简约的版型复古潮范,洋溢着青春的活力气息,重点是显高显瘦,便于展现迷人大长腿。面料很柔软舒适,怎么搭配着穿都很舒服。"} +{"content": "类型#上衣*版型#宽松*颜色#黑色*颜色#纯色*风格#休闲*图案#纯色*图案#印花*图案#撞色*衣样式#polo*衣领型#翻领*衣款式#拼接", "summary": "ralphlauren休闲polo衫,黑色彰显稳重感。肩部与侧身撞色拼接,显示出独特巧妙。打破纯色的沉闷,时髦感立显。胸前商标图案印花,别致生动,极富吸引力。翻领设计,气质出众。领口的系扣设计,穿脱更方便。宽松的版型,穿着随意放松,优雅不失风度。"} +{"content": "类型#裙*版型#宽松*材质#棉*颜色#白色*颜色#蓝色*风格#清新*图案#条纹*图案#印花*裙长#连衣裙*裙袖长#短袖*裙款式#腰带", "summary": "白色和灰蓝色竖条纹连衣裙,非常清新,恰到好处的宽松短袖,很好的遮盖粗壮手臂。可调节同色系腰带装饰,凸显腰身柔美曲线。趣味印花图案贴布装饰,打破单调沉闷,俏皮可爱。纯棉亲肤面料,穿着挺括有型又不失柔软感。所以穿在身上显得很有青春活力。"} +{"content": "类型#上衣*材质#纤维*颜色#绿色*风格#文艺*风格#休闲*衣样式#西装", "summary": "BRAND这款西装采用绿色的主调进行渲染,营造出休闲文艺的氛围;整体甄选聚酯纤维面料打造,不具有延展性,不会变形,并持久保持其平整度;再加以经典的平驳领裁剪及双扣设计,彰显出儒雅绅士的风范;辅以背部下摆开叉处理,丰富层次感的同时,透露出几分随性韵味。"} +{"content": "类型#裤*颜色#纯色*风格#简约*风格#休闲*图案#纯色*裤款式#口袋*裤款式#抽褶*裤腰型#松紧腰", "summary": "褶皱型的松紧裤腰设计,弹性适中没有拘束感,又很是方便宝贝穿脱。纯色净面的衣身裁剪的简约大方,没有加入过多花哨的修饰,带来很多样的搭配性能,两侧的斜插口袋干净利落,裤脚裁剪的大小合适,上身后休闲有范。"} +{"content": "类型#上衣*风格#复古*图案#复古*衣样式#卫衣*衣长#短款*衣门襟#拉链*衣门襟#套头*衣款式#拉链*衣款式#连帽", "summary": "短款的版型打造,更显干练利落,连帽卫衣套头穿起来肆意张扬,慵懒随性范儿十足,胸前圆环拉链设计,增加衣身层次感。整个的色彩搭配复古优雅,既有甜美俏皮的韵味,又彰显熟女的成熟气质。"} +{"content": "类型#裤*风格#淑女*风格#清新*图案#格子*图案#波点*裤款式#口袋*裤口#翻折", "summary": "清新的天蓝色格纹图案让孩子的纯真内涵绽放而来,宽边的弹性腰带呵护宝贝的娇小身型,加持裤腿处可以翻边的波点内里,打造出甜美无比的淑女范儿,整体看起来既能吸引众人目光,又不乏柔和温婉的内涵。侧边的口袋让孩子小手伸入打造时尚造型,结实耐磨的面料让裤身充满舒适柔软的品质感。"} +{"content": "类型#上衣*图案#条纹*图案#印花*图案#撞色*衣样式#棒球服*衣样式#外套*衣袖型#罗纹袖口", "summary": "这件棒球服外套采用了活力满满的趣味印花点缀,让人眼前一亮,打造出别样的时尚气质。领口和袖口均有撞色条纹罗纹收口,富有整体造型感,穿着尽显品质。"} +{"content": "类型#裙*版型#显瘦*颜色#粉色*风格#青春*裙长#连衣裙*裙款式#勾花镂空", "summary": "亮粉色比普通的粉色更加萝莉,这款连衣裙以亮粉打底,上身后青春又甜美。结合通身的镂空勾花,更添优雅和仙气,而在视觉上也是轻松营造了精致轻奢观感。另外,连衣裙为修身版型,将腰身衬托的越发纤细,身线更加玲珑曼妙。"} +{"content": "类型#上衣*风格#休闲*图案#撞色*衣样式#衬衫*衣领型#一字领*衣袖型#喇叭袖*衣款式#腰带", "summary": "一件浪漫的衬衫连衣裙,万物复苏的春天可以尝试下这种风格,会让你有意想不到的效果。撞色的荷叶喇叭袖,是整件的亮点。采用休闲感的府绸衬衫料,又不会过分的甜腻。近两年大热的封宽腰带,把腰身包裹的玲珑有致。一字肩但很巧妙地遮住了手臂较粗的地方,刚好露出上方锁骨。"} +{"content": "类型#上衣*版型#宽松*风格#简约*风格#休闲*图案#卡通*图案#字母*图案#文字*图案#刺绣*衣样式#卫衣*衣领型#圆领", "summary": 
"经典的圆领卫衣款型,简约休闲的设计风格,呈现出十足的利落感,同时自然的版型也营造了舒适的上身感受,捎带宽松的样式彰显出动感十足的年轻风采。衣身上的卡通恐龙图案,个性吸睛,充满了俏皮可爱的感觉,提升整款设计的美观度,同时让人更加减龄。字母刺绣的点缀丰富设计效果,立体别致的字母融入了渐变色的元素,凸显出独特的风采。"} +{"content": "类型#上衣*材质#蚕丝*图案#蝴蝶结*图案#波点*图案#撞色*衣样式#衬衫*衣门襟#系带", "summary": "圆润规律的撞色波点充斥在整件衬衫上,释放出活泼的生动力。其后顺应领口的放下稍微下移,立体的蝴蝶结系带点缀其中,以抢眼吸睛的方式为主体进行修饰美化。同时轻柔顺滑的真丝也融入其中,好像要以一种浪漫温柔的形式表达出对你的与爱意。"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*风格#简约*风格#潮*裙型#背带裙*裙型#包臀裙*裙下摆#开叉*裙衣门襟#拉链*裙款式#拉链*裙款式#对称", "summary": "纯黑色调的交叉型背带裙,简约而不简单,让你的搭配随性不挑身材,尽显潮流与时尚,塑造纤细身姿。拉链衣门襟,方便穿脱,搭配两侧对称方形插袋,做工精细,缝合紧密,保持外观整体美感,实用而显大气感。包型设计,讲究束腰显瘦遮肉,打造翘臀诱惑,提升你的女人味儿,彰显出诱惑与吸引力。裙摆开叉设计增添灵活性,减少束缚,让你的走姿更显优雅与端庄。"} +{"content": "类型#裙*材质#网纱*颜色#黑白*风格#简约*图案#波点*图案#刺绣*图案#撞色*裙下摆#荷叶边*裙下摆#垂坠*裙款式#拼接*裙款式#露肩*裙款式#抽褶", "summary": "裙身重工刺绣点缀,细腻的纱质,剔透朦胧的质地,邂逅精致的刺绣,亲们都知道在这样的薄纱面料上面要做刺绣有,做到,立体逼真的枝叶蜿蜒大气,简约黑白撞色,雅致的色调给人含蓄温雅的感觉,上身衬的人特别有气质;肩部拼接透视网纱,看着就给人很干净清爽的感觉,立体精致的刺绣波点设计,灵动俏皮,很是吸睛;拼接荷叶边,层次丰富,自然垂坠挺括的荷叶边,优美均匀的褶皱,配着露肩设计,尽显灵动优雅。"} +{"content": "类型#裙*版型#显瘦*颜色#纯色*风格#复古*图案#纯色*图案#复古*裙款式#抽褶*裙款式#收腰", "summary": "简洁大方的纯色裙身,加上经典百搭的伞型裙摆,时尚百搭还能给人以满满的复古典雅的气质范。经典褶皱收腰处理,显瘦又显高,小粗腿又能轻松自然的穿着起来,完全不挑身材,优雅大气给人以十足的气质感。"} +{"content": "类型#上衣*版型#立体剪裁*材质#针织*衣样式#毛衣", "summary": "此款针织毛衣采用舒适细腻面料,穿着舒适。3d立体剪裁版型,彰显男人魅力。缝线紧密结实,凸显优质品质。包边下摆,防风保暖。"} +{"content": "类型#裙*颜色#纯色*图案#纯色*裙下摆#花边*裙腰型#高腰*裙长#连衣裙*裙袖型#喇叭袖*裙款式#纽扣*裙款式#收腰", "summary": "本款喇叭袖纯色连衣裙,细边皮带高腰分割收腰,上身效果更加高挑纤瘦。领口立体感的半高花边点缀,更加显得有趣并精致。喇叭袖口花边装饰,使裙子具有更加独特的质感。单侧单排纽扣点缀细节,整体展现优雅气质。"} +{"content": "类型#上衣*版型#宽松*颜色#黑色*风格#简约*风格#休闲*衣样式#卫衣*衣门襟#套头*衣款式#罗纹", "summary": "此款套头卫衣,采用个性十足的布贴图案装饰,尽显宝贝的阳光帅气。简约大方的领口设计,配以肩部的开扣设计,让穿脱变得方便简单。罗纹的袖口与下摆,穿着舒适服帖。优雅的纯黑色色调,休闲百搭,宽松舒适的版型设计,穿着无束缚感。"} +{"content": "类型#裤*颜色#黑白*图案#条纹*图案#撞色", "summary": "这款休闲裤裤脚侧面撞色条纹设计,使得视觉具有延伸感,更显双腿修长笔直。而经典的红黑白撞色设计,让你轻松穿出干练清爽的感觉。"} +{"content": "类型#裤*版型#宽松*材质#水洗*风格#简约*风格#朋克*风格#摇滚*图案#音乐*裤款式#破洞*裤款式#口袋", "summary": "裤子的设计是金属朋克气息,有着一股子摇滚的意味很是个性了可以说。简约宽松的版型很好的修饰了曲线。口袋下面有着金属鸡眼和穿绳设计,又精致又能体现出细节感。破洞水洗元素带来放荡不羁感。"} +{"content": "类型#裙*风格#青春*裙腰型#高腰*裙长#短裙*裙款式#绑带", "summary": 
"这条高腰短裙,看似是短裙,其实内里还做了短裤的设计,时髦感强却又不会显得太浮夸。高腰的廓形设计,能够使得腿部看起来更加的修长。裙身做了钉扣的绑带设计,散发着青春甜美的气息。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*风格#简约*裤款式#不规则*裤口#毛边*裤口#小脚", "summary": "这款fiveplus不规则毛边牛仔裤,简约的修身小脚版型设计,能够更好的修饰腿部轮廓,显瘦之中更能展现出修长笔直的大长腿。同时融合毛边元素装饰,增添设计感,彰显时髦。"} +{"content": "类型#上衣*风格#清新*图案#碎花*衣样式#衫*衣款式#荷叶边", "summary": "荷叶边的在是这款碎花衫的设计亮点。层层叠叠的荷叶边晕染出了浪漫的情调,为衣身注入了小清新的美感,穿起来能将少女的娇美感衬托出来。除此之外,荷叶边还有遮肉的效果,它能在视觉上打造出好身材。"} +{"content": "类型#裙*材质#网纱*图案#刺绣*裙腰型#高腰*裙长#半身裙", "summary": "独特的网纱刺绣设计,更加俏皮可爱;过膝的长度设计,带来满满的安全感;时尚高腰腰型,塑造优美的身材曲线;半身裙是每个季节都不会过时的时尚元素,能够恬静地成为这个夏天不折不扣的主角,百搭的特性可以包含不同的风格单品,个性的网纱面料亦会让你的时髦变得轻而易举。"} +{"content": "类型#裙*材质#网纱*图案#刺绣*裙腰型#高腰*裙长#半身裙", "summary": "活泼可爱的少女,夏季总会选择一件网纱半身裙,这件高腰刺绣网纱半身裙,裙头选择松紧有致的腰部设计,不拘束的同时更收敛身形。裙身选择中国古典风雅的星月刺绣,尽显少女活力十足,而裙外层则选择轻盈剔透的网纱设计,展现少女灵动活泼的个性。"} +{"content": "类型#裤*颜色#黑色*裤款式#亮片", "summary": "一款blingbling的质感,就是最大设计亮点。整个衣身都是blingbling的亮片,一不小心就被亮,非常的耀眼。又是黑色系的,光是看着就觉得。"} +{"content": "类型#上衣*材质#牛仔布*材质#针织*风格#休闲*风格#潮*衣样式#卫衣*衣款式#连帽", "summary": "一款时尚有范的牛仔裤,无论从款式还是面料来说,都非常的满意,保证你穿了还想穿,款式很百搭,三季好穿,能陪你多个春夏秋。早春搭配一款之前的马海毛针织或休闲连帽卫衣,配上这款新潮的牛仔裤,下摆开叉,时尚不羁的感觉,露出一点白皙肌肤,时髦带点潮感,使得穿上后气质更时尚。"} +{"content": "类型#裙*材质#网纱*风格#宫廷*风格#高贵*图案#刺绣*图案#印花*裙长#连衣裙", "summary": "像的春风从指尖,伴着丝丝惬意,这便是这款连衣裙给人的感受。朦胧的刺绣网纱领口还原法国宫廷的经典look,透着浪漫,也不失温柔,铺陈在印面上的工笔画印花,唤醒最古典的中国风情。不一样的元素碰撞,同样的高贵灵秀。"} +{"content": "类型#上衣*风格#休闲*风格#青春*风格#潮*风格#性感*图案#线条*衣样式#衬衫*衣领型#翻领*衣款式#勾花镂空", "summary": "这款男士衬衣在裁剪上紧跟人体身材科学设计的步伐,按照黄金比例裁剪,贴合男士身材,显示出每个人不一样的魅力风格,商务休闲二者流行元素合二为一,显示不一样的潮流风尚。镂空的设计,又使得增添了性感的魅力,小翻领的设计,修饰颈部线条。"} +{"content": "类型#上衣*风格#清新*风格#性感*衣样式#衬衫*衣领型#一字领*衣袖型#喇叭袖", "summary": "一字肩的设计使得美丽的香肩得以展现出来,尽显造型独特并无形中流露出性感的气息。结合浪漫甜美的喇叭袖设计,使得这件衬衫性感却又不至于妖娆媚俗,反而演绎出清新脱俗的小仙女气质,举手投足众生沉醉在你出众的魅力之中而无法自拔。"} +{"content": "类型#上衣*版型#显瘦*风格#英伦*图案#撞色*衣样式#马甲*衣款式#纽扣", "summary": "品牌的这款正装马甲采用商务修身的裁剪设计,提花的纹理图案设计更显精致感。深色的英伦配色,尖角斜下摆的版型裁剪,腰部调节扣设计。单排撞色纽扣门襟,各个细节款型都在彰显优质的品质感以及英伦绅士的穿搭风格。"} +{"content": "类型#裙*风格#清新*图案#碎花*图案#线条*裙长#连衣裙*裙领型#v领*裙袖型#喇叭袖*裙衣门襟#系带", "summary": 
"魅力女人味,还是要从连衣裙开始诉说起来~美观的中长连衣裙,它有着时尚系带v领,样式简洁大方,更加的富有女人味的感觉。而清新浪漫的碎花元素,分布在这款连衣裙的全身,样式优雅,最显清新感觉。加上这款它还有着精致的喇叭袖设计,在修饰了你手臂线条后,尽显出一种独特和浪漫。"} +{"content": "类型#裙*版型#宽松*材质#牛仔布*材质#水洗*裙型#牛仔裙", "summary": "这款裤子是饱和度较低的蓝调牛仔配合水洗做旧效果,帅气的很低调,版型不要太赞,耐穿耐脏耐摩擦。宽松的裤型顺便把腿型不完美的问题一并解决,这个膝盖洞,透气"} +{"content": "类型#裙*图案#蝴蝶结*裙领型#西装领*裙领型#翻领*裙款式#腰带*裙款式#抽褶", "summary": "沿用西装领的样式设计的时尚翻领,凸显气质。腰带采用金属圈作为装饰,迎合当下时尚风潮,BRAND风十足。选用梭织面料,紧密挺硬,保持裙型。腰部蝴蝶结系扣增添甜美气息。裙身自然褶皱,增添灵动性。"} +{"content": "类型#上衣*风格#文艺*风格#清新*衣样式#衬衫*衣袖型#喇叭袖*衣款式#荷叶边", "summary": "不同于普通白衬衫,这款衬衫很有设计感。温婉清新的喇叭袖设计,展现优雅气质的同时更能展现纤细的手臂。而别致的荷叶边下摆设计,带来不少灵动飘逸的感觉,浓浓的文艺气息。同时,这款衬衫特意选用柔软亲肤的面料,提升了不少穿着体验感,并且立体的剪裁工艺让这件衬衫看起来更加有质感,有气质。"} +{"content": "类型#裙*版型#显瘦*材质#牛仔布*风格#复古*风格#简约*风格#潮*图案#卡通*图案#复古*图案#线条*图案#刺绣*裙型#a字*裙型#牛仔裙*裙长#短裙*裙衣门襟#拉链*裙款式#口袋*裙款式#拉链", "summary": "妖精的口袋的这款牛仔短裙,经典a字版型,面料挺括、线条流畅简约。微微弹力,飘逸下摆,穿着显瘦自然。后置隐形拉链的融合,方便又摩登。复古的卡通刺绣图案,简约中不失灵动,超级吸睛,让你走到哪里都是潮流中心。"} +{"content": "类型#裤*版型#显瘦*裤长#九分裤*裤腰型#中腰", "summary": "这一款裤子给人的感觉就是很经典大气,如同西装裤一样非常的有气质,也显得比较的正式,我们可以在一些隆重的场合穿上它。九分裤的长度非常的适中,穿上可以完美的修饰出我们的脚踝。中腰的设计则很修身显瘦,很好的衬托出我们的完美身材。"} +{"content": "类型#上衣*版型#宽松*图案#线条*图案#撞色*衣样式#冲锋衣*衣领型#小立领", "summary": "这款冲锋衣采用薄款的面料,触感细腻,让你轻松兼顾温度与风度。衣身采用个性撞色的设计,不仅能穿出时髦感,更显皮肤白皙。小立领的设计很拉风,轻松凸显纤细的颈部线条。宽松的版型适合各种身型的妹纸穿着。"} +{"content": "类型#裙*版型#显瘦*颜色#纯色*风格#通勤*风格#简约*风格#知性*图案#纯色*图案#线条*裙长#连衣裙*裙袖长#长袖*裙款式#对称", "summary": "来自时尚品牌菲梦伊的一款通勤款的连衣裙,简约纯色设计,干净清爽。3d立体修身剪裁,贴合身形,突出纤细腰肢和挺翘饱满的臀部曲线,勾勒女性曼妙身材曲线。经典大方的反驳领设计,给脖颈自在活动空间,简约知性。长袖的设计,修饰手臂线条,彰显女性的温婉含蓄,对称衣扣装饰,丰富衣身结构,更具时尚感。"} +{"content": "类型#裤*版型#宽松*风格#清新*裤长#八分裤*裤型#直筒裤*裤型#阔腿裤*裤款式#拼接*裤款式#不对称", "summary": "这款阔腿裤的面料色调,怀旧又带了点清新禁欲系。款式设计上运用不对称的手法,在一边的裤筒上,用同色系不同纹理的面料拼接插入,而另一边也做了一条相呼应的细边出芽。给裤身制造一个不对称的视觉效果,和谐融入不显突兀。宽松的直筒八分裤型,裤脚的折边让穿着更显随性。"} +{"content": "类型#裙*版型#宽松*风格#休闲*裙下摆#压褶*裙领型#娃娃领*裙款式#口袋*裙款式#纽扣", "summary": "这件睡裙在背部采用了压褶设计,加上甜美的娃娃领,具有减龄的效果,带你重回童真时代。纽扣的门襟开合,缔造出干练自信的女性气场。宽松舒适的版型包容性强,隐藏身材小秘密,尽享惬意休闲时光。胸前的口袋设计,增添视觉层次感,可放置随身小物,方便实用。"} +{"content": "类型#裙*材质#蕾丝*颜色#黑色*图案#蕾丝*裙下摆#垂坠*裙长#连衣裙*裙袖长#无袖*裙领型#圆领*裙款式#拼接", "summary": 
"日着在今年的夏装中,继续延续了蕾丝所代表的女性柔美一面的设计元素。在这一款连衣裙里,大胆地将黑色的蕾丝面料附着在土黄色的棉布上,以无规则的车线工艺将两块面料进行了拼接,打造出了极度自然的垂坠感。同时,无袖搭配圆领的版型设计,更是将优美的脖颈以及臂部曲线展露无遗,凸显出了女性姣好的身材。"} +{"content": "类型#上衣*图案#线条*衣样式#衬衫*衣样式#风衣*衣领型#v领*衣款式#不规则*衣款式#荷叶边", "summary": "这件衬衫领口的设计是半开的小v领结合不规则的荷叶边设计,整体设计感十足,这样的领口勾勒出颈部线条和锁骨都很柔美。袖子部分的设计是可拆卸的风衣式袖子设计,时尚方便又能凹造型,是你的最佳选择。"} +{"content": "类型#上衣*材质#针织*风格#复古*风格#简约*风格#休闲*风格#青春*图案#字母*图案#文字*图案#复古*图案#撞色*衣样式#卫衣*衣领型#圆领*衣门襟#套头*衣款式#亮片", "summary": "圆领式的针织卫衣,套头的款型不仅方便了日常的穿脱,也展现出简约休闲的设计风格。加上微微的卷边效果以及局部的磨白处理,刻意做旧的感觉,让这款设计充满了复古的情怀,打造出个性又洒脱的女性气质。衣身上亮片字母的点缀,融入了明亮的撞色元素,十分吸人眼球,同时展现出极具现代感的青春风采,让人绽放出活力感。也成为了整款设计的点睛之处。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*裙下摆#花边*裙长#连衣裙*裙款式#不对称", "summary": "这款连衣裙精选优质的面料,不仅手感舒适,穿在身上还非常亲肤透气。宽松的款式穿脱方便的同时还非常显瘦。花边的设计也是一大亮点,使这条裙子更加甜美可爱。不对称下摆的设计也是时髦又别致。"} +{"content": "类型#裙*裙领型#v领*裙领型#翻领*裙款式#露肩*裙款式#收腰", "summary": "衣领在传统的v领上有了新的创新,加上了翻领设计,成熟之余又不失活泼可爱。并且衣领弹性大,可轻松实现正常v领和露肩两种穿法。腰部进行了收腰设计,尽显腰身。蓬松的裙摆可遮挡一些女性臀部的不足,打造完美身材。"} +{"content": "类型#裙*裙领型#v领*裙领型#翻领*裙款式#露肩*裙款式#收腰", "summary": "衣领在传统的v领上有了新的创新,加上了翻领设计,成熟之余又不失活泼可爱。并且衣领弹性度高,可轻松实现正常v领和露肩两种穿法转换。腰部进行了收腰设计,尽显腰身。蓬松的裙摆可遮挡一些女性臀部的不足,打造完美身材。"} +{"content": "类型#裙*裙型#a字*裙腰型#高腰*裙长#连衣裙*裙款式#绑带", "summary": "这款连衣裙最吸引的亮点就是它独特的腰封设计,显腰细max。加上高腰的版型,瞬间拉长全身比例,凸显高挑身材。细长的绑带设计,帅气中又凹足造型。a字型的裙摆敲不挑人穿,菇凉们可以轻松驾驭哦。"} +{"content": "类型#上衣*风格#文艺*风格#简约*风格#清新*衣样式#衬衫*衣领型#小立领*衣款式#口袋*衣款式#绑带", "summary": "衬衫和阔腿裤的组合,简约的搭配方式,但并不简单。上衣采用小立领半开襟设计,简约利落,前两个口袋设计,带来层次感,整体的风琴百褶,尽显女性的婀娜多姿,阔腿裤设计。加上时尚的绑带,利落范,还有开叉的半裙。飘逸清新又文艺。"} +{"content": "类型#裙*材质#蕾丝*材质#纤维*图案#条纹*图案#蝴蝶结*图案#蕾丝*裙长#连衣裙*裙款式#拼接", "summary": "一款舒适有型的聚酯纤维连衣裙;富有特色的拼接设计,将条纹与蕾丝相碰撞,融合出一种充满浪漫的设计感,使时尚的着装拥有了浪漫与柔美的气息;简洁有型的长领角,随性而系的蝴蝶结则凸显俏皮与可爱,垂下不系则显个性的韵味。"} +{"content": "类型#裙*风格#简约*风格#青春*风格#性感*图案#条纹*裙下摆#花边*裙长#连衣裙*裙袖长#短袖*裙领型#圆领*裙款式#露肩", "summary": "一款清爽又减龄的条纹连衣裙,衣身黄白条纹设计,显得青春而靓丽,上身衬肤色又显年轻;简约的花边圆领,散发出甜美可爱的气质;露肩短袖设计,满足穿者的性感小心机。"} +{"content": "类型#裤*风格#复古*风格#嘻哈*图案#复古*图案#撞色*裤款式#拼接*裤口#毛边", "summary": "前身复古的毛边点缀,趣味风十足,一下子提高了造型感,上身倍感玩味俏皮,让你很容易穿出青春活力女孩气息。嘻哈感十足的裤腿带来富有灵动感的穿着效果,结合整体的撞色和拼接设计,透着几分小女生的乖巧气息。"} +{"content": 
"类型#裙*颜色#浅色*图案#条纹*图案#线条*裙下摆#压褶*裙领型#一字领*裙款式#拼接", "summary": "肩部、袖口都采用条纹拼接,增加层次感,又营造出一字领的错觉,显得精致温婉。浅色线条勾勒裙边,让轮廓感立显,优雅大气。精致的压褶处理,让视觉效果更丰满,有不错修饰胸型的作用,让人心动。"} +{"content": "类型#裙*风格#街头*图案#条纹*图案#拼色*图案#线条*图案#撞色*裙长#连衣裙*裙领型#v领*裙款式#不对称*裙款式#对称", "summary": "这款对称撞色连衣裙,大胆玩转不对称的艺术美。左右对称拼色两件的设计,瞬间让你摆脱路人甲的穿搭造型,成为街头最出彩的。斜条纹的设计,更是摆脱纯色调的呆板无趣,让你穿出腔调感来。v字领口设计,更是完美凸显颈部的纤长线条。"} +{"content": "类型#上衣*颜色#黑色*图案#印花*图案#撞色*衣样式#衬衫", "summary": "个性时尚的衬衫采用了纯黑色的色调设计,整体打造了摩登随性的自由风格,凸显个性时尚的质感。结合了魔术扣的立体印花,在黑色的映衬之下,撞色使得更有摩登时尚的质感,打造活力穿着。"} +{"content": "类型#裙*版型#显瘦*裙款式#松紧带", "summary": "腰部是松紧带设计对身材的包容性很好。高矮胖瘦的姑娘都可以用,显高显瘦的效果不止一点点,另外这款裙子也很适合旅行时穿,飘逸柔美充满异域风情的味道。"} +{"content": "类型#裙*版型#显瘦*裙腰型#高腰*裙长#连衣裙*裙袖长#七分袖*裙袖型#喇叭袖*裙款式#勾花镂空", "summary": "修身款型的连衣裙是衬托曼妙身段的好帮手,高腰的设计将腰部的纤细修饰的更加迷人。尤其是衣袖处的别致设计,带着喇叭袖的造型将手臂修饰的更加纤细,搭配上七分袖的独特为穿戴之人更添优雅之感,从此告别令人尴尬的赘肉。若隐若现的镂空为整件衣衫增添了一抹成熟女人的妩媚之情。"} +{"content": "类型#裙*版型#显瘦*裙下摆#荷叶边*裙衣门襟#排扣", "summary": "一款仿佛礼服般的裙子。修身显瘦的款式衬托出端庄迷人的身姿,腰间处理让纤细的小蛮腰呈现盈盈一握之态。胸前荷叶边的点缀为整体增添了几分随心所欲的美,领口设计衬托颈部曲线的同时展示女性魅力。袖口三排扣从细节中彰显质感。"} +{"content": "类型#上衣*版型#显瘦*颜色#纯色*颜色#浅蓝色*风格#淑女*风格#文艺*风格#民族风*风格#清新*风格#中国风*图案#纯色*图案#蝴蝶结*图案#刺绣*衣样式#风衣*衣样式#西装*衣领型#西装领*衣领型#翻领*衣袖型#喇叭袖*衣门襟#系带*衣款式#腰带", "summary": "气质柔雅的一款纯色风衣,清浅素雅的浅蓝色基调清新脱俗有点不食人间烟火的味道,为整体文艺风格奠定了很好的基调。惹眼的细节莫过于西装翻领上的民族风刺绣花朵,古典的中国风美韵油然而生,把西装领原有的干练洒脱掩盖了起来。腰身修身的腰带系成了甜美俏皮的蝴蝶结,与两侧系带喇叭袖的造型相互呼应,轻松打造出乖巧恬静的淑女形象。"} +{"content": "类型#裙*风格#复古*风格#简约*图案#复古*图案#撞色*裙下摆#压褶*裙长#连衣裙", "summary": "简约不简单的一款连衣裙,裙身复古大气的花纹,好穿又衬气质!融入撞色设计,鲜明的层次感,美的清晰立体。轻松穿出惹眼视觉感!前片压褶设计,精致有型,褶线自然垂落,飘逸,带来一丝柔美韵味。"} +{"content": "类型#上衣*颜色#浅色*衣样式#衬衫*衣门襟#系带*衣款式#不对称*衣款式#抽褶", "summary": "浅色色系的衬衫是春意盎然的代表色系,清爽又整洁,非常时尚的设计。左边的系带使得右边出现褶皱感,使得整件衬衫变得非常有质感,衬衫下摆的不对称,更是设计师赋予它独特的魅力,时尚感强烈。增加了衬衫的,同时侧边开叉,更显女性腰部曲线魅力,非常完美的一件设计作品。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#针织*颜色#黑色*风格#简约*风格#清新*图案#线条*图案#撞色*裙长#连衣裙*裙衣长#中长款", "summary": "这款针织连衣裙,是经典的黑色款,衣身的撞色线条时髦摩登,出街实力吸睛!衣身领口吃采用经典黑、白、红撞色,清新靓丽的大v字设计,糅合低调、浪漫与热情,彰显摩登的时尚感。中长款的宽松款式,落肩的设计轮廓勾勒简约裙装,轻松修饰身材小秘密,俏丽又显瘦!"} +{"content": "类型#上衣*图案#字母*图案#文字*图案#线条*图案#刺绣*图案#撞色*衣样式#卫衣", "summary": 
"由近年来大火的时装品牌dsquared2出品的卫衣,采用数字、字母logo标识和树叶图案作为点缀,以黑黄撞色的刺绣手法呈现,线条分明且具有立体感,经得起全方位的推敲与。"} +{"content": "类型#上衣*版型#宽松*风格#复古*风格#休闲*图案#复古*衣样式#外套*衣样式#西装*衣领型#西装领*衣门襟#系带*衣款式#收腰", "summary": "充满港式复古风味的西装外套,上身是宽松的西装下身则是休闲的阔腿裤。上身的西装采用的经典的西装领结构,能够衬托出女性精致的脸庞。腰间还加有一根系带装饰,可以随意调节进行收腰处理,勾勒出女性的细腰凸显出女人味。下身宽松的高腰裤型能够拉长下半身的视觉比例。"} +{"content": "类型#裙*颜色#纯色*风格#青春*风格#潮*图案#纯色*裙长#半身裙*裙款式#拼接*裙款式#不规则", "summary": "此款拼接贴布半身裙,采用不规则设计,搭配拼接设计,层次感强,紧随潮流,时尚又个性。贴布设计,增加整体设计感,尽显女性甜美气质。纯色设计,经典百搭,青春减龄,优雅又魅力。"} +{"content": "类型#上衣*材质#针织*衣样式#开衫*衣领型#圆领*衣袖型#罗纹袖口*衣款式#罗纹", "summary": "可爱甜美的棉线针织开衫,手感细腻软糯,是很实用的搭配单品,多种颜色可选,伴随宝贝靓丽出行。适合身形轮廓的剪裁,给身体留出足够的活动空间,自由不拘束。经典的罗纹圆领,织带包边,有效避免摩擦颈部肌肤,且防止拉扯不易变形。有弹性的罗纹袖口,宝贝穿着不易上下滑落,保暖又舒适。"} +{"content": "类型#上衣*版型#显瘦*颜色#白色*风格#知性*图案#字母*图案#文字*衣样式#衬衫*衣领型#娃娃领*衣款式#拼接", "summary": "干练大方的衬衫连衣裙,经典的黑白色拼接,知性优雅。娃娃领的设计,多了几分俏皮感,彰显甜美可爱的少女气息。门襟和袖口的拼接相呼应,时髦大气。修身的版型,束出纤细的腰身,凸显柔美的女性曲线。简洁的a字裙摆,衬出迷人的大长腿。整条裙子点缀彩色爱心,碰撞胸口的字母设计,趣味性十足。"} +{"content": "类型#裙*材质#网纱*材质#蕾丝*图案#蕾丝*裙衣门襟#拉链*裙款式#拉链", "summary": "活力四射的夏天穿搭,少不了一件蕾丝吊带裙。这件网纱蕾丝吊带裙两件套,内层选用精巧可爱的吊带裙,在蕾丝的映衬下,更衬托女性的优雅情调,两肩及袖口处采用质感十足的金属拉链装饰,在女性的柔美上,增添一份女性的独立性格。而腰身则选用精致动人的暗纹设计,隐隐显示女性专属的古典美。"} +{"content": "类型#上衣*材质#蕾丝*风格#简约*风格#清新*图案#蕾丝*衣样式#衫*衣领型#立领*衣袖长#短袖*衣款式#盘扣", "summary": "一款旗袍上衣立领短袖蕾丝衫,立领的设计非常能够彰显个人的气质。搭配盘扣的设计,透着中国风情,更显端庄大方。蕾丝面料的选择,优雅而又别致,女人味十足。纯白的设计,简约而又清新,可塑性很强,可以根据需要搭配任何颜色。"} +{"content": "类型#裤*版型#显瘦*材质#蕾丝*图案#蕾丝*裤长#连体裤*裤款式#拼接", "summary": "连体裤在上衣的部分采用了蕾丝拼接的设计,小小的披肩遮住上身,若隐若现的手臂十足的有朦胧美。而裤子的地方也特意做高了腰线,收褶的设计更是能够将腰部衬托的更加纤细,很好地起到显高显瘦的作用。灰白两种颜色做搭配也是十分的清新自然,仙女气十足。"} +{"content": "类型#裤*材质#蕾丝*颜色#蓝色*风格#清新*图案#蕾丝*裤长#连体裤*裤型#阔腿裤*裤型#连衣裤*裤款式#抽褶*裤腰型#高腰", "summary": "连衣裤的受追捧程度,BRAND也按捺不住加入了此款式的,采用通身的蕾丝质地,打造出一款又美又仙的扮靓单品。蓝色调的渲染,带来了湛蓝天空的清新纯净感;两侧的褶皱花边造型,演绎蝴蝶般的飘飘然姿态;高腰线和阔腿的剪裁,修饰双腿,立显高挑身姿。"} +{"content": "类型#裙*风格#简约*风格#知性*风格#高贵*裙下摆#毛边*裙长#连衣裙*裙领型#圆领*裙款式#亮片", "summary": "此款连衣裙硬挺厚实,细腻的颗粒,凹凸的触感,质感饱满,带来品质体验;把女性之美的璀璨夺目亮片融入其中,摩登与优雅并存,绽放你的美丽与智慧光芒;加上毛边的沿边点缀,简约时尚,削弱原本普通连衣裙的平庸,注入不安分玩转因子,散发出知性优雅的超强气场,摩登轻奢潮品,优雅不失时尚感;精致剪裁,注重每一个细节处理,简约的圆领,凸显优雅气质,勾勒婀娜身姿,犹如一件精致的艺术品,尽显高贵女性韵味。"} +{"content": 
"类型#上衣*风格#简约*衣样式#毛衣*衣领型#半高领*衣袖长#短袖", "summary": "春天到了,你的上衣准备好了吗?这款炒鸡减龄的毛衣,百搭又时髦,设计了优雅的半高领造型,打破常规,新颖简约不失时髦度,让基本款有亮点。以及短袖的裁剪,特别利落,减少造型的臃肿感,且百搭又有范。"} +{"content": "类型#上衣*版型#显瘦*风格#欧美*图案#几何*图案#线条*图案#印花*衣样式#衬衫*衣款式#荷叶边", "summary": "欧美个性几何衬衫搭配包臀半裙,给人带来视觉上的享受。精美印花的点缀,充满着浪漫的气息。干净利落的剪裁打造流畅的线条,修身的上衣塑造完美身形,荷叶边裙的设计很精巧,细节也做得很到位,上身显瘦。"} +{"content": "类型#上衣*颜色#黑色*风格#民族风*风格#休闲*图案#格子*图案#线条*衣样式#外套*衣领型#翻领*衣款式#口袋", "summary": "这款外套领部采用小翻领的设计,显得干净利落。以粗花呢格纹面料制作而成,展现出满满的小香风~衣身四个方形口袋的设计,集美观性与实用性于一体~并以流畅的黑色线条点缀,增加了整体的层次感。下摆和衣袖处做了流苏的设计,增添几分民族风的感觉,休闲范十足。"} +{"content": "类型#裙*材质#网纱*材质#蕾丝*风格#淑女*风格#居家*图案#蕾丝*裙长#连衣裙*裙款式#吊带", "summary": "一款优雅的连衣裙,将女人气质十足的体现了出来,蕾丝的小吊带图案饱满,亲肤不扎手,也不用担心起球。里面的连衣裙甜美感十足,网纱的裙摆别有一番淑女名媛风。袖子做了一点微喇效果,增加了甜美的感觉。连衣裙面料亲肤舒适柔软,质感也灰常的好。冬天搭大衣、棉服都灰常的有韵味,逛街,居家,上班,旅行穿都很合适。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#清新*图案#条纹*衣样式#衬衫", "summary": "这款衬衫,衣身采用桔红色条纹设计,视觉上很清新的颜色,很显肤色,显白。条纹也会让身体更加纤细。小宽松的版型,能更更加凸显身材,遮肉显瘦,彰显女性柔美的身姿。上身更显气质,处处弥漫着时尚的气息。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#清新*图案#条纹*衣样式#衬衫", "summary": "春天是属于衬衫的,一款清新的条纹衬衫,碰撞出属于这个时节的清爽利落感,纵向的条纹不仅显得帅气而且能增加视觉上的显瘦效果。前短后长的设计别具一格,在增加设计感的同时超级显腿长,宽松的衬衫版型显瘦显慵懒。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#清新*图案#条纹*衣样式#衬衫", "summary": "这款衬衣,的清新蓝白条纹十分给人自然干净的感觉,大方时尚条纹还有显瘦的功能。胸口的设计很特别,胸口的门襟铜扣,十分有质感,一看就很有品质,有着点睛的作用。宽松的版型,遮肉功能十分强大更显强调。"} +{"content": "类型#裙*材质#雪纺*裙型#大裙摆*裙腰型#高腰*裙长#连衣裙*裙款式#露肩*裙款式#不规则", "summary": "这条连衣裙飘逸轻柔的雪纺材质加上不规则露肩的设计,一举一动之间透露着十足的仙女气质,甜美迷人。高腰的大裙摆在视觉上拉长了双腿,更显大气时尚。纤细的肩带和露肩的设计,更是“”心机”地完美修饰脸型和锁骨。"} +{"content": "类型#裤*材质#丝绒*风格#性感*裤长#短裤*裤款式#钉珠*裤腰型#松紧腰", "summary": "钉珠丝绒短裤精选优质高档丝绒面料,轻盈顺滑,细腻绵软。清爽的松紧腰型设计提升腰线,彰显性感小蛮腰和修长大长腿。甜美高档的手工缝制钉珠提升高档品质,彰显个性的同时又有一份优雅和光泽在其中。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*颜色#黑白*颜色#淡蓝色*风格#清新*图案#格子*图案#线条*裤长#七分裤*裤型#阔腿裤", "summary": "格纹元素在服饰中的运用是永不过时的,融合了经典黑白小方格的图案又创新加入了淡蓝色,别出心裁的设计让这条阔腿裤有了与众不同的辨识度。宽松的阔腿裤也能很好的修饰腿部线条,过膝七分长度露出白皙小腿更加显瘦;时尚单品的穿搭充满了美式田园风格的清新雅致,带来泛着甜美气息的春夏风格。"} +{"content": "类型#上衣*版型#宽松*材质#蚕丝*风格#性感*衣样式#针织衫*衣样式#毛衣*衣袖型#蝙蝠袖*衣门襟#套头*衣款式#勾花镂空", "summary": 
"一款蝙蝠袖撕破勾花毛衣女套头宽松针织衫,非常女人味一款设计,里面搭配一条真丝裙子,满满高级感。蝙蝠袖的设计,让肩膀自由不拘束。撕破钩花的设计,透着淡淡的性感。宽松版型的设计,慵懒随性。"} +{"content": "类型#上衣*版型#宽松*颜色#宝蓝色*风格#休闲*衣样式#卫衣", "summary": "适合装点春日的三色卫衣,通过把握色彩的饱和与明暗度,使其散发出暗藏的满满活力,柠檬黄、宝蓝色和紫外光,实用多样风格穿搭。而前襟处的英文字母点缀恰当,宽松的oversize版型更显洒脱,丰富从整体到细节的质感。在搭配舒适的面料,结合休闲与时髦理念,带来亲肤体验的同时也简洁大方。"} +{"content": "类型#裙*颜色#纯色*风格#潮*图案#纯色*图案#刺绣*裙款式#抽褶", "summary": "衣身采取个性化的绣花点缀,增添时尚的韵味,让你举手投足间尽显妩媚的气息。自然的褶皱裙摆,极具潮流的味道,尽情演绎你专属的摩登气息。雅致的纯色系列,更好的为你勾勒出酷帅的气质。"} +{"content": "类型#裙*版型#宽松*颜色#绿色*图案#线条*裙长#连衣裙*裙领型#圆领", "summary": "这款中长款式的连衣裙,经典的圆领设计,显得利落大方不失优雅,能够在穿着时增添女性的个人气质。精致的荷叶裙摆设计,让宽松的衣着轻松贴身,修饰腰部线条更添纤细之感。选用绿色调装饰,极具摩登时尚气。"} +{"content": "类型#上衣*颜色#黑色*风格#简约*图案#线条*衣样式#外套*衣样式#西装*衣领型#翻领*衣袖长#长袖*衣门襟#双排扣*衣款式#亮片", "summary": "这款翻领长袖西装外套,线条利落,版型挺阔,通身采用黑色面料,再融入亮片的设计,简约大气自带女王气场,潮酷有型。胸前采用v字西装大翻领设计,运用面料本身高级的光泽感,增添了造型的层次感与立体感,更加衬托出奢华的贵族气质。金属双排扣的设计,给整体暗色调加入了亮点,极具装饰美。"} +{"content": "类型#裤*版型#宽松*版型#立体剪裁*裤型#阔腿裤*裤款式#抽绳*裤款式#抽褶", "summary": "了一个冬季的赘肉,更是不能在春季暴露,一条阔腿裤就是帮你隐藏下半身缺陷的完美利器。腿短腿胖?这都是不存在的问题。宽松的廓形设计,遮盖住腿部赘肉,更能修饰o/x腿型。腰带也十分精致,阔腿裤搭配同色系的抽绳,在抽绳末端还设计了金属装饰,使绳子的垂坠感更佳。尤为突出的当属臀部剪裁,立体剪裁,形成圆润弧度,上身完美贴合,减少了臀部褶皱的出现,使臀型更佳完美。"} +{"content": "类型#上衣*材质#棉*颜色#黑色*风格#运动*风格#休闲*图案#字母*图案#文字*衣样式#衫*衣款式#罗纹", "summary": "这件休闲衫采用黑色作为主色调,非常酷炫时尚。胸前是品牌的logo图案,造型是流畅的手写体字字母,给人的感觉非常潇洒自然。这件休闲衫采用的是纯棉面料,贴身穿着也会非常舒服。领口和袖子采用的是罗纹面料,增强了防风保暖的效果,同时运动的时候也会非常方便。"} +{"content": "类型#裤*风格#休闲*裤长#长裤", "summary": "百搭而凸显都市时尚情结的长裤。自然的版型挺括而修饰身材,深黑色的面料,更加适宜各种商务和休闲场合的搭配。腰部的串珠点缀,修饰出华丽精致的时尚品味,面料具备弹力,上身体验舒爽轻柔。"} +{"content": "类型#裤*风格#休闲*裤长#长裤", "summary": "背带长裤打破以往的精致剪裁,结合带有垂坠质感的面料,凸显的是一种慵懒的休闲韵味。腰部的交叉系带设计,收腰的同时也使得更加贴合腰部,视觉上更显腰部的纤瘦和背部的笔直。两条大大的裤腿,自然落下,行走方便自如,没有束缚感的同时更显慵懒的浪漫情怀。"} +{"content": "类型#裤*版型#显瘦*图案#条纹*图案#印花*图案#撞色*裤型#直筒裤*裤款式#拼接*裤款式#口袋*裤款式#不规则", "summary": "印花图案很个性,上身很容易穿出范,而且,真丝面料对色彩的表现力非常好,印染的清晰形象,这非常难得!领口、袖口、下摆,撞色条纹设计轻松点亮视觉,很有气质,不规则下摆,特别有腔调感!无形中拉长身形,倍显高挑!裤子,直筒廓形,修饰腿型,藏肉显瘦前口袋设计,也增添了实用性!裤腿两侧撞色拼接,倍添出~"} +{"content": "类型#上衣*风格#简约*图案#线条*衣样式#西装*衣领型#西装领*衣门襟#一粒扣*衣款式#拼接", "summary": 
"这款西装设计了经典的西装领设计,有着干净简约的线条,轻松烘托出强大的气场能量。门襟采用一粒扣开合的造型,精致新颖,让心情都觉得活跃起来。袖口融入撞布的拼接,营造出两件套的效果,打破打掉,却增添一份穿衣的风格。"} +{"content": "类型#上衣*版型#宽松*风格#性感*图案#撞色*衣样式#卫衣", "summary": "此款加大型卫衣,宽松舒适。袖子上个性的平面图案为自主开发设计。平面感图案在袖子上排列设计,是今年的设计手法。,做了撞色的设计处理。整个款式个性感十足,彰显设计感。"} +{"content": "类型#裙*版型#宽松*图案#植物*图案#印花*裙长#连衣裙*裙款式#不规则", "summary": "款式简洁的连衣裙,翠绿色的植物印花,能很好的衬托出干净白皙的肤色,同时大面积的满印设计,打破夏日的炎热感,给人的心灵注入清爽与活力。宽松的裙摆营造出丰富的层次感,尽显小女孩的优雅甜美。不规则的下摆,像是随意的剪裁却也不失设计感,个性有趣。"} +{"content": "类型#裙*材质#蕾丝*颜色#纯色*风格#性感*图案#纯色*图案#蕾丝*裙腰型#高腰*裙长#连衣裙*裙领型#v领*裙衣门襟#系带*裙款式#勾花镂空", "summary": "这款纯色的连衣裙采用了蕾丝镂空的设计,性感而不会过于暴露,立体的蕾丝花纹同样作为装饰,若隐若现地展示白皙的肌肤,而高腰部位同样设计了镂空,展示腰线,大胆而吸睛。领口处采用了v领配合上小系带,灵动飘逸,举手投足之间散发甜美气质。"} +{"content": "类型#裙*材质#蕾丝*图案#线条*图案#蕾丝*裙型#百褶*裙长#连衣裙*裙领型#圆领*裙衣门襟#系带*裙款式#拼接", "summary": "一款充满着满满女人味的连衣裙,蕾丝面料的拼接增强了衣身的档次感,同时衬托出女性优雅的气质。基础圆领修饰出颈部线条,腰间系带凸显出女性纤细的小蛮腰,玲珑有致的曼妙身材彰显出来。百褶裙摆富有灵动飘逸韵味。"} +{"content": "类型#上衣*风格#潮*图案#格子*图案#撞色*衣样式#衬衫*衣款式#口袋*衣款式#纽扣", "summary": "以时尚界中永不落伍的格纹点缀于这件衬衫之上,使其保留着很率性的特色。还通过撞色效果加以点缀,令视觉冲击力更突出。而俏皮的弧形衣摆则展现出活力特色,令趣味性提高了许多。使其虽然是经典的纽扣门襟,却能衬出令人眼前一亮的潮流范。再加上对衬口袋的存在,就演绎出中性风BRAND。"} +{"content": "类型#裙*版型#宽松*图案#线条*裙腰型#自然腰*裙长#半身裙*裙款式#绑带", "summary": "这款中长款式的半身裙,别具一格的腰部蝴蝶系绑带设计,元气活力少女心爆棚,也多了几分趣味,穿着时刻轻松减龄更添气质。精致的自然腰版型设计,让宽松的衣着轻松贴身,修饰腰部线条更添纤细之感。选用深绿色调装饰,极具摩登时尚气。"} +{"content": "类型#裙*颜色#蓝色*风格#复古*风格#简约*图案#蝴蝶结*图案#复古*裙长#连衣裙*裙衣门襟#系带*裙款式#木耳边", "summary": "难得温柔的连衣裙,让看到的人便觉得舒适,优美的木耳花边小立领,温柔的弧度缱绻,娇俏可人的小女人味,蝴蝶结系带,一丝复古甜美,让裙子变得生动,而简约舒适的版型更让整体加分,淡淡的蓝色不管是在春天还是在夏天都能给人一种舒适的感觉不会。碎花纹,时髦的元素组合,赋予整款时尚度,同时透着不精心的美。"} +{"content": "类型#上衣*衣样式#衬衫*衣长#短款", "summary": "这款半裙采用a字版型剪裁,上身有效修饰腰部,勾勒出完美的身材曲线,更显女人味。短款的版型,受众于人群,从视觉上拉长下身比例,更显身高。绵羊皮面料处理,手感软糯,保留肉感,舒适细腻。上身可搭配衬衫,时髦又俏皮。"} +{"content": "类型#上衣*风格#复古*图案#蝴蝶结*图案#复古*图案#撞色*衣样式#衬衫*衣领型#立领", "summary": "新颖别致的立领设计,蝴蝶结的造型,打破年龄界限,很是甜美减龄;领口以及袖口的撞色镶边,镶边,别致新颖让人眼前一亮,洋气时髦,加上蝴蝶结的设计,与撞色镶边配合的很是默契,魅力吸睛;同时结合简单正式的衬衫版型,简直就是复古与现代美感的融合,为平淡无奇的衬衫带来了意想不到的时尚感。"} +{"content": "类型#上衣*材质#棉*风格#街头*图案#印花*衣样式#衬衫", "summary": "这件夏威夷衬衫采用了全棉贡缎面料,质感柔软透气,带来舒适的穿着体验。图案由深浅不一的芭蕉叶子组合,大面积印花尽显街头少年的叛逆不羁,尽显夏日的朝气活力。"} +{"content": 
"类型#裙*颜色#纯色*图案#纯色*图案#花色*裙长#连衣裙*裙领型#圆领", "summary": "这一款端庄的连衣裙,最满意于经典的圆领设计,进一步修饰了柔情的小丽人气质。配合纯色的花色图案印制,处处流露着柔美的都市丽人气质。"} +{"content": "类型#裙*材质#羊毛*图案#线条*裙长#短裙*裙长#连衣裙", "summary": "连衣裙采用80%的羊毛面料材质制作,具有吸水性高和保暖性强以及耐用性好和手感柔软而富有弹性等优质性能。可谓是时尚和保暖兼具的单品哟。短裙的裙长设计,不仅能给人带来满满的拉长腿部线条的视觉冲击感。同时又可谓是给小个子女生显高挑带来了好消息呐。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*图案#线条*图案#刺绣*裤型#直筒裤*裤款式#口袋", "summary": "本款牛仔裤选择了直筒的修身版型,剪裁流畅自然,让整体更显利落清爽。通过简洁细致的线条凸显了整体的修身效果。同时在臀部口袋处加入了精美的刺绣图案设计,让这款时尚单品更具美观性和实用性,让您出行更加自信。"} +{"content": "类型#上衣*材质#羊毛*图案#蝴蝶结*衣样式#针织衫*衣款式#波浪", "summary": "蝴蝶结如诗如画,打破针织衫的规整沉着,加一抹轻盈的灵动风姿。这款针织衫在领口和袖口的地方分别设计成波浪花边状,展现女性出浪漫的情怀;而别致的蝴蝶结装饰,为整体增添一丝甜美俏皮感;采用羊毛面料,手感舒适有着良好的保暖性。"} +{"content": "类型#裤*风格#运动*风格#休闲*图案#线条*图案#刺绣*裤款式#螺纹*裤款式#抽绳*裤口#小脚", "summary": "腰部抽绳设计运动休闲感十足,并且与裤脚的螺纹束口上下呼应,可以很好地修饰腿部线条。抽绳腰头,对身材的包容性很大,穿着自由,随心而不拘泥。最后左腿处的刺绣logo,在运动裤休闲舒适的基础上,又增添了时尚个性。面料柔软舒适,品质面料柔软不易起球、不易变形、不易褪色,有一种亲肤的顺滑干,提升良好穿着感受。"} +{"content": "类型#裙*风格#淑女*图案#蝴蝶结*裙型#网纱裙*裙下摆#层叠*裙腰型#高腰", "summary": "选用高品质的面料为裙身的载体,立体的蝴蝶结装点在高腰衔接处的后身,一抹少女的优雅气息扑面而来。加以蓬松层叠的网纱裙摆,拥有错综复杂的叠加设计,让整体视觉层次更显立体,演绎出唯美时尚的画面效果,彰显满满的淑女范儿。"} +{"content": "类型#裙*材质#蕾丝*风格#性感*图案#字母*图案#文字*图案#印花*图案#蕾丝*裙款式#拼接", "summary": "BRAND的这款睡衣裙,设计师将蕾丝元素运用到衣面设计中,拼接的蕾丝花纹遍布裙面,营造出隐隐的透视感,身型曲线若隐若现,将优雅与性感相结合。肩带上配以字母的印花设计,为整体增添时尚气息,可自由调节松紧度,给人自由的穿着体验感。面料主材质为彩棉,给人亲肤舒适感。"} +{"content": "类型#上衣*版型#宽松*颜色#纯色*图案#纯色*衣样式#卫衣", "summary": "卫衣是纯色款式,非常日常易搭配。微微宽松的版型,对身材不自信的女孩也不用担心。胸前的英文logo设计,非常有个性,很有时尚态度。"} +{"content": "类型#裙*颜色#白色*颜色#黑色*颜色#黑白*图案#印花*裙型#直筒裙", "summary": "以白色为主打色,再加上袖子的黑色,黑白搭配不显单调,加之3d立体玫瑰印花,使裙子显的立体,玫瑰代表着热情,就好像在表达着女性的青春活力,绽放自己的魅力。直筒的裙型,修饰腿部,展现女性独特的身材比例,更能拉长腿部,显高显身材。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*颜色#黑色*裙型#直筒裙*裙长#连衣裙*裙款式#腰带", "summary": "黑色连衣裙是每个小仙女都必备的气质单品,黑色天生自带神秘的美感,加之宽松直筒版型剪裁,营造出视觉显瘦的效果,让各种身材的mm都能穿出自信的时尚态度。没有过多繁杂的装饰,一条宽腰带别在腰间,勾勒出纤细的腰身,给宽松的连衣裙增添一抹精致的女人味,极简更具高雅气质魅力。"} +{"content": "类型#裙*材质#蕾丝*风格#青春*图案#蕾丝*裙型#a字*裙腰型#高腰*裙长#连衣裙*裙领型#圆领*裙袖型#喇叭袖", "summary": "这款蕾丝连衣裙的版型非常容易驾驭,高腰a字的版型穿起来很显苗条和高挑,睫毛蕾丝圆领非常的精致,显得脖颈更加迷人。袖子呢是今年很流行的喇叭袖,很浪漫仙美的感觉,让你举手之间显得气质更加出众。"} +{"content": 
"类型#上衣*版型#显瘦*风格#复古*风格#知性*图案#复古*衣样式#衬衫*衣领型#翻领", "summary": "这款衬衫,精致的小尖衬衫翻领设计,衬托小v脸型。散发出女性温婉知性的美感,蓬松的袖子设计。显瘦又可爱俏皮,舒适性自是不用说。既清爽自然又复古时兴,观感超棒,让你清凉整个夏日。"} +{"content": "类型#上衣*材质#牛仔布*颜色#黑色*风格#复古*风格#潮*图案#复古*衣样式#西装", "summary": "还记得《王牌》中的帅气吗?传统牛津皮鞋给人非常儒雅老成的印象,就连这种原本的青年穿上黑色西装和牛津皮鞋,也变成绅士!然而平时总不能一直穿着牛津皮鞋出入各种场合,这个时候一双牛津皮鞋改版的复古板鞋就是你的需求了。它儒雅,能搭配正装而且更显年轻;它略微潮流,能跟休闲裤牛仔裤出街装备!"} +{"content": "类型#裙*颜色#粉色*图案#字母*图案#文字*裙长#短裙", "summary": "简简单单的字母t,想呈现什么样的风格就看你自己发挥了。和各种短裤短裙都,如果遇到不会搭配的下装不妨和这件搭配试一试。在如果你已经有了米色的t,推荐可以再入一件橡皮粉色,来给你的衣橱增加一些可搭配的色彩。"} +{"content": "类型#裙*版型#宽松*图案#条纹*裙下摆#垂坠*裙长#连衣裙*裙款式#拼接*裙款式#木耳边*裙款式#抽褶", "summary": "连衣裙做了长版的样式,配合着宽松的剪裁,垂坠感很强,裙摆上还加入了褶皱元素,更加易于带来极强的灵动气息,还能巧妙的掩盖身材小缺点。而裙身下方还选用了条纹拼接,木耳边环绕一周,尽显时尚魅力,同时也打破了纯色调的单一感,层次性油然而出,动人无比。"} +{"content": "类型#上衣*版型#显瘦*颜色#绿色*风格#文艺*风格#青春*风格#清新*图案#格子*衣样式#衬衫*衣袖型#蝙蝠袖*衣款式#拼接", "summary": "衬衫的百搭性能是非常强的,已然成为了人手必备的单品。这款衬衫选用绿色的格纹拼接,既凸显文艺小清新,又散发出青春的活力,适合学院风穿搭。而蝙蝠袖的设计,不仅起到了显瘦的作用,还增添了慵懒的感觉。再加上收褶的下摆设计,提升了整体的造型感。"} +{"content": "类型#裙*材质#针织*颜色#白色*风格#性感*裙长#连衣裙*裙款式#拼接", "summary": "BRAND以深灰色为主基调打造的这款针织连衣裙,整体采用了假两件的剪裁设计,带来较为慵懒且性感的穿着美感。毛衣裙在领口拼接了白色的背心,形成了半遮半掩的穿着美感,尽显都市女性摩登且炫酷的穿着效果,是非常出彩的选择。"} +{"content": "类型#裙*材质#雪纺*风格#高贵*裙下摆#垂坠*裙领型#一字领*裙袖型#喇叭袖*裙衣门襟#系带", "summary": "这条裙子一眼看过去就能带来甜美的即视感,优雅的一字领设计看上去十分的精致高贵。这样一条裙子穿在身上显得十分的少女,像是捧在手里的棉花糖。半透的喇叭袖设计看上去十分的飘逸,雪纺的面料使得袖子更加有垂坠感,有更好的视觉观感。胸前的系带穿绳设计可以有多种穿法,更加百搭多变。这样一条裙子既有少女的甜美又有轻熟女的娇俏。"} +{"content": "类型#裤*版型#显瘦*材质#蕾丝*风格#潮*风格#性感*图案#撞色*图案#蕾丝*裤型#哈伦裤*裤款式#拼接*裤口#小脚", "summary": "玛玛绨的一款港风风格的哈伦裤,采用的是束脚的版型,非常的显瘦,而且让你的双腿显得也更加的笔直。蕾丝透视的设计,让你的腿部的肌肤若隐若现,时刻都透露着性感,而且还能凸显你的女人味。融入了撞色拼接的元素,非常亮眼的颜色,更凸显了潮流的气息。"} +{"content": "类型#上衣*版型#宽松*风格#复古*风格#休闲*图案#格子*图案#复古*衣样式#衬衫*衣样式#外套*衣领型#翻领*衣袖型#收口", "summary": "复古怀旧风的格纹衬衫,配色很别致,出街都基本不会撞衫。简洁的小翻领,修饰脸型显精神都不在话下。整体的设计比较宽松,也可以当做小外套穿着,袖口做了收口,看起来更加利落,两个贴袋装饰。帅气休闲。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#棉*图案#线条*衣样式#衬衫", "summary": "这款衬衫的面料采用纯棉面料,面料手感柔软顺滑,穿着舒适亲肤。版型做了宽松版型设计,藏肉显瘦,适合多数mm驾驭。衣袖直筒设计,修饰纤细手臂线条,下摆包边设计,走线工整,凸显品质精致做工。"} +{"content": "类型#裤*颜色#黑色*图案#线条*裤长#短裤*裤款式#口袋", "summary": 
"裤子侧边的黑色五角星图案是她最大的亮点,几乎没有人想到在短裤的侧面还可以有这样的设计,真心赞。盾形的后口袋设计,完美提升臀部线条,这样就既有美腿又有翘臀啦。"} +{"content": "类型#裤*颜色#黑色*图案#线条*裤长#短裤*裤款式#口袋", "summary": "这款短裤采用纯黑色的色调,演绎出成熟女性的优雅与沉稳。廓形的剪裁设计,让这款短裤更加有型有范儿。裤脚处的荷叶边造型,更显俏皮可爱的少女气息,也为裤身增添了更多的层次感,丰富了整体的视觉感受。裤身两侧的斜插口袋,兼具美观性与实用性。臀部的尾带设计,将腰臀间的曲线展现的更加凹凸有致,打造曼妙的身形线条。"} +{"content": "类型#上衣*颜色#纯色*图案#条纹*图案#纯色*图案#撞色*衣样式#卫衣*衣袖长#短袖*衣门襟#拉链*衣门襟#套头*衣款式#拼接*衣款式#拉链*衣款式#连帽", "summary": "舒适套头的短袖连帽卫衣,标准版型穿着更加的合身。纯色的衣身精选舒适面料,就算贴身穿着也无妨。衣身两侧下摆处的撞色竖条纹拼接,并辅以金属拉链开叉装饰,开合随意,更有修饰身形,点睛的功效。"} +{"content": "类型#上衣*材质#蚕丝*风格#通勤*风格#运动*风格#青春*图案#条纹*图案#印花*衣样式#衬衫*衣款式#松紧带", "summary": "青色大印花两件套,通勤感又不失流行,拉长的衬衫领是它的小小心情,袖口红蓝白条纹与青色的冲突,更让整套视觉明亮。裙子用松紧带,弹性十足,胖瘦都毫不费劲,适合各类提醒。蚕丝面料舒适而不臃肿,运动风明快,却又带了小女人的气质。"} +{"content": "类型#裙*版型#显瘦*版型#h*材质#蚕丝*颜色#蓝色*风格#复古*风格#文艺*风格#简约*风格#知性*图案#复古*图案#印花*裙下摆#开叉*裙长#长裙*裙袖长#五分袖*裙领型#圆领*裙衣门襟#排扣", "summary": "桑蚕丝印花轻柔飘逸,色彩艳丽,彰显高端品质。h型长裙包容身材,端庄优雅又显瘦,用彩色排扣点缀裙摆开叉处,增添复古的文艺风范,露出蓝色双层裙摆,更显浪漫妩媚。圆领口和五分袖都是最简单的款式,简约中流露知性气质。"} +{"content": "类型#上衣*版型#宽松*颜色#黄色*风格#简约*图案#条纹*图案#蝴蝶结*衣样式#衬衫*衣袖型#喇叭袖*衣门襟#系带", "summary": "一款简约而不简单的衬衫,宽松合身的款式穿着舒适又不挑身材,看习惯了各种白条或者蓝白条纹的衬衫,暖黄色的条纹是不是给你眼前一亮的感觉呢?袖口小喇叭袖与领口上的蝴蝶结系带相呼应,满满的甜美少女感,穿起来也很减龄。"} +{"content": "类型#上衣*衣样式#风衣*衣门襟#拉链*衣款式#口袋*衣款式#拉链*衣款式#对称", "summary": "弹力束口袖口以及衣摆设计,松紧适宜,穿着舒适无束缚,加强防风保暖。侧身斜插口袋设计,对称美观,且实用方便。ykk拉链门襟设计,拉合顺畅且穿脱更方便。轻便耐磨风衣面料,亲肤透气,防风防晒。"} +{"content": "类型#裙*风格#复古*风格#文艺*图案#复古*图案#撞色*裙型#a字*裙领型#v领*裙款式#吊带*裙款式#收腰", "summary": "收腰的小吊带,能够轻松秀出纤细腰肢,下摆呈a字散开,带来几分俏皮甜美感,长度是到小腿的位置,这个长度最显气质,不长不短刚刚好。领口的撞色勾边,很容易就能吸引视线,搭配v领,更有一股复古气息。一款比较偏轻熟风的吊带裙,文艺又不显幼稚。"} +{"content": "类型#裙*图案#蝴蝶结*图案#线条*裙型#a字*裙长#连衣裙*裙款式#绑带*裙款式#收腰", "summary": "连衣裙属于非常好穿的x廓形,这样的线条对于身材不会有太大要求,立体收腰的设计,还会让腰肢看起来更为纤瘦。腰节以下的位置做成了散开的a字摆的样式,中袖的长度也是比较温婉的,袖口附带有绑带蝴蝶结,凸显年轻俏皮的味道。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*颜色#浅蓝色*裤腰型#高腰*裤口#毛边", "summary": "浅蓝的色调迎合夏日明朗的氛围,糅合进牛仔裤间透着一丝丝潇洒大气的味道。简洁的款式摒弃浮华,象征着积极与纯粹的生活态度。高腰的裤型与a字版型相互配合,既能打造黄金身材比例又能巧妙地遮肉显瘦。另外在其裤头与裤腿出运用毛边处理,透着一丝丝不甘于平庸的个性,单调与乏味感瞬间一扫而空,宣扬潇洒不羁的活力。"} +{"content": "类型#裙*材质#蚕丝*图案#印花*图案#撞色*裙下摆#垂坠", "summary": 
"桑蚕丝是真丝中的翘楚,它能带给女性舒适亲肤的穿着体验,并具有轻盈垂坠的特性,作为高级时装的面料,也是高定晚礼服的时尚宠儿。这款真丝吊带裙充分诠释了桑蚕丝面料的高级之感,轻盈度和垂感都很赞,面料泛着淡淡光泽,夏季有出色的清爽与透气性能。印花图案总是具有化腐朽为神奇的力量,裙身的撞色印花增加几分轻奢味道,也让单品变得生动活泼起来,充满古典雅致之感。"} +{"content": "类型#上衣*版型#宽松*颜色#纯色*风格#简约*风格#休闲*图案#纯色*图案#线条*图案#撞色*衣样式#卫衣*衣袖型#落肩袖*衣款式#抽绳*衣款式#连帽", "summary": "简约又休闲的连帽卫衣版型,搭配上宽松的版型剪裁,瞬间穿出时尚慵懒气质。个性俏皮的落肩袖设计,修饰肩部线条,抹去了肩膀的硬朗感。撞色勾边和撞色抽绳的设计,打破了纯色的单调感,带去丰富的视觉层次感。"} +{"content": "类型#裙*风格#文艺*风格#知性*图案#条纹*图案#线条*图案#撞色*裙型#衬衫裙*裙领型#翻领", "summary": "透气清爽的色织麻衬衫裙,自带回归自然的恬静感,平添几分知性文艺风。整体大廓型的设计,让身体不受束缚的同时,结合撞色的条纹元素,带着时尚的律动感,赋予艺术气息。经典的小翻领,线条硬朗立体,契合裙身,尽显知性温柔,从而呈现出落落大方的穿着感。"} +{"content": "类型#裙*风格#清新*图案#卡通*图案#印花*裙长#连衣裙*裙袖长#无袖", "summary": "让您在夏日穿出清新范的连衣裙。淡雅的色系搭配精美的卡通印花,雕琢出格外唯美的时尚情结。采用欧根纱材质工艺,面料轻柔细腻,无袖背心式设计,上身效果更加清爽通透。"} +{"content": "类型#裤*材质#棉*材质#牛仔布*颜色#纯色*图案#纯色*图案#线条*裤口#毛边*裤口#微喇裤", "summary": "这款来自milibam的儿童牛仔休闲裤,甄选棉质牛仔面料,柔软滑糯,带来轻盈舒适的穿着体验;弹力伸缩裤头设计,可以根据宝宝腰身自由调节,舒适自在;裤脚结合毛边喇叭裤的版型设计,颇具时代感,且拉长腿部线条;甜美的纯色色调,在视觉上享受的同时,更于优雅气质中彰显出高街范,妥妥的小时髦精。"} +{"content": "类型#上衣*材质#蚕丝*风格#欧美*风格#潮*图案#线条*图案#刺绣*衣样式#衬衫*衣领型#圆领*衣袖型#灯笼袖*衣袖型#喇叭袖*衣款式#钉珠*衣款式#荷叶边", "summary": "经典的圆领设计,修饰颈部线条,时尚的绣花设计,尽显甜美可爱。欧美气质a字裙,真丝工艺打造,舒适的圆领喇叭袖,精湛的钉珠有档次,穿着优雅美丽。优雅的衬衫领口结合荷叶边修饰,干练灯笼袖,搭配一步裙,穿着气质大方,简单大方的款式设计。尽显满满的潮流气息,更有个性。"} +{"content": "类型#裙*风格#运动*图案#条纹*图案#撞色*裙长#长裙*裙款式#不对称*裙款式#波浪", "summary": "以剪裁和面料的变化,丰富你的衣橱。这款女子半身长裙,以不对称的下摆线剪裁和腰身波浪般的裙褶设计,为你的运动单品添加更多女人味。采用柔软的绉绸面料打造,舒适时尚。裙侧缀撞色三条纹。"} +{"content": "类型#裙*版型#显瘦*图案#条纹*图案#蝴蝶结*图案#撞色*裙型#a字*裙领型#v领*裙款式#绑带", "summary": "后背蝴蝶结绑带设计甜美可爱,裙身撞色条纹设计个性十足,经典的条纹元素看呢,v领设计修饰精致脸型,a字裙摆遮肉显瘦。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#性感*图案#蝴蝶结*衣样式#衬衫*衣领型#一字领*衣袖型#喇叭袖", "summary": "这一款衬衫一字领设计,露出肩部性感迷人。微微宽松的廓形,上身包容性比较强,遮肉显瘦中凸显美妙身姿。时尚的蝴蝶结装饰,甜美俏皮自然减龄。加上精致喇叭袖,轻盈大方随风摇曳。"} +{"content": "类型#上衣*版型#宽松*材质#网纱*衣样式#卫衣*衣袖型#落肩袖*衣款式#拼接", "summary": "卫衣真是春日里出门必备单品,而这款拼接的卫衣更是能驾驭多种风格场合,你看那网纱拼接的肩部,是不是很有特色呢?而且舒适好穿的落肩袖宽松舒适,上身自由自在不受拘束。"} +{"content": "类型#裙*材质#雪纺*颜色#黄色*风格#简约*风格#性感*裙长#连衣裙*裙领型#圆领*裙款式#不规则", "summary": 
"柔和的黄色能给人带来一股暖意,轻盈的雪纺面料柔软的手感让你浑身散发女人味。时尚简约的大圆领设计,让整体看起来更有活力,穿着可爱减龄;衣袖还采用了透视面料制作,能够修饰出纤细手臂,突显性感魅力;不规则的下摆设计,给整体增添了立体层次,穿着起来也十分灵动飘逸。穿上这款连衣裙,走在大街上绝对回头率满满哟。"} +{"content": "类型#裙*风格#知性*图案#蝴蝶结*裙型#一步裙", "summary": "很气质有范的版型让人穿起来干净舒服,就算小个子女孩也能驾驭的,穿上仍具有一样美美的气质。很有夏日感色系,搭配肩部设计很特别,不夸张但也能体现你的个人品味,知性又浪漫。腰部蝴蝶结的装饰结合一步裙摆的设计凸显身材高挑,勾勒出曼妙的身姿,打造属于自己的优雅,提高众人回头率。"} +{"content": "类型#上衣*材质#针织*风格#休闲*衣样式#开衫*衣门襟#单排扣*衣款式#罗纹", "summary": "针织开衫是都市女性表达出素雅的单品,诠释出一番淡淡的惬意风情,也玩转出一股端庄大气的即视感。单排扣装饰的门襟,注入一股法式优雅的情怀,加之珍珠扣的高级感,显得时髦又摩登,立体的袋盖点缀,平添出素雅的休闲风情,罗纹针织的收尾,增添一股淡淡的惬意风情。"} +{"content": "类型#裙*风格#淑女*风格#休闲*裙长#连衣裙*裙衣长#中长款*裙领型#圆领", "summary": "中长款款式的气质款式连衣裙,中长款的版型女性穿在身上具有十足的休闲魅力与时尚的个性,极具女性十足的优雅气息。圆领的设计,能够凸显出女性的颈部长度。还能够很好的修饰出女性的淑女气息。"} +{"content": "类型#裙*材质#牛仔布*风格#简约*风格#青春*图案#卡通*图案#刺绣*裙型#牛仔裙*裙衣门襟#拉链*裙款式#破洞*裙款式#纽扣*裙款式#拉链", "summary": "牛仔裤是衣橱里一年四季都不可或缺的时尚单品,这款就是比较简约版型的,整体优选的牛仔棉弹面料更是提升了穿着的舒适感,腰身的纽扣以及隐形拉链,美观也满足穿脱自如。精美的卡通刺绣更是演绎了无限童趣感,裤身的破洞更是凸显出青春专属的时尚与个性。"} +{"content": "类型#裙*材质#蕾丝*材质#雪纺*风格#性感*图案#刺绣*图案#蕾丝*裙长#半身裙*裙款式#拼接*裙款式#勾花镂空", "summary": "让您穿出小心机的雪纺小衫。深黑色的面料,两侧肩部采用蕾丝刺绣拼接工艺,展现出格外动人的肤质。后背的镂空式设计,更让您留下迷人性感的背影。搭配一条半身裙就能气质满满。"} +{"content": "类型#裤*材质#牛仔布*材质#混纺*材质#纤维*图案#线条*裤腰型#高腰", "summary": "这一款牛仔裤采用了棉纤维与聚酯纤维等的混纺面料,裤身具有良好的弹性,上身没有紧绷不适感,行走起来舒适自在。裤身的裁剪相当精巧,根据亚洲女性的独特曲线而裁剪出来的线条,能够更好的修饰腿部线条,结合着高腰的版型,勾勒优美曲线。"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*裙腰型#高腰*裙长#半身裙*裙款式#口袋*裙款式#纽扣", "summary": "黑色是很典雅深沉的,既能够修身显瘦还能够衬托皮肤的白皙让你看起来更加的充满魅力。半身裙的长度恰到好处,搭配高腰的版型可以说是很时尚又充满美感了。金属纽扣的装饰,就如同沙漠里面的一般让裙子充满了生命的活力,粗线条的重工点缀增加了立体感和层次感,口袋的设计可以说很贴心的,能够放一些贴身的物品。"} +{"content": "类型#上衣*版型#宽松*颜色#黑色*风格#简约*风格#运动*图案#撞色*衣样式#外套*衣款式#拼接*衣款式#连帽", "summary": "针对运动训练而设计的一款外套,立体宽松的版型剪裁,活动自如不受束缚。整体以黑色做为基调,连帽与后背辅以撞色网眼面料加以拼接,简约之中搭出活力潮范儿。"} +{"content": "类型#裙*材质#网纱*颜色#粉色*风格#复古*风格#宫廷*风格#性感*图案#复古*图案#刺绣*裙长#长裙*裙长#连衣裙*裙款式#拼接", "summary": "网纱拼接长裙,网纱的点缀设计,舒适亲肤,但是又具有一点挺括性。粉色的甜美,延续女孩对梦幻色彩的定义,整体是运用各种几何图形绘制出精细图案,绣花精致的点缀,带来复古宫廷感,演绎浪漫优雅气质。上半身的设计,略带性感味道。单层网纱的设计,不会显得过于膨胀。柔美的仙女色调,朦胧轻盈带着舒适的质感,用在连衣裙温柔甜美。"} +{"content": "类型#裙*颜色#红色*图案#线条*裙下摆#荷叶边*裙领型#v领", "summary": 
"用红色来衬托白皙的肌肤,增加在人群中的瞩目度,轻薄的材质冰凉亲肤,即使是在炎热的沙滩,也能最大限度的透气散热。v领设计拉长脖颈线条,彰显气质,荷叶边袖口在沙滩微风的轻抚下,飘逸温柔。腰间松紧设计适合不同身形的女孩子,安全的裤裙有效避免了走光的尴尬。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*图案#线条*衣样式#卫衣*衣袖型#泡泡袖", "summary": "春日必备的卫衣,宽松的bf版型,上身舒适修身,遮掩住不必要的肉肉,让你的身材更为纤细玲珑。经典的圆形领口,柔化颈部的线条,修饰下巴的弧线,脸蛋立显精致有型。蓬松的泡泡袖,赋予人惊艳的视觉感,更显轻盈灵动,萌动不失调皮可爱。袖口处的紧实织法,简洁工整凸显质感,让你轻盈又利落哦~"} +{"content": "类型#上衣*颜色#白色*风格#淑女*风格#简约*风格#清新*衣样式#衬衫*衣袖型#插肩袖*衣袖型#喇叭袖*衣款式#绑带", "summary": "淑女风的衬衫,白色调的晕染,简约大气,清新脱俗,衬托白皙脸庞的同时提升女性的高雅气质。小v的领口,荷叶花边与绑带修饰,遮缮肩膀宽度,并融入甜美风情。插肩袖型与喇叭袖摆,方便活动,突显优雅浪漫。"} +{"content": "类型#裙*风格#淑女*图案#刺绣*裙长#连衣裙*裙款式#勾花镂空", "summary": "气质与舒适兼顾的一款连衣裙,因融入刺绣镂空设计,而尽显优雅迷人。小鸟刺绣图案精美逼真,除了透露着甜美大气的淑女气质,还兼备减龄效果。除了栩栩如生的刺绣图案之外,其细腻的针脚也透露出精湛的制作工艺。"} +{"content": "类型#上衣*风格#英伦*衣样式#外套*衣样式#西装", "summary": "这是一款充满英伦典雅气息的西装外套。采用精美挺括的华达呢面料,倾斜的纹理使得视觉与触感更为耐看与饱满。高温定型工艺处理,令经典的版型更加焕发光彩与强大的气场。肩部特别添加适度垫肩,令肩部的廓形更为立体时尚,从而凸显高挑修长的身形。采用英伦沿袭的织带方式装饰衣领与衣袋,同时搭配徽章造型,更加展现干练、自信的气质。"} +{"content": "类型#裙*版型#显瘦*材质#牛仔布*风格#简约*图案#线条*裙型#牛仔裙*裙型#直筒裙", "summary": "小直筒牛仔裤,上宽下窄的线条,流畅又简约,视觉修饰腿型和宽胯,显瘦实力满分。优质弹力牛仔,妥帖包裹身体曲线,持久回弹,久穿膝盖部位也不易。后腰小v带设计,时尚有趣,增添精致细节看点,还能从视觉臀部范围有所缩小。"} +{"content": "类型#裙*风格#通勤*风格#淑女*风格#文艺*风格#简约*图案#格子*裙长#连衣裙*裙衣长#常规*裙袖长#长袖*裙领型#v领*裙领型#翻领*裙衣门襟#系带*裙款式#拼接*裙款式#不规则", "summary": "这款长袖系带连衣裙,可以说是细节之处都是小亮点了。领口部分运用了常规通勤的翻领样式,形成了一个小v领,可以很好地展现锁骨的曲线。值得一提的是,腰部与袖口处相互呼应的格纹拼接设计,很有文艺淑女的气质,简约中透露着女性的优雅。还有不规则的碎褶裙摆,浪漫又随性不拖沓。"} +{"content": "类型#裙*风格#清新*图案#碎花*图案#线条*裙下摆#压褶*裙长#连衣裙*裙款式#钉珠", "summary": "这款碎花款式的连衣裙,碎花的设计显得很清新优雅,船型领的设计,穿脱方便,前襟压褶的制作,线条整齐自然减龄。最大的亮点就是腰间的手工钉珠的制作,非常的靓丽。"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*颜色#黑白*风格#通勤*风格#简约*风格#知性*图案#条纹*裙衣门襟#单排扣*裙款式#腰带", "summary": "经典简约黑色,自有通勤典雅属性,而两肩和袖口,以及腰带的黑白条纹点缀,让裙身散发出律动的的吸睛魅力。此外,小立领和门襟处的单排扣则带有点中山装的影子,内敛沉稳,气质笃定。加上x版型的修身显瘦性能,凸显出女性优雅端庄的知性气质。"} +{"content": "类型#裙*风格#街头*风格#性感*裙型#蓬蓬裙*裙款式#拼接*裙款式#勾花镂空*裙款式#收腰", "summary": "裙摆拼接波浪形镂空花纹,增加整体的时髦度和设计感,看上去挺括立体。为单调的裙摆增添几分酷帅的街头感,蓬蓬的裙摆更具廓形,上身很显精神。收腰的效果真的很棒,束出小蛮腰。很性感,拉高腰线,不压个子不挑人穿。"} +{"content": "类型#裙*材质#丝绒*材质#纤维*裙腰型#高腰*裙长#半身裙*裙衣门襟#拉链*裙款式#拼接*裙款式#拉链", "summary": 
"一款舒适有型的聚酯纤维半身裙;别具一格的高腰设计,采用了丝绒作为拼接,贴合腰线并衬出纤腰,使曼妙身姿穿搭出柔美小女人的味道;侧腰处的隐形拉链设计,便于穿脱又不影响裙身的外观,穿着更显大方得体。"} +{"content": "类型#上衣*材质#针织*风格#淑女*风格#高贵*衣样式#毛衣*衣领型#半高领", "summary": "来自的这件针织毛衣领口采用半高领的花边设计,可以起到减龄的作用,同时又显的更加淑女。结合胸前的奢华镶嵌设计让这件针织上更加贵气,因此更能使穿着者更加的有气场且高贵。"} +{"content": "类型#裙*风格#街头*风格#简约*裙长#连衣裙", "summary": "作为时尚界的一股清流,时尚连衣裙得到了都市女性的一致认可和青睐,它的设计风格简约,迎合了都市女性的审美追求。这款时尚连衣裙在设计上没有多余的修饰,但是优质面料无疑是一抹亮色,它让这款连衣裙可以自然而然的在街头脱颖而出,轻松打造出高端路线,对于都市女性来说,简直再好不过。"} +{"content": "类型#裙*材质#网纱*颜色#白色*裙款式#吊带", "summary": "裙子采用两件套设计,内里为长款吊带,外罩选用珍珠点缀的网纱,精致的做工与纯洁的白色相得益彰,素雅纯洁仙气十足~网纱若隐若现的视觉感受与吊带元素提升了整个人成熟的气质。"} +{"content": "类型#上衣*版型#显瘦*风格#休闲*图案#卡通*衣样式#卫衣*衣款式#螺纹", "summary": "卫衣既可以很时尚也能够带来接头的炫酷风格,这款却是满满的少女俏皮感,一身的版型是外廓样式的。穿着起来不挑身材也更是显瘦,经典的圆形螺纹领口,贴合颈部更方便穿脱。胸前的可爱卡通图案还富有童趣感,衣摆还带有开叉设计。休闲同时也提升了洋气感。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#街头*风格#工装*图案#线条*衣样式#外套*衣袖型#插肩袖*衣款式#口袋", "summary": "这件时尚帅气的工装外套,穿着舒适更显个性。宽松版型,不挑身材不挑人,遮肉显瘦很百搭;口袋设计,立体有型,很有设计感;插肩袖设计,修饰手臂线条的同时,更显慵懒宽松的街头感。"} +{"content": "类型#裙*材质#蕾丝*风格#性感*图案#线条*图案#蕾丝*裙腰型#高腰*裙领型#一字领", "summary": "半透一字肩设计的这款婚纱,再饰以立体花朵点缀其中,打造甜美公主范儿的同时,也更能凸显性感锁骨线条。而高腰加上的裙型,上身则更衬优雅女神气质。水溶蕾丝面料的选用,更加彰显出了奢华品质感。"} +{"content": "类型#裙*图案#印花*裙衣长#中长款", "summary": "这款卫衣袖口处的假两件设计,增添层次感,一件出门也能穿出不一样的~帽檐廓形的袖口设计,增添整体的有趣成分更加亮点。oversize版型,松松垮垮,配以中长款打造,配上长靴不要太洋气~加上胸口及袖口的个性印花尽显青春活力气息~"} +{"content": "类型#上衣*材质#棉*风格#运动*风格#休闲*风格#潮*风格#嘻哈*图案#刺绣*衣样式#外套", "summary": "这款小外套选用棉质面料,使版型挺阔立体,上身更加有形。英文字母刺绣,增添潮流时尚感,打造出休闲运动风。细节袢的大袖扣点缀,设计新颖独特,低调中又不失利落帅气,上身嘻哈范十足。"} +{"content": "类型#上衣*版型#宽松*颜色#红色*衣样式#衬衫*衣领型#翻领*衣款式#钉珠", "summary": "让您穿出热情洋溢的名媛范,是这款衬衫带给您的惊喜。亮眼的红色系面料,工整的小翻领,适合各种场合穿搭。领口两侧的钉珠工艺非常别致,呈现出精美的时尚品味。自然宽松版型,可以驾驭各种身材。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*裤腰型#中腰", "summary": "具有弹性的牛仔裤,在穿着的时候更加注重舒适性。设计师在大腿处采用了磨白的痕迹,打造出了时尚的立体效果,看上去更加显瘦。而中腰的版型能够保护腰腹部,防止寒风的入侵,也更加具有时尚感,实用的性能大大上升。"} +{"content": "类型#上衣*材质#针织*风格#简约*图案#线条*衣样式#针织衫*衣款式#勾花镂空", "summary": "这是一款初见简约,再看惊艳的针织衫,它的美,如雏菊,静美极佳。纯白色的衣身,色调大气简约,更好搭配服饰;针织的纹理诠释,竖线的线条更显身姿修长纤细;精妙之处就在于衣身加设了嵌花,精美的小花朵,圆孔镂空,就像一朵朵雏菊般,优雅泛清香。"} +{"content": "类型#裙*版型#显瘦*风格#知性*风格#中国风*图案#亮面*裙袖型#灯笼袖", 
"summary": "这款齐腰短外套,采用中西结合的造型设计,融入中国风的盘口元素,展现女性温婉知性魅力。精选提花亮面面料,明暗有致的反光效果,使整体立体感十足。衣身部分,鼓起的衣身遮肉显瘦。齐腰的衣长处理,提高腰线,衬出大长腿。的优美弧线,美化脖颈曲线。束口灯笼袖的设计,修饰手臂,亮眼抢镜。"} +{"content": "类型#上衣*风格#街头*风格#复古*图案#条纹*图案#复古*图案#线条*衣样式#衬衫*衣款式#口袋*衣款式#不对称", "summary": "这款衬衫采用了经典的条纹元素,彰显出鲜明的美式复古街头气息,能够打造出雅痞的绅士气度。前幅分别采用了不对称的口袋设计,营造出鲜明的层次感,又具有收纳的作用。弧形的下摆,剪裁比较流畅,可修饰出臀部的线条。"} +{"content": "类型#裤*裤型#灯笼裤*裤型#阔腿裤*裤型#背带裤*裤款式#绑带", "summary": "背带裤带着童年的记忆,总是让人对它爱不,是减龄的王牌单品,更是打造百变造型的小心机设计。以阔腿裤版型剪裁打造,实力修饰腿型,完美适配各种身材。藏在裤脚的设计小心机瞬间你的心,个性的绑带设计让阔腿裤一秒变成灯笼裤,俏皮感十足又百变,满足你对百搭的需求。"} +{"content": "类型#裙*颜色#纯色*风格#通勤*图案#纯色*图案#线条*裙长#连衣裙*裙领型#圆领*裙袖型#蝙蝠袖*裙衣门襟#系带", "summary": "通勤百搭的一款纯色连衣裙,经典时尚的圆领设计,勾勒出优美的颈部线条,结合领口个性的系带装饰,稍微为整体的造型增添几分设计感,显得美观而大方。袖子采用精致的蝙蝠袖,宽宽松松的廓形,能够很好的美化双臂线条,展现温婉优雅的气质。"} +{"content": "类型#裤*材质#天丝*材质#牛仔布*颜色#纯色*风格#复古*图案#纯色*图案#复古*裤款式#口袋*裤口#小脚", "summary": "选取天丝材质以其与生俱来的柔和光泽度与细腻质感,释放出慵懒复古味道。以基础简洁剪裁碰撞纯色设计,尤为时尚大气。简单而不失雅致的纯色衣身,传递出现代人追求便捷,舍简的生活方式,深受大众青睐。胸前口袋设计,在与衣身相同颜色渲染下轻轻点缀,低调添注时尚细节。排扣的设计绅士优雅,简易搭配,可搭配休闲裤、牛仔裤、束脚裤等。"} +{"content": "类型#裤*材质#丝绒*裤长#连体裤*裤款式#亮丝*裤款式#流苏", "summary": "颇具高级名媛气质的一款丝绒连体裤,采用的是质感极好的亮丝丝绒材质,泛着满满的光泽感,让人看一眼便能感受到它的高级感,又带来一种别致的华丽时尚气息;袖口处的羽毛流苏装饰亮眼吸睛,凸显出女性个性的一面;还有那收腰系带的设计,轻松便可勾勒出女性的苗条身姿。"} +{"content": "类型#裤*风格#简约*风格#清新*裤长#短裤", "summary": "本品采用简约短小的短裤造型,适合在炎热夏季穿着使用,能够带来清凉舒适的体验。短小的款式还可以实现修饰流畅腿型的效果,能穿出大长腿。采用清新的颜色款式,充满时尚气质,满足自由搭配的需求。"} +{"content": "类型#裙*版型#显瘦*裙型#百褶*裙长#连衣裙*裙领型#立领*裙款式#纽扣*裙款式#收腰", "summary": "BRAND这款百褶领连衣裙采用了百褶立领设计,显得十分俏皮可爱。后背带有点缀纽扣开合,方便人们穿脱。腰部带有收腰装饰,显瘦。"} +{"content": "类型#上衣*版型#显瘦*颜色#黑白*图案#条纹*图案#线条*衣样式#针织衫*衣领型#翻领", "summary": "十分适合早春穿搭的一款针织衫。经典的黑白竖条纹设计,视觉显瘦效果很好。小翻领设计,能够很好底修饰脖颈线条。特别定制的双层钩针设计,使得整件服饰的保暖性提升一个等级,但是又不会显得过分臃肿。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#棉*风格#青春*裙下摆#弧形*裙款式#腰带", "summary": "选用客供高支棉面料,织面平滑细腻,质感挺括,上身有型感。宽松的廓形,搭配上一款颜色艳丽的腰带,不仅可以修身突出腰部曲线,更为裙装增加设计亮点,凸显女性的青春美丽。弧形裙摆剪裁配合前短后长的设计,丰富层次增添一份时尚感。"} +{"content": "类型#裙*材质#网纱*材质#蕾丝*图案#蕾丝*裙长#长裙*裙长#半身裙*裙款式#拼接*裙款式#勾花镂空", "summary": "网纱拼接半身裙精选优质网纱质地轻盈、触感柔韧、垂感自然飘逸。拼接精致的镂空蕾丝,轻薄柔软。精美的花型纹路多变、清晰细致。花瓣状边缘采用纤细的睫毛收尾,更添层次美感!清爽半身长裙展现出十足女人味。"} +{"content": 
"类型#裙*颜色#黑白*风格#简约*图案#线条*裙款式#拼接", "summary": "这款鞋子采用牛皮的面料,细腻柔软散发着淡淡的光泽感。纯黑白拼接的设计,展现出简约主义的时髦腔调。方头的设计尽显优雅,平底款式让你行走更加舒适。嘻嘻的扣带的设计,视觉上拉长腿部线条。无论是搭配裙装或是裤子,出街美翻天妥妥的啦。"} +{"content": "类型#裙*材质#网纱*颜色#金色*图案#蝴蝶结*图案#刺绣*图案#撞色*裙长#连衣裙*裙款式#收腰", "summary": "在裙身上做立体珠片堆砌的星星,和小星星形成对比的同时更显得格外的精致,小女人梦寐以求的连衣裙也~在领口及裙摆上用撞色的定制金色网纱做镶边,网纱上特别的刺绣上了撞色的金色珠片,更为贴心的激光镶边,不会散口不说,细节感也做得到位,u型的大领口上系蝴蝶结,甜美一瞬间就出来了,微收腰的版型,裙摆却是散口的a型,甜美,更多的是女人味."} +{"content": "类型#裙*风格#简约*风格#知性*风格#性感*图案#印花*裙长#连衣裙*裙衣长#中长款*裙领型#v领", "summary": "这件简约而不简单的中长款印花连衣裙穿着很显气质,它的设计很用心。设计师采用了经典的v领设计融入衣身,露出精致的锁骨。给人性感而不失优雅的感觉,让你轻松打造出知性迷人的轻熟女韵味,而且v领的融入还能很好的修饰脸型,穿上它让你显得魅力十足。"} +{"content": "类型#裤*版型#显瘦*颜色#粉色*颜色#灰色*风格#运动*图案#线条*裤腰型#高腰*裤口#微喇裤", "summary": "这款瑜伽服采用粉色和灰色的搭配,上身后显瘦效果极佳且丰富你的运动生活,精选柔软透气性的面料,拥有极佳的弹力感。裤子腰部高腰的设计,再配以微喇叭版型的裤脚,修饰了双腿线条的同时让肌肉得到放松。"} +{"content": "类型#裙*颜色#灰色*风格#淑女*风格#清新*裙长#连衣裙*裙款式#腰带", "summary": "这款连衣裙采用了高级灰色的色调,优雅的色彩让整件连衣裙看上去满满的淑女风,优雅大方,立体的剪裁。给人清新可爱的视觉效果,可拆卸腰带处理,新颖时尚,甜美减龄,体现对时尚的追求,灯笼中袖的设计更是别具一格。"} +{"content": "类型#上衣*材质#蚕丝*风格#简约*衣样式#衬衫*衣长#短款*衣款式#拼接*衣款式#荷叶边", "summary": "一款简约的短款衬衫,融入细腻顺滑的真丝材质后,赋予衣身新的魅力,高雅恬静的气质尽显;直筒的衣身廓形巧妙的修饰身材曲线,包容性很好。圆润的领口显得经典又大方,袖口拼接上飘逸的荷叶边,行走起来灵动而柔美,边缘小巧的收褶也更添立体质感。"} +{"content": "类型#裙*风格#清新*图案#条纹*裙下摆#垂坠*裙长#连衣裙*裙领型#翻领*裙袖型#插肩袖*裙款式#拼接", "summary": "充满了少女气息的一款连衣裙,气质小翻领设计衬出小巧五官,蓝白条纹裙身清新减龄同时提升整体时尚度和优雅气质,自然垂坠的裙摆带来无限浪漫情怀和灵动美,插肩袖拼接打造挺括肩型,起到修饰肩型的效果,没有肩宽限制,任何身材都能驾驭。"} +{"content": "类型#上衣*风格#街头*风格#潮*图案#刺绣*衣样式#卫衣", "summary": "卫衣最大的亮点在于胸前新潮独特的logo刺绣,不同于BRAND一贯的设计往事,在其中加入了不同色系的c字logo,令视觉上的层次效果更加分明饱满。而帽子一侧精致的BRAND点缀,强调了双方品牌的联名身份,展现出不羁而时尚的街头气息。"} +{"content": "类型#上衣*风格#街头*风格#潮*图案#刺绣*衣样式#卫衣", "summary": "此款BRAND卫衣,采用经典的帽衫款式,胸口和袖口缀有低调精致的刺绣logo,为衣身增添街头风味和潮人魅力。且搭配柔软面料,内里有加绒设计,手感细腻,带来保暖舒适的衣着感受。此外,俏皮袋鼠兜不仅方便放置物品,同时彰显前卫的潮流风范。"} +{"content": "类型#裙*版型#显瘦*风格#淑女*风格#清新*风格#性感*裙领型#圆领*裙袖型#喇叭袖*裙衣门襟#系带", "summary": "简洁的圆领设计,显露出迷人性感脖颈,增添几分娇俏动人。挂脖式系带,丰富了层次感,带着些许的柔美俏皮。修身a摆裙型,结合及膝的长度,遮掩不完美的大腿达到显瘦的效果,唯美浪漫间透着女人柔美气息。喇叭袖凸显甜美气质,更为时尚平添一股秀气,让清新淑女韵味展露无疑。"} +{"content": "类型#裙*颜色#粉色*图案#碎花*裙下摆#开叉*裙长#连衣裙*裙袖长#七分袖*裙款式#腰带", "summary": 
"这款粉色连衣裙精选柔软的面料,上身后能修饰身材曲线,且裙摆的垂落感十足,营造满满的仙女形象。七分袖的设计搭配袖口的小开叉,小巧的展现了可爱俏皮的气质,腰间的碎花腰带,提升你的气质。"} +{"content": "类型#上衣*版型#宽松*材质#棉*风格#休闲*图案#条纹*衣样式#衬衫*衣款式#纽扣", "summary": "衬衫采用纯棉材质手感细腻,亲肤透气性好穿着更舒适,条纹设计色彩清爽干净,活性印染色彩牢固不褪色。厚度适中穿着舒适,宽松版型设计更显休闲自在。干练的衬衫领设计,更显宝贝精神有朝气,精致品牌纽扣富有光泽,细节处更彰显品质。衣摆前短后长圆弧设计,更添设计感更显灵动,两侧心机小开叉,增添一丝甜美活力。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*材质#棉*材质#水洗*风格#休闲*裤款式#口袋", "summary": "选取优质的水洗棉勾勒版型,柔软亲肤的触感,带来舒适透气的穿着体验。引用宽松的廓形版型设计,带来遮肉显瘦的穿搭效果。精致的口袋装饰着裤身,平添了几分休闲随性的气息。个性的圆环金属扣装饰着裤身,呈现出炫酷的时髦感。"} +{"content": "类型#裙*材质#棉*图案#格子*裙领型#翻领*裙款式#收腰", "summary": "一款无论什么场合都能驾驭的美裙。精致的小翻领,立体又有型,精气神儿很饱满。纹的设计,柔美又浪漫。经典的对格剪裁,彰显出精湛的工艺与高端品质感。100%客供棉,爽滑柔软又亲肤。收腰的款式,勾勒出纤细的曼妙身姿,气质非凡。"} +{"content": "类型#裙*风格#复古*风格#简约*图案#复古*图案#刺绣*裙长#连衣裙", "summary": "这款连衣裙选用素雅的有机水面料,纹理富有立体感,复古又不失时尚。亦红亦紫的藕荷色衣身,更能展现女性沉静如水的温婉恬静,再以做工精细的刺绣加以点缀,让其华丽感瞬间提升。以面料的“简”去映衬刺绣的“繁”,努力在简约与繁复中寻求一种平衡。"} +{"content": "类型#裤*风格#休闲*风格#潮*风格#工装*图案#字母*图案#文字*裤长#短裤*裤款式#口袋*裤款式#不对称*裤款式#飘带", "summary": "休闲短裤在左右两边的裤腿都设计上分明利落的工装口袋,塑造出帅气的工装版型,将男士硬朗的气场凸显。一侧口袋的翻盖装饰字母贴标,营造不对称的时尚感,两边裤脚还装饰飘带,摇曳出随性的味道,突出细节设计的个性和潮流。"} +{"content": "类型#裙*版型#显瘦*颜色#深色*图案#印花*裙下摆#花边*裙长#连衣裙*裙款式#不对称", "summary": "此款连衣裙选用优质面料打造,上身穿着舒适度爆棚。独特印花,立体饱满,工艺精湛。不对称的肩部设计,打破传统设计的单调感,更显时尚与活力。花边点缀,为整体注入甜美气息,少女感十足。搭配上深色腰封,有效拔高腰线,轻松优化身材比例,显高显瘦。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#针织*颜色#纯色*风格#简约*风格#休闲*图案#纯色*图案#线条*衣样式#开衫", "summary": "一件上身非常显瘦的针织开衫,轻薄垂顺的款式,无门禁,宽松休闲,自在随性,非常时尚百搭,显慵懒气质。后背做了一个立体的剪裁处理,削弱肩部线条,在视觉上就能显瘦十斤,还有一点的效果。简约的纯色设计,简单不单调,两边还有开衩处理,精致立体,更显苗条身形。"} +{"content": "类型#裙*风格#性感*图案#线条*裙下摆#花边*裙下摆#垂坠*裙腰型#高腰*裙领型#立领*裙款式#拼接*裙款式#勾花镂空*裙款式#钉珠", "summary": "镂空花边立领设计,勾勒出优美的脖颈线条,衬的人气质不俗,胸前镂空花纹拼接,加上精致的钉珠点缀,婉约朦胧却并不会显得过于暴露,带来刚刚好的性感情调。裙身上立体的花朵装饰,流露出几分婉约风情。高腰设计,自然垂坠的裙摆,裙长过膝至小腿,有很好的修饰作用,举手投足间尽显柔美气质。"} +{"content": "类型#裙*版型#显瘦*材质#雪纺*风格#复古*图案#碎花*图案#复古*裙款式#拼接", "summary": "很别致的一款碎花雪纺裙,选择温柔到骨子里的色调。带着一丝复古的气息,非常的耐看而且很好搭配。胸下的细褶拼接提高了腰线的位置,视觉上显高显瘦,轻松打造出大长腿的即视感,丰富的层次感充满着浪漫的气息,上身效果非常的轻盈优雅。"} +{"content": "类型#裙*版型#宽松*风格#复古*风格#简约*图案#蝴蝶结*图案#复古*图案#印花*裙型#a字*裙下摆#垂坠*裙款式#腰带", "summary": 
"这款裙子采用小a字版型,宽松舒适,视觉上提升腰线。慵懒随性的蝴蝶结腰带,增添几分甜美俏皮气息。复古简约方格纹印花,经典百搭不落俗套。垂坠面料与搭片款式的巧妙结合,增加整体层次感,更具独特气质。"} +{"content": "类型#上衣*材质#牛仔布*颜色#白色*风格#休闲*图案#线条*衣样式#外套*衣样式#西装*衣款式#拼接", "summary": "这款外套拥有硬挺的牛仔面料,诠释了西装的版型。设计师运用拼接手法,为西装加入了一丝趣味和休闲感。白色的线条更是勾勒出整体的效果。"} +{"content": "类型#裙*材质#棉*裙长#连衣裙*裙袖长#无袖*裙领型#圆领*裙衣门襟#系带", "summary": "此款连衣裙采用百分百的纯棉面料制作,具有极好的吸湿透气性,上身以后格外亲肤舒适。时尚大气的圆领,能够更好的贴合颈部,凸显女性的落落大方。利落简洁的无袖版型,清爽自在,特别适合夏季穿着。腰部贴心的系带设计,更是可以帮助你塑造纤细腰身。"} +{"content": "类型#上衣*图案#条纹*衣样式#衬衫*衣长#常规", "summary": "咋一看好像只是常规蓝白条纹衬衫,其实袖口处有着不经意的小亮点,打结的设计多了一些趣味性,使这件衬衫不会显得单板。领口大v设计,修饰脸型有瘦脸的效果。"} +{"content": "类型#上衣*颜色#宝蓝色*风格#淑女*风格#复古*风格#清新*图案#复古*图案#线条*图案#印花*图案#撞色*衣样式#衬衫", "summary": "撞色衬衫领拼接裙身,突出复古精巧的设计感,衬显出优美的天鹅颈,修饰精致脸型,上身穿着更显简单大方。个性花边剪裁的宝蓝色印花裙身,鲜明的色彩碰撞,给人活力又明快的俏皮感,凸显出女生清新柔美的气质,看起来纯净又优雅。高腰a字版型剪裁,使腰身的线条看起来更加自然、柔和,完美遮掩腿部线条和臀围的缺点,帮助穿衣者保持应有的淑女风范。"} +{"content": "类型#上衣*材质#蚕丝*风格#宫廷*风格#青春*衣样式#衬衫", "summary": "这款处处流露高贵典雅的桑蚕丝衬衣,尽显女性的优雅魅力,给人端庄大气的宫廷范儿,设计感以及质感完全不输大牌。衣袖采用透视真丝材质,舒适透气触感柔顺,给你清爽的体验。微喇的袖口尽显甜美可人,成熟中含有青春的气息。"} +{"content": "类型#裙*材质#牛仔布*裙长#半身裙*裙款式#破洞", "summary": "BRAND的这样一条别致迷人的牛仔裤设计感极好,彰显出你的独特韵味,让你吸睛十足更洒脱。它的别致半裙设计,甜美万分,同时带给你满满的层次感,让你大气十足。破洞的设计更是高端洒脱,想找别致韵味。"} +{"content": "类型#上衣*版型#显瘦*风格#复古*风格#知性*图案#复古*衣样式#风衣*衣样式#外套*衣门襟#系带*衣门襟#双排扣", "summary": "经典款式的双排扣风衣外套,搭配上色泽柔和的米色调,呈现出了更为复古知性的优雅感。系带设计的融入,让风衣可以更好的贴合多样化的身材,穿出更合身、也更显瘦的视觉效果。将它穿在身上,既优雅又不失气场,让你尽显时尚。"} +{"content": "类型#裙*版型#宽松*颜色#白色*图案#印花*图案#撞色*裙型#百褶*裙长#连衣裙*裙款式#收腰", "summary": "这款连衣裙的主体采用藏青水果印花图案,肩部和下摆加入白色布料,形成时尚的撞色效果。腰部采用收腰的设计造型,能够轻松打造流畅的身段。下摆是宽松的百褶裙摆,活动灵动自然,也能够体现细节美感。"} +{"content": "类型#上衣*版型#显瘦*版型#立体剪裁*图案#刺绣*衣样式#西装*衣长#常规*衣款式#钉珠*衣款式#亮片", "summary": "不同于常规西装的一板一眼,这套西装凸显出了令人惊艳的设计感。翻驳领挺括有型,加入了亮片钉珠的点缀,优雅中折射出唯美光芒。修身剪裁贴合身躯,勾勒出纤细腰姿更显精工的立体剪裁。通体刺绣考究精致,出立体花型堪称艺术,提升整体品相。"} +{"content": "类型#裙*颜色#白色*风格#复古*风格#文艺*风格#知性*风格#清新*图案#复古*裙型#直筒裙*裙长#连衣裙*裙领型#v领*裙衣门襟#系带*裙款式#流苏", "summary": "清新典雅的直筒连衣裙,经典时尚,营造出靓丽形象。清爽的白色基调,更好的凸显白皙肤色,营造文艺气质。精致优雅的提花纹理,充满浪漫知性的文雅气质。经典大气的v领样式,活力可爱,搭配个性系带设计,充满与众不同的个性魅力。点缀复古流苏,营造出飘逸灵动的丽人形象。"} +{"content": "类型#裙*风格#文艺*图案#格子*裙型#蛋糕*裙型#背带裙*裙下摆#层叠*裙长#连衣裙", 
"summary": "这款独具学院风格的连衣裙采用背带的版型设计,穿搭起来更具减龄的效果,让你充满少女的气息。格纹的BRAND图案修饰其中,衬托出满满的格调,文艺气息脱颖而出,蛋糕裙效果的层叠裙摆造型又很有立体感,看起来很有活力,穿起来更加年轻。"} +{"content": "类型#上衣*图案#文字*图案#印花*衣样式#卫衣", "summary": "这款卫衣是以当下热门的元素作为设计题材,胸前印花采用高清数码印花贴布工艺,呈现出逼真的视觉效果。左侧文字设计配以右侧的宇航员,轻松点出主题,两侧袖子特色贴标装饰,让整体视觉效果更为丰富时尚。"} +{"content": "类型#上衣*颜色#紫色*风格#性感*衣样式#衬衫*衣袖长#长袖*衣款式#露肩", "summary": "欧时力新款的长袖衬衫实在是太凸显女性的魅力光彩了。独特的长袖和性感露肩设计,走到哪里都让人忍不住望一眼,性感时髦的设计会让人爱不释手。搭配亮眼的紫色使得衬衫焕发光彩,加上精选优质柔和面料,亲肤无刺激,舒适不起球,轻松驾驭各种场合。"} +{"content": "类型#上衣*风格#简约*衣样式#针织衫*衣款式#对称", "summary": "针织衫洋气舒适又保暖,是秋冬两季应该选择的一种服饰。这款针织衫设计上非常用心,时髦又透露着典雅风,而且针线很密集,给人一种高大上的感觉,袖口,领口,相同颜色的线,看上去非常简约大气,又有一种对称的美感。"} +{"content": "类型#裙*材质#雪纺*图案#碎花*图案#线条*裙下摆#荷叶边*裙长#连衣裙*裙领型#v领*裙袖型#荷叶袖", "summary": "这条唯美浪漫的雪纺碎花连衣裙,穿着舒适更显气质。v领设计,修饰脖颈部线条,更显修长白皙;荷叶边裙摆设计,甜美浪漫更显层次感与设计感;荷叶袖设计,修饰手臂线条,更显纤细。"} +{"content": "类型#裙*裙型#背带裙*裙型#牛仔裙*裙型#铅笔裙*裙型#直筒裙*裙腰型#高腰", "summary": "这款牛仔裙,背带的设计充满青春活力,减龄效果max。裙摆则选择的是直筒铅笔裙的版式,罕见而又不突兀的搭配让人眼前一亮,更是增添了优雅的魅力。下摆开叉设计,行走起来更加自如。高腰的版式在视觉上更是有显高的效果。"} +{"content": "类型#上衣*版型#显瘦*颜色#黑色*颜色#红色*图案#线条*衣样式#雪纺衫*衣领型#小立领*衣长#短款*衣款式#木耳边", "summary": "这套裙装是经典的红黑配色,非常的百搭时尚。上衣的红色雪纺衫,短款修身更显瘦。精致的木耳花边小立领,凸显脖颈纤长优美的线条感。袖身的木耳花边,和甜美的灯笼袖口,彰显活力甜美的气质。下身的黑色半身裙,高腰a字的版型更显瘦。这套裙装不论是上班还是约会,都非常的夺目吸睛!"} +{"content": "类型#上衣*版型#显瘦*风格#淑女*图案#线条*图案#刺绣*衣样式#卫衣*衣领型#圆领*衣长#短款", "summary": "这款淑女风卫衣,采用圆领的设计,加上条纹的装饰,修饰颈部柔美线条的同时,且丰富视觉美观。衣身精美的绣花,彰显女性的几分典雅气质。短款直筒的版型,遮掩女性身材的不足,上身毫无束缚感又显瘦。此外,品质的面料,给你带来贴身舒适的穿着体验。"} +{"content": "类型#裤*颜色#绿色*图案#卡通*图案#字母*图案#文字*图案#撞色*裤型#哈伦裤*裤款式#口袋", "summary": "绿色的裤身,充满了活力阳刚之气。撞色的弹力裤头,松弛有度,穿着舒适不勒小肚子,还方便了孩子们自由的穿脱。腰后侧字母图案修饰,醒目亮眼,丰富整体的视觉感。两侧对称的假口袋造型配以卡通图案修饰,栩栩如生,构成了一幅妙趣横生的画面。后侧还设计了字母与图案,可爱充满童趣,完美的彰显了男孩子的活泼天真。再加上哈伦的版型设计,让这款裤子的时尚一级。"} +{"content": "类型#裤*材质#牛仔布*风格#潮*图案#字母*图案#文字*图案#撞色*裤长#五分裤*裤型#直筒裤", "summary": "该款牛仔裤采用五分款型设计,夏季穿着清凉舒适。简洁的直筒裤脚走线均匀细密,不易脱线和变形,结合裤腿处的撞色字母logo,成功体现出裤子的品牌魅力,时尚显潮流感。"} +{"content": "类型#裙*风格#简约*裙型#百褶*裙长#连衣裙*裙领型#圆领", "summary": "23区的这款连衣裙,经典的圆领,既能修饰脸型小巧,还能凸显出颈部的纤细,皮扣的设计,不仅能随意转换造型,还能给人一种抢眼的视觉感。百褶的款式,简约又不失时尚,还很好的丰富了整体的立体感。"} +{"content": 
"类型#裙*材质#蚕丝*颜色#红色*图案#抽象*图案#线条*图案#印花*裙腰型#高腰*裙长#连衣裙*裙袖长#七分袖*裙款式#木耳边", "summary": "连衣裙抽象的印花融入大自然的元素,清爽自然,赋予创新的艺术韵味,优雅之美。流畅的廓形,做了高腰线的处理,拉伸视觉比例,勾勒曼妙迷人的身姿。七分袖的修饰纤细的手臂线条,木耳花边的袖口柔美大方,平添一份浪漫女人味。绚丽的红色与清爽的桑蚕丝浑然天成,质地清透柔滑。踩上一双单鞋,摇曳的步伐,美得不可收拾。"} +{"content": "类型#裤*图案#动物*图案#刺绣*裤型#背带裤", "summary": "绣工精美的动物刺绣图案,形态栩栩如生,非常软萌可爱。腰间装饰扣袢,做工精细,对称整齐,于细节彰显高端品质。实用贴袋平整,裁剪利落,增添背带裤的层次感。重工车线,等距,简单的工艺透出精工细作的品质。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#羊毛*风格#ol*风格#性感*图案#线条*裙长#连衣裙*裙领型#圆领*裙款式#收腰", "summary": "美丽家这件ol风连衣裙,宽松大圆领设计,轻松彰显脖颈线条,透出性感吸睛魅力。裙面采用羊毛面料制作,呈现出的毛呢材质兼具手感与质感,上身亲肤舒适。宽松的伞摆设计,上身立显活力气质。修身收腰版型设计,上身舒适不紧绷,轻松展现身材曲线。"} +{"content": "类型#裤*风格#性感*图案#蝴蝶结*裤长#九分裤*裤款式#绑带*裤口#微喇裤", "summary": "这款的设计亮点在裤脚。利用了绑带的多变化,可以绑成各种造型。超简单的就是绑个蝴蝶结了,增添了裤子的时髦裤。而且让裤脚处略带喇叭式,更加显高挑了。九分的款式,露出脚踝既有几分小性感,也更显高挑了。"} +{"content": "类型#裙*风格#休闲*图案#撞色*裙型#大裙摆*裙腰型#松紧腰*裙长#半身裙", "summary": "一款休闲百搭的半身裙,采用富有弹力的优质面料,给宝贝带来舒适的穿着体验。撞色的松紧腰设计,不仅方便穿脱,还温柔的呵护着腰部肌肤,品牌的图案装饰,增添一丝小俏皮。宽大的裙摆设计,让宝贝穿着清爽舒适。"} +{"content": "类型#上衣*图案#字母*图案#文字*图案#印花*衣样式#卫衣*衣袖型#收口*衣款式#破洞", "summary": "肩袖处有破洞的设计,顺着破洞处的缝线别出心裁,不同于以往的卫衣,就是很好看又很显随意的感觉,立马年轻活力起来。袖口和下摆都做了收口的设计,柔软舒适穿着体验感很好,后背的字母印花打破整体的单调性~"} +{"content": "类型#上衣*颜色#白色*风格#青春*风格#职场*图案#蝴蝶结*衣样式#衬衫*衣款式#拼接", "summary": "白色衬衫是职场女性不可缺少的经典单品,如果你没有更好的设计,fendas可以为你提供多一种选择。这件衬衫在袖口的位置用甜美的蝴蝶结装饰,展现出女孩青春活泼的一面。并且用不同的材质拼接,丰富了视觉效果。"} +{"content": "类型#上衣*图案#刺绣*衣样式#衬衫*衣领型#翻领*衣款式#拼接*衣款式#口袋", "summary": "袖子的拼接设计是这款衬衫的亮点之处,轻松显不同,穿着更容易凹凸个性魅力。经典的翻领,很好衬托气质优雅大方,显颈脖修长。口袋装饰提升整体的丰富性,绣花点缀,体现细节设计,彰显与众不同。"} +{"content": "类型#裙*材质#网纱*颜色#浅蓝色*风格#复古*风格#文艺*风格#清新*风格#性感*图案#复古*图案#线条*图案#印花*裙型#大裙摆*裙长#连衣裙", "summary": "这款独具仙女气息的连衣裙采用浅蓝色作为主基调设计,穿搭起来更具清新文艺感,结合大气的印花图案修饰其中,带来更具复古典雅的韵味。网纱的半透明材质更具性感的味道,大裙摆的线条悠扬而有型,轻松增加端庄优雅的女人味。"} +{"content": "类型#裙*版型#显瘦*材质#针织*风格#性感*裙型#a字*裙款式#螺纹*裙款式#纽扣", "summary": "含蓄细腻柔软的螺纹针织,温暖有型,穿上之后成就你的性感女神。双排手工缝制纽扣,非常有特点,视觉上显瘦,小众显品味,出街不易撞衫。优雅a字版型,演绎名媛风格,既满足了基本款的百搭又兼具了时髦。"} +{"content": "类型#上衣*材质#雪纺*图案#蝴蝶结*衣样式#衬衫*衣款式#口袋*衣款式#飘带", "summary": "精选上等的雪纺材质打造的衬衫,更加呼应夏天酷热的气氛,时刻为你提供一个舒爽干净的穿着环境,轻松应对尴尬的夏季。领口处的飘带装饰突出丰富的立体层次,不论是打成蝴蝶结,还是随意着都很有腔调感。口袋剪裁打造满满细节亮点。"} 
+{"content": "类型#裙*颜色#红色*风格#青春*裙型#背带裙*裙长#连衣裙*裙衣门襟#单排扣*裙款式#口袋*裙款式#腰带*裙款式#对称*裙款式#收腰", "summary": "背带式的连衣裙,增添了青春俏皮的女性气息,上身起到了减龄的效果,展现出女性美好灵动的风采。腰部一根腰带的搭配,提升了整款设计的美感,在一定程度上起到了收腰的效果。单排扣的设计,美观又实用,同时彰显出优雅大气的女性风采。红色色调十分显白,美丽动人,凸显出精致的女人味。对称的口袋设计,发挥了实用效果。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#混纺*风格#街头*衣样式#衬衫*衣袖型#收口", "summary": "这款哈伦裤是松紧腰头,穿上的舒适感超好,适合各种身材的宝宝们。裤脚有收口的设计,让裤子显得比较立体,在视觉上打造显瘦的效果,而且也带上了几分酷酷的街头味道。裤子的材料是混纺的,比较类似衬衫的那种爽滑度,宽松舒适。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*材质#牛仔布*风格#街头*风格#青春*裤型#阔腿裤*裤款式#拼接*裤款式#纽扣*裤款式#流苏*裤腰型#高腰", "summary": "BRAND的这款牛仔裤俏皮而灵动的拼接裤脚设计,配合上独特的流苏缀边装饰处理,既带来一份不羁叛逆的街头潮感,又能诠释出你时髦玩味的个性风采。而其简洁利落的高腰剪裁,配合上唯美雅致的斑斓纽扣点缀设计,锦上添花的好效果自不必说,还能张扬出你活力满满的青春动感气息。再加上它宽松自在的阔腿裤型,藏肉显瘦的同时也能绽放出你洒脱率性的自在风姿。"} +{"content": "类型#上衣*版型#宽松*材质#棉*风格#性感*图案#条纹*衣样式#衬衫*衣领型#v领", "summary": "蓝白的条纹从远处看仿佛就是一条靓丽的风景线,设计师将其融进衬衣的设计当中,并使用宽松的v领版型,微露锁骨的同时。将小性感的气息散发出来,精致又不失时尚感。而细腻的棉质面料的加入更是增添了舒适的体感,让穿搭更加时尚。"} +{"content": "类型#裤*材质#棉*颜色#黑色*颜色#卡其色*裤型#直筒裤", "summary": "这款休闲裤选用特别舒适的棉质面料,加入氨纶带了弹性,穿着无束缚感。直筒立体版型,穿着挺括硬朗,非常利落。锥形裤的款式,让你气质倍增,使身材很明显,搭配上不拘束。有黑色和卡其色两款可选,都是裤子中的经典配色。"} +{"content": "类型#裤*材质#棉*材质#混纺*颜色#米白色*风格#清新*裤长#短裤*裤腰型#高腰", "summary": "avivaonearth以清新、淡雅的米白色为主基调打造的这款短裤,整体采用了高腰的剪裁设计配合短款的裤腿设计,带来较为显身材且轻便舒适的穿着效果。设计师为这款短裤了苎麻和棉的对半混纺效果,兼顾麻的干爽和棉的亲肤,是非常好穿的单品。"} +{"content": "类型#裙*风格#性感*图案#植物*裙长#长裙", "summary": "领口的松紧设计可以视觉上收紧脖颈,营造完美天鹅颈,优雅迷人,同时腰间的松紧设计收紧腰身,轻松露出小蛮腰。性感撩人,二者相互呼应,打造出完美比例。优雅长裙是是女神们的最爱,仙气十足。大片花卉让人仿若置身花海,远远望去像是花中仙子,沉迷其中。"} +{"content": "类型#裙*材质#蕾丝*风格#知性*风格#性感*图案#蕾丝*裙型#小黑裙*裙领型#圆领*裙款式#拼接*裙款式#勾花镂空", "summary": "好像小黑裙总会给人一种很神秘很妩媚的感觉,裙身采用秀气的圆领设计,贴合颈部,凸显知性优雅,展现女性的天鹅颈。以及领口设计了小镂空的裁剪,微露肌肤,平添了不少性感韵味。肩部采用蕾丝的拼接,显得甜美带洋气的气息。"} +{"content": "类型#裤*颜色#黑色*裤口#小脚", "summary": "个性时尚的休闲裤采用了纯黑色的色调设计,纯黑色的色调,打造时尚摩登的风格凸显的随性自然的特点。束脚裤的版型设计,展示了最具个性时尚的风格魅力,精湛的可口可乐,凸现时尚,摩登。"} +{"content": "类型#裤*颜色#黑色*裤口#小脚", "summary": "经典的黑色小脚裤,多少条都不嫌多,也不用担心搭配出错的问题。厚度适中的面料,不管是稍有寒意的春季还是酷热的夏季,它都是那么的柔软透气。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*材质#牛仔布*风格#简约*裤长#长裤*裤款式#破洞", "summary": 
"非常春日穿搭的一款基础风格的牛仔长裤。简约的版型,对腿型没有过分的限制,更加宽松舒适。磨毛的裤脚设计,能够凸显出脚踝部分,更加显瘦哦。个性的破洞造型,展现出青春活力之感。"} +{"content": "类型#裤*材质#羊毛*颜色#灰色*风格#知性*裤型#阔腿裤*裤款式#流苏", "summary": "浪漫优雅的麻灰色系,是让人一眼就会爱上的颜色,隶属于不暖的色系,在视觉上给人舒服自然的感觉,着实百搭。裤型采用经典的阔腿裤型,搭配具有丰富质感的羊毛材质,整体风格利落有型,散发着知性的女神范。裤身周围加以精致的钩工流苏边点缀,颇具层次感。"} +{"content": "类型#裙*材质#蚕丝*材质#蕾丝*风格#复古*风格#性感*图案#复古*图案#线条*图案#印花*图案#蕾丝*裙下摆#开叉*裙领型#v领*裙衣门襟#暗扣*裙款式#拼接*裙款式#勾花镂空", "summary": "复古时尚的印花元素让你仿佛闯入了乱花迷人眼的花海,飘逸柔软的真丝材质带来舒适的穿着体验,开衩的裙摆设计,行走间流露出温婉迷人的风情。时尚的v领以暗扣闭合,透露出柔美的颈部线条,衬托出娇小的脸型,腰间唯美的镂空蕾丝拼接,隐约透肉的质感展现出性感曼妙的腰线,是衣身的一大亮点。"} +{"content": "类型#裙*材质#雪纺*裙长#连衣裙*裙领型#v领*裙袖型#喇叭袖*裙衣门襟#系带", "summary": "女人和雪纺仿佛天生就有一种不解之缘。一见钟情,倾心。这款雪纺连衣裙设计了优雅的系带v领造型,精致之余显格外与众不同,且予以肩颈万种风情,还让胸型更饱满。七分喇叭袖的造型,举手间灵动柔美,由内而外散发着小女人味。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#棉*裙长#连衣裙*裙领型#娃娃领*裙款式#拼接", "summary": "这款甄选进口棉并经过细腻的剪裁和拼接工艺,实现了超宽松的廓形,没有了修身版型的这款连衣裙依然可以让我们穿出瘦瘦的身材,同时还能在视觉上将腹部赘肉隐藏住。此外该款连衣裙采用娃娃领造型,它能给我们带来减龄效果。"} +{"content": "类型#裤*材质#牛仔布*风格#街头*风格#休闲*裤长#九分裤*裤型#直筒裤*裤腰型#中腰", "summary": "九分裤的版型加上中腰的裤型设计,给人一种别样舒适的休闲感。直筒版型修饰腿型,上身之后秒变笔直大长腿。中腰的版式设计,给裤子的穿有丰富的选择性,百搭而又时尚。小亮点在于裤边的磨边装饰,带有个性气息,街头风格十足。牛仔面料硬挺时尚,穿着上身透气性好。"} +{"content": "类型#裙*版型#显瘦*风格#中国风*图案#植物*裙型#花苞裙*裙型#包臀裙*裙款式#盘扣", "summary": "中国风盛行的时代,衣橱里怎能少得了优雅的改良式旗袍裙呢?缤纷浪漫的花卉,装点在裙面上瞬间就吸引了别人的目光,浑身上下都充斥着无穷东方韵味;斜襟如意花苞盘扣,将女性的温柔感全带出来了,配合修身包臀的廓形,女性该凸该凹的好身材都能完美呈现,俨然一副古雅端庄的小家碧玉的模样。"} +{"content": "类型#裙*材质#蕾丝*图案#条纹*图案#蕾丝*裙款式#勾花镂空*裙款式#收腰", "summary": "这件裙子的颜色本身就够惹眼了,所以在鞋子、包包和其他配饰上不用太费心,简单些就好。竖条纹的设计让身材更加修长,肩部和裙摆的镂空蕾丝,给人优雅朦胧的感觉。收腰的设计不会凸显小腹,还能显出傲人的身材。"} +{"content": "类型#裤*版型#显瘦*风格#街头*风格#休闲*裤长#短裤*裤款式#口袋*裤款式#不规则", "summary": "虽然夏天还没到,但是短裤好囤起来啦,毕竟这么好看的!这款带点民族风情,还蛮有特色的,在一众短裤里很容易出彩版型是有点a字的,裤腿微宽,不会有束缚感,也能显瘦显腿腰头口袋都做了明缉线装饰,视觉上很有立体感裤口不规则磨破撕边效果随性不羁,休闲街头感很浓"} +{"content": "类型#裤*版型#显瘦*风格#街头*风格#休闲*裤长#短裤*裤款式#口袋*裤款式#不规则", "summary": "虽然夏天还没到。但是短裤好囤起来啦,毕竟这么好看的,这款带点民族风情,还蛮有特色的。在一众短裤里很容易出彩,版型是有点a字的,裤腿微宽,不会有束缚感。也能显瘦显腿细,门襟腰头口袋都做了明缉线装饰,视觉上很有立体感。裤口不规则磨破撕边效果随性不羁,休闲街头感很浓。"} +{"content": "类型#裤*版型#显瘦*风格#街头*风格#休闲*裤长#短裤*裤款式#口袋*裤款式#不规则", "summary": 
"虽然夏天还没到,但是短裤好囤起来啦,毕竟这么好看的!这款带点民族风情,还蛮有特色的,在一众短裤里很容易出彩版型是有点a字的,裤腿微宽,不会有束缚感,也能显瘦显腿腰头口袋都做了明缉线装饰,视觉上很有立体感裤口不规则磨破撕边效果随性不羁,休闲街头感很浓!"} +{"content": "类型#上衣*颜色#纯色*风格#清新*图案#纯色*衣样式#衬衫*衣领型#一字领", "summary": "“一抹,一曲”的风格在这款衬衣上悄然体现,一字肩简洁大方,轻松勾勒肩部的迷人。花苞袖的设计扩大了优雅的气息,举手投足都是满满的温柔感。清新感十足的纯色打造,给人一种极为雅致的视觉感,好似“犹抱琵琶半遮面”。"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*颜色#红色*裙长#连衣裙*裙衣门襟#双排扣*裙款式#绑带", "summary": "炎炎的夏日我们想要赶走阳光带来的,选择这件连衣裙是个不错选择。大红色的衣身配色活力十足,让你轻松地回到年轻时代。同时这种色彩也可以,让你的肌肤看起来白嫩具有光泽。腰间应用的系扣绑带,具有很好的显瘦显高效果。精致无比的黑色双排扣,体现出大牌的做工。"} +{"content": "类型#上衣*图案#线条*图案#撞色*衣样式#针织衫*衣样式#开衫*衣袖长#长袖*衣袖型#落肩袖*衣门襟#单排扣*衣款式#拼接", "summary": "众所周知,春季是针织衫的专属季节,采用冰丝面料的开衫,不会到来过热的穿着触感,能够轻松应付早晚的温差。落肩长袖的拼接,美化了肩臂的线条感,单排扣门襟的装饰,可敞开or合并来穿,各有一番味道,撞色锁边元素,去除单一,营造出立体的层次效果。"} +{"content": "类型#裙*裙型#牛仔裙*裙型#包臀裙*裙下摆#开叉", "summary": "此款牛仔裙大亮点,第一就是它包臀设计,能完美的展现女性的翘臀,在细节处勾勒出女性婀娜曼妙的体态。二是它的开叉设计,行走时能看到大腿曲线若隐若现,增添了神秘感。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#纤维*风格#淑女*风格#复古*风格#文艺*图案#复古*图案#印花*裙衣长#中长款*裙衣门襟#系带*裙款式#不规则", "summary": "时尚印花图案,优雅又大气,聚酯纤维面料,手感柔软舒适,经典复古又永不过时,时而甜美少女时而文艺淑女;素致淡雅的色彩搭配,中长款的版型,修身与宽松的合理搭配,温柔而又显气质;裙摆的不规则裁剪,使得与众不同有个性,腰间系带又恰当的显瘦。"} +{"content": "类型#上衣*风格#街头*风格#青春*图案#条纹*衣样式#衬衫*衣款式#口袋", "summary": ",慢慢的气味回暖。这时候穿件帅气的衬衫,在合适不过啦。想在的街头脱颖而出,经典的条纹衬衫是个不错的选择哦,经典是在时代之后还能流行于大街小巷中,简单明了的规整条纹,简单的口袋做了很好的修饰版型的作用,喜欢的千万别错过哈!"} +{"content": "类型#裤*版型#宽松*版型#显瘦*风格#文艺*图案#刺绣*裤型#阔腿裤*裤腰型#松紧腰", "summary": "松紧一字领搭配上荷叶边的设计,增添了甜美俏皮的灵动感,露出锁骨和肩头,更感情调。刺绣花朵设计带来文艺浪漫的气息,宽松的阔腿裤遮肉显瘦,很有垂顺感。"} +{"content": "类型#裤*版型#显瘦*颜色#纯色*风格#性感*图案#纯色*裤长#长裤*裤腰型#高腰", "summary": "为避免纯色设计太过单调,e在这款长裤侧边加入拼纱丰富层次给人视觉上的惊艳感,小透性感且颇为个性!而高腰紧身版型修身效果佳,勾勒长腿并可遮肚收腰、提臀显瘦,一展苗条身姿轻松穿出气质范,不仅是瑜伽装也是潮感满满的穿搭单品。"} +{"content": "类型#裙*风格#民族风*风格#性感*图案#印花*裙腰型#高腰*裙款式#吊带", "summary": "灵动裙摆设计,展现出优美的律动感,同时也带来了民族风的味道,带有不羁风情。面料舒适柔软,穿着无拘束。高腰设计突显美丽曲线,在视觉上提高了腰线。吊带设计,十分的性感,加倍吸睛。上有印花图案点缀,尤为精致美观。"} +{"content": "类型#裙*版型#显瘦*材质#雪纺*风格#青春*图案#碎花*裙长#连衣裙*裙衣门襟#系带*裙款式#木耳边", "summary": "这一款雪纺连衣裙精致的木耳边领,精致俏皮特别出彩,利落的裁剪,塑造出迷人身段,给人恰到好处的视觉效果,显瘦的同时彰显高挑身姿,加上领口系带,塑造造型特别灵动。碎花装饰,青春减龄丰富视觉。"} +{"content": "类型#裤*颜色#黑色*裤款式#口袋*裤口#毛边", "summary": 
"裤装以简单的黑色打底,凸显出服装的百搭属性,以军事风为主体设计,更能呼应主题,展现出裤装散发出来的男人味。两侧的立体口袋装饰,不仅美观还很实用,让裤装的视觉装饰更加饱满。另外裤脚处的毛边设计也是充满了时尚的小心机。"} +{"content": "类型#上衣*版型#显瘦*图案#条纹*图案#蝴蝶结*衣样式#衬衫*衣领型#v领*衣袖型#堆堆袖*衣款式#腰带*衣款式#抽褶", "summary": "极具学院风的一款衬衫,前后v领的设计,既能勾勒出迷人的天鹅颈,又能衬托出娇俏的小脸。时髦的褶皱堆堆袖,打破基础款的单一更具层次感。同色系的蝴蝶结腰带,不仅能够修饰身形,还能诠释出个性腔调。大热的条纹元素,结合开叉的衣摆,视觉上更显瘦显气质。"} +{"content": "类型#上衣*颜色#黑色*衣样式#外套*衣领型#翻领*衣款式#口袋", "summary": "这是一款经典的黑色西服外套,版型看上去没有特别的设计,但就是因为经典,才更受欢迎。帅气的翻领设计让整个人显得更有气质,还带有两个翻盖的口袋,在起到装饰作用的同时也十分便利。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*颜色#黑色*裙腰型#高腰*裙袖型#喇叭袖", "summary": "这款孕妇裙采用黑色的主色,黑色有视觉显瘦的效果。宽松的领口将脖颈修饰的更加修长。喇叭袖的设计可以遮挡手臂的问题。高腰的版型是为了不让凸起的小肚子有紧绷难受的感觉。"} +{"content": "类型#裙*版型#显瘦*版型#h*图案#波点*图案#印花*裙下摆#花边*裙长#连衣裙*裙领型#圆领", "summary": "这款由itmichaa推出的连衣裙,修身h版型设计,穿搭上身有着显瘦的效果,适合各种身材穿搭。花边圆领的设计,个性又时髦,又能巧妙的修饰出小巧迷人的脸型。衣身通体以波点印花图案点缀,时尚而新颖,也为衣身带来了丰富的视觉看点。"} +{"content": "类型#上衣*版型#宽松*衣样式#风衣*衣长#短款*衣袖型#落肩袖*衣门襟#系带*衣款式#收腰", "summary": "这件风衣的最大好处在于短款且收腰的设计,非常适合小个子女生。短款的版型能够拉高腰身显腿长,宽松的版型非常遮肉,腰身加上收腰系带的小细节使整体不会臃肿,反而会使腰间看起来更加纤细。落肩袖很适合肩宽的妹子,可以很好的在视觉上削弱肩宽。"} +{"content": "类型#裙*颜色#红色*裙下摆#开叉*裙下摆#垂坠*裙衣门襟#系带", "summary": "独特的材质选择,让裙身具有很好的垂坠感,讲女性优美的身体曲线展现出来,采用了系带与开叉的设计,不仅表现出现代女性的干脆利落,同时还流露出女性特有的妩媚味道。纯正而大气的红色,十分的吸睛,展现浓浓女王范儿~"} +{"content": "类型#裙*版型#显瘦*颜色#红色*风格#简约*图案#印花*裙腰型#高腰*裙长#连衣裙*裙款式#拼接*裙款式#飘带", "summary": "简约而彰显个性的一款连衣裙,纯白色系面料,一侧的红色拼接设计,宛如一条浪漫的飘带坠入眼帘。个性的扇子印花图案,更点缀出满满的诗意。高腰显瘦版型,轻松穿出高挑曼妙的身段。"} +{"content": "类型#裤*风格#简约*图案#线条*裤款式#口袋*裤口#小脚", "summary": "这是一款简约百搭的束脚裤,穿搭实用性高,让你赚足回头率。两边的斜插口袋,可放随身物品,解放你的双手,也是街边凹造型的好帮手;束脚裤型,拉长腿部线条让你变身长腿欧巴。"} +{"content": "类型#裙*图案#线条*图案#印花*裙长#连衣裙", "summary": "见惯了摩登的都市女郎,不妨来点独具匠心的民族风情。此款连衣裙,采用传统交领改良设计,颈部的优美线条瞬间凸显出来,女人味儿十足。斜襟与袖口的仿珠扣装饰,非常有古典风味。醒目的印花加上明亮的颜色,色泽的碰撞形成了一大冲击感,低调奢华。"} +{"content": "类型#上衣*风格#休闲*图案#刺绣*衣样式#衬衫*衣袖型#落肩袖", "summary": "当刺绣遇上了衬衫,让你的穿着美到;时尚刺绣落肩袖衬衫,休闲的版型设计,优雅有气质,配上非常有特色的花朵刺绣图案,会给你的穿搭带来不一样的时尚范儿~"} +{"content": "类型#裙*风格#复古*风格#文艺*图案#格子*图案#复古*裙型#a字*裙下摆#垂坠*裙腰型#高腰*裙衣门襟#单排扣", "summary": 
"手感光滑柔软的面料,拥有极佳的垂坠感。清爽简洁的a字廓形,剪裁利落,上身优雅大方,洋溢着满满的文艺气息。贴心的高腰设计,轻松勾勒腰线,凸显纤细腰肢。腰部的处理,巧妙的塑造了裙身的层次变化,增强裙身立体廓形感。前门襟处采用单排扣固定,精致的格纹圆扣,带来浓浓的复古学院风,有趣又独特,颇为时尚减龄。"} +{"content": "类型#上衣*风格#知性*图案#线条*衣样式#衬衫*衣领型#v领*衣款式#绑带", "summary": "采用柔软的粘纤面料制作而成,带来亲肤透气的舒适感。个性的衬衫领结合v领的造型设计,别致的绑带缠绕着领口,修饰了颈部线条,同时凸显出干练知性的气质。精美的双层饰边装饰着前襟,尽显甜美浪漫气息。飘逸的百褶裙摆,摇曳出优雅迷人的身姿。"} +{"content": "类型#上衣*材质#蕾丝*风格#性感*图案#蕾丝*衣样式#外套*衣样式#西装*衣款式#吊带", "summary": "吊带同样配置了内衬,所以如果想单独穿的话,一点问题都没有,得体还带那么一些小性感。胸襟的地方,做了睫毛边的蕾丝来点缀,这个细节满分,如果搭配西装外套,或者其它的单品,在胸前恰当露出来,是超巧妙又的性感穿法。"} +{"content": "类型#裙*材质#丝绒*颜色#绿色*图案#波点*图案#印花*裙下摆#荷叶边*裙下摆#花边*裙长#长裙*裙款式#拼接", "summary": "这件连衣长裙多处都运用了荷叶边的拼接设计,肩头处添加花边点缀,更加凸显女性温柔典雅气质,而腰部的荷叶边正好能够勾勒修饰腰部曲线,裙摆处则是让裙装更具灵动飘逸感。波点印花元素的融入,又让裙子有了另一番俏皮可爱的气息。采用高级靓丽的绿色丝绒面料,看着摸着柔软而有质感。"} +{"content": "类型#裙*风格#简约*图案#蝴蝶结*裙长#连衣裙*裙衣门襟#系带", "summary": "素雅而又简约的连衣裙,在腰间缝制上了两条系带,起到了画龙点睛的作用。两根带子既可以系在一起,塑造成一个蝴蝶结的造型,又可以自由的放在两边,打造出慵懒随性的风格。这种独特的设计不仅能够起到装饰性的美观作用,同时还能凸显出穿着者腰间曼妙曲线,令人回味无穷。"} +{"content": "类型#裙*颜色#白色*风格#清新*裙袖长#无袖*裙领型#翻领*裙款式#拼接", "summary": "白色的裙子第一眼就容易让人移不开眼,纯洁乖巧的气息可以想象,上身绝对是甜美清新的完诠释。还有小巧的翻领设计,特别显可爱,还能修饰出优美的颈部曲线。加上无袖,露出纤细白皙的手臂,是不是很诱人呢?最重要还有肩部和腰部的拼接处理,十分具有特色。"} +{"content": "类型#裤*风格#运动*风格#休闲*图案#线条*裤腰型#松紧腰*裤口#小脚", "summary": "这款裤子,结合了休闲与运动两种风格,彰显了不一般的帅气感。腰部的松紧绳设计,不仅不挑人,也显得十分时尚,束脚的设计,避免了整体过于臃肿的尴尬场面,而完美的修饰了腿部线条,衬托得人更加修长。"} +{"content": "类型#裙*风格#青春*风格#性感*裙型#百褶*裙型#包臀裙*裙下摆#花边*裙款式#抽褶", "summary": "可爱与性感兼并的双面性时尚。这一款包臀裙,阳光黄的颜色非常特别,不止显白,而且能给你添加不少温柔的气质。膝盖以上的长度能显腿长而包臀的设计,能完美展现你的身材比例,极具女人味。让人心动的是裙身的花边,充满个性,适合轻熟女。经典的褶皱元素,化作甜美含蓄的百褶,显得青春又优雅,让人不由得回忆起那个校服裙摆飞扬的年代。"} +{"content": "类型#裙*图案#印花*裙下摆#压褶*裙腰型#高腰*裙腰型#中腰*裙腰型#松紧腰*裙长#半身裙", "summary": "这款印花半裙是经过高温压褶的,不易变形,印花有种凹凸的层次美感,内敛含蓄。是上身会微微撑开的a型,整洁有序的褶裥营造出挺括的廓形,并伴随着你的步履摇曳舞动,为设计注入了几分灵动气息。经过高温定型的褶裥,富有层次感,弹力松紧腰,好调节高度,高腰、中腰不受限制,可以根据自己的喜好来。"} +{"content": "类型#裙*版型#宽松*颜色#黑色*风格#复古*风格#性感*图案#复古*图案#波点*裙下摆#荷叶边*裙腰型#高腰*裙衣门襟#拉链*裙款式#拼接*裙款式#拉链", "summary": "复古的波点一直都是不败的经典,想来大家都有目共睹。柔美流畅的荷叶边与经典复古的波点相结合,性感有魅力,黑色的高腰裙设计,凸显出腰身的优美曲线,腰间无松紧,平整的腰部显得质感很好。而且在侧面拼接了隐形拉链用于日常穿脱,方便又贴心整体的版型偏宽松,行走之中带来足够的自由感。"} +{"content": 
"类型#上衣*图案#印花*衣样式#衬衫*衣款式#不规则", "summary": "精致干练的翻边衬衫领设计,简洁大方,勾勒出干练气质。衣身不规则涂鸦印花,错落有致,凸显时尚活力感。弧形下摆裁剪,优美的曲线营造出优雅气质,不规则的摆动,灵动飘逸又不失轻松随性的风范。"} +{"content": "类型#上衣*图案#印花*衣样式#衬衫*衣款式#不规则", "summary": "很养眼的一款衬衫设计,丰富色彩的大胆碰撞,轻松带来一场精彩的视觉盛宴。不规则印花图案装饰,带来时尚混搭魅力,轻盈面料剪裁,舒适透气又不怕透。"} +{"content": "类型#裤*颜色#灰色*风格#高贵*图案#线条*裤长#短裤*裤腰型#高腰", "summary": "高贵的灰色,把女人独有的优雅大气气质发挥到了极致,带来无与伦比的时尚魅力。圆领精致美丽,展现颈部线条,让你更显端庄优雅,娇俏的荷叶边袖点缀珍珠装饰,增添温婉柔美气质,下装高腰短裤a字版型设计,让你秒变大长腿。"} +{"content": "类型#裙*版型#显瘦*风格#性感*裙下摆#荷叶边*裙下摆#压褶*裙长#连衣裙*裙领型#一字领*裙款式#钉珠", "summary": "这一款连衣裙精致一字领的设计,韵味迷人性感出彩,精挑细选的布料软糯细腻,贴身穿着很舒适,体验度也是不一般。钉珠荷叶边的装饰,气质优雅随风摇曳。加上重工压褶,包容显瘦做工精致。"} +{"content": "类型#裙*版型#宽松*颜色#纯色*风格#性感*图案#纯色*裙型#衬衫裙*裙款式#露肩*裙款式#不规则", "summary": "这是一件充满慵懒与个性之感的衬衫裙,整体采用宽松版型加上不规则剪裁,随性中又带着慵懒气息,尤其是露肩设计展露骨干肩部,轻松展现出女性性感之味。纯色色调更是与夏季搭配适宜,简简单单但是充满纯粹感。同时设计师又采用了以上含材质,不仅舒适亲肤,而且十分吸汗,即便是炎炎夏日也不会感到粘腻。"} +{"content": "类型#裙*材质#网纱*颜色#纯色*风格#性感*图案#纯色*图案#线条*裙下摆#荷叶边*裙长#连衣裙*裙领型#一字领*裙款式#拼接", "summary": "这款纯色的连衣裙中长款式穿着更显飘逸性,采用了性感的一字领设计,展示出白皙的颈部肌肤,带来一丝亮点,女人味十足。采用了拼接荷叶边的设计,富有层次感,上身不失有型。下摆处的拼接网纱设计,若隐若现地展示出优美的腿部线条。"} +{"content": "类型#裤*图案#条纹*裤款式#口袋*裤款式#纽扣*裤腰型#松紧腰", "summary": "布满衣身的竖型条纹,显出女性甜美可爱的形象。松紧的裤头设计,出行穿脱更加的便捷。对称的贴布口袋设计,方便存放随身的小物品,兼具美观性与实用性。精致的单排纽扣进行开合,出行穿脱更加的简单,提升出行的便捷性。"} +{"content": "类型#裙*版型#显瘦*材质#牛仔布*颜色#纯色*颜色#浅蓝色*风格#简约*风格#休闲*风格#潮*图案#纯色*图案#线条*裙型#牛仔裙*裙型#直筒裙*裙下摆#毛边*裙腰型#高腰*裙款式#纽扣*裙款式#不规则", "summary": "经久不衰的牛仔元素,由一抹浅蓝色渲染,简约的纯色,休闲大方。修身的直筒版型高腰提臀的设计拉长双腿的比例,显高显瘦。结合了纽扣门襟的设计,方便穿脱。简单流畅的线条,巧妙的修饰腿型。搭配运动鞋休闲鞋都能轻易焕发青春活力气息。同时此款裤身采用磨破做旧处理,时尚前卫,尽显潮流风范。裤脚处也运用了不规则毛边的装饰,更显慵懒随性。裤腿的缝补拼布设计别出心裁,设计感十足。"} +{"content": "类型#裙*材质#蚕丝*图案#印花*裙下摆#压褶*裙袖长#七分袖*裙领型#圆领", "summary": "轻盈飘逸的真丝材质,手感丝滑,上身仿若无物,带给你有如婴儿肌肤般的细腻触感。简洁的圆领搭配同样简洁的七分袖,进一步提升干练利落的气质。前幅的压褶设计很是别致,于细节中彰显前卫的设计感。精致的印花点缀裙身,带来春风拂面般的清凉感。"} +{"content": "类型#裤*版型#宽松*材质#亚麻*颜色#纯色*风格#清新*图案#纯色", "summary": "裤裤的设计走的极简路线,或明亮甜美或沉稳优雅的纯色,都衬托出宝贝乖巧干净的气质,同时也百搭夏季清凉的上衣。a字的宽松版型,很好地修饰宝贝的腿型,结合亲肤透气的亚麻面料,穿着飘逸,走路带风,给人清新、格调十足的感觉。"} +{"content": "类型#裙*版型#宽松*风格#复古*风格#简约*图案#复古*图案#刺绣*裙长#连衣裙*裙袖长#七分袖*裙领型#圆领", "summary": 
"简约而唯美的一款连衣裙。纯白色系搭配起来更加游刃有余。精美的刺绣工艺,呈现出复古而别致的艺术效果。圆领七分袖设计,让您的仪态更显优雅从容。长款宽松版型,轻松穿出高挑曼妙的身段。"} +{"content": "类型#上衣*版型#宽松*图案#线条*图案#印花*衣样式#衬衫*衣领型#圆领*衣款式#不对称", "summary": "萌趣印花点缀整体衣身,元气满满的衬衫让你在春天活力四射。气质的小半圆领设计,巧妙的勾勒你的脖颈线条,在视觉上增添高挑出众的气质。略微宽松的款式设计,巧妙的遮住你的肉肉,形成修长的线条美感,营造一身高级的慵懒感。不对称下摆的设计,增添了整体造型的层次感,巧妙的展现你独特的个性与魅力。"} +{"content": "类型#上衣*版型#宽松*材质#棉*风格#青春*风格#清新*图案#条纹*衣样式#卫衣*衣长#常规", "summary": "这款卫衣打破了常规的单调款型,宽松的假两件版型,凸显出了层次感,穿着随意不受约束。清新的条纹图案修饰,洋溢着满满的青春气息,选用优质的棉质材质制成,柔软亲肤,给你带来舒适的穿着体验。"} +{"content": "类型#裙*版型#宽松*裙长#连衣裙*裙款式#抽绳*裙款式#连帽", "summary": "一款非常时髦的连衣裙,采用网布连帽设计,很适合炎热的夏季。抽绳则用带有淡淡光泽感的铜氨丝打造,做工精细又不显夸张。将面料进行打孔艺术排序,呈现出极致的质朴形象。宽松的a型廓形,很好的包容身材,给予一定的活动空间,让人不自觉充满着活力。"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*图案#线条*裙腰型#高腰*裙袖长#无袖*裙领型#高领*裙款式#钉珠", "summary": "这款礼服裙手感厚实优质感,穿着上身,笔挺有型,简洁干练。以深邃的墨黑色作为基调,搭配小高领的无袖设计,仿佛拥有黑天鹅般的优雅气质。肩部的剪裁配合颈部线条特别修饰上半身,高腰处增添钉珠装饰,高腰的修饰能力,拉长腿部线条感。精致小巧的视觉上又非常显瘦。"} +{"content": "类型#裤*风格#潮*图案#线条*裤长#短裤", "summary": "此款短裤的另外一个设计亮点在于它采用简洁的涂鸦线条,勾画出个性活力的天使之翼图案装饰,提升层次和视觉效果。展现现代潮流风。"} +{"content": "类型#上衣*版型#宽松*风格#日系*风格#简约*衣样式#卫衣", "summary": "做旧工艺的使用,赋予了这款日系卫衣更多几分的韵味感,使得简约的它,拥有更为丰富的设计层次。它的版型带着几分宽松,却不会让人觉得肥大,上身之后可以更好的衬托出你随性、潇洒的气质,为你的男性魅力加分不少。"} +{"content": "类型#上衣*材质#棉*风格#简约*衣样式#衬衫*衣领型#翻领*衣款式#拼接", "summary": "法式的简约版型,衬衫的袖口开叉处做了拼接设计,让衬衫整体看上去更有细节感,整件衬衫最吸睛的就是领口,翻领也是做了全包边,突出了衬衫的时髦和高级感,面料选用100%高支棉,是最抗皱不透的,有一定的挺括度,不易皱,不透光。"} +{"content": "类型#裙*版型#宽松*颜色#黑色*风格#宫廷*裙衣长#常规*裙款式#木耳边", "summary": ",几乎是两个常规的宽度,显得手臂纤细修长。木耳花边领+袖,与宽松的衣身组合,真的有种欧式宫廷内衫的既视感,精致中透露出慵懒气质。还有黑色点点提花,规律点缀,多一份少女感。"} +{"content": "类型#上衣*版型#立体剪裁*风格#简约*衣样式#衬衫*衣领型#翻领", "summary": "简约衬衫,经典衬衫版型,遵循布料肌理。立体剪裁,以翻领明门襟的经典造型、配合曲摆的现代人性化裁减,相得益彰,舒适的面料搭配精致缝纫线使成衣领型自然舒展、缝线部位平服工整、牢固耐磨,单穿或者内搭都非常好看。"} +{"content": "类型#裙*颜色#深蓝色*风格#复古*风格#高贵*图案#复古*图案#印花*裙长#连衣裙*裙款式#不规则*裙款式#收腰", "summary": "带有复古BRAND风的一款连衣裙,以深蓝色的基调质地,营造出高贵、优雅的气质。结合收腰的版型剪裁,让腰部曲线更显立体,增加女性魅惑。不规则的裙摆设计有弧度感,可增加视觉层次感。精美的印花质地于裙身,更显优雅。"} +{"content": "类型#上衣*风格#淑女*风格#复古*风格#宫廷*风格#高贵*图案#蝴蝶结*图案#复古*衣样式#衬衫*衣领型#立领*衣袖型#喇叭袖", "summary": 
"带有浓郁欧式古典宫廷风的气息,这款衬衫造型优雅又高贵。立领的立领再加上蝴蝶结装饰,无比的淑女。胸前有花边的造型,更显娇美可人。复古的喇叭袖处理,典雅的气质尽显。搭配一条小裙子,简直可以直接去拍少女了。"} +{"content": "类型#裙*版型#显瘦*颜色#粉色*风格#性感*图案#蝴蝶结*裙长#连衣裙*裙款式#绑带*裙款式#吊带*裙款式#露肩", "summary": "柔和的粉色连衣裙,轻松凸显少女气息,又显肤色白皙。吊带露肩,凸显迷人小性感,结合绑带蝴蝶结,提升甜美可爱感,波浪式的伞型裙摆。穿着显瘦又具时髦感。"} +{"content": "类型#上衣*衣样式#衬衫*衣领型#翻领*衣袖长#短袖*衣门襟#单排扣*衣款式#口袋", "summary": "经典的衬衫翻领利落有型,展现女性柔美脖颈同时,凸显出端庄大气的气质。直筒版型配合短袖设计,优雅利落有着很好的包容性,遮掩不完美的身形,从容间透着随性的慵懒气息。从领口一直延伸至裙摆的单排扣,落落大方提升衬衫裙的时尚度,带着些许复古风韵味。两侧弧形斜插口袋,方便实用靠上的位置让插袋姿势更具气场。"} +{"content": "类型#上衣*图案#线条*衣样式#卫衣*衣款式#连帽", "summary": "卫衣在简练版型的基础上结合了连帽的造型设计,不仅具有一定的保暖效果,同时对颈部也起到一定的修饰效果,展现出细线的脖子线条。在帽檐的边沿利用包边与缝合的线条,体现出细节的质感与柔软的触感,巧妙的避开了僵硬的触摸感,细心呵护着脖子的肌肤。"} +{"content": "类型#裙*版型#显瘦*裙型#牛仔裙*裙腰型#高腰*裙款式#拼接*裙款式#腰带*裙款式#不规则*裙款式#收腰", "summary": "高腰拼接牛仔裙摆,精致的方扣腰带收腰设计,圈出纤细腰身,气质显瘦;不规则的下摆造型,长短错落,很有层次感,更加凸显了纤细的双腿,视觉上妥妥显瘦。"} +{"content": "类型#上衣*材质#牛仔布*颜色#白色*颜色#黑色*颜色#黑白*风格#街头*风格#休闲*图案#印花*衣样式#外套*衣款式#绑带", "summary": "牛仔外套是街头常见的存在,休闲街头似乎是它与生俱来的魅力。而白色牛仔的出现,注定是街头抢镜的,那渲染着的黑色印花,经典的黑白碰撞,时尚火花,同时将休闲的外套平添了一层优雅的韵味。交叉绑带的设计,出现在身前,设计感的视角,给人焕然一新的感受,独特的它,给你避免撞衫的!"} +{"content": "类型#裙*风格#文艺*风格#性感*图案#印花*裙下摆#花边*裙长#连衣裙*裙袖型#荷叶袖*裙衣门襟#系带*裙款式#勾花镂空", "summary": "印花连衣裙真的是好穿且实用又时髦。衬上文艺格调的花边小立领造型,简洁利落,打造随性优雅的气质。以及领口采用镂空与系带设计,小小透出的肌肤,平添了不少性感韵味。唯美的双侧荷叶袖,特别显气质,而且显手臂纤细。"} +{"content": "类型#裙*颜色#黑色*风格#潮*风格#性感*裙长#连衣裙*裙袖长#长袖*裙款式#勾花镂空*裙款式#飘带", "summary": "这款太平鸟黑色长袖两件套连衣裙,透视的两件套设计,满足叠穿的潮流,更加性感魅惑。同时后背镂空设计,微露美背,尽显迷人魅力。搭配飘带设计,增添细节感,彰显时髦。"} +{"content": "类型#上衣*版型#显瘦*颜色#黑色*风格#青春*图案#线条*衣样式#衬衫*衣领型#v领*衣款式#绑带", "summary": "好喜欢这款衬衫纯白的颜色,轻松穿出优雅妩媚的感觉。袖口的翻边样式,可以说是相当惹人喜爱。黑色腰封绑带,时髦流行设计,纤细腰身线条,v领袖子翻折,下摆u型弧度,高挑显瘦女人!"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#棉*风格#知性*图案#印花*衣样式#衬衫*衣袖长#九分袖", "summary": "显瘦十足的v字延伸衬衫领设计,备显知性温婉的同时将少女气息演绎到了极致。合身的版型设计修身且不紧绷,宽松却不松垮。趣味感十足的个性印花设计,让整体层次感丰富不显单调。纯棉印花的面料设计,亲肤舒适透气效果也是极佳。干练利落的九分袖设计,更加清爽。"} +{"content": "类型#上衣*风格#街头*风格#嘻哈*图案#字母*图案#文字*衣样式#冲锋衣*衣样式#风衣*衣款式#拼接*衣款式#腰带*衣款式#连帽", "summary": "从经典的冲锋衣风格为灵感来源,搭配近来大火的街头嘻哈风范,营造出叛逆而显潮感的风衣版型。胸口搭配拼接图片与字母logo,彰显个性品味之余,更添层次美感。连帽版型配合斜开腰带,舒适保暖贴合身躯。配合贴布袖子设计,于细节中把握时尚腔调。"} +{"content": 
"类型#裙*材质#网纱*风格#性感*图案#格子*图案#刺绣*图案#撞色*裙领型#一字领*裙款式#拼接*裙款式#腰带*裙款式#收腰", "summary": "肩部网纱拼接,微透雪肤,婉约朦胧,刚刚好的性感情调。配上精致的刺绣图案,上身洋气又不显单调。拼接一字肩格纹裙设计,浪漫雅致的格纹图案,给人一种优雅娴静的知性感,肩部的翻折边设计,修饰肩线。撞色腰带收腰设计,吸睛同时,腰线也收的恰到好处,立体伞摆裙型,更显纤细腰身。"} +{"content": "类型#裙*风格#休闲*风格#潮*风格#性感*图案#线条*裙型#a字*裙下摆#开叉*裙下摆#毛边*裙腰型#高腰*裙长#半身裙*裙款式#口袋", "summary": "半身裙是采用a字型前面开叉的设计风格,不仅有个性时尚还能在走动间露出纤细的双腿,不失性感又潮流。裙底的毛边,显得随性慵懒又自在,高腰的裙身,把身材比例拉长。流畅干净的线条,和裙身两侧的大口袋,可以装点随身携带物品,方便实用又美观,自带休闲气质。"} +{"content": "类型#上衣*版型#显瘦*颜色#黑色*风格#简约*衣样式#衬衫*衣领型#尖领*衣款式#纽扣", "summary": "白衬衫是大多商务男士的标配,那么相比之下。黑色的衬衫则显得更加出众醒目,尖领的裁剪,利落大方的同时却又不失干练气场。同色的纽扣,彰显简约的时尚美,修身的版型,让你健美的身躯完美呈现,上身不自觉释放出几分高冷的气息,更显得魅力无穷。"} +{"content": "类型#上衣*材质#针织*颜色#纯色*风格#简约*图案#纯色*图案#撞色*衣样式#外套*衣领型#小立领*衣门襟#一粒扣*衣款式#对称", "summary": "这款针织外套采用了纯色的做工,穿着简约精致,针织的领口采用饿了简约的小立领领型,并在领口做了处理。搭配简约的一粒扣门襟设计,穿着显得干净利落。后背采用了拼布工艺形成撞色的效果,丰富你的视觉感官。衣身还加入了对称的贴袋,兼具美观与实用性。"} +{"content": "类型#裤*材质#牛仔布*材质#棉麻*材质#混纺*风格#运动*图案#印花*裤腰型#松紧腰", "summary": "时尚易搭的儿童过膝牛仔裤,logo印花的图案装饰,打破版型的沉闷感,时尚更富有活力,打造小潮童造型。宽幅的松紧腰带,平整有弹性,舒适不勒皮肤,孩童穿着活动自如。优选棉麻的混纺面料,具有极佳的透气效果,穿着活动不会有潮湿闷热感,非常适合孩子爱运动的本质。"} +{"content": "类型#裙*风格#知性*图案#条纹*图案#印花*图案#撞色*裙长#连衣裙*裙袖长#五分袖*裙领型#polo领*裙款式#拼接", "summary": "淡雅的条纹印花连衣裙,采用时尚的polo领,领子的撞色设计,可以更好的修饰柔美的颈部,彰显率性干练的气质。新颖独特的条纹印花,横竖条纹拼接使用,碰撞出别致的视觉效果,呈现出知性优雅的女性韵味。利落的五分袖设计,恰到好处的露出白皙的手臂,体现出温婉的女性气息。"} +{"content": "类型#上衣*版型#显瘦*风格#清新*图案#线条*衣样式#马甲*衣样式#西装*衣领型#翻领*衣袖长#无袖*衣款式#腰带*衣款式#对称*衣款式#收腰", "summary": "西装式的翻领领口,在最后设计成对称的样式,搭配单边开叉缺口的设计带来满满的个性与俏皮,轻松展现出干练却摩登的时尚气场。无袖的马甲款式露出了手臂的线条,在视觉上显得整个人更为利落帅气,散发出整洁清新的优雅气质。腰间加入了腰带进行收腰,所以不会看起来拖沓,突出了腰身曲线的设计更显女人味。长款过膝的长度可以修饰臀部和腿部的线条,上身更为显瘦。"} +{"content": "类型#裙*材质#牛仔布*风格#休闲*图案#撞色*裙型#背带裙*裙型#牛仔裙*裙型#小黑裙", "summary": "强推一条带你切换不同风格的背带裙,第一眼就会爱上的必入单品!背带裙一直是大热经典的宠爱单品哟,这款用的深牛仔配色设计,低调的同时非常百搭休闲。设计可以调节肩带长度的版型设计,无论高矮都可以完美驾驭酷炫又有型。随便一双同色系小黑靴再撞色一下吸晴满满出街!"} +{"content": "类型#上衣*颜色#纯色*风格#简约*风格#休闲*图案#纯色*图案#蝴蝶结*衣样式#衬衫*衣长#短款*衣袖型#灯笼袖*衣门襟#系带", "summary": "这件衬衫版型的上衣,简约的设计,显露穿着的大气感。蝴蝶结系带的领口设计,是整件服饰的焦点所在,显露出穿着的满满个性,与洋气的穿着。灯笼袖的设计,轻松遮挡了腰部的赘肉。纯色的色调,休闲时尚,还很百搭,让日常的生活中,展现出穿着的多变感。短款的上衣,无论是搭配裙子,还是裤子,都很百搭有个性。"} +{"content": 
"类型#裙*风格#淑女*风格#青春*裙长#连衣裙*裙领型#立领", "summary": "初次见到连衣裙就被它独特的立领所吸引,日字圆扣将领边的裁片系起,宛如颈间搭配的一条丝巾,尽显温婉柔美的淑女气质,再加上扣环明亮的金属光泽,带来青春灵动的时尚气息。立领温柔的围裹着脖颈,能够起到拉伸颈部曲线的视觉效果,展示出修长天鹅颈。"} +{"content": "类型#上衣*风格#青春*图案#刺绣*衣样式#棒球服*衣领型#立领*衣长#短款*衣门襟#单排扣*衣款式#螺纹", "summary": "棒球服在近几年来极其流行,螺纹立领的设计加上精致的单排扣点缀,让你无论是敞开还是闭合穿着都极其好看;在衣衣上加入了精美的绣花图案点缀,既增添了一份美感,又释放出女性的优雅魅力;而短款的设计干净利落,穿出女性的那份帅气时尚感。"} +{"content": "类型#裙*风格#复古*风格#青春*图案#复古*图案#线条*图案#刺绣*裙长#连衣裙*裙领型#翻领*裙款式#收腰", "summary": "这一款连衣裙看起来公主风十足,翻领的线条流畅,显得整个人很有气质,而领部上面的精致刺绣散发着甜美的气息,有着青春减龄的效果。收腰放摆的廓裙摆看起来很蓬松,轻松藏肉。复古的提花面料带来优异的质感,更加分哦。"} +{"content": "类型#裙*风格#复古*风格#性感*图案#豹纹*图案#复古*裙下摆#开叉*裙长#连衣裙*裙领型#v领", "summary": "本款连衣裙较适合成熟女性,深v领的设计,尽显撩人的性感魅力,喇叭扇形的衣袖设计,不显约束感。经典的豹纹花纹带来怀旧复古风尚,尽显摩登女郎范。下摆处开叉的设计,使走起路来裙裾飘飘,修长美腿若隐若现。"} +{"content": "类型#裤*版型#宽松*材质#棉*颜色#红色*风格#街头*图案#撞色*裤长#短裤*裤款式#抽绳*裤腰型#松紧腰", "summary": "这款来自品牌太平鸟与可口可乐联合跨界合作的男装中短裤精选百分百的纯棉面料,轻薄舒适的质地贴身穿着更加干爽透气。整体采用宽松的版型,抽绳设计的松紧腰带轻便自在不束缚腰部,裤腿处加入ola的撞色胶印,搭配大红色的基底色调,尽显街头风格的雅痞个性。"} +{"content": "类型#裙*材质#棉*颜色#白色*颜色#藏蓝色*风格#清新*图案#碎花*图案#线条*裙下摆#花边*裙领型#翻领*裙衣门襟#单排扣", "summary": "精选优质的纯棉面料,让裙子穿着后更加亲肤舒适。精致的小翻领设计,有效修饰颈部优美线条,裙身铺陈小碎花图案,带来清新文雅的味道,在藏蓝色的色调映衬下,更显得别致优雅。单排扣的设计加上一边白色花边的装饰,让线条更加明朗,展现设计美感。"} +{"content": "类型#裙*图案#圆点*图案#条纹*图案#蝴蝶结*裙长#连衣裙", "summary": "布满圆点元素的连衣裙,尽显青春活力的感觉。经典的圆形领口,衬托孩子的颈部曲线,穿着起来也不会有束缚感。领口处还添加了条纹边,以及抢眼的蝴蝶结装饰,轻松打造甜美公主范儿。"} +{"content": "类型#裙*材质#网纱*材质#雪纺*风格#性感*图案#波点*图案#线条*裙型#a字*裙长#连衣裙*裙领型#v领", "summary": "很有女人味的一款两件套连衣裙,若隐若现的网纱搭配顺滑的雪纺,袖口那一部分将手臂的肉肉都藏起来,增添了波点元素,上身满满的可爱气息。吊带裙是深v的设计,带着点的小性感,a字裙摆才将利落,能很好的修饰身材线条。"} +{"content": "类型#上衣*版型#宽松*颜色#白色*颜色#黑色*颜色#姜黄色*图案#撞色*衣样式#卫衣", "summary": "卫衣加连衣裙的两件套设计,上衣是黑色和姜黄色的卫衣,搭配白色的裙摆,层次感十分丰富,而且很有撞色的时髦感。宽松的卫衣廓形剪裁,oversize造型,轻松打造出娇小的气质。裙摆的斜切设计,长短不一的错落感,非常有设计感,灵动飘逸的同时,很有优雅魅力。"} +{"content": "类型#上衣*材质#棉*材质#混纺*风格#休闲*图案#几何*衣样式#卫衣", "summary": "休闲的卫衣是宝贝们最喜爱的时尚单品啦!棉混纺面料柔软舒适,保暖效果特别好,微收的袖口和下摆设计也可以为宝贝抵挡风寒呢!夸张的几何图案设计跟萌萌哒的有一拼哦!"} +{"content": "类型#上衣*材质#针织*颜色#黑色*风格#简约*风格#休闲*衣样式#卫衣*衣款式#拼接*衣款式#连帽", "summary": 
"带有帅气酷炫的风格的针织卫衣,既能展现你潇洒休闲的风格,又能衬托出别具一格的魅力。经典的黑色色调,带来不可多得的神秘气息,展现深沉内敛的性格,简约的连帽设计,衬托轻松随性的风格,带来慵懒的气质,个性的袖子拼接,凸显另类独特的魅力。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*风格#复古*风格#高贵*风格#性感*图案#复古*裙领型#v领", "summary": "宽松的款式设计有效遮住小赘肉,舒适显瘦。靓丽的提花图案点缀于裙身,展现出一股复古优雅情怀。v领的设计不仅可以选择显露出性感脖颈,亦可以选择轻松搭配一件内衬,实用又时尚。后领装饰了品牌logo,让裙子更显高贵品质。"} +{"content": "类型#上衣*风格#通勤*图案#菱形*图案#印花*衣样式#衬衫*衣门襟#系带", "summary": "将衬衫与裙装结合,优雅之余不乏柔美气息,是通勤的不错选择。这款裙子采取了唯美印花,活力时装的菱形图案,优雅中透露着一种浪漫气息。配上贝壳扣单排门襟,简洁利落,流转出的五彩光泽为整衣装点精致感。腰部系带勾勒出纤细腰姿,使身姿更显窈窕玉立。"} +{"content": "类型#上衣*颜色#红色*风格#欧美*风格#清新*风格#性感*图案#格子*衣样式#衬衫*衣领型#v领*衣袖长#七分袖", "summary": "精选优质面料,打造轻薄凉爽的衬衫,上身更显欧美气质。采用经典的红色格子,不仅显甜美风格,还十分衬托白皙的肤色。七分袖长刚刚好,露出纤细的手腕,在荷叶花边袖口里,更加显小清新。v领的设计很好的修饰了颈部曲线,还能隐约看见性感的锁骨。"} +{"content": "类型#裙*材质#蕾丝*颜色#白色*图案#蕾丝*裙型#a字*裙下摆#花边*裙款式#镶钻*裙款式#勾花镂空", "summary": "每一个心中有公主梦的小仙女,都期待有一条属于自己的蕾丝裙,细腻的触感,凹凸有致的镂空花朵,恰到好处的凸显出女性的甜美优雅感。内里是加了白色的内衬,丝毫不用担心走光的危险。镶钻珍珠花边丰富了层次感,立体的a字型裙摆,更是摇曳动人。"} +{"content": "类型#上衣*颜色#黑色*颜色#裸色*风格#文艺*图案#创意*图案#撞色*衣样式#衬衫*衣款式#纽扣", "summary": "波浪形开合边缘,成为衬衫一大亮点,古典文艺创意感满分;配有黑色纽扣,与裸色衬衫撞色搭配,塑造强烈的视觉冲击力;袖口配有纽扣,可随意转换不同造型。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*材质#羊毛*颜色#黑色*风格#文艺*图案#线条*图案#刺绣*裙型#大裙摆*裙腰型#高腰*裙领型#圆领*裙款式#抽褶*裙款式#收腰", "summary": "简单经典的小圆领修饰脖颈线条,宽松版型裁剪,上身有余量,穿着轻松自在。高腰剪裁,往里收,自然的收腰效果衬托出身体线条的纤细。腰间褶皱形成大大的裙摆,随着步伐飘逸而动感。气质显瘦的黑色调,羊毛材质,表面立体感的绣花更添文艺气息。"} +{"content": "类型#裙*材质#棉*颜色#浅蓝色*风格#青春*图案#环保*裙下摆#荷叶边*裙下摆#垂坠*裙领型#圆领*裙款式#拼接", "summary": "柔软环保的纯棉面料织造,亲肤透气,素雅恬淡的浅蓝色,饰以精致的花型图案装饰,带来青春甜美的少女气息。经典圆领,简洁的袖型,修饰手臂,更显纤细。荷叶边拼接腰身,丰富层次,流畅的版型轮廓,浪漫垂坠的裙摆,尽显飘逸灵动韵味。"} +{"content": "类型#上衣*版型#宽松*风格#复古*风格#宫廷*风格#休闲*图案#复古*衣样式#卫衣*衣袖型#落肩袖*衣袖型#灯笼袖", "summary": "衣身选有质感的卫衣面料,自带休闲气息,但挺廓而有型。宽松的设计,慵懒不随意,配合落肩灯笼袖的设计,富有复古的宫廷优雅气息。"} +{"content": "类型#上衣*版型#宽松*材质#牛仔布*衣样式#衬衫", "summary": "万物复苏的季节,穿上一款宽松版型的衬衫出门游玩吧。兔耳朵领子的造型,彰显了品牌的质感,同时显得洋气十足。搭配经典的牛仔蓝底色,可谓是充满了魅力。加上胸前的小口袋设计,让人感受到品牌对细节的执着。"} +{"content": "类型#上衣*材质#羊毛*材质#羊绒*风格#简约*图案#条纹*图案#撞色*衣样式#针织衫*衣样式#毛衣*衣领型#一字领*衣款式#拼接", "summary": "这款羊绒针织衫,整体都采用了撞色条纹,在夏天显得十分出挑。羊毛的材质温暖舒适,十分亲肤,不易变形十分耐穿。百搭舒适版型,拒绝臃肿,一件就能搞定整个冬天。经典的一字领设计,领口采用了撞色的拼接,让整款毛衣看上去十分简约大方,是夏天必备的一款毛衣,展现高档品质。"} 
+{"content": "类型#裤*版型#宽松*风格#工装*图案#线条*裤款式#口袋*裤款式#螺纹*裤款式#拉链*裤款式#抽绳*裤款式#松紧带", "summary": "弹力的橡筋腰头,以同色系的抽绳装饰,能够自由灵活的调节。裤脚采用了螺纹的收紧,与宽松的裤型结合起来,能够更好的修饰出腿部的线条。裤子的两侧搭配了对称的大口袋装饰,腰部还有拉链的口袋,彰显出浓郁的工装气息。"} +{"content": "类型#裤*版型#显瘦*风格#简约*图案#线条*裤款式#纽扣", "summary": "整体线条简洁流畅,静静地诠释着简约主义的魅力。利落的翻领,简洁的纽扣门襟,上身就是很利索又潇洒的感觉。落肩袖的设计,柔化了肩部线条,也将优雅和慵懒表现的恰到好处。搭配裤子,显瘦的版型,流畅的剪裁线条,上身正好是修身不紧绷的尺度,显瘦惬意,恰到好处的裤长,优雅利落,时髦都市腔调,大爱。"} +{"content": "类型#上衣*版型#宽松*材质#棉*风格#休闲*衣样式#外套", "summary": "在日常的休闲当中,一款棉质的外套可是少不了的~它以简洁的样式出现,穿搭起来更显百搭和气质的感觉。而且比较宽松的版型,对于身材一点也不挑,加上熟练的缝纫车工平整走线,更是能展现出整款的品质和衣型。再配上那棉质的面料,使得它在春日中穿搭,更显舒适的感觉。"} +{"content": "类型#上衣*材质#亚麻*风格#职场*图案#线条*衣样式#西装*衣领型#翻领*衣袖长#长袖*衣袖型#落肩袖*衣款式#口袋", "summary": "柔软不失挺括的亚麻面料舒适有质感,展现优雅不做作的自然美。帅气的西装翻领设计,展露出女性柔美的颈部线条,塑造优美天鹅颈。胸前和胯骨的大翻盖口袋装饰,给人耳目一新的感觉,彰显都市女性的干练职场范儿。而慵懒的落肩长袖,又不会使整体过于正式,为你送上丝丝温暖。"} +{"content": "类型#裙*版型#显瘦*版型#立体剪裁*材质#蕾丝*风格#宫廷*图案#蕾丝*裙下摆#荷叶边*裙下摆#花边*裙长#连衣裙*裙领型#v领*裙款式#拼接", "summary": "连衣裙延续了一贯的版型,修身的立体剪裁上身之后可以提高腰线更显身材。拼接的睫毛蕾丝设计在视觉上很吸睛,凸显出个性不羁的感觉不会太过沉闷。领口很有宫廷气质的花边再加上深v设计看起来很精致,荷叶边的喇叭袖子上身之后有微微的透视感不会太过沉闷。裙身的提花图案不会太过花哨,很有少女气质。刚刚好盖过屁股的长度也不压个子,很适合大多数人穿着。"} +{"content": "类型#裙*颜色#黑色*风格#高贵*风格#清新*裙型#蛋糕*裙下摆#层叠", "summary": "繁复而美好的层叠设计让这款蛋糕裙有着清新而温婉的少女气息。浅粉的配色是永不过时的少女梦想,甜蜜而唯美。黑色的配色则有着高贵低调的韵味,是贵族小姐的冷艳美感。飘逸的下摆柔美缱绻,温柔动人。"} +{"content": "类型#裤*版型#显瘦*材质#羊毛*颜色#黑色*裤腰型#高腰*裤口#微喇裤", "summary": "羊毛呢面料质地挺括垂感极佳,纯黑色极简百搭,微喇叭设计极好的修饰腿型很显小腿纤细,裤脚珍珠点缀女人味儿十足,非常适合春秋季节穿搭。高腰版型极好的在视觉上延伸腿部比例,显高又显瘦~"} +{"content": "类型#裙*图案#蝴蝶结*裙下摆#荷叶边*裙腰型#高腰*裙长#连衣裙*裙领型#v领*裙款式#抽褶*裙款式#飘带", "summary": "不论是约会还是聚餐,你都需要一件漂亮的连衣裙加持。本款采用的是v领的裁剪,搭配飘带的设计,随意系成一个蝴蝶结,甜美度up。立体的褶皱荷叶边装饰裙身,带出柔美温婉的女性魅力。高腰线的设计,修饰出完美的身材曲线。"} +{"content": "类型#上衣*颜色#黄色*风格#复古*风格#简约*图案#复古*衣样式#衬衫*衣款式#收腰", "summary": "这是一款简约复古的衬衫,细节处做了精致珍珠扣装饰,提升整体质感十足。收腰处的设计,正好凸显腰身。选用黄色设计,衬托你白皙的肌肤,让你轻松驾驭。"} +{"content": "类型#上衣*材质#丝绒*风格#复古*图案#复古*衣样式#卫衣*衣款式#绑带*衣款式#收腰", "summary": "一款正反两穿交叉绑带卫衣,非常时髦小众的一款设计,很容易让你成为人群的焦点。正反两穿的设计,可以变换不同的风格,更显新颖而又别致。交叉绑带的设计,透着几分复古的气息,同时起到了一定收腰的作用。丝绒面料的选择,带着别致的光泽感,时髦感万分。"} +{"content": "类型#上衣*版型#宽松*风格#简约*图案#条纹*图案#刺绣*图案#撞色*衣样式#马甲*衣袖长#无袖", "summary": 
"雅致的竖条纹被剪裁成帅气的马甲形式,利落的无袖赋予圆润的弧度,精湛平滑的车缝线展现着细节的魅力,宽松舒适让手臂活动自如不会产生拘束感。前襟在绣花印章的点缀下,增添了一份软萌的风范,配搭上简约撞色的小口袋,色彩的而对比更显俏皮而不单调。"} +{"content": "类型#裙*风格#宫廷*图案#印花*裙长#长裙*裙款式#吊带", "summary": "以未来的视角,丝绸之路,从出发,寻找唯美壁画。吊带裙的正反都印有丝绸海景印花,充满着风情。吊带装饰有扣,可以调节长短,也非常别致。唯美长裙,显示出西方宫廷装细节,高贵典雅。"} +{"content": "类型#裙*版型#显瘦*风格#知性*图案#线条*图案#刺绣*裙长#连衣裙*裙领型#圆领", "summary": "落落大方的时尚连衣裙,应用了重工刺绣的精美图案,整体焕发着令人心驰神往的迷人魅力。简洁裁剪的圆领曲线,修饰颈部的柔美线条,更衬托出端庄的知性气质,修身的精美版型流线,展现女性的优雅风姿。打造夏季连衣裙的新时尚风度。"} +{"content": "类型#裤*风格#简约*裤型#直筒裤*裤款式#不规则*裤口#毛边", "summary": "简约舒适的直筒裤是妹纸的百搭单品,穿上呈现出自由灵动的气息。裤口不规则毛边处理可以将整体修饰的更加俏皮和个性,加之腰部不规则设计与下部相呼应,着实吸睛。"} +{"content": "类型#裙*材质#蚕丝*风格#清新*图案#植物*图案#印花*裙型#直筒裙*裙长#连衣裙", "summary": "一款彰显清新雅致干的时尚连衣裙,植物印花设计格外别致,呈现出唯美的艺术美感,面料采用真丝材质,细腻轻柔,上身体验舒爽顺滑。长款直筒版型,轻松穿出高挑曼妙的身段。"} +{"content": "类型#上衣*材质#牛仔布*风格#清新*图案#印花*衣样式#外套*衣款式#破洞*衣款式#绑带", "summary": "这一款牛仔外套精美印花点缀其上,看起来特别的有美感,衬得妹子们更显清新脱俗。特别是时尚的破洞装饰,时尚个性凸显不羁。加上精致绑带的装饰,错落有致随风摇曳。这个时节穿,自然就把与众不同美丽突显出来"} +{"content": "类型#上衣*版型#显瘦*图案#撞色*衣样式#马甲*衣样式#外套*衣领型#翻领", "summary": "采用修身的版型设计而成的一款马甲外套,上身穿起来更加贴合女性的身材曲线感,达到更加显瘦的效果。领口珠翻领的剪裁方式,搭配上撞色的图案,更显一种时髦气息。"} +{"content": "类型#裤*颜色#白色*图案#条纹*图案#蝴蝶结*图案#撞色*裤长#短裤*裤款式#勾花镂空*裤腰型#高腰", "summary": "一字肩设计的上衣结合红白的撞色条纹样式,彰显出时尚活力感,上身效果显眼又吸睛。领口还装饰着系带的蝴蝶结,为整衣增添了些许活泼俏皮的感觉。雅致的方形镂空样式很显气质,搭配高腰的白色短裤,打造热情与时尚的夏日穿搭。"} +{"content": "类型#裙*版型#宽松*风格#简约*裙型#直筒裙*裙长#连衣裙*裙领型#圆领*裙衣门襟#套头*裙款式#拼接", "summary": "连衣裙看似简约,实际上有着很强的设计感。宽松直筒版型包容性强,不管是什么身材的女性穿在身上都十分有型。圆领套头设计简约实用,裙身上的拼接设计给人亮眼时尚的感觉,优质面料柔软舒适,保暖性透气性极佳又十分亲肤。"} +{"content": "类型#裙*材质#蕾丝*颜色#黑色*风格#性感*图案#蕾丝*裙长#连衣裙*裙款式#拼接*裙款式#收腰", "summary": "BRAND带来的这款连衣裙,选用经典的黑色系为基调,展现出女性成熟大方的气质,独特的收腰设计,修饰腰部的曲线,秀出曼妙迷人的身姿。加之肩部的蕾丝拼接点缀,打破单调,肩部若隐若现的朦胧感,尽显性感魅惑格调;以及贴心的内衬加持,无需担心走光,透露出女士的知性美。"} +{"content": "类型#裤*版型#显瘦*材质#混纺*风格#青春*图案#线条*裤腰型#松紧腰", "summary": "自带弹力的混纺面料,有一定的厚度,保暖的同时又不会臃肿,萝卜裤版型直挺有型。轻松改变腿部线条,松紧腰舒服又方便穿脱,不挑身材。藏肉显瘦这件事交给它,再加上今年流行的小蜜蜂元素,穿上身绝对时髦有型。"} +{"content": "类型#裤*风格#休闲*图案#条纹*图案#线条*裤长#七分裤*裤腰型#松紧腰", "summary": "商务休闲风的裤型设计,百搭时尚,凸显出女性的干练气质,适合各种场合穿着。竖条纹的设计,以及七分裤的版型,拉伸腿部线条,展现出女性修长美腿。贴心的松紧裤腰的设计,提升穿着方便舒适度,甄选优质面料,纹理大方,手感柔和亲肤。"} 
+{"content": "类型#上衣*图案#拼色*衣样式#外套*衣领型#方领*衣长#短款*衣款式#口袋", "summary": "这款配色鲜明亮眼的短款外套,真的让人爱不释手呢,一眼就被吸引住了!方领的设计,很好的修饰脸型,凸显时尚干练的气质。衣身拼色设计,超级吸睛,饱和度也非常高,同时也多了几分俏皮感。两个设计不一的实用口袋,有种特别的美感!"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*颜色#金色*图案#条纹*裙长#连衣裙*裙衣门襟#拉链*裙款式#不对称*裙款式#拉链*裙款式#抽褶", "summary": "此款为及连衣裙,不对称的设计,更显瘦显高,翻折式活片打褶领口,圆型肩带。采用立裁处理的腰部褶皱感,黑色与金色经典条纹,侧缝有隐形拉链便于穿脱。"} +{"content": "类型#上衣*版型#宽松*风格#休闲*衣样式#针织衫*衣领型#v领*衣袖型#灯笼袖", "summary": "对于微胖的妹纸来说,oversize版型的针织衫很实用。尤其是这种轻薄的款式,穿起来自由随性而不紧绷。再加上宽松的灯笼袖袖型,修饰了双臂多余的肉肉,令整个人看起来更加苗条。而简洁的v领领口设计,则起到了修饰脸型的作用,同时也令针织衫穿起来更加休闲轻松。"} +{"content": "类型#裙*颜色#白色*风格#休闲*风格#清新*图案#线条*裙下摆#花边*裙长#连衣裙*裙领型#圆领*裙款式#对称", "summary": "massimodutti这款连衣裙,采用粘纤面料,给予孩童柔软舒适的穿着体验。以流畅的线条勾勒圆领版型,柔和了轮廓,打造休闲活力风格。将白色作为主底色,清新脱俗,闪亮的树叶随风飘落在衣面,俏皮又不失趣味性,让人眼前一亮。对称的花边加持,展现孩童的甜美与可爱。"} +{"content": "类型#上衣*颜色#黑色*风格#简约*衣样式#衫", "summary": "BRAND的这款polo衫采用简约素气的纯黑色呈现而成,你在搭配中十分的简易轻松,同时又显得成熟稳重,经典的polo领让肩部看起来挺括有型,精致细腻的做工搭配上舒适柔软的面料,穿着减少摩擦同时又速干排汗。"} +{"content": "类型#裙*颜色#白色*风格#性感*裙款式#露肩", "summary": "女生衣柜里面必不可少的一条裙子就是白色的裙子,但是单调的纯白从来都不会是有时尚嗅觉的女孩的首选。这样一条蓝白混搭的小香风裙子十分完美的诠释了所有女孩子心里面白色裙子该有的样子。个性的旗袍领口看上去十分的别致出色,很有中国古典的优雅感觉。侧边的露肩设计看起来无比的性感优雅,也不会太过于暴露。"} +{"content": "类型#裙*材质#牛仔布*裙型#牛仔裙*裙下摆#毛边*裙腰型#高腰", "summary": "牛仔裤有着硬朗潇洒的态度,又融合了俏皮活力的一面。这款裤子的牛仔蓝与局部磨白拥有微妙和谐的美感。小高腰的设计,真的很显腿长。大大的裤筒带来许多味道,也是对腿的释放,穿起来很有范。裤脚毛边设计,才不至于显得沉闷呆板,又提升了时尚感。"} +{"content": "类型#上衣*版型#宽松*风格#休闲*图案#条纹*衣样式#衬衫*衣袖型#落肩袖", "summary": "红与白的细细密密条纹在衬衣上看起来很有些随性迷人的感觉,更能轻松修饰出柔美身线的诱惑。偏宽松的版型设计配合小落肩袖型的设计可以修饰肩部曲线,而且整体看起来都充斥着几分慵懒随性范儿,休闲气息十足。下摆处做了开叉设计可以带出动感随性范儿,更能修饰纤美腰线诱惑。"} +{"content": "类型#上衣*版型#宽松*风格#休闲*图案#条纹*衣样式#衬衫*衣袖型#落肩袖", "summary": "衬衣可以说是日常穿搭中最为实穿的单品了,无论是怎么样搭配都能有不错的感觉。而条纹更是经典款的元素,分分钟就能带出俏皮又随性的味道,更具灵动迷人的休闲范儿。偏宽松的版型设计配合落肩袖型的设计可以修饰肩部曲线,更能带出慵懒随性范儿,对于微胖的小仙女也是敲友好。"} +{"content": "类型#裙*材质#雪纺*风格#简约*风格#性感*裙下摆#开叉*裙下摆#荷叶边*裙长#连衣裙*裙领型#圆领*裙款式#拼接*裙款式#吊带", "summary": "这款连衣裙,穿着十分浪漫。吊带设计加圆领,简约又大气时尚。腰间荷叶边拼接,浪漫又唯美。裙摆前开叉设计,增添性感味道。雪纺的面料,还带着丝丝飘逸感。出街或海边都十分好搭配,行走间优雅又浪"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#蚕丝*风格#复古*风格#简约*图案#抽象*图案#复古*图案#线条*图案#印花*衣样式#衬衫", "summary": 
"重磅复古抽象真丝印花衬衫,宽松的版式注定在显瘦的同时多了一分随性,袖子也是的样子,设计感无需很多的点缀一点点执着也就够了。优雅的版型让整个线条更简约,对身材也没那么大限制。天然舒适的亲肤材料、平坦,线条顺畅。露出锁骨肌肤,柔美和帅气并存。"} +{"content": "类型#上衣*风格#英伦*风格#复古*图案#格子*图案#复古*图案#线条*图案#撞色*衣样式#风衣*衣领型#翻领*衣款式#腰带*衣款式#收腰", "summary": "大翻领的款式设计,让风衣带有别样的质感,精致唯美中独具特色。长款的版型能够拉伸身体线条,将率性和洒脱演绎得淋漓尽致,腰间以自然的收腰样式点缀撞色的腰带,勾勒出玲珑有形的曲线,细腻的格纹元素,衬托出复古的英伦风。"} +{"content": "类型#裙*版型#宽松*图案#碎花*裙型#百褶*裙长#长裙*裙款式#木耳边", "summary": "孕妇碎花长裙,优雅的百褶裙摆,让你孕期行走在时尚与气质之间。宽松的裙摆穿着非常舒服,腰部可以包容孕肚。小碎花元素的添加,木耳边的设计,更时尚。"} +{"content": "类型#裙*版型#宽松*版型#h*材质#蕾丝*风格#性感*图案#蕾丝*裙袖型#喇叭袖", "summary": "通体蕾丝裙,天生魅惑而浪漫,微透的肌理,时髦性感紧紧跟随,藏不住的优雅气质;尽显柔美的喇叭袖,优雅中带着利落的气韵,舒适中带着都市女性的儒雅,很凸显气质哦;简洁而流畅的h型廓型,利落包裹身材,宽松的设计,有效遮掩腰腹肉肉,体现自由的穿衣态度。"} +{"content": "类型#裙*颜色#驼色*风格#清新*图案#格子*裙型#鱼尾裙*裙下摆#荷叶边*裙长#连衣裙", "summary": "怡人的春季,怎能少了一件浪漫轻盈的连衣裙,奶驼色的面料,散发着一阵迷人的温柔气息,清新的格纹,让整个季节都明朗起来。荷叶边的点缀浪漫唯美,下摆是鱼尾裙设计,旋转着柔和的阳光里,彰显灵动的气质,既有减龄效果,又不会显得幼稚。"} +{"content": "类型#裙*材质#棉*颜色#黑白*风格#休闲*图案#字母*图案#文字*图案#线条*图案#印花*裙领型#圆领*裙衣门襟#套头", "summary": "整件服装以黑白素色为主基调,加以前襟的人像头像以及字母印花,瞬间打破了单调的沉闷感。使得休闲的衣衫也充满了辨识度,时髦之中透着个性。圆领套头的款式,百搭实穿。裁剪利落的线条搭配上精选的棉面料,兼顾舒适与美观特性。"} +{"content": "类型#上衣*材质#棉*图案#线条*衣样式#polo", "summary": "以简洁流畅线条构筑紧身轮廓,贴合身形不紧绷。polo衫领设计,修饰脸型同时穿着舒适。以纯白色调打造衣身,奠定百搭实穿特性。采用优质面料打造,含丰富棉成分,穿着舒适亲肤。"} +{"content": "类型#裙*版型#显瘦*材质#丝绒*材质#蕾丝*材质#纤维*风格#街头*风格#复古*风格#清新*风格#性感*图案#复古*图案#蕾丝*裙型#抹胸裙*裙长#连衣裙", "summary": "透视蕾丝立体连衣裙,流畅简洁剪裁工艺,精致浪漫演绎神秘而性感诱惑,微透露朦胧的视觉感观。聚酯纤维面料,设计了抹胸连衣裙,细腻柔软有弹性,修饰腰线显高显瘦,凸显迷人身材比例曲线。清新复古的丝绒材质,彰显时尚年轻,充满青春活力。街头出行清新靓丽迷人,落落大方妩媚动人。"} +{"content": "类型#裙*版型#显瘦*图案#线条*裙型#大裙摆*裙袖型#灯笼袖", "summary": "这条裙子最大的亮点在于灯笼袖的设计,有效遮住手臂的赘肉,打造纤纤玉臂。双层大裙摆设计,上身摇曳生姿,飘逸唯美。简洁腰设计,修饰腰部纤细线条,修身显瘦。"} +{"content": "类型#裙*版型#宽松*材质#棉*材质#牛仔布*风格#潮*图案#撞色*裙型#背带裙*裙型#牛仔裙*裙长#短裙", "summary": "背带牛仔短裙是最彰显潮流风尚的单品元素之一,采用棉质面料精心打造,柔软舒适给女孩儿的肌肤贴心的呵护。撞色背带造型个性吸睛,融入levi's元素与后部皮印章相呼应,展现出强烈的品牌质感。胸前的按扣闭合设计,美观的同时使穿脱更加方便。腰部置入松紧皮筋,贴合身体领穿着更服帖。宽松的裙摆十分有型,走线均匀平整,着在身上大方又有型。"} +{"content": "类型#裙*版型#显瘦*材质#涤纶*颜色#焦糖色*风格#简约*裙下摆#层叠*裙衣门襟#系带", "summary": 
"面料选用羊皮绒面革,里衬选用100%涤纶,舒适有质感。采用围裹设计,腰部可调节系带,可以,所以适合各种腰围和高度。运用了时尚——焦糖色,简约到没有多余的装饰,简单搭配就能散发出优雅气质。层叠式裙摆具有层次感,及膝的长度更能显高显瘦。"} +{"content": "类型#上衣*版型#宽松*风格#复古*图案#复古*衣样式#衬衫*衣袖型#灯笼袖*衣款式#腰带", "summary": "以质地饱满的乱麻材质为主,具有良好的透气性与垂坠性,穿着自然的同时还不会轻易起球。长长的同款腰带勾勒出女性柔美曲线的同时,还能为自身气质增添一丝飘逸灵动感。宽松的灯笼袖设计,更让这款衬衫有了现代时尚与复古风情。"} +{"content": "类型#裙*材质#网纱*风格#性感*裙型#蛋糕*裙型#网纱裙*裙下摆#层叠*裙款式#木耳边", "summary": "轻盈蓬松的网纱裙,性感中又略带点小女人情怀,朋友聚会或是参加婚礼都是不错的选择。外层网纱层次分明,可爱的小木耳边元素,层层叠叠的蛋糕裙,特别有少女心,精心挑选的柔软细网纱,不易勾丝还能时刻保持飘逸感,整身都是小细节的网纱裙,满足你的薄纱情结。"} +{"content": "类型#裙*图案#撞色*裙下摆#花边*裙腰型#高腰*裙长#连衣裙*裙袖长#无袖*裙款式#木耳边", "summary": "合体无袖连衣裙设计,甜美优雅更显公主气质。领口与袖口木耳花边造型,捏褶均匀细致更显宝贝甜美可爱,撞色锁边工艺亮眼醒目,不易脱线更显品质。满印小花朵设计俏皮可爱。高腰线花边装饰,视觉拉长身材比例更显身材高挑。裙摆宽敞设计便于行走活动,飘逸灵动美观大方。"} +{"content": "类型#裤*版型#宽松*风格#简约*图案#线条*裤长#连体裤*裤型#阔腿裤", "summary": "这是一款非常简约气质耐看的连体裤装。吊带v领的设计,露出好看的锁骨线条,修饰美丽的鹅颈,小心思满满。衣身设计到腰部以上是贴身的,下身腿宽松设计,很好的把身形扬长避短,显露了纤纤细腿。"} +{"content": "类型#裙*材质#棉*风格#复古*风格#文艺*风格#清新*图案#复古*裙长#连衣裙", "summary": "原创汉元素品牌重回出品的这款连衣裙,纯净的白与淡雅的绿相互映衬,散发出沁人心脾的清新味道,将少女的欲语还休诉说尽致;优选轻薄的棉质面料制作裙身,上身轻盈舒适;重工手绣花朵点缀,尽显复古文艺气息~"} +{"content": "类型#上衣*风格#性感*衣样式#针织衫*衣款式#拼接*衣款式#勾花镂空*衣款式#绑带", "summary": "精致优雅的针织衫,性感不失优雅的镂空设计,富有层次感与设计感,彰显女性柔情优雅气质。下摆的交叉绑带设计,飘逸且灵动,点缀整体造型,显得丰富多彩。镂空与布料的拼接设计,令人眼前一亮,凸显女性身材曲线,打造完美的黄金比例。"} +{"content": "类型#裙*裙腰型#高腰*裙长#短裙*裙衣长#短款*裙款式#纽扣", "summary": "这一款短裙裤高腰设计,提升腰线自然显高。同时精挑细选的布料自带弹力,贴合身形勾勒曲线,对身形的包容性很好。加上别致纽扣的装饰,增添看点特别出彩。精致短款,衬托身材凸显腿长。"} +{"content": "类型#裙*颜色#绿色*图案#印花*裙长#半身裙*裙长#连衣裙*裙款式#纽扣", "summary": "夏天看到好的颜色的印花总是忍不住要做成成品出来,各种半身裙,连衣裙,湖绿色的底色,搭配着细小的桃花,一路桃花相伴,心情都是美丽的。领口的桃花纽扣的小设计显出了与众不同的小独特小细节中凸显品质感~"} +{"content": "类型#裙*图案#刺绣*裙型#小黑裙*裙领型#翻领*裙款式#拼接", "summary": "一款优雅气质的小黑裙,让人尽显自身魅力,体验非凡感觉。裙身满幅太阳花刺绣,不仅让人的心情变得明媚如春,更丰富了层次,彰显了立体感觉,增添了许多清新自然的氛围。欧根纱拼接翻领,让裙子变得更挺阔,更有型,促使人的身材在变得高挑挺拔的同时也显得精神阳光。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*风格#性感*图案#撞色*图案#蕾丝*裙长#连衣裙*裙领型#圆领*裙袖型#喇叭袖", "summary": "此款连衣裙经典圆领设计,拉长颈部比例,衬托出锁骨的精致,使得脸部轮廓柔和自然,展现女性魅力;时尚撞色+喇叭袖,穿出不一样的韵味,蕾丝花瓣状简洁利落,减龄更灵动飘逸浪漫,随性的下摆伴随着微风肆意摆动的瞬间,成就了女性特有的名媛魅力,更有女人味,以活泼的韵味打造立体的视觉效果;优雅性感的透视设计,时尚显瘦,凸显身材曲线,尽显女性曼妙的身姿,提升女性柔美的气质。"} +{"content": 
"类型#上衣*颜色#纯色*风格#街头*图案#纯色*图案#线条*衣样式#外套*衣领型#立领*衣长#短款", "summary": "这件很时尚的纯色短款外套穿着很有范儿,它的设计很用心。设计师采用了经典的立领设计融入衣身,给人很酷酷的感觉。让你轻松打造出个性帅气的街头风气息,而且立领的融入还能很好的修饰脖颈线条,让你的脖颈看上去更加的纤细迷人。"} +{"content": "类型#裤*版型#显瘦*风格#性感*裤腰型#高腰*裤口#微喇裤", "summary": "这一款休闲裤时尚的高腰设计,提升腰线自然显高,精挑细选的面料,手感柔软舒适亲肤,有筋骨有弹力而且挺括有型。时尚的微喇裤型,轻松遮肉自然显瘦。加上包臀设计,性感迷人女人味足。"} +{"content": "类型#裙*颜色#黑色*风格#复古*风格#潮*图案#复古*图案#印花*裙型#百褶*裙下摆#垂坠", "summary": "黑色的裙身并没有带来沉闷乏味的印象,七格格的设计师用生动形象的印花装点裙身,反而显得活泼减龄。它采用轻盈垂坠的雪纺纱制成,更是在行走间带来了灵动和飘逸浪漫。裙子的下摆带有顺应复古潮流的百褶,无形中加大了裙身。"} +{"content": "类型#上衣*风格#文艺*风格#知性*图案#蝴蝶结*图案#线条*衣样式#马甲*衣样式#风衣*衣样式#外套*衣领型#翻领*衣袖长#无袖*衣款式#腰带", "summary": "知性文艺风的风衣马甲外套。帅气的翻领造型,线条流畅,拉长脖颈线条。无袖设计,清爽利落,显手臂细长。无门襟造型,金属质感的圆环设计。腰间系上腰带,系上蝴蝶结,个性十足。"} +{"content": "类型#裤*风格#简约*裤长#连体裤*裤型#直筒裤*裤型#背带裤*裤款式#纽扣*裤口#翻折", "summary": "当活力美少女遇上减龄背带裤完美的衬托出小美女的俏皮与可爱,背带前方有纽扣设计,不光是为了方便穿脱还能避免连体裤上厕所的尴尬。背带后面的裤腰增添打褶设计,让裤腰具备一定的弹性,无论哪种身形的小美女穿着都不会紧绷还能增添甜美感。简约的直筒裤腿支持翻折,让减龄连体裤超流感大增。"} +{"content": "类型#裙*版型#显瘦*风格#潮*裙长#连衣裙", "summary": "拥有这件连衣裙立马让你变身精致的猪猪女孩。斗篷的设计酷炫又紧跟潮流,水溶钩花的设计个性新颖,修身舒适的版型展现你的曼妙身姿。"} +{"content": "类型#裤*版型#显瘦*材质#棉*颜色#纯色*颜色#军绿色*风格#街头*风格#简约*图案#纯色*裤腰型#高腰", "summary": "这款休闲裤的设计简约而不简单,高腰修身的版型,勾勒出了腿部优美的曲线,拉高了腰际线。军绿色的纯色裤面,彰显出了简约美感,尽显帅气的街头风范,选用优质的棉质材质制成,充满了质感,给你带来舒适的穿着体验。"} +{"content": "类型#裙*颜色#黑色*风格#性感*图案#线条*裙型#小黑裙*裙领型#v领*裙款式#收腰", "summary": "这款小黑裙采用纯黑色的设计风格,展现出经典大气的女性魅力,同时给人一种成熟优雅的时尚质感。交叠式的v领造型不仅将上身的线条修饰的更加凹凸有致,还起到很好的修饰脸型的作用,同时还带来更多的性感风情。自然的收腰设计,将女性柔美的身材曲线展现的淋漓尽致,打造有型有范的熟女魅力。"} +{"content": "类型#上衣*版型#宽松*风格#简约*图案#线条*衣样式#风衣*衣长#中长款*衣款式#抽绳", "summary": "作为一件中长款设计的风衣,显得有垂感。领口的设计较大气,很有个性。袖子部分则比较宽松,新颖别致,质感十足,置彰显卓越品质。肩型流畅,摩登有范儿。袖口看起来颇有造型,线条流畅做工精湛,上身优雅有型。同时,下摆位置设计了抽绳,较为宽松,方便活动。色系的搭配简约而不简单。"} +{"content": "类型#上衣*材质#针织*衣样式#毛衣*衣款式#不规则", "summary": "这件针织毛衣的亮点就在底摆上,前短后长的不规则底摆,增添了可爱俏皮的气息,穿在身上更有时尚的感觉。工整平滑的底摆锁边工艺,确保了针织不会出现脱线的情况,保证了衣衣的质量上乘。光滑弹性极佳的面料让你有更好的上身体验。"} +{"content": "类型#上衣*材质#针织*衣样式#毛衣*衣款式#不规则", "summary": "这款连衣裙设计。外搭一款暖心针织毛衣,加上的版型。搭配不规则的开叉下摆设计,丰富整体造型,还能让你感受到贴心的小温暖,内搭裙款不埋没骨子里的时尚。"} +{"content": "类型#裙*版型#宽松*风格#性感*裙长#连衣裙*裙领型#v领*裙款式#勾花镂空*裙款式#收腰", "summary": 
"柔软的面料让这款长款连衣裙穿着舒适,同时搭配宽松版型,更带了份随性慵懒的气质。然后加上镂空和v领设计的领口,更多了份性感气息,而收腰设计的运用,凸显曲线,更多了份优雅气质,再搭配豆沙红底色,非常显白,也非常显气质。"} +{"content": "类型#裙*裙长#连衣裙*裙衣长#中长款*裙款式#腰带", "summary": "连衣裙的长度被成中长款的样式,从而不会显露出肉感很足的臀部与大腿根,气质上更为优雅。整体选用真丝面料打造,其与舒适性十分的卓越,中长款式腰带,轻松勾勒出窈窕身姿。"} +{"content": "类型#裤*材质#雪纺*颜色#白色*颜色#纯色*风格#简约*风格#知性*风格#休闲*风格#性感*图案#纯色*裤长#九分裤*裤款式#勾花镂空*裤口#小脚", "summary": "浪漫轻盈的雪纺上衣,肩部以及腰部的镂空设计打破呆板,层次丰富,增添灵动温婉的气息。深v领口,在拉长脖颈曲线的同时又能展现精致迷人的锁骨,流露不经意的小性感。知性气质的白色九分小脚裤,简约的纯色设计尽显时尚休闲气息。"} +{"content": "类型#裙*材质#牛仔布*颜色#黑色*风格#潮*裙型#a字*裙型#牛仔裙*裙款式#口袋", "summary": "俏皮的a字廓形搭配挺括硬朗的洗水牛仔,穿又实力减龄,同时露出美好的腿部肌肤,让时尚潮人们钟爱不已。黑色毛球装饰在口袋与裙摆上,丰富其层次感之余格外吸睛夺目。舒适的一字腰头,巧妙勾勒出纤细腰肢。"} +{"content": "类型#裙*版型#显瘦*颜色#深色*风格#性感*图案#线条*图案#印花*裙长#连衣裙*裙款式#吊带*裙款式#不规则", "summary": "这款连衣裙是性感迷人的吊带款式,上身后凸显脖颈纤美的线条感!深色的混合印花,更显肤色白皙亮丽。中长的修身款式,对身材的包容性更大,上身遮肉显瘦,更显高挑纤瘦的美感。不规则的下摆剪裁,走动间轻盈飘逸,更显时髦灵动的气质感,出街实力吸睛!"} +{"content": "类型#上衣*版型#显瘦*风格#通勤*风格#休闲*图案#条纹*图案#线条*衣样式#西装*衣门襟#一粒扣", "summary": "竖条纹打破了沉闷感拉长了线条视觉显高挑,修身的剪裁简洁干练,jing典一粒扣版自然有型,将西装的休闲和精致诠释的刚刚好,通勤和休闲都有时髦的意味。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙型#百褶*裙下摆#层叠*裙长#连衣裙*裙袖型#泡泡袖", "summary": "这条复古风的连衣裙是经典的女神款,谁穿上都会非常好看,首先是袖口设计,非常典型的泡泡袖,甜美可爱,再配合整条袖口都是纯蕾丝,这份可爱中又多了一丝的浪漫和温柔。裙摆是多层的,旋转起来,有层层叠叠的百褶效果,追求名人效果的女孩儿必备。"} +{"content": "类型#裙*材质#网纱*风格#知性*风格#高贵*风格#清新*图案#刺绣*裙长#连衣裙*裙领型#圆领*裙款式#收腰", "summary": "透露着清新淡雅,高贵知性,端庄的气质连衣裙,适合每个美眉。它经典的圆领设计,修饰了女性细长的脖颈。腰身处收腰的裁剪,不仅拉伸身材比例,更凸显了高挑的身姿。网纱刺绣的设计,把女性的高贵端庄,典雅知性气质展现的淋漓精致。"} +{"content": "类型#上衣*版型#宽松*材质#雪纺*风格#清新*衣样式#雪纺衫*衣袖长#七分袖*衣款式#木耳边", "summary": "这款甜美气质的雪纺衫,精选优质的雪纺料,柔顺亲肤,穿着舒适。宽松版型,包容各种身材,舒适自然。木耳边点缀领口,浪漫不失气质。七分袖设计,端庄大气,有女人味,采用当前大热的马卡龙色。清新时尚,造型甜美时尚,又百搭。"} +{"content": "类型#裙*颜色#白色*图案#线条*裙下摆#荷叶边*裙下摆#层叠", "summary": "白色的裙身让你如一样圣洁,打造出高冷的女神形象。细细的肩带大方地展露出骨感的肩颈线条,不忍转移视线。尽管采用的颜色较为单一,但是不会显得乏味和沉闷。层叠的荷叶边不仅甜美,还能令扁平的胸部变得丰满。"} +{"content": "类型#裙*材质#牛仔布*颜色#蓝色*风格#文艺*风格#休闲*风格#清新*图案#字母*图案#文字*图案#印花*图案#撞色*裙型#背带裙*裙型#牛仔裙*裙长#连衣裙*裙款式#口袋", "summary": "这款连衣裙采用自然的牛仔蓝色,展现出年轻一族随性自由的生活态度,同时也为衣身带来一丝清新的文艺范儿。撞色字母印花的背带设计,为这款连衣裙带来了全新的活力,也让它的色彩搭配更加丰富,展现出别样的休闲色彩。前襟的口袋造型,不仅将休闲的色彩渲染的更加浓郁,也起到了很好的实用功能。"} +{"content": 
"类型#裤*版型#宽松*风格#通勤*风格#简约*图案#格子*图案#线条*图案#印花*裤长#长裤*裤型#直筒裤", "summary": "带着点小帅气感的一件长裤。经典的格纹印花点缀,搭配上直筒款式设计,简约线条,满满通勤风时尚感。裤身相对宽松,对大多数身型友好,且包容腿部线条,显得双腿线条笔直。"} +{"content": "类型#裤*版型#宽松*版型#立体剪裁*材质#牛仔布*裤款式#拼接*裤款式#不对称", "summary": "采用高品质牛仔面料,立体剪裁而成。舒适宽松的版型,小裤脚修饰腿型效果更棒。两侧不对称的磨破设计很有新意,背面明辑线拼接细节更具未来感。"} +{"content": "类型#上衣*版型#显瘦*材质#棉*风格#复古*风格#清新*图案#格子*图案#复古*衣样式#衬衫*衣袖型#泡泡袖*衣门襟#系带*衣款式#拼接", "summary": "复古雅致的格纹衬衫,采用100%纯棉面料打造而成,柔软透气,穿感舒适自如。系带修饰领口,增添飘逸律动感,清新减龄。胸口处的毛边拼接设计,增添立体视效,更显别致细节处理。复古泡泡袖型,轻松修饰手臂曲线,遮肉显瘦。"} +{"content": "类型#上衣*版型#宽松*材质#针织*风格#文艺*风格#简约*风格#休闲*图案#条纹*图案#撞色*衣样式#开衫*衣领型#圆领*衣长#短款*衣门襟#单排扣", "summary": "一款休闲减龄的条纹针织开衫,宽松的短款版型,点缀撞色的条纹,上身舒适而不单调,散发出减龄的文艺气质;简约的小圆领结合门襟单排扣,经典而大气,上身大方而不落俗。"} +{"content": "类型#裙*版型#显瘦*材质#羊绒*风格#简约*风格#清新*裙型#包臀裙*裙领型#圆领*裙领型#v领", "summary": "上衣简约圆领,搭配裙子的v领设计,修饰出脖颈的纤细曲线美。显瘦包臀的裙型,完美勾勒出身形的曼妙迷人,甜美浪漫的纯色调,清新而又格外的减龄。使用优质的羊绒面料,触感舒适柔软,温和亲肤,尽显品质做工不凡。"} +{"content": "类型#上衣*材质#蚕丝*颜色#粉色*风格#清新*衣样式#衬衫", "summary": "百搭而张显小清新风格的衬衫,淡雅的粉色系,排扣贴袋设计格外工整,衬托出优雅的仪态。活动式袖袢的设计更加彰显随性洒脱的特质。优良桑蚕丝材质工艺,上身效果轻柔舒爽。"} +{"content": "类型#裙*材质#牛仔布*颜色#纯色*图案#纯色*裙型#a字*裙型#牛仔裙*裙腰型#高腰*裙长#半身裙", "summary": "极简的纯色牛仔半裙,面料和版型上就很有看点,上身效果出乎意料。经典的a字半裙,用棉弹牛仔面料打造,配合腰部立体收省,能全面包裹并收紧腰腹的赘肉,轻松穿出纤细的小蛮腰,自高腰处延伸出来的裙摆,刚刚好遮住较粗的大腿部位,结合恰到好处的三分裙长,轻松穿出迷人大长腿。"} +{"content": "类型#裙*图案#拼色*图案#线条*裙长#半身裙*裙领型#一字领*裙袖型#喇叭袖*裙款式#木耳边", "summary": "这款裙子分为上下两个部分。上身采用一字领设计,很好的展现了脖颈及肩膀处的优美线条,落落大方更显穿戴者的优雅气质。喇叭袖的设计有效的修饰的手臂的赘肉,搭配上木耳边的装饰,让上身更具层次感。下身的半裙则是采用拼色设计,让裙子不显单调,更有设计感。"} +{"content": "类型#裙*材质#网纱*风格#简约*风格#性感*图案#线条*图案#撞色*裙长#连衣裙*裙领型#翻领*裙款式#拼接*裙款式#腰带*裙款式#抽褶", "summary": "网纱连衣裙透露着浪漫的女人味,裙身别致的撞色拼接格调呈现新颖的时髦感,精致的发酵着性感和优雅。简约的翻领修饰颈部线条,门襟一排扣装饰闭合,若隐若现的轻薄质感,弥漫着神秘的气息,恰如其分地诉说一半唯美一半优雅的情怀,袖口抽褶工艺呈现微喇叭的袖口,腰间配弹力腰带。下垂自然的裙摆轻盈飘逸,传递着冬日的浪漫。"} +{"content": "类型#上衣*颜色#粉色*风格#简约*衣样式#衬衫", "summary": "BRAND的这款睡衣,看起来很普通。但是只有你穿上身之后,你才能感受到这款家居服的魅力。棕色的走线设计,恰到好处地修饰了温馨的粉色,让整个人看起来十分简约时尚。而领口采用衬衫领的设计,勾勒出你迷人的颈部曲线。"} +{"content": "类型#上衣*风格#简约*风格#潮*图案#线条*图案#印花*衣样式#毛衣*衣领型#圆领*衣款式#勾花镂空", "summary": 
"简约与个性巧妙结合是这款毛衣的亮眼之处,线条流畅的圆领设计,轻松勾勒肩部与颈部轮廓。成衣袖口别致的镂空设计,别致新颖又潮流时尚,立体印花的点缀,又给其平添了一分酷雅气息。然后浑身散发出潮流之感,舒适的用料,将不凡格调发挥地淋漓尽致。"} +{"content": "类型#裙*颜色#白色*颜色#紫色*颜色#纯色*图案#纯色*图案#渐变*裙型#百褶", "summary": "这一款比起纯色的百褶裙,更显精致与高级。运用了渐变色的设计,由紫色一直渐变到白色,给人一种妩媚动人的感觉。结合百褶的裙摆,穿出优雅和飘逸感,让你的气质变得更加与众不同。"} +{"content": "类型#上衣*风格#复古*图案#复古*图案#撞色*衣样式#外套*衣款式#连帽", "summary": "BRAND塔卡沙品牌的这款男士连帽外套,采用了经典个性的撞色设计,搭配复古的色彩,更具时尚感。领口v字设计,视觉上起到瘦脸的效果,更加精致有型。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*颜色#红色*风格#高贵*图案#线条*图案#蕾丝*裙型#a字*裙型#包臀裙*裙型#鱼尾裙*裙下摆#花边*裙腰型#高腰*裙款式#不规则", "summary": "有着显肤白红色调的半身包臀裙,珍珠蕾丝花边装饰丰富整体立体层次感,又彰显优雅高贵气质,不规则鱼尾下摆设计增添几分灵动轻盈感,高腰a字版型轻松拉长腿部线条,上身显瘦又显高。"} +{"content": "类型#裙*版型#宽松*材质#蕾丝*图案#拼色*图案#线条*图案#蕾丝*裙型#直筒裙*裙下摆#压褶*裙长#连衣裙*裙领型#v领*裙款式#拼接*裙款式#口袋", "summary": "本款连衣裙采用了宽松宽松直筒的版型,大大的v字领口,能大大的修饰女性的颈部线条,侧边超级实用的口袋,方便又百搭。加上领口处拼接的睫毛蕾丝,妩媚又迷人。下摆的同色系拼色压褶设计更是设计感十足,让人情不自禁的想要拥有它。"} +{"content": "类型#裙*风格#知性*图案#线条*裙型#鱼尾裙*裙长#连衣裙", "summary": "这是一款将传统与现代完美结合的一款连衣裙,受了传统旗袍的启发,保留了旗袍元素中的知性与优雅,小半立旗袍领,很好的修饰了颈部线条,显得温婉典雅。裙摆采用了鱼尾的样式,走起路来更是摇曳生姿,充满女人味。"} +{"content": "类型#裙*版型#显瘦*图案#格子*裙袖型#喇叭袖*裙款式#腰带*裙款式#收腰", "summary": "时尚达人都喜爱的格子吊带裙两件套,个性优雅不过时,适合任何体型的人群穿着。遮肉显瘦,百搭经典的上上之选。时尚喇叭袖袖口设计,修饰手臂曲线,彰显优雅名媛气质。同色系腰带收腰设计,修身显瘦,凸显身材,整齐衣摆设计,修饰身形,彰显妙曼身姿。"} +{"content": "类型#上衣*版型#宽松*风格#街头*风格#简约*图案#字母*图案#文字*衣样式#风衣*衣样式#外套*衣长#中长款", "summary": "采用了帅气的棒球领口制作而成的风衣外套自带一股潇洒随性的魅力,简约的风格有着落落大方的魅力,完美诠释着街头的潮女风范。宽松的中长款版型剪裁搭配上内敛低调的色系。充满着简洁明快的减龄魔力。而衣身下摆处的字母装饰时髦有腔调,有一种趣味的感觉,却又不会过于张扬,既丰富整体细节的同时又能让衣身变得更加亮眼,轻松赚足回头率。"} +{"content": "类型#上衣*版型#宽松*风格#街头*风格#简约*图案#字母*图案#文字*衣样式#风衣*衣样式#外套*衣长#中长款", "summary": "采用了帅气的棒球领口制作而成的风衣外套自带潇洒随性的魅力,简约的风格有着落落大方的魅力,完美诠释着街头的潮女风范。宽松的中长款版型剪裁搭配上内敛低调的色系。充满着简洁明快的减龄魔力。而衣身下摆处的字母装饰时髦有腔调,有一种趣味的感觉,却又不会过于张扬,既丰富整体细节的同时又能让衣身变得更加亮眼。"} +{"content": "类型#上衣*风格#简约*图案#抽象*图案#印花*衣样式#衬衫*衣领型#立领*衣领型#翻领*衣袖长#长袖*衣门襟#单排扣", "summary": "这款来自BRAND旗下精心推出的男士长袖衬衫,前幅利用简约的抽象印花图案修饰,增添整体的时尚气质,又具有别样的迷人气质。经典的立领翻领领口,立体感十足,也让衣物廓形更明晰。时髦的单排扣衣襟,穿脱很便利,展露出温文尔雅的气息,做工与剪裁属于一流。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*风格#复古*图案#复古*裙长#连衣裙*裙款式#盘扣", "summary": 
"宽松板式设计的连衣裙,是非常好驾驭的款式,遮肉显瘦,又不失灵动的俏丽之感。小立领和斜襟盘扣的中式设计元素,使得本款连衣裙,在一众连衣裙中脱颖而出,上身后既有女性温柔甜美的一面,又不失复古的端庄优雅。"} +{"content": "类型#上衣*材质#网纱*风格#复古*图案#蝴蝶结*图案#复古*衣样式#卫衣*衣袖型#落肩袖*衣款式#螺纹*衣款式#抽褶", "summary": "此款卫衣可谓是足够吸引眼球。粉粉的色调,足够的甜美可爱,加上网纱的领子与蝴蝶结的装饰点缀,又带点复古的俏皮,让人心动不已。落肩袖的设计,修饰肩部曲线。与略微褶皱的袖子的结合,高效地遮住手臂的赘肉。经典的螺纹袖口,舒适度极佳。"} +{"content": "类型#裙*颜色#蓝色*颜色#黄色*风格#简约*风格#潮*图案#植物*裙袖长#五分袖*裙领型#圆领*裙袖型#喇叭袖", "summary": "这条裙子的配色非常舒服,蓝色为底色,黄色的浪漫花卉平铺其上,和谐又雅致,而且衬得人的皮肤白皙清透。它的袖子非常有趣,喇叭袖的设计也是正好迎合了当下的潮流,五分袖又恰到好处地隐藏了胳膊的肉肉。领口选择大方经典的圆领领口,整体非常简约,上身柔美典雅。"} +{"content": "类型#上衣*版型#显瘦*风格#淑女*衣样式#风衣*衣领型#翻领*衣门襟#双排扣*衣款式#腰带", "summary": "该款风衣采用修身版型,上身后有一种女神范儿,在此同时翻领设计还能为你添上一丝淑女气质。双排扣加上腰带看上去非常有气质,符合欧巴心目中女神的穿衣风格。风衣的材质薄厚适度,适合春季过渡穿着,这样既能保证温度也能显示出我们完美的身材。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#清新*图案#条纹*图案#蝴蝶结*衣样式#衬衫*衣袖型#喇叭袖*衣款式#拼接", "summary": "今年的衬衫款,宽松的廓形设计清新宜人的纹饰,总能给人带来清凉舒适的感觉。这款小清新喇叭袖条纹衬衫裙就非常适合学生穿着,细条纹轻盈显瘦,融入拼接元素及蝴蝶结喇叭袖更多细节,可爱甜美。"} +{"content": "类型#裙*材质#丝绒*材质#雪纺*风格#清新*裙型#大裙摆*裙袖型#喇叭袖*裙款式#拼接*裙款式#抽褶", "summary": "雪纺面料具备了亲肤透气的穿着效果,更好地满足了早春的时尚氛围,诠释出舒适自在的穿着体验。腰间运用丝绒织带的拼接,塑造出完美的身材比例。喇叭袖的加持,提升一股淡雅的清新格调。加之褶皱的腰身,蔓延出风情万种的大摆裙,打造出灵动撩人的魅力。"} +{"content": "类型#裙*颜色#白色*图案#条纹*图案#线条*图案#刺绣*裙型#a字*裙款式#拼接*裙款式#不规则", "summary": "蓝白条纹相间的裙身,清爽得如同清冽的,颠覆传统的条纹裙身设计款式,横条纹的上身,显得女孩十分安静,而下摆采用不规则的竖条纹拼接,结合a字下摆版型,瞬间让视觉效果灵动了起来,女孩子显得更加有活力。胸前又以彩色的刺绣线条点缀,完美地结合白色条纹,构成了一幅下雨图,浪漫新颖,又增添了裙身层次亮点。"} +{"content": "类型#裙*材质#蕾丝*风格#潮*图案#撞色*图案#蕾丝*裙下摆#层叠*裙腰型#高腰*裙长#连衣裙*裙款式#拼接", "summary": "这款蕾丝连衣裙,有着粉嫩的柔美色调,衬的肌肤粉嫩白皙,穿上身减龄就是分分钟的事。蕾丝的面料,上身有种层叠的繁复感,轻透的同时又有妩媚优雅的风情,更显窈窕动人。拼接的撞色设计,让整体的层次更为丰富多彩,有种个性时尚的潮流范儿,新颖吸睛。高腰的设计,很好的拉长了身材的比例,收束出纤细的腰身,让你更为玲珑窈窕。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*风格#性感*图案#刺绣*图案#蕾丝*裙型#大裙摆*裙下摆#花边*裙腰型#高腰*裙长#连衣裙*裙领型#v领", "summary": "浪漫仙气的蕾丝连衣裙,v字领口露出迷人的锁骨,衬托纤细小脸;领口周围花边的点缀,轻奢甜美的同时又带点小性感。衣身精致的水溶蕾丝,刺绣的图案低调而华美。修身高腰的裁剪拉长身形,展现女子的曼妙身姿。长长的大裙摆随着步伐摆动,让人心动,满足你对夏日的浪漫幻想。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*颜色#粉色*风格#复古*图案#复古*图案#蕾丝*裙型#a字*裙型#大裙摆*裙长#连衣裙*裙款式#拼接*裙款式#收腰", "summary": "这是一款粉色拼接蕾丝的连衣裙,通过大裙摆表达出十分强烈的仙女感,a字型的版型,十分显瘦收腰,拉长双腿的比例。展现满满的复古甜美感。"} +{"content": 
"类型#裤*版型#宽松*材质#棉*材质#牛仔布*图案#环保*裤型#直筒裤*裤型#背带裤", "summary": "这款来自米妮哈鲁牛仔背带裤,精选100%纯棉面料制造,手感柔软细腻穿着舒适合身。表面采用环保活性印染,无甲醛荧光剂残留,安全温和的质地守护孩子身体健康。宽松的直筒裤版型包容性较好,可以藏住肉肉凸显孩子的大长腿。"} +{"content": "类型#裤*材质#牛仔布*材质#蕾丝*颜色#黑色*风格#性感*图案#蕾丝*裤长#短裤", "summary": "牛仔短裤,黑色调,散发出性感撩人的迷人风情;简洁的腰头,显得腰线特别好看;裤脚边沿顺着蕾丝睫毛花边,微微蜿蜒的弧线婉约浪漫,整套look更显得精致了几分。"} +{"content": "类型#上衣*颜色#绿色*图案#格子*图案#线条*衣样式#衬衫*衣领型#翻领*衣款式#收腰", "summary": "较为少见的绿色格纹元素,彰显了与众不同的设计韵味。干练衬衫翻领修饰颈部线条,衬托娇小脸型。腰部收腰剪裁,勾勒出曼妙的小蛮腰曲线,裙身做了撞绿色的裙摆插片,丰富视觉看点。"} +{"content": "类型#裤*材质#牛仔布*裤款式#拉链*裤腰型#高腰*裤口#毛边*裤口#小脚", "summary": "这款来自的毛边牛仔裤,高腰的设计,能有效提臀瘦身,塑造迷人身姿。门襟拉链设计,开合顺滑,穿脱便捷。小脚的设计,紧紧包裹双腿,显得腿部纤细又修长。毛边裤脚设计,时尚洋气,穿起来潮范儿十足。"} +{"content": "类型#裤*材质#牛仔布*裤款式#拉链*裤腰型#高腰*裤口#毛边*裤口#小脚", "summary": "这款来自paige的小脚牛仔裤,高腰的设计,能有效遮盖腹部赘肉,轻松塑造纤细小蛮腰。门襟拉链设计,开合顺滑,穿脱便捷。小脚的设计,紧紧包裹双腿,显得腿部纤细又修长。毛边裤脚设计,个性时尚,穿起来潮范儿十足。"} +{"content": "类型#裤*版型#宽松*裤长#九分裤*裤型#直筒裤", "summary": "来自于的这条宽松褶位九分裤,采用的是宽松直筒的版型设计,这种版型的包容性极佳,不仅不挑身材,还能起到一定的修饰不完美腿型效果,而前腰的打褶细节,则使得裤子更为立体。再添加可拆卸织带进行点缀,轻松就能凹出自主造型。"} +{"content": "类型#裤*版型#宽松*裤长#九分裤*裤型#直筒裤", "summary": "这一款裤子腰部串珠的装饰,增添看点自然出彩,立体的裁剪,符合女性的身材曲线,轻松的勾勒出女性曼妙的身材。加上九分的裤长,精致优雅气质不凡。直筒宽松,不挑身材包容性强。"} +{"content": "类型#裙*材质#蕾丝*图案#蝴蝶结*图案#蕾丝*裙下摆#开叉*裙长#连衣裙*裙领型#圆领", "summary": "一款优美的圆领连衣裙,穿起来优雅又时尚。蕾丝花朵装饰衣身,像整个人漫步在花海,充满浪漫气息;侧边开叉蝴蝶结,优雅灵动很显大气,上身减龄又衬气质。"} +{"content": "类型#上衣*版型#显瘦*风格#休闲*风格#清新*图案#条纹*衣样式#衬衫*衣领型#圆领*衣款式#拼接*衣款式#荷叶边", "summary": "这款衬衫很有女人味,特意选用柔软亲肤的面料,带来舒适的穿着体验。同时清新的蓝白条纹设计,在视觉上让你更显瘦。并且休闲的圆领设计更能展现你优雅的脖颈曲线美。让人眼前一亮的是,领口和袖子拼接的荷叶边设计,整体打造了优雅柔美的气质,更能衬托出女性魅力。"} +{"content": "类型#裤*风格#清新*裤型#阔腿裤", "summary": "连体的阔腿裤版型十分抢眼,既凸显了女性上身的颈部曲线,同时也很好的修饰出腿部曲线,最适合那些腿型并不好看的女性穿搭,体现出设计师独特的设计理念和新奇的设计想法。更值得一提的是,这款阔腿裤选择清新的颜色设计,而是选择的是端庄但不张扬的藏青色设计,在放肆的格调中注入一点稳重气息。"} +{"content": "类型#裙*裙型#a字*裙长#半身裙*裙款式#抽褶", "summary": "明明是经典的半身裙,在融入了整齐排列的图案后形成了奇妙的视觉效果,增添了一丝高雅的韵味。上窄下宽的a字版型既修饰了腰身也展现了十足优雅的形象,结合随性的褶皱设计,增强了层次感也更凸显优越品质。"} +{"content": "类型#裙*裙型#a字*裙长#半身裙*裙款式#抽褶", "summary": "本款半身裙采用了棉布的挺括面料,在亲夫吸汗的同时又具备良好的舒适感,大大的a字版型少女感十足,配上30到褶皱的复杂工艺,加上大片的翻飞花朵,炎炎夏日,裙摆随着微风起舞,好一个吸睛的场面!这款设计感十足的半裙,小仙女必须要拿下呀"} +{"content": 
"类型#上衣*材质#牛仔布*材质#水洗*颜色#浅蓝色*衣样式#外套*衣长#短款", "summary": "早春的时候,没有一件牛仔外套,怎么能算是过春天呢?其实在每年的开春之际,这也是让大家烦恼的一件事,今天种草给大家的这件牛仔外套,简直不要太赞哦!属于短款的版型,相信我不用多说,这种短版型是特别显高的,刚好腰下一点,连水洗的浅蓝色也是符合了早春的气息,超赞的!闭眼入是没有问题的。"} +{"content": "类型#上衣*风格#休闲*图案#骷髅*图案#印花*衣样式#卫衣*衣款式#连帽", "summary": "卫衣两边袖筒的logo图案,结合后幅大骷髅印花,彰显品牌别致的设计理念,极具个人特点。连帽设计尽显无限休闲感,带来舒适惬意的穿着体验。"} +{"content": "类型#裤*版型#宽松*风格#青春*风格#性感*图案#创意*裤腰型#高腰*裤口#毛边", "summary": "气质高腰设计,轻松拉长腿部的比例,尽显身姿的高挑动人。创意与个性十足的毛边设计,展示出青春的不羁与时尚。宽松超短裤的设计,带有几分俏皮的小性感,更迷人。"} +{"content": "类型#上衣*颜色#粉色*风格#街头*风格#性感*衣样式#卫衣*衣样式#开衫*衣样式#外套*衣门襟#系带*衣款式#口袋*衣款式#连帽", "summary": "这是定制的一款开衫外套,选用的是独特的橘粉色,既能很好地显肤白,又给你满满的少女感,而且有一种温暖的感觉。集开衫和卫衣两种衣款于一身,带给你两种风格的完美结合。连帽与系带设计,具有街头随性感。口袋设计,方便而美观。"} +{"content": "类型#裙*风格#性感*图案#格子*图案#线条*裙长#连衣裙*裙袖型#喇叭袖*裙款式#勾花镂空*裙款式#木耳边*裙款式#收腰", "summary": "格子元素的连衣裙,既低调内敛,又精致时髦,充分展现出女性风情。考究裁剪的版式,结合收腰的设计,自然贴合身形,展现出纤细迷人的曲线。双肩的镂空造型,凸显出了精致的锁骨线条,平添几分性感的小女人味。裙身多处的木耳边的点缀,呼应柔美的喇叭袖型,缱绻浪漫,尽显甜美少女风。"} +{"content": "类型#裙*裙型#a字*裙腰型#高腰*裙长#半身裙*裙衣门襟#拉链*裙款式#拉链", "summary": "这款半身裙采用高腰的a字版型设计,拉高了腰际线,尽显身材纤细,亮色的漆皮材质,带来了很好的吸睛效果。优质的五金拉链,充满了质感,在细节中彰显出了品质,选用进口的羊皮制成,给你带来舒适的穿着体验。"} +{"content": "类型#上衣*版型#显瘦*风格#职场*衣样式#衬衫*衣样式#西装", "summary": "西装对于职场人士而言一定是必备的单品,更何况这样一款剪裁优良,版型合身,质感高级的设计。选用纯黑的色调,十分的经典,同时可塑性极强,搭配种颜色的衬衫都是非常不错的选择。而修身的版型,更是很好的勾勒出美好的身材曲线,优雅而又气质。"} +{"content": "类型#裙*版型#宽松*风格#休闲*风格#潮*裙长#半身裙*裙款式#拼接", "summary": "时尚加分的拼接设计,在视觉上丰富了服装的层次感。把不同图案、面料组合在一起,碰撞出个性抢眼的休闲风。让你秒变不折不扣的潮流,走在之中别提多吸引人了。十分宽松的裤腿设计,从远处看,有种半身裙的既视感。恰到好处的隐藏住略显不完美的腿型,既时髦又飘逸,好感度up。"} +{"content": "类型#上衣*图案#条纹*图案#蝴蝶结*衣样式#衬衫*衣袖长#长袖*衣袖型#衬衫袖", "summary": "面料条纹清晰,光泽柔和,具有良好的透气性,上身立体挺括。衬衫袖型为连衣袖,丰富衬衫结构,细节的时尚新颖。淡雅的颜色搭配基础的直筒版型,前幅横向分割融入连身长袖,领部蝴蝶结玩转细节心机,记住衬衫三颗扣,达人没错了。"} +{"content": "类型#裙*颜色#蓝色*风格#复古*风格#知性*图案#复古*裙长#连衣裙", "summary": "想要简洁而知性的感觉,那你不妨试试素雅的色调。你看那清浅的蓝色调,配合在一款中长的连衣裙上,总是能展现出十足的魅力。而那复古的旗袍领设计,穿搭起来可以让你更显优雅范儿。配上婀娜的身线和那波浪式的裙身,让你的身材更俏,从而展现出优雅美观的女人味来。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*风格#街头*裤长#九分裤*裤口#毛边", "summary": "牛仔裤不得不说,的确是衣橱百搭小能手,不知道怎么搭配的时候来一条这样的九分牛仔裤怎么穿都合适,上身真的超级显瘦。九分裤长裤脚撕边,露出精致的脚踝,多了几分时髦感,区别常规的千篇一律,裤脚随性的毛边,演绎着摩登街头感的雅痞气息。"} 
+{"content": "类型#裙*图案#动物*裙型#a字*裙型#大裙摆*裙下摆#荷叶边*裙长#连衣裙*裙款式#木耳边*裙款式#收腰", "summary": "来自东北动物明星的原创“史前壁画”,描绘一个万物初开,烂漫的年代。而图案,这一回用的是仙味儿连衣裙呈现,上身后帮姑娘们悄咪咪减龄哟。双层的荷叶边元素丰富连衣裙的层次效果,使得裙子变得更加飘逸灵动,领口的木耳边轻盈而甜美,呼应荷叶边喇叭袖口,尽显女孩子柔美动人姿态,结合收腰a字大裙摆,尽显仙气十足的少女魅力。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#简约*衣样式#卫衣*衣领型#圆领*衣款式#拼接*衣款式#不规则", "summary": "不规则的设计和卫衣的搭配得很好,袖子部分也进行了拼接,不再稚气之余又添加了几分乖巧。不规则的下摆显高又显瘦,走起路来飘逸感十足。经典的圆领设计,修饰脸型不挑人穿。整体简约大气,剪裁干净利落,小宽松的长款版型,遮肉又显瘦。"} +{"content": "类型#上衣*版型#宽松*材质#牛仔布*风格#青春*衣样式#外套*衣袖型#落肩袖", "summary": "春暖花开,又是一年牛仔季,已经忍不住要给你们出各种好看的牛仔外套啦,这一款彰显着骨子里的青春个性,零星点缀的毛边处理,让牛仔帅气不减;宽松版型+落肩袖的设计率性不羁,上身各种自由随性and显小减龄"} +{"content": "类型#上衣*版型#宽松*材质#牛仔布*风格#青春*衣样式#外套*衣袖型#落肩袖", "summary": "春暖花开,又是一年牛仔季,已经忍不住要给你们出各种好看的牛仔外套啦,这一款彰显着骨子里的青春个性,零星点缀的毛边处理,让牛仔帅气不减;宽松版型+落肩袖的设计率性不羁,上身各种自由随性and显小减龄。"} +{"content": "类型#裤*颜色#白色*风格#运动*风格#青春*风格#潮*图案#条纹*图案#线条*裤型#直筒裤*裤口#开叉", "summary": "直筒裤裤型上身舒适不束缚,裤身线条流畅,垂感很好,易凹造型;两侧条纹装饰紧跟校服裤潮流,释放时尚运动风气息,青春动感,有着瞬间减龄的;裤脚上点缀了白色的英文字母,一点就能抓住眼球,醒目吸睛;适当的裤脚开叉设计让整条裤子远离刻板,前卫个性。"} +{"content": "类型#裙*颜色#纯色*图案#纯色*裙衣长#中长款*裙衣门襟#系带", "summary": "纯色的色彩使用,时尚百搭,出行更加的便捷。腰间系带的工艺,轻松修饰女性完美身姿,塑造女性完美腰型。半袖的制作工艺,让穿着活动更感轻松自由,体验畅快无拘束的穿着效果。中长款的裙型制作,拉长女性身形比例,视觉显身高。"} +{"content": "类型#上衣*图案#线条*图案#撞色*衣样式#卫衣*衣款式#抽绳*衣款式#连帽", "summary": "这款连帽卫衣让人一眼就会爱上,独特撞色工艺搭配经典连帽抽绳版型,下摆部分的走线将主体分成上下两部分,将男性率性洒脱不羁爱自由的风格展现出来,又不会显得过分花哨。独特的假两件设计修饰手臂部分的线条,衬托出穿着者修长挺拔的身形"} +{"content": "类型#上衣*版型#显瘦*材质#针织*颜色#裸色*风格#淑女*风格#潮*风格#性感*图案#撞色*衣样式#针织衫*衣样式#外套*衣领型#圆领*衣袖型#喇叭袖", "summary": "早秋时节,针织衫实用保暖又百搭。整体色调呈现裸色,干净简洁贴近肤色更加实用。小圆领的款式剪裁经典又百搭,方便搭配各种外套。撞色的喇叭袖设计结合了当下的潮流元素,体现女性手腕处的性感,彰显淑女气质,独特的细节设计,引人瞩目。整体款式为紧身款修身显瘦,采用天然针织面料,寒冷的冬季作为内搭十分柔软舒适,保暖性能强。"} +{"content": "类型#裤*风格#复古*图案#复古*图案#波点*裤型#阔腿裤*裤腰型#高腰", "summary": "阔腿裤加上超长裤腿的设计,走路,气场十足,很。垂感很足的面料,行走间飘逸灵动。黑底白点的波点元素,尽显复古优雅,女人味十足。高腰的设计,更是塑造胸以下都是腿的既视感。"} +{"content": "类型#裙*材质#牛仔布*裙款式#不对称", "summary": "这款具有时尚色彩的短外套。不对称的门襟设计别具用心,半个领子的设计也是个性突出,穿起来很潮。牛仔面料看起来质地很好,板正有型。"} +{"content": "类型#裙*材质#绸缎*风格#性感*裙型#抹胸裙*裙衣门襟#系带*裙款式#勾花镂空", "summary": 
"色彩靓丽的绸缎纹路,带来与众不同的视觉冲击,即便是同款亦有不同色呈现,可谓独一无二。人性化的可拆卸挂脖,后背系带设计,双重既能让你抹胸撩人又能系带风情万种,内侧贴心的防滑硅胶是你穿抹胸的安全保障。而泳裤的侧边镂空式设计,除了性感之外也是展示自我的小秘诀。"} +{"content": "类型#裙*版型#显瘦*裙型#一步裙*裙下摆#开叉*裙长#连衣裙*裙袖型#花瓣袖*裙衣门襟#拉链*裙款式#拉链", "summary": "这款连衣裙采用修身的版型设计,搭配精致的剪裁设计,结合一步裙款式,展现出玲珑曼妙的曲线,更为显瘦。而后中开叉设计,穿着给身体更多的活动量,行动更为方便。加上花瓣袖设计,更为优雅浪漫。搭配拉链设计,穿脱更为方便。"} +{"content": "类型#裤*风格#性感*图案#卡通*图案#印花*裤长#短裤", "summary": "夏季不只是空调呆着,更重要的让身体全方位凉爽。这款工字露脐吊带衫,带着bf风的3d卡通印花,艳丽的色彩和更具和谐感,让你舒适感倍增,穿上配上短裤妥妥的性感风。"} +{"content": "类型#裙*版型#宽松*版型#立体剪裁*材质#蕾丝*风格#宫廷*图案#线条*图案#蕾丝*裙型#花苞裙*裙型#抹胸裙*裙长#连衣裙*裙领型#一字领*裙袖型#泡泡袖", "summary": "它像是夏天里的一朵含苞待放的花苞裙,宽松的抹胸一字领设计,这种版型属于谁穿谁好看的那种,恰到好处的展现迷人的锁骨和肩部线条而。蕾丝的优雅与浑然天成的甜美感,也衬托得无比细腻。立体裁剪独有的柔美素雅,更是仙气满满,让你回头率爆表。加上气场强大的宫廷泡泡袖设计,搭配七种不同花型,造就了这件充满生命力的连衣裙。"} +{"content": "类型#上衣*版型#宽松*风格#复古*风格#简约*风格#休闲*图案#复古*衣样式#卫衣*衣门襟#系带", "summary": "对卫衣情有独钟的妹子可不要错过这款趣味十足的卫衣哦!点缀之上的可爱装饰让人眼前一亮,同时充满了妙趣横生的画面感。再搭配复古的咖啡色,打造出甜美不乏个性的特点。还独特的采用了后襟系带设计,富有满满的设计感,再结合简约宽松的廓形,打造出时尚休闲的穿搭风格。"} +{"content": "类型#上衣*颜色#白色*风格#简约*风格#清新*图案#线条*衣领型#翻领", "summary": "穿上这款白衬衣,会是耳目一新的感觉,它清丽而不失优雅,端庄而不失纯真,茉莉白色清新的空气一样,让人百看不厌。是那种咋见欢喜,也惊艳的感觉。端庄的小翻领,加上衣襟简约干练的线条"} +{"content": "类型#上衣*风格#清新*图案#刺绣*衣样式#卫衣*衣袖型#灯笼袖", "summary": "法式浪漫小清新的卫衣,真的很适合爱美的小仙女。BRAND的这款灯笼袖卫衣格外的洋气,清新配色的小鹿图案绣花,精美的展现在胸前,增加了十足的俏皮感,穿着凸显出满满的仙气。"} +{"content": "类型#裙*材质#雪纺*风格#性感*图案#刺绣*裙长#连衣裙", "summary": "这条花朵刺绣雪纺连衣裙领口部分采用优雅的系带领口设计,可以根据自己的喜好随意调节,让小性感呼之欲出。精致的花朵刺绣装饰,是整条裙子的亮点所在,精湛的刺绣工艺让花朵看起来栩栩如生,增添了裙子的生动气息。另外裙子选用轻盈的雪纺面料,打造出浪漫飘逸的感觉。"} +{"content": "类型#上衣*版型#宽松*图案#人物*图案#字母*图案#文字*图案#线条*衣样式#风衣*衣领型#翻领", "summary": "宽松廓形的风衣,大大的版型,帅气又有型;简洁精干的翻领设计,修饰线条又凸显气质;光滑抗寒的面料,薄薄一层却可以抵挡春日寒意。背后的人物图案,前身的字母装饰。减龄又显活力。松紧袖口的设计,活动方便又实穿。"} +{"content": "类型#上衣*版型#显瘦*图案#条纹*图案#线条*衣样式#衬衫*衣袖长#长袖", "summary": "基础款的条纹衬衫,圆形的领口修饰精美的颈部线条,使得脸型也变得更加的小巧精致,长袖的设计贴合手臂的线条时,手臂更加的纤细活动自如。修身的版型穿在身上贴合身体线条,选用优质的面料,舒适柔软而又亲肤,适合贴身穿着"} +{"content": "类型#裤*材质#牛仔布*颜色#深蓝色*风格#清新*裤型#直筒裤*裤型#阔腿裤*裤腰型#高腰", "summary": "来自BRAND的牛仔裤采用深蓝色造型,耐脏的同时也不容易哦。高腰的款式结合荷叶边的加持,将身材比例拉长,再多的肉肉也能秀出小蛮腰。直筒阔腿的裤型,遮盖腿部小秘密,轻松显笔直腿型,走路间飘逸自在,既能扮酷又能散发清新哟。"} +{"content": 
"类型#裤*版型#显瘦*裤长#九分裤*裤型#哈伦裤", "summary": "这条裤子比一般的哈伦裤,来的更加的显瘦。来自于它腰头的褶裥和省道收割,该收的地方收,该放的地方放。上身特别的藏肉,然后来自于带点慵懒味道的宽大裤筒,遮肉,,很显腿直!简直是梨型身材妹子的福音。配合九分的裤长,穿起来更显腿长不说还很帅。"} +{"content": "类型#裙*颜色#粉色*风格#淑女*图案#印花*裙型#百褶*裙腰型#高腰*裙衣门襟#系带", "summary": "过膝至脚踝的长度将双腿都隐藏在裙身当中,再搭配上高腰的设计,展现出曼妙高挑的身材。淡粉色的款式更能衬托出甜美的气质,小巧的印花图案还增添了淑女感,也有减龄的效果。v领搭配系带的设计更显乖巧,风琴百褶的裙摆富有层次,轻盈流动。"} +{"content": "类型#裙*颜色#深蓝色*裙型#蓬蓬裙*裙长#连衣裙", "summary": "采用金属色绣线将棱角分明的星星刻画出来,以毫无规律的姿态摆放在裙身。在和深蓝色基调相融之间,营造出一种夜空繁星的感觉。让这款蓬蓬纱连衣裙看起来梦幻又浪漫,并且给人一种高调轻奢的感觉,穿起来气质又迷人。"} +{"content": "类型#裙*版型#宽松*材质#蕾丝*颜色#粉色*风格#文艺*图案#蕾丝*裙下摆#荷叶边*裙长#连衣裙*裙领型#圆领*裙款式#木耳边", "summary": "飞扬的夏季,少不了蕾丝裙的踪影。这件粉色文艺荷叶边连衣裙,采用简单大方的圆领设计,修饰脖颈,微露一丝性感美。袖口采用宽松舒适的荷叶边,修饰手臂,又迎合整件衣裙的飘逸感。腰间以精巧可爱的木耳边递进,在宽松上又增添一丝格调美。"} +{"content": "类型#上衣*版型#宽松*风格#休闲*风格#清新*图案#印花*衣样式#卫衣*衣款式#连帽", "summary": "BRAND带来的这款印花大贴袋卫衣,衣身采用常见的连帽款式设计,轻松减龄,上身穿出清新少女感。宽松的版型裁剪,舒适不紧勒,带来休闲洒脱的惬意。衣身正面的logo印花点缀,装饰衣身的同时,提升了品牌辨识度。"} +{"content": "类型#裤*版型#宽松*图案#线条*裤型#阔腿裤*裤款式#口袋*裤款式#拉链*裤腰型#高腰", "summary": "珂莱蒂尔高腰阔腿裤采用宽边高腰腰头设计,可完全包裹腰腹部,可隐藏小赘肉,展现纤细腰部线条。拉链顺滑易拉,穿更方便、更轻松。两侧对称斜插口袋设计,可方便插手的同时,还可放置随身携带的小物品,美观又实用。宽松的裤腿设计,可隐藏腿部肉肉,更加凸显腰部曲线。内里走线工整,精致工艺,彰显品质。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#运动*风格#休闲*风格#青春*衣样式#卫衣*衣样式#外套", "summary": "卫衣款裁剪的外套,延续了一贯的休闲运动风,倍显青春的动感减龄范儿。整体宽松的廓形裁剪,营造出了舒适的上身感,富余的空间维度,赋予了衣身修身百搭的实穿属性。看点满满的帽带摆脱了以往的平庸造型感,宽织带点缀的小猫图案,结合的金属环扣,尽显潮酷个性。"} +{"content": "类型#裤*材质#牛仔布*风格#性感", "summary": "3x1的这款牛仔裤采用浅白的牛仔面料为裤身材质,其柔然的手感和细腻的质地,在穿着舒适的同时,透露着清纯甜美的个性气质。除此之外,流畅的裤身剪裁将性感的腿部曲线彰显的淋漓尽致,不失为一款随性出街的必备单品。"} +{"content": "类型#裤*图案#线条*裤长#九分裤*裤型#阔腿裤*裤款式#拼接*裤款式#口袋*裤腰型#高腰", "summary": "这条九分休闲裤选择百搭的色系,可以随性穿搭。高腰的裁剪和阔腿的版型,拉长你的双腿线条,缔造黄金比例,九分的裤腿露出纤细的脚踝,不会使下半身显得臃肿。裤腿处的设计舍弃了单调的侧面单裤缝,而是选择了裤脚两侧拼接,使腿部看起来更加纤细。后侧的一字口袋,提升臀线,塑造臀部凹凸有致。"} +{"content": "类型#裤*材质#羊毛*裤口#卷边", "summary": "选用高品质精细羊毛面料,质地轻薄细腻,贴身丝毫没有。柔韧而不失挺括度,能极好的撑起极简的裤型,上身立体有型。腰部和脚口融入金线织带装饰,打造出吸睛的细节看点,令整体不再单调。卷边的裤脚造型,更有设计感,也能丰富裤身层次,提升上身的时髦度。"} +{"content": "类型#裙*颜色#深蓝色*风格#复古*风格#性感*图案#条纹*图案#复古*图案#撞色*裙长#连衣裙*裙款式#吊带*裙款式#不规则", "summary": 
"深蓝的连衣裙色调沉稳而不显浮夸,设计师仅以复古的条纹勾勒,加以蓝白的撞色点缀,在丰富细节亮点之余,也更具吸睛抢眼的时髦个性。而吊带的设计,让你肩部的肌肤些微裸露,不经意间散发的性感气息将你迷人的风情演绎的淋漓尽致,搭配上不规则的下摆设计,优雅天成。"} +{"content": "类型#裙*颜色#黑色*裙型#衬衫裙*裙下摆#荷叶边*裙款式#露肩", "summary": "以荷叶边装饰整体裙身,在肩部开口的露肩设计,可谓是点睛之笔。微露肉的设计,给人以甜美温柔的印象。黑色竖纹的元素装饰,更为衬衫裙平添亮点,轻松满足icon多种风格搭配所需。"} +{"content": "类型#裙*版型#宽松*风格#青春*风格#清新*风格#性感*图案#刺绣*裙长#长裙*裙袖长#五分袖*裙衣门襟#系带", "summary": "清新飘逸的长裙设计,融合五分袖款式,突出独特个性的时尚气息。性感的内衬及若隐若现外衫设计,加上精致小心机的胸前刺绣设计,彰显出青春活泼的气质。颈后系带设计,突出个性,彰显特色。宽松舒适的款型设计,张扬出女性轻柔飘逸的气质,加上精致的裁剪工艺,体现出女性独有的柔和美好。"} +{"content": "类型#上衣*材质#棉*材质#牛仔布*衣样式#外套", "summary": "一款柔软耐穿的棉质牛仔外套,独具一格的打褶毛边袖口,更显别致优雅。衣身的明线装饰更是增添了整体的造型。养眼的天蓝色让你在这个春日充满了活力。"} +{"content": "类型#上衣*材质#棉*材质#牛仔布*衣样式#外套", "summary": "牛仔外套经典不败,质量要经得起考验。这款牛仔外套采用全棉牛仔布制作而成,织物坚牢,抗皱性能卓越,而且具有较好的吸湿性和透气性,上身后自然舒适。同时棉料为主的牛仔布,防也不错,非常适合应对的早春温度。"} +{"content": "类型#上衣*材质#棉*材质#牛仔布*衣样式#外套", "summary": "这款牛仔外套采用了棉质牛仔面料制作而成,棉质牛仔面料拥有非常柔软舒适的手感,质感亲肤透气。而且棉质牛仔面料不会褪色和变形,经久耐穿。"} +{"content": "类型#裙*版型#宽松*材质#牛仔布*风格#文艺*风格#青春*图案#字母*图案#文字*裙型#牛仔裙*裙下摆#荷叶边", "summary": "自在宽松的牛仔裙,利用编制肩带连接,营造出文艺乖巧的少女气质。自然做旧的牛仔面料,配以字母元素点缀,提升造型时尚度更显潮女独有的朝气活力。配以荷叶边点缀,让你在青春俏皮的同时更多几分小女人的妩媚感。"} +{"content": "类型#上衣*颜色#金色*图案#蝴蝶结*衣样式#外套*衣门襟#系带*衣款式#纽扣", "summary": "长款毛呢外套最大的设计亮点在于衣身的金色纽扣设计,非常精致大气。领口蝴蝶结系带设计,尽显优雅美观,衣身外兜设计,增加立体层次感。"} +{"content": "类型#裙*材质#网纱*颜色#纯色*风格#性感*图案#纯色*裙型#百褶*裙长#连衣裙*裙领型#圆领*裙衣门襟#拉链*裙款式#拼接*裙款式#拉链", "summary": "纯色的连衣裙采用了圆领设计,配合上肩部的百褶斗篷设计,层次感强,还可以遮掩肩部的肉肉。采用了隐形拉链的设计,简洁又不会破坏整体的设计感,同时在穿脱时更加方便,领口处的透视网纱拼接更是性感而又不会过于暴露。"} +{"content": "类型#裙*颜色#黑色*风格#高贵*风格#性感*裙下摆#层叠*裙款式#亮丝*裙款式#拼接*裙款式#吊带", "summary": "运用经典的黑色调,打造出性感的女人味,蔓延出骨子里的柔美气息,配合细吊带的娇俏感,发挥出妩媚动人的吸引力。惹眼又吸睛的亮丝面料,诠释出高大上的都市情调,衬出不一样的诱惑气息,加之层叠拼接的裙摆,洋溢出几许浪漫的风情,正好诠释出高贵大方的名媛气场。"} +{"content": "类型#裙*材质#网纱*材质#蕾丝*风格#性感*图案#线条*图案#蕾丝*裙长#连衣裙*裙领型#半高领*裙款式#木耳边*裙款式#收腰", "summary": "这件连衣裙可谓完美演绎了不是人间烟火的高雅气质。木耳边与轻盈网纱带来别致女人味。睫毛蕾丝勾勒的半高领,修饰修长的脖颈线条。花型蕾丝元素,微微露出脖颈肌肤与手臂线条,带着若隐若现的性感。合体的收腰版型勾勒出曼妙的身材曲线。蕾丝元素下摆,行走间更具柔美风情。"} +{"content": "类型#裙*版型#宽松*颜色#红色*颜色#蓝色*颜色#粉色*风格#性感*图案#撞色*裙长#连衣裙*裙领型#v领*裙袖型#荷叶袖*裙款式#绑带", "summary": 
"一款极具设计感的连衣裙。红色、粉色、蓝色的撞色设计,小v领加绑带的设计和荷叶袖的设计,微微性感中带着一丝乖巧。舒适的材质搭配宽松的版型,上身非常舒适。"} +{"content": "类型#裙*颜色#黄色*图案#蝴蝶结*裙长#连衣裙*裙款式#绑带", "summary": "这款来自百伶妈妈孕妇连衣裙,无论是款式还是品质都无可挑剔。领口以绑带蝴蝶结装饰,丰富了整个裙面尽显甜美浪漫的情怀,而且明黄色的蝴蝶结还能让裙子更耀眼夺目。裙面采用高品质面料制造,手感柔软细滑穿着舒适,此外面料悬垂性较好上身效果超棒。"} +{"content": "类型#上衣*颜色#黑白*风格#青春*图案#卡通*图案#条纹*衣样式#卫衣*衣款式#连帽", "summary": "穿搭中不可或缺的自然是连帽卫衣,光是版型就能体现出女性追求的随性自在感,倘若想在清凉的天气中穿着。这款条纹连帽卫衣相信会适合你,经典的黑白条纹上映衬着卡通图案的造型,带来青春中的童真趣味感,将减龄活泼的个性透露出来,举手投足都会超有气质!"} +{"content": "类型#上衣*风格#清新*风格#性感*图案#蝴蝶结*衣样式#衬衫*衣款式#拼接*衣款式#纽扣*衣款式#荷叶边", "summary": "此款BRAND荷叶边衬衫采用多种材料拼接打造而成,质感细腻柔和,上身效果极佳,温暖而舒适;经典的荷叶边袖子处理,传递出优雅清新的气质;脖颈间的蝴蝶结装饰,呈现出一派甜美可爱的女性气息;精致的纽扣点缀,彰显新颖个性感。"} +{"content": "类型#裙*版型#显瘦*材质#牛仔布*材质#水洗*风格#复古*风格#民族风*风格#性感*图案#复古*图案#刺绣*裙型#a字*裙型#牛仔裙*裙腰型#高腰*裙领型#v领", "summary": "这款带着水洗的做旧质感的牛仔连衣有着独特的迷人味道。v领的设计,显露出迷人性感的脖颈与锁骨,显瘦显脸小,上身气质又女人味十足哦。从v领延续下来的门襟一直到裙摆,以包扣点缀,加上扭结的高腰设计,时髦感爆棚哦。右边袖口和背后的民族风精致绣花,更让整体展现复古的优雅与恬静。a字裙摆,灵动飘逸。"} +{"content": "类型#裤*版型#显瘦*材质#涤纶*风格#性感*裤型#阔腿裤*裤款式#绑带*裤口#开叉", "summary": "一款舒适有型的涤纶阔腿裤;富有心机感的腰部,将自然打褶与同色系绑带相结合,系上则贴合腰线并收腹显瘦,为纤腰的展现赋予了柔美的韵味;裤脚处的开叉设计,裸露美腿并凸显性感的魅力,使自信的漫步间洋溢着个性洒脱的时髦范。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#蕾丝*图案#蕾丝*衣样式#外套*衣门襟#无扣", "summary": "这款蕾丝外套,采用宽松版型,完美包容各种身材。显瘦不臃肿,精选优质蕾丝料,柔软亲肤,穿着舒适。蕾丝花纹装饰全身,温柔细腻,彰显优雅女人味。无扣设计,方便穿着之余,轻松打造个人气场,走路带风,整体时尚显气质。"} +{"content": "类型#裙*版型#显瘦*风格#性感*图案#线条*裙型#抹胸裙*裙型#大裙摆*裙腰型#高腰*裙长#连衣裙*裙款式#波浪", "summary": "性感的抹胸设计,加上修身的版型,使得连衣裙能很好的展现出女性优美的上身线条,增添几分妩媚感。高腰大裙摆设计,使连衣裙上身效果十分显腿长,让女性的身材更加完美。裙身的波浪纹设计,如的浪花,灵动而又美丽。"} +{"content": "类型#裙*图案#渐变*裙下摆#荷叶边*裙长#连衣裙*裙袖长#长袖*裙款式#绑带*裙款式#收腰", "summary": "这款连衣裙将精致的品位展现的淋漓尽致,衣襟,长袖及裙摆由荷叶边细心装点,打造出动感的飘逸,上身后随着举止舞动,与众不同。收腰的绑带完美将腰身展现,体态尽显。渐变的色系使得裙装变得有光泽,灵气十足。"} +{"content": "类型#裙*颜色#白色*颜色#红色*风格#复古*风格#青春*风格#清新*图案#蝴蝶结*图案#复古*图案#线条*图案#印花*裙腰型#高腰*裙长#短裙", "summary": "上衣铺满了红色印花图案,且图案多样,丰富靓丽,清新优雅。红色蝴蝶结,在优雅的基础上增加了一丝俏皮可爱,更显青春活泼,喇叭袖口,繁复多样,显出复古基调,时尚大方。白色高腰短裙,拉长腿部线条,让身材比例更加完美,整个人青春优雅。"} +{"content": "类型#裙*材质#蚕丝*材质#蕾丝*图案#蕾丝*裙长#连衣裙*裙款式#勾花镂空", "summary": "连衣裙,将浪漫的镂空与蕾丝相如何是女神,衣橱不可缺少的单品。经典的白,打造的薄纱真丝裙,让你用最轻松的方式展现优雅的气质。"} +{"content": 
"类型#上衣*图案#印花*图案#撞色*衣样式#衬衫*衣袖长#长袖", "summary": "基础的长袖衬衫设计,是以简单的直筒版型设计,让你穿着多一点舒适大气的感觉。并且个性的细碎印花设计,配上时尚的撞色设计,让视觉多一点清爽效果。而顺滑的真丝面料,可以为你的穿着,带来多一点的亲肤舒适效果。"} +{"content": "类型#裙*材质#牛仔布*风格#复古*风格#简约*图案#复古*裙型#牛仔裙*裙下摆#开叉*裙腰型#高腰*裙长#半身裙*裙衣门襟#拉链*裙款式#拉链", "summary": "这款帅气的牛仔半身裙,采用了时尚的金属外拉链,简约实用,还给裙身增添了一份视觉层次。经典的高腰版型,视觉上拉长腿部比例。裙身微磨白工艺,展现了几分怀旧复古的韵味。开叉下摆,美观又个性,使行走更加舒适自由。"} +{"content": "类型#上衣*风格#日系*风格#简约*风格#工装*衣样式#卫衣*衣款式#口袋*衣款式#抽绳*衣款式#抽褶*衣款式#连帽", "summary": "外穿内搭皆可的廓形卫衣,采用简约日系工装风格,前片大口袋多包袋设计,可随身携带多个小物品。搭配个性的抽绳连帽,稍稍提高领口设计,具有防风保暖的作用。帽子和袖子融入褶皱设计,让版型更加立体有型。"} +{"content": "类型#上衣*材质#牛仔布*风格#街头*风格#运动*风格#潮*衣样式#卫衣*衣款式#破洞", "summary": "当破洞式的潮流从牛仔裤延伸到卫衣,它的潮流毫不逊色于破洞牛仔裤。这款运动卫衣运用破坏性的设计手法,在手肘处剪开两道口子,营造出感,让原本沉闷的卫衣增添了些许街头的感,让你潮的。"} +{"content": "类型#上衣*版型#显瘦*风格#简约*风格#朋克*衣样式#外套*衣样式#西装*衣款式#腰带", "summary": "都市女性必备的时尚单品就是西装外套了,利落的裁剪简约而又大气,挺括的面料质地更加凸显气场;微微修身的阔形完美的展露出优美的身姿,及臀的长度,展露出修长的双腿,倍显高挑,加上独特的圆环打孔腰带装饰,增添几分不羁的朋克范。"} +{"content": "类型#裙*版型#显瘦*版型#立体剪裁*材质#雪纺*图案#碎花*图案#线条*裙下摆#开叉*裙衣门襟#系带*裙款式#不规则*裙款式#收腰", "summary": "气质名媛碎花雪纺,透气舒爽。不规则裙型设计,修身有型,飘逸的裙摆落落大方。贴心的系带收腰又能凸显身材,造型简单洋气。开叉设计,也方便了行走,增添了更棒的时尚看点,上身还多了高挑感。整条裙子都是立体裁剪线条,版型非常流畅。"} +{"content": "类型#上衣*材质#针织*材质#网纱*颜色#纯色*风格#潮*风格#性感*图案#纯色*图案#拼色*图案#线条*衣样式#开衫*衣领型#v领*衣款式#拼接", "summary": "这款针织开衫,区别于以往纯色的开衫,采用独特的拼色设计,潮流感十足。大大的v领设计,拉长了颈部线条,非常显脸小。袖口一圈网纱拼接,给人以朦胧感,随着风随意摆动,性感迷人。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*颜色#深色*风格#街头*图案#字母*图案#条纹*图案#文字*裤型#直筒裤*裤型#阔腿裤", "summary": "sibyl以显瘦的深色基调打造的这款裤子,整体采用了直筒的阔腿裤剪裁,带来较为宽松且舒适的穿着效果。设计师为这款裤子的侧边点缀了红白条纹,配合字母的提花效果,显得炫酷且出彩,是充满了街头气息的裤装穿着之选。"} +{"content": "类型#上衣*风格#复古*风格#潮*图案#文字*图案#复古*图案#撞色*衣样式#卫衣", "summary": "由具有独树一帜设计风格的杜嘉班纳品牌出品的卫衣,采用经典人物形象点缀;以复古涂鸦元素为背景,轻松塑造出帅气新潮的型男造型;加之辅以撞色数字标识,显示出独特的年代感。"} +{"content": "类型#裤*材质#牛仔布*材质#水洗*风格#复古*风格#简约*图案#复古*图案#色块*裤型#直筒裤", "summary": "来自BRAND的这款牛仔裤,采用舒适的棉弹牛仔布料制作,柔软亲肤,穿着舒适;经典简约的直筒裁剪方式,勾勒出男士的硬朗身形;经过做旧的水洗技术处理之后,呈现的深浅色块显得独特而又个性,既营造出复古不羁的质感,又带来了率性时髦的穿搭体验。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*风格#复古*图案#格子*图案#复古*裙型#a字*裙下摆#花边*裙领型#立领", "summary": 
"一款假两件格子a字裙,假两件的设计,更显新颖别致。格子的设计,更添几分复古气息,花边立领的设计,甜美又灵动。宽松的版型设计,慵懒随性,同时起到了很好的遮肉显瘦的效果。"} +{"content": "类型#上衣*颜色#黑色*颜色#灰色*图案#印花*衣样式#卫衣*衣款式#连帽", "summary": "BRAND带来的这款连帽卫衣,以黑色为基调,就连前胸与后背的印花,也选用哑光黑与灰色妆点,营造风范。本次将三角进行了立体化处理,前胸与后背遥相呼应,彰显品牌一贯的lessismore的设计理念。"} +{"content": "类型#上衣*材质#针织*衣样式#开衫*衣门襟#拉链*衣款式#拼接*衣款式#拉链", "summary": "采用弹力的针织面料,柔软舒适松紧适宜。拉链开衫设计,方便穿脱时尚前卫。肩部网面拼接,透气吸汗。腰身两侧可开拉链设计,内里为网面拼接和肩部相互呼应,有整体感,造型多变。后颈部用心的小细节设计,可调节式织带显独特个性。"} +{"content": "类型#上衣*版型#宽松*颜色#黑白*颜色#绿色*风格#简约*风格#潮*图案#字母*图案#文字*图案#印花*衣样式#卫衣*衣款式#连帽", "summary": "自带潮流个性的连帽卫衣可以说是人人必备,所以炉品这次满足需求。出了这么一款连帽卫衣,采用宽松版型,让其穿着舒适的同时更具随性潮范儿。然后采用基础的黑白灰三款底色以及少见的豆绿色,轻松适应多样风格穿搭。再加上胸前的字母印花,简约不失个性,轻松穿着独特潮范儿。"} +{"content": "类型#上衣*风格#民族风*图案#线条*衣样式#外套*衣领型#圆领", "summary": "经典的圆领设计,衬托脸部线条,民族花纹织带装点,散发别样的异域气息,在设计过程中。通过民族风图案全新设计,在袖管、衣襟进行连贯的点缀。串联起来的线条更显外套的利练之外,也赋予其独特的韵味,雅致而典雅的风格,让你在对自己的全新探索中收获更完美的自己。"} +{"content": "类型#上衣*版型#显瘦*颜色#深色*风格#休闲*图案#刺绣*衣样式#卫衣*衣领型#圆领*衣款式#螺纹", "summary": "一件卫衣,让你舒适度过微凉的春天。这件圆领卫衣,螺纹圆领,照顾到了穿着的舒适性,深色系配色更显瘦有质感。挚爱的立体花鸟刺绣,精致的设计感,更显时尚品味。休闲的卫衣版型,适合有个性的你穿着!"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*风格#复古*风格#性感*图案#复古*图案#线条*裤款式#口袋*裤腰型#高腰*裤口#开叉*裤口#微喇裤", "summary": "这是一款修身版型的牛仔裤,流畅的裁剪线条,打造出的开衩设计,带有轻柔飘逸的质感,让你在帅气与优雅中随意的切换,还不失性感魅力。经典的喇叭裤造型,演绎复古时尚。高腰的设计,凸显腰线的位置,视觉上秒变大长腿。对称性的斜插口袋,方便放置物品。"} +{"content": "类型#裤*材质#棉*材质#牛仔布*材质#混纺*裤款式#破洞", "summary": "设计很赞的一款磨白破洞牛仔裤,选用棉混纺面料制作而成,呈现出帅气有型的上身效果,走在街上格外的亮眼。个性的磨白工艺搭配破洞设计,带给你非同寻常的穿着体验。"} +{"content": "类型#裤*版型#宽松*材质#混纺*裤型#阔腿裤", "summary": "阔腿裤腿视觉上有着一气呵成的流畅感,留有宽松的余量,行走间自在如风。高密实的棉涤混纺面料,布料平整挺括度好,穿着时自然板正有型。立身剪裁上身收腰提臀,修饰身型的同时不会显胯宽。带有几分时光的,百搭不挑人。"} +{"content": "类型#裤*材质#牛仔布*颜色#浅蓝色*图案#线条*裤长#九分裤*裤型#直筒裤", "summary": "这款极具帅气男友力的牛仔裤,以浅蓝色调渲染,更加的百搭时尚,同时展现了青春活力感。直筒的裤型设计结合九分裤长剪裁,能够更好的修饰腿部的不足。拉长腿部线条的比例,显露出大长腿,补丁磨旧的设计,增添设计看点,展现帅气个性。"} +{"content": "类型#上衣*材质#雪纺*颜色#黑色*颜色#黄色*风格#休闲*图案#印花*衣样式#衬衫*衣领型#立领*衣款式#绑带*衣款式#不规则", "summary": "一件兼具休闲风和甜美气质的雪纺衬衫。明亮的黄色象征着少女的满满朝气,加上黑色的不规则印花星星点点地分布在垂坠感极好的面料上,顿时蔓延出几分慵懒风情。领口的部分别出心裁,把绑带和立领结合在一起,简直仙气十足!"} +{"content": "类型#裤*版型#宽松*材质#牛仔布*颜色#黑色*风格#休闲*图案#线条*裤长#短裤*裤型#阔腿裤*裤腰型#高腰", "summary": 
"这款短裤采用黑色的牛仔风格,展现出自然随性的休闲韵味,也流露出女性内心的优雅与大气。宽松的阔腿裤型,将双腿的线条修饰的更加纤瘦有型。微微翻卷的裤边,流露出强烈的个性色彩。自然的高腰版型,将双腿拉伸的更加修长,从而打造高挑动人的身材曲线。"} +{"content": "类型#裤*版型#显瘦*裤型#直筒裤*裤款式#纽扣*裤腰型#高腰*裤口#微喇裤", "summary": "都是拖地的直筒裤型,但这条是有几长度的微喇叭高腰,腰围有两个纽扣,可以把小腹收起来,遮肉显瘦不挑身材。中高腰的版型穿着不会束缚,布料是西裤的那种,春夏秋冬都可以穿,不会热而且保暖,两种颜色,带着时装又冷酷的感觉。"} +{"content": "类型#上衣*版型#显瘦*风格#休闲*图案#条纹*图案#线条*衣样式#衬衫*衣款式#拼接*衣款式#腰带*衣款式#不对称", "summary": "这一款带有设计感的衬衣,穿在身上凸显女性的个性和时尚感。它采用了修身的版型,剪裁得体利落,能够贴合身体线条,带来非常合身的穿着效果。衣身采用条纹的拼接,粗细横竖条纹,搭配不对称的下摆,带来时髦的美式休闲风,而且方便凹造型。腰部搭配腰带,凸显腰身。"} +{"content": "类型#裙*版型#显瘦*材质#蕾丝*图案#蕾丝*裙型#鱼尾裙*裙下摆#荷叶边*裙长#连衣裙*裙袖型#喇叭袖*裙款式#拼接", "summary": "精美的蕾丝连衣裙拼接了双层喇叭袖,富有立体的层次美感。举手投足间也给人一抹灵动性,带有甜美减龄的气息,很有时尚感。同时,其鱼尾摆的设计能勾勒出美好的身形曲线,女人味十足。再加上,拼接的荷叶边使得裙身更丰富,有飘逸浪漫的感觉。而整款蕾丝连衣裙在穿着上很显瘦,又衬气质。"} +{"content": "类型#上衣*版型#宽松*颜色#黑色*风格#性感*衣样式#衬衫*衣领型#立领", "summary": "纯黑色颜色使用的这一件较为宽松版型设计的衬衫最大的设计亮点在于衣身上面采用的立领款式的设计哦,这样的款式的设计使得整一件衬衫给人一种很时髦很有个性感的气息,让人一眼看过去就很是喜欢。"} +{"content": "类型#上衣*图案#撞色*衣样式#衬衫*衣样式#外套*衣袖长#长袖*衣款式#拼接*衣款式#口袋*衣款式#纽扣", "summary": "黯哑沉稳的配色有素净的感觉,撞色为视觉上提升亮点。面料的拼接营造假两件的效果,像是长袖衬衫外套着背心,富有层次感,立体的口袋与纽扣设计提炼假两件的细节,看上去更加逼真。背心部分的面料挺括感强,使着装干练挺拔。"} +{"content": "类型#裤*版型#宽松*风格#复古*风格#简约*风格#休闲*图案#复古*图案#印花*裤长#短裤", "summary": "BRAND的这款短裤以极具有民族特色的波西米亚风印花为主,复古典雅,轻松凸显出优雅的气质和出众的时尚品味,经典的腰头设计,舒适亲肤。简约而不,大气的宽松版型,包容性很好,穿起来不仅很显轻盈。还洋溢着轻松休闲的气息。"} +{"content": "类型#裙*版型#立体剪裁*风格#简约*图案#线条*裙型#大裙摆*裙衣门襟#拉链*裙款式#口袋*裙款式#拉链*裙款式#收腰", "summary": "简约又气质大方的onepiece~收腰设计可以很好的勾勒出腰部线条大大的裙摆侧面有设计口袋用料足足的立体剪裁细细品味也不乏小细节感背后的拉链设计细节感很好"} +{"content": "类型#上衣*版型#宽松*风格#复古*风格#休闲*图案#复古*衣样式#衬衫*衣样式#外套*衣款式#口袋", "summary": "宽松休闲的bf款型,可以当作衬衫穿着更可以是一件开春小外套。缝接处用明线装点,增加细节,视觉上显得格外生动有趣。袖口扣子设计,俏皮又不失个性,结合胸前口袋的装饰,率性利落,带出浓郁复古的中性气息,帅气又拉风!"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "以淡淡的天蓝色打造的这款连衣裙,整体采用了长款的剪裁设计,配合飘逸的薄纱,显得较为灵动且迷人。裙子为四分袖的剪裁,结合朦胧的蕾丝花纹,呈现出较为精美且大气的穿着美感,是非常出彩的优质裙装单品。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "蕾丝是女人味的至佳代表元素,没有女人不爱蕾丝的,对于蕾丝连衣裙更是有无数丽人追捧和热爱着,它制造出来的气质——精致而优雅。这款连衣裙拿到手一眼就能看出其精美以及品质感。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": 
"这款两件套连衣裙,一眼就能看到清晰的lace,视觉上有凹凸的感觉。几种不同的蕾丝花纹出现在同一件裙子上,充满新鲜感和设计撞击,孕期可以穿,产后哺乳期也方便,上身雅致有腔调,带一点俏皮的女人味。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "性感美腻的一款连衣裙,见到的第一眼就想把所有美好的形容词都用在它身上。重工水溶蕾丝裙,大朵大朵的花型,立体感超强,有筋骨,有挺度,柔韧,自然溢出女人香气,非常吸引人,非常惊艳眼球!"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "一袭优雅大气的蕾丝连衣裙!蕾丝能穿出高品质感,真心不简单!专门到大工厂ding制而来的,优美大气的蕾丝花纹,不同于市面上普通常见的那种花纹,这款很有特色!有质感有风骨,耐品耐看~小立领,柔美的睫毛边,肌肤,风雅撩人。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "蕾丝连衣裙对于女生的吸引更是加倍的,繁复精美的蕾丝天生就拥有着优雅和仙气,这种气质是深入骨髓也是无可复制的,它也似乎充满着魔力,勾住人们的眼球让所有人为之倾倒。加之蕾丝面料触感细腻光滑,更加衬托出女人独特的个人魅力。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "温柔的蕾丝连衣裙不失为小女生的必备,既能让你变得优雅十足,又充满着少女感。衣身采用淡雅的,整体透着一股子高贵典雅的感觉,瞬间提升气质。通透的蕾丝面料,为服装增添了神秘色彩,带来朦朦胧胧的浪漫魅力。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "这件时尚的连衣裙,面料采用的是蕾丝绣,工艺极其繁琐,精致又优雅。袖口与领口的睫毛边细节,增添灵动感,时尚而且不失浪漫。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "精美又迷人的蕾丝钩花,悄悄地盛开在那迷人的连衣裙之上,既有着迷人得视觉效果,又能让的宝贝展现出十足的公主范,确实很美妙哦!单单看那一抹蕾丝钩花,就展现出了熟精湛的工艺,既能够凸显不凡气质,又能缔造独一无二的美感。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "重工定织的蕾丝面料,不同的碰撞,真的让人惊艳,特别清晰的花型,片片花朵风情相连,浪漫中带奢...花纹立体精致,肌理细腻,手感柔软干爽。shou先从面料材质把确保这款蕾丝连衣裙的高品质感。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "春天到了。仙女们都换上了美美的连衣裙,经典充满魅力的蕾丝元素怎么可以少呢,梦幻精致的水溶蕾丝,带有非常甜美的减龄皱褶裙摆,满满少女心。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "美好的春夏季节,可以用一款美美的蕾丝裙来应景,它会穿出你的精致与优雅,让你美得独特与别致。这款裙子上的蕾丝,具有很好的肌理感与挺括度,层次立体丰富,仿佛雕刻上去的一样,蕾丝的存在,让连衣裙别有风情与韵味。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "唯美动人的连衣裙,穿在身上总是能够彰显出迷人的视觉效果,优雅大气个性别致,造就出了非凡的气质,显得魅力非凡。配以精美大气的蕾丝钩花设计,随时随地凸显出了迷人的视觉效果,彰显出十足的时尚魔力。优雅吸睛的蕾丝钩花还能够彰显出无与伦比的神奇身材,缔造出婀娜的美感。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "蕾丝连衣裙对于女生的吸引更是加倍的,繁复精美的蕾丝,天生拥有着优雅和仙气,这种气质是深入骨髓当中无可复制的,它也似乎充满着魔力,勾住人们的眼球,让所有人为之倾倒。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": "作为小女孩喜欢的连衣裙,选用少女气质的樱花粉甜美婉约,是小公主们无法抗拒的色彩,不仅可以衬托宝贝那的脸蛋,还能展现出小女孩独有的俏皮与甜美。浪漫的蕾丝材质,让宝宝化身成为童话中的小公主。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙长#连衣裙", "summary": 
"这款连衣裙,运用了雾的特色理念,将其打造成朦胧般误入的仙女。蕾丝结合的裙摆,加上纯白色色彩基调,如同般高洁,穿上它,你就是神圣美丽的小仙女。"} +{"content": "类型#裙*版型#显瘦*颜色#黑色*裙长#连衣裙*裙袖长#无袖*裙领型#方领*裙款式#收腰", "summary": "非常素雅的纯黑色连衣裙,低调雅致的色彩显得朴素干净,能够很好地显衬肤色。加持无袖方领的剪裁大气利落,给人以明朗亲和的气质感。纯黑色还自带高贵典雅的格调,配上收腰的伞型裙身更是亮眼出彩。显瘦修身的收腰伞摆蓬松轻盈,让上身效果非常有型。的衣襟细节别致出挑,工整精致的包布圆扣保证了整体视感的和谐精致。"} +{"content": "类型#裙*版型#显瘦*颜色#黑白*图案#条纹*裙型#直筒裙*裙衣长#常规", "summary": "比基础款的条纹羊毛衫,更加时髦有趣!常规厚度,内搭外穿都ok,舒适实穿经典优雅的黑白条,直筒修身版,款式简洁耐看,配上细羊毛纱的高弹力,舒适,裹出曲线身材。"} +{"content": "类型#上衣*颜色#白色*颜色#纯色*颜色#绿色*风格#简约*图案#纯色*衣样式#衬衫", "summary": "此次推荐的这款衬衫,就显得有些简约大方。淡雅的纯色系版型配合整个立体廓形剪裁,使得衬衫带有一份低奢的故事感,很精致亦不失时尚。衬衫拥有抹茶绿与纯净白两款选择,酵洗的白色与的绿色,这两款衬衫的颜色表现力都带有自己的味道,淡雅值得你去细细。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*风格#复古*风格#文艺*风格#知性*风格#清新*图案#复古*裙领型#圆领", "summary": "宽松的版型,穿着遮肉显瘦,自然复古的色调打底,只用看一眼就感觉拥有清新的心情。精致的圆领设计,显得知性文艺。搭配长款的轻薄裙摆设计,走起路来飘逸感十足,仙气满满,突显女性端庄委婉的一面。"} +{"content": "类型#裙*版型#显瘦*图案#撞色*裙型#百褶*裙型#a字*裙腰型#高腰*裙长#半身裙*裙衣门襟#拉链*裙款式#拉链", "summary": "腰头钉扣的装饰,精致有型大方。后身拉链的设计,穿着舒适方便。青春少女款百褶裙,让你的小公举气质随便锋芒毕露。这款a字型的半身裙,减龄可以打个高分呢。高腰中带有修身效果,让你的身体看点很足,拉长身高视觉感,裙身的彩色纹理带来时兴指数。打造撞色般的效果,视觉上塑造的震动美感。"} +{"content": "类型#裙*版型#显瘦*图案#撞色*裙型#百褶*裙型#a字*裙腰型#高腰*裙长#半身裙*裙衣门襟#拉链*裙款式#拉链", "summary": "立体绒布暗花的设计,精致美观。隐形侧拉链的设计,穿脱方便。青春少女款百褶裙,让你的小公举气质随便锋芒毕露。这款a字型的半身裙,减龄可以打个高分呢。高腰中带有修身效果,让你的身体看点很足,拉长身高视觉感,裙身的彩色纹理带来时兴指数。打造撞色般的效果,视觉上塑造的震动美感。"} +{"content": "类型#上衣*材质#雪纺*风格#简约*图案#蝴蝶结*图案#线条*衣样式#衬衫*衣门襟#系带", "summary": "这条蝴蝶结衬衫裙选用的雪纺面料,上身非常细腻舒适。领口是简约的衬衫领设计,彰显独立干练的女性气质。还有巧妙的腰间系带设计,可以优化身材比例,拉长腿部线条。裙子下摆设计了双侧开叉,走起路来有若隐若现的感觉,非常轻柔浪漫。"} +{"content": "类型#裙*版型#宽松*版型#立体剪裁*风格#复古*图案#复古*图案#电影*图案#线条*裙长#连衣裙*裙衣门襟#系带", "summary": "想尝试电影里的日式和服,又担心。试试这条简化改良版,日式风复古连衣裙。面料凉爽飘逸,前后都采用放大的设计,延伸颈部线条。沉稳的网格设计,给人古风素雅的感觉。立体剪裁的宽松大袖子,十分有设计感,仿佛自来。加上经典系带,系出优雅自信。"} +{"content": "类型#上衣*图案#线条*图案#印花*衣样式#卫衣*衣款式#螺纹*衣款式#抽绳*衣款式#连帽", "summary": "来看看这件卫衣在设计上采用了经典的抽绳连帽设计,可以根据自己的需求随意的调节造型,并且修饰颈部线条,时尚大方。再加上后背的闪电大logo印花,看上去造型独特,彰显出自身的张扬个性,与浓郁的时尚色彩。并且采用弹力螺纹袖口,舒适贴肤,更加保温。"} +{"content": "类型#上衣*图案#线条*图案#印花*衣样式#卫衣*衣款式#螺纹*衣款式#抽绳*衣款式#连帽", "summary": 
"本款卫衣在设计上采用了经典的抽绳连帽设计,可以根据需求随意的调节造型,并且修饰颈部线条,显得时尚大方。后背的教堂贱猫印花,恶搞趣味浓郁,凸出自身的时尚品味和自身的张扬个性。弹力螺纹袖口,保证手腕活动自如。精致的袋鼠兜装饰,造型独特,美观实用。"} +{"content": "类型#裤*颜色#纯色*图案#纯色*图案#线条*裤型#阔腿裤", "summary": "纯色的配色不免会让人觉得有些单调,但这款阔腿裤只采用了一种颜色,看上去也丝毫不会显得单调。没有了色彩的交织,不过采用干脆利落剪裁工艺制作的它,亦拥有了流畅线条和挺括阔腿裤型,这样的裤子包容性强大,也非常具有魅力。"} +{"content": "类型#裙*颜色#黑色*颜色#深色*风格#性感*图案#撞色*裙腰型#高腰*裙长#连衣裙*裙款式#拼接*裙款式#腰带", "summary": "这款连衣裙最吸引人的地方就是撞色拼接的设计,不仅打破了深色裙身带来的沉闷感,还点亮了整体的造型。搭配上高腰版型,视觉上又拉伸了身材比例,尽显女性高挑个子。而且点缀上黑色的腰带,还能轻松凸显出性感的小蛮腰,女人味十足。"} +{"content": "类型#裙*裙下摆#荷叶边*裙长#连衣裙*裙款式#勾花镂空", "summary": "极具设计感的连衣裙,后背处采用了镂空处理,在腰线和两边都加了灵动的荷叶边设计,增添裙身的俏皮感,又能适当的修饰下背部的肉肉,展现曼妙身姿曲线。"} +{"content": "类型#上衣*颜色#白色*颜色#金色*图案#线条*衣样式#衬衫*衣领型#v领*衣款式#钉珠*衣款式#亮片", "summary": "衬衫纯净白色调,半开v领自然贴合,修饰迷人的脖颈线条。领子金色边装饰,顺延出长长的垂落。后背及胸前鸟儿图案,配色多彩丰富。亮片钉珠立体装点,轻松提升整体质感。"} +{"content": "类型#裤*版型#宽松*材质#混纺*材质#纤维*风格#街头*风格#简约*风格#休闲*风格#青春*风格#性感*裤口#小脚", "summary": "如今的休闲裤将经典与时尚结合,以束脚的工艺诠释着青春的率性感,同时街头感十足。这款由马克华菲推出弹性休闲束脚裤,采用经典的纤维混纺面料制成,整体的舒适度非常赞,穿着弹力十足。加之简约的小宽松版型的设计,穿着不挑身材,适合更多种身形的型男进行选择。"} +{"content": "类型#裙*材质#蕾丝*风格#性感*图案#蕾丝*裙腰型#高腰*裙长#半身裙*裙衣门襟#拉链*裙款式#拉链", "summary": "蕾丝剪裁,在下摆自然的流出须边,犹如性感的睫毛。下摆的四片式做法,让半裙在行走间充满了灵动的韵律。干净工整的高腰头设计,使得穿着效果更显高挑。侧边的隐形拉链,不露,保持着简洁大方的完整造型。"} +{"content": "类型#裤*材质#牛仔布*裤长#短裤*裤腰型#高腰*裤口#毛边", "summary": "牛仔裤总是女孩子们非常关注的裤子款式之一。高腰的牛仔短裤,可以拉长女性的身材曲线,让自己的双腿变得更完美哦。再利用毛边进行点缀,也可以彰显出自己的随性美感呀,不用担心自己穿着很普通啦。又利用防走光的设计,更可以给自己带来保守气质哦。"} +{"content": "类型#裙*风格#复古*风格#休闲*风格#潮*图案#复古*图案#撞色*裙型#百褶*裙长#连衣裙*裙款式#拼接", "summary": "可以给人眼前一亮的假两件连衣裙,裙身的上半部分为休闲随性的卫衣样式,裙身则采用了灵动飘逸的百褶裙摆拼接,毫不费力就可以穿出新潮时髦的层次感,简直就是懒癌患者们的福利款!优雅浪漫的百褶裙摆还小心机地加入了个性的撞色设计,瞬间让整体造型的色彩感更加地丰富显眼,很有复古的feel,而且还衬得人整个人很有气质,推荐啦~"} +{"content": "类型#裙*材质#蚕丝*颜色#纯色*颜色#深色*图案#纯色*图案#渐变*图案#线条*图案#刺绣", "summary": "亲肤透气的真丝皱面料,手感柔软透气性好,穿着轻盈飘逸。裙身斑驳的渐变染色设计,很适合初春时分,比深色俏丽,比纯色风韵,有着万物苏醒的生命力。的花瓣领口温婉大方,展露柔美颈脖线条。裙身精美刺绣图案,时尚美观。过膝的版型微露脚踝,端庄优雅感立显。"} +{"content": "类型#上衣*版型#宽松*材质#针织*材质#混纺*风格#清新*图案#花色*图案#线条*衣样式#开衫*衣领型#圆领*衣袖型#插肩袖", "summary": 
"春日里不可或缺的基础款针织开衫,是一直都偏爱的简洁清新样式。细腻亲肤的腈纶混纺针织面料,触感舒适绵糯,扑在肌肤上软软的,没有明显的扎肤感。圆领设计尽显颈部线条。古着感的花色琥珀在门襟上,插肩袖柔化肩部线条,袖管宽松适宜,给手臂留足了空间,舒展间不会有束缚感。"} +{"content": "类型#裙*风格#知性*图案#碎花*图案#风景", "summary": "碎花元素的裙装一直是春夏里最美的时装风景,它有着一种魔力,无论你的多刚强,也会被它甜美温柔的的气息包围着,仿佛世界一下子就变得温柔起来。它有熟女的优雅知性,也不失小女生的甜美俏皮,让你在唯美中度过草长莺飞的漫漫春夏时光,融合飘逸的裙摆设计忍不住雀跃,翩翩起舞。"} +{"content": "类型#上衣*版型#宽松*颜色#白色*颜色#红色*风格#复古*风格#休闲*图案#条纹*图案#复古*衣样式#衬衫*衣领型#翻领", "summary": "衬衫是比较百搭的单品,能够给你增添精神的气质。以经典的白色调为底,加入红色的条纹纹路在其中,具有复古海军风的时尚美感,加上细微的小小翻领设计,让你穿起来更显率性魅力。宽松的版型设计具有休闲时尚的美感。"} +{"content": "类型#裤*版型#宽松*颜色#蓝色*风格#复古*风格#简约*风格#运动*风格#潮*图案#复古*裤款式#口袋*裤款式#松紧带*裤腰型#松紧腰*裤口#小脚", "summary": "蓝色基调带来一种历史复古气息,简约时尚又潮流,加上独特的裁剪,是它看起来个性又魅力十足。口袋设计不仅美观,而且方便携带物品。松紧带松紧方便实用,适合多种体型穿着。特别的板型设计,臀部和大腿部位一般为宽松剪裁,而在小腿至裤脚部分则慢慢收窄,最大特点便是裤脚位置采用束脚设计,运动风和时尚风兼得。线口缝制细密,做工精细,整体看起来潇洒感十足。"} +{"content": "类型#裙*版型#宽松*风格#文艺*裙型#仙女裙*裙领型#娃娃领*裙款式#抽绳*裙款式#收腰", "summary": "一款优雅文艺的仙女裙,浪漫的星星印花布满裙身,穿着灵动飘逸,仿佛就是不被世俗污染的林间仙子;独特翻边娃娃领,甜美可人的造型,让每个女孩子都怦然心动;宽松的版型,即使是身材丰腴的mm也能轻松驾驭;另配有抽绳收腰,轻松勾勒出妙曼身姿。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#复古*图案#复古*衣样式#风衣*衣款式#绑带", "summary": "这款风衣是属于宽松直筒版型,因此上身显瘦又遮肉。与众不同的是,这款风衣的斜襟式衣领和腰间的绑带设计充满名媛复古范,非常的衬托气质。后背的交叉绑带装点为点睛之笔,更显优雅复古。"} +{"content": "类型#裙*颜色#藏蓝色*风格#知性*裙长#连衣裙*裙袖型#公主袖*裙款式#镶钻*裙款式#钉珠*裙款式#抽褶", "summary": "成熟优雅的藏蓝色连衣裙衬托出女性非凡的气质,加上遍布的钉珠与镶钻点缀,彰显出高级华丽的设计美感,吸睛满满。别出心裁的褶皱公主袖设计优雅大方,随着手臂摆动尽显温婉知性的迷人姿态。"} +{"content": "类型#上衣*图案#字母*图案#格子*图案#文字*图案#线条*图案#印花*图案#撞色*衣样式#衬衫*衣领型#立领*衣款式#纽扣", "summary": "这件印花立领衬衫,在洁白的底色上以撞色字母印花装饰,各字母之间以线条格子图案,使衣衫极具时尚大方的感觉,直筒的版型设计,修饰身材的同时,也能有一定的包容性,能有效的遮掩小肚上的赘肉,后背领口处开叉设计。以纽扣开合,方便穿脱。"} +{"content": "类型#裙*材质#蕾丝*颜色#白色*颜色#浅蓝色*风格#复古*风格#宫廷*图案#格子*图案#蝴蝶结*图案#复古*图案#蕾丝*裙长#连衣裙", "summary": "来自西班牙的的连衣裙,延续了欧洲宫廷的复古风格,用暗红色与浅蓝色的格纹相衬,再配以前片的白色蕾丝和优雅的蝴蝶结点缀,让古典主义与现代时尚相互碰撞,凸显出小女孩俏皮的气息,造就了优雅的小公主形象。"} +{"content": "类型#上衣*颜色#金色*风格#性感*衣样式#西装*衣领型#一字领*衣门襟#三粒扣*衣款式#吊带", "summary": "挺括有型的西装版型,萦绕出一股凌厉干练的职业范儿。为了避免强势而显得,巧妙的加入了金色三粒扣,甜美可爱,软化了硬朗外形。经典的一字肩剪裁,彰显柔美锁骨,性感撩人。加上吊带的设计,更是凸显圆滑的肩部曲线。尽显优雅气质。"} +{"content": "类型#上衣*版型#显瘦*材质#棉*材质#蚕丝*衣样式#衬衫", "summary": 
"衬衫采用棉和桑蚕丝组合的面料材质制作,具有吸湿性和保湿性好以及亲肤透气和手感柔软光滑等完美优点。能给穿者带来每时每刻都是好心情的美感体验哟。直筒的版型设计,不仅能给人带来满满的修身显瘦视觉冲击感。同时又是完全不挑身材不挑人穿着的哟。"} +{"content": "类型#上衣*版型#宽松*颜色#白色*颜色#红色*风格#街头*风格#复古*风格#简约*风格#休闲*图案#复古*衣样式#外套*衣门襟#拉链*衣款式#拉链", "summary": "这一季度BRAND的秋冬新款外套,依旧融合经典三道杠系列,打造十足复古优雅魅力。这款外套配色上选择了热情又带有复古韵味的红色暗纹设计,碰撞出优雅简约时尚质感,白色三道杠点缀在袖部,更为外套添加经典复古风潮。白色的拉链与胸前的三叶草logo互相呼应,整体配色不会过多,又不至于单调,是足够日常的休闲造型。宽松开襟造型搭配柔软保暖的舒适质感,实属轻松满足秋冬需要的街头造型。"} +{"content": "类型#上衣*材质#牛仔布*颜色#白色*风格#青春*图案#线条*衣样式#风衣*衣样式#外套*衣款式#口袋*衣款式#收腰", "summary": "特别适合春天的一件长款风衣外套,采用到了自带减龄感的牛仔材质,与众不同的白色加上双插口袋更是带来了挡不住的青春感,有着收腰束带的装饰更加凸显纤细的腰身线条。"} +{"content": "类型#上衣*材质#网纱*颜色#白色*衣样式#衬衫*衣领型#翻领*衣门襟#单排扣*衣款式#腰带", "summary": "白色衬衫连衣裙配上抹胸网纱裙子,两件套叠穿的方式很时髦。腰间配上腰带的设计带来时髦的气息,加上网纱下摆与白色内衬凸显丰富的层次效果,透露出白皙的肌肤,很迷人。前面配上单排扣点缀其间,开合简单,穿脱起来很方便。精致的衬衫翻领打造出小巧别致的脸蛋。"} +{"content": "类型#上衣*风格#街头*风格#复古*风格#休闲*图案#复古*图案#印花*衣样式#卫衣*衣领型#圆领*衣袖型#落肩袖", "summary": "经典的圆领设计时尚百搭,手臂两侧经典印花复古有个性。明快亮丽的色调轻松打破了沉闷,带来一丝自由愉悦的气氛。胸前的英文印花点缀着衣身,凸显出满满年轻活力氛围,释放出年轻人任性追逐时尚的态度。休闲时尚落肩袖穿着舒适,印花与卫衣的结合在视觉上呈现出时尚街头风范。"} +{"content": "类型#裙*风格#复古*风格#文艺*风格#简约*风格#清新*图案#碎花*图案#复古*图案#线条*裙型#大裙摆*裙下摆#垂坠*裙长#连衣裙*裙袖长#无袖*裙款式#收腰", "summary": "通常度假风的连衣裙总是有小仙女的,这款连衣裙更是融入了小碎花,清新之余也赋有一种文艺风,围裹样式的v型领口更是带有复古气息,简约的无袖设计还方便了手臂活动,收腰设计配以垂顺自然的大裙摆,修饰腿部线条同时也更具穿搭的灵动美感。"} +{"content": "类型#裙*版型#显瘦*颜色#粉色*风格#清新*风格#性感*裙型#直筒裙*裙长#连衣裙*裙领型#一字领", "summary": "春天来了,夏天还远吗?是时候准备一条美腻的连衣裙啦。粉色连衣裙是很多美少女一样就会爱上的单品,浅粉色不会给人过于甜腻的感觉,却又透着女孩的清新浪漫味道,很显气质。一字领的设计,大胆的露出肩颈曲线,带着几分性感,好身材显露无疑。直筒版型,肉肉女孩也可以穿哦,非常显瘦。"} +{"content": "类型#裤*材质#棉*材质#牛仔布*风格#性感*图案#字母*图案#文字*图案#拼色*裤型#直筒裤*裤款式#绑带*裤口#开叉", "summary": "一款舒适透气的棉质牛仔直筒裤;富有趣味感的裤身处,将拼色的字母绑带交叉而系,新颖别致中透着个性的韵味,为时髦的穿搭增添了俏皮的活力范;侧边对称的大开叉设计,轻松裸露出诱人的美腿,使自信的漫步中洋溢着性感帅气的风范。"} +{"content": "类型#裤*材质#纤维*裤长#九分裤*裤型#哈伦裤", "summary": "使用聚酯纤维为主的材质制作,手感舒适柔软,亲和肌肤,加入少量的粘胶纤维,缔造出色的肌肤穿着效果。腰部腰袢设计,可搭配腰带穿着,增添时尚气息。九分的裤长结合哈伦的版型,穿着舒适美观,带来简洁利落穿着感受。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#简约*衣样式#外套*衣领型#圆领*衣袖长#无袖", "summary": "圆领设计,时尚大方,优雅舒适有型。简约无袖的设计,尽显干练利落,简洁圆领设计,剪裁简洁大气,衬托出一丝禁欲气息,轻松优雅,宽松自在,遮肉显瘦,舒适还保暖,单穿或者搭配大衣外套都是经典,优质面料,舒适透气保暖,不起球不易变形,耐穿手感舒适顺畅。"} 
+{"content": "类型#裤*材质#混纺*风格#运动*风格#休闲*图案#刺绣*裤款式#抽褶", "summary": "运用了带有弹性的面料,还有着抗褶皱的功能,因为其挺括性,可以衬托出男士修长的腿部轮廓。同时,轻薄的款型带着混纺的材质制作出来,更适合于休闲运动的场合,笔挺的造型衬托出腿部的修长感觉。裤兜处的精致刺绣更是彰显出男士独有的魅力。"} +{"content": "类型#上衣*版型#显瘦*风格#运动*衣样式#卫衣*衣款式#连帽", "summary": "刚刚步入春季,温度也随之上升,这时候就需要保暖耍酷兼具的服装来衬托自己,卫衣就是最理想的首选装备。这款具有运动气息的卫衣,采用连帽设计,既能防风保暖,还能将自己率性的一面展现出来;修身的版型,除了能完美的构造身材,还能轻松搭配各种服装,那时尚不仅仅展现一面。"} +{"content": "类型#上衣*材质#蚕丝*材质#网纱*材质#蕾丝*颜色#肉色*风格#性感*图案#蕾丝*衣样式#衬衫*衣领型#v领*衣款式#拼接", "summary": "这款衬衫在布料的选择上下了一定功夫,真丝双绉布料,清透飘逸。双面细微而均匀的纹理,正是因为纹理的存在,使得布料带有一点哑光的质感,素净典雅。深v领口,搭配上门襟处的精致蕾丝,性感迷人,女人味十足。门襟处拼接的肉色密网纱,柔软亲肤,又提供了很好的私密感。袖口处的拼接蕾丝给人一种小女生的可爱感。"} +{"content": "类型#上衣*材质#蚕丝*材质#网纱*材质#蕾丝*颜色#肉色*风格#性感*图案#蕾丝*衣样式#衬衫*衣领型#v领*衣款式#拼接", "summary": "真丝材质的衬衫,结合精美绝伦的设计感,更能凸显造型感。性感的v领造型,修饰颈部曲线更纤长,更骨感。门襟处拼接肉色网纱,若隐若现的性感意味,更具致命诱惑力。领口袖口处的蕾丝拼接,充满律动美感,女人味儿十足!"} +{"content": "类型#裙*材质#羊毛*风格#文艺*裙款式#不规则", "summary": "这款裙子非常的精致,选用的是欧洲的面料,很细腻柔软的全羊毛,手感很舒适。裙摆不规则的设计也是格外的有味道,举手投足更有飘逸灵动的韵味,俏皮又减龄。文艺有质感的深灰色,行走时露出的小腿白皙又修长。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙型#a字", "summary": "版型的处理上是采用了经典的a字版型设计,通过精湛的剪裁工艺,彰显出高级的品质感。裙身表面的蕾丝花纹,精致美观,上身穿着很显气质。内衬里布的贴心设计,追求时髦设计之余更加注重贴心细节,安全防走光,兼顾细节感与设计感。"} +{"content": "类型#裙*材质#蕾丝*图案#蕾丝*裙型#a字", "summary": "这款蕾丝裙的设计亮点在于蕾丝面料以及a字裙摆的设计,采用优质蕾丝面料提升了裙子的整体品质,同时a字版型修饰身材,遮住赘肉,减龄的设计,灵动俏皮,凸显甜美气息,是非常别致的设计亮点。"} +{"content": "类型#上衣*版型#显瘦*风格#街头*风格#复古*风格#文艺*风格#清新*图案#格子*图案#复古*衣样式#衬衫*衣款式#口袋*衣款式#对称", "summary": "这款衬衫采用了清新的格纹风格,经典且不过时,能够彰显文艺复古腔调。前幅搭配了对称的翻盖口袋装饰,又以个性的贴布点缀,使得衬衫不显单调和乏味,更具街头感。衬衫采用的是修身的剪裁设计,能够契合身形轮廓,塑造出挺拔俊朗身姿。"} +{"content": "类型#上衣*颜色#绿色*风格#性感*图案#条纹*图案#线条*衣样式#针织衫*衣袖长#五分袖", "summary": "针织衫采用了绿色的条纹图案,起到视觉上的冲击对比。竖条纹图案能够从视觉上拉伸身材比例,配合心领的设计。展示性感锁骨的同时也可以起到瘦脸的效果,恰到好处的五分袖长,露出白皙修长的手部线条,举手投足之间不失去优雅。"} +{"content": "类型#上衣*材质#棉*风格#简约*图案#撞色*衣样式#衬衫*衣袖长#长袖*衣款式#拼接", "summary": "一款简约的基础款长袖衬衫,版型很时尚也很百搭,整体以简单干净的配色为基调,衣身个性的撞色拼接设计增加了时尚感,显得与众不同,更是能带来一丝丝优雅浪漫的气息。在材质上精选优质的棉面料,穿着舒适透气。"} +{"content": "类型#上衣*版型#显瘦*风格#简约*衣样式#外套*衣门襟#一粒扣*衣款式#收腰", "summary": "帅气中不乏优雅气质的一款外套。精选优质的绵羊真皮面料,质感比较柔软,中和皮衣的硬朗,凸显女性的柔美气质。时尚的翻驳领设计,简约大方,更具造型感。简约的一粒扣收腰版型,修身显瘦尽显曼妙迷人的好身材。"} 
+{"content": "类型#裤*版型#宽松*材质#棉麻*颜色#米白色*图案#刺绣*图案#撞色*裤型#阔腿裤", "summary": "这条裤子,无论从色彩上还是穿着感上,考虑到季节的特点,从视觉与触觉上给人以清爽与舒适的感受。质地采用柔软又吸湿的棉麻,亲肤又好穿,透气柔软,上身舒适。裤身色彩选用柔和自然的米白色系,与裤前大面积的刺绣工艺撞色,清丽和谐,看起来颜色饱满,明朗大方。宽松的阔腿造型,隐藏小肉肉的同时更加随性。"} +{"content": "类型#上衣*版型#宽松*衣样式#衬衫*衣袖长#长袖", "summary": "宽松的样式,让这款衬衫能够拥有更好的修饰效果,轻松的修饰整体的身形,搭配上细条纹的样式,打造出更为随性的个性魅力,作为整体的设计亮点。超长袖的时尚元素,能够更好的与这款衬衫进行结合,时尚更富有优雅魅力,轻松的让整体更显与众不同。"} +{"content": "类型#裤*材质#棉*风格#简约*风格#青春*风格#清新*图案#印花*裤长#短裤", "summary": "很青春时尚的一款棕榈印花短裤,简约的版型设计,上身特别的时尚个性。清新的印花设计,好穿又舒适。纯棉材质设计,亲肤舒适,透气又清爽。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#蚕丝*材质#蕾丝*风格#清新*风格#性感*图案#刺绣*图案#蕾丝*衣样式#衫*衣袖型#喇叭袖*衣袖型#收口*衣款式#拼接", "summary": "仙气轻薄透视风的蕾丝衫,星星点点的绣花图案有种隐隐约约的含蓄美,不失温油清新小性感。面料精致,上面有一丝丝的blingbling银线镶嵌。的立体真丝绉面料,轻薄丝滑,很飘逸。门襟拼接了一层水溶蕾丝花边精致感满满。宽松的版型藏肉显瘦,任意身材都能驾驭。朦胧的灯笼收口喇叭袖也很加分,甜美迷人,举手投足之间多了一些飘逸感,浪漫唯美。"} +{"content": "类型#裙*版型#宽松*材质#蕾丝*图案#刺绣*图案#蕾丝*裙型#a字*裙型#蓬蓬裙*裙下摆#花边*裙长#连衣裙", "summary": "这款新春连衣裙采用了可爱宽松的a字版型。可以轻松修饰身材上的小缺点。公主一般的蓬蓬裙体现出小女人的可爱精致感。采用了手艺精湛的刺绣工艺打造出高端华美服装品质感。同时还运用了大量的蕾丝元素并将它用花边的形式呈现出来。给人一种高贵典雅的美感。"} +{"content": "类型#裙*图案#条纹*图案#刺绣*裙下摆#荷叶边*裙长#连衣裙", "summary": "连衣裙蓝白条纹,简洁清爽,又很有生动感,美的很实在。靓丽的刺绣花朵点缀蔓延,充满灵气。与条纹来了个碰撞,让视觉充满饱和,也让裙子多了一份浪漫基调。荷叶边,刺绣花朵点缀,增强轮廓感,更显灵动。彰显了女性的魅力。"} +{"content": "类型#上衣*版型#显瘦*风格#通勤*衣样式#马甲*衣长#中长款", "summary": "马甲既是一件可以通勤上班穿,又能日常逛街的拼时髦单品。中长款的设计,这样的长度衬托的比例很好,而且藏肉显瘦,各种身形都很适穿。"} +{"content": "类型#裙*图案#卡通*裙领型#圆领*裙衣门襟#排扣", "summary": "每个小女孩都有一个童话公主梦,充满童真与梦幻的色彩。灯芯绒背心裙,精致可爱的造型,简单的圆领配上背后排扣的点缀,增添细节质感,方便穿脱。衣襟前后的小捏褶设计,端庄大方优雅不刻意。尤其的裙摆出色彩艳丽的卡通贴布绣点缀,尽显天真俏皮的可爱童趣,妥妥的小公主一枚。"} +{"content": "类型#裙*风格#简约*图案#风景*图案#线条*图案#印花*裙长#连衣裙*裙领型#圆领*裙袖型#喇叭袖*裙衣门襟#系带", "summary": "素雅的浅粉色在炎炎夏日给你清爽的感觉。一款别致的连衣裙带你领略美丽风景,简约的圆领系带设计衬托出女性脖颈部线条和小巧的脸型,靓丽印花的点缀散发出浪漫柔美女性风情,喇叭袖凸显温婉而又不失时尚,裙摆设计行走间尽显女性自信和魅力。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*风格#性感*衣样式#衬衫*衣领型#v领", "summary": "衬衣控们快看过来,喜欢bf风的可不要错过这款咯。特意打造成宽松的款式,可以完美遮掩你的小肥肉,显瘦效果佳。让人眼前一亮的就是v领领口的设计,展现了你的脖颈曲线美之外更透露了小性感,迷人又可爱,同时非常百搭,适合不同场合的穿搭。"} +{"content": "类型#裙*材质#牛仔布*风格#知性*风格#青春*图案#刺绣*裙型#牛仔裙*裙长#半身裙", "summary": 
"真心美到cry的一款牛仔半裙!一点点知性,一点点优雅感,气质宁静迷人~运用今年大热流行的蝴蝶元素,一改之前硬朗帅气的设计,增添了很多女性化元素,重工蝴蝶刺绣,每只蝴蝶大小不同,栩栩如生,刺绣非常精美!有蝴蝶翩翩起舞的即视感,蝴蝶不是直接绣上去,而是做好之后再车上去,工艺着实够复杂!"} +{"content": "类型#裤*风格#简约*风格#性感*图案#线条*裤长#连体裤*裤款式#口袋*裤口#卷边", "summary": "生机勃勃的春季自然少不了中性连体裤,帅气又不失干练。柔软透气的面料,简约的v领设计拉长脖颈线条,露出纤细锁骨,时尚迷人。卷边设计修饰腿部曲线。胸部的对称口袋设计,起到了优化胸型的作用,增添性感韵味。"} +{"content": "类型#裤*风格#潮*图案#刺绣*裤长#九分裤*裤款式#破洞*裤腰型#高腰*裤口#翻折", "summary": "花朵刺绣这一时尚元素可是说是在各大秀场频频出现,稍加点缀就很有时尚的feel,这款裤子就采取了花朵刺绣,裤腿上的花朵刺绣栩栩如生,漂亮别致,很有春天的感觉呢。除了刺绣,还有潮流的破洞设计,个性前卫。裤脚做了翻边设计,九分裤的裤长,露出脚踝,高腰的设计,视觉上拉高腰线,很显高挑哦。"} +{"content": "类型#上衣*风格#性感*图案#线条*衣样式#西装*衣领型#v领*衣领型#翻领*衣款式#腰带", "summary": "西装式的大翻领从视觉上很好的拉长了颈部线条,凸显出女性干练大气的魅力,深v领完美得展现出女性饱满的上围,散发着十足的女人味。金属搭扣的腰带装饰很好的隐藏了腹部上的赘肉,勾勒出女性纤细迷人的小蛮腰,金属搭扣避免了整体造型过于单调,瞬间提升了整体气质。下摆开叉设计更显得性感妩媚。"} +{"content": "类型#裤*版型#宽松*颜色#纯色*风格#运动*风格#休闲*图案#纯色*裤款式#绑带", "summary": "这条裤子,采用简单的宽松款式,展示出一种休闲的运动风,穿起来给人一种舒适有型的立体感;长款的设计拉长了腿部的比例,简单的纯色色系,色泽靓丽又大方,给人不一样的视觉;腰间绑带的搭配,打破了常规设计,使用起来收缩自由,实用便捷;整体衬托出满满的轻松休闲。"} +{"content": "类型#裙*版型#宽松*风格#性感*裙型#鱼尾裙*裙领型#v领*裙款式#收腰", "summary": "这款收腰设计的鱼尾裙,让你的小蛮腰完美的展现,穿起来绝对吸睛。v领秀出性感锁骨的同时缩短了上半身的视觉长度,鱼尾裙的魅力在于它宽松的下摆,展现女生优雅和婀娜的体态。"} +{"content": "类型#裙*材质#棉*材质#网纱*颜色#白色*颜色#藏蓝色*风格#清新*图案#条纹*裙下摆#层叠*裙款式#拼接", "summary": "精选纯棉弹力面料,手感细腻舒爽,吸汗透气性能,特别适宜夏季穿着。采用新颖的水手服拼接浪漫纱裙的方式,呈现出不一样的唯美清新感觉。藏蓝色与白色条纹的搭配,为整件裙子赋予了更多优雅的意义。而水手领的运用,更成为视线中最美的焦点,完全展示出女孩的大方、活泼,对于远方的美好向往之情。小飞袖轻松甜美,搭配网纱层叠裙摆更显娇俏与可爱。"} +{"content": "类型#上衣*版型#宽松*材质#天丝*材质#混纺*风格#复古*图案#复古*衣样式#衬衫*衣袖型#灯笼袖*衣款式#拼接*衣款式#纽扣", "summary": "一款柔软清凉的混纺天丝衬衫;富有心机感的前后身处,将自然捏褶作为拼接,赋予身体宽松的余量,使自在的穿搭尽显时髦的趣味感;纽扣开合的灯笼袖设计,既能轻易遮盖不足又可修饰臂部,使自信的举手投足间蔓延着优雅大气的复古范。"} +{"content": "类型#上衣*版型#宽松*材质#针织*颜色#红色*衣样式#针织衫*衣款式#勾花镂空", "summary": "ur这款针织衫运用优雅的枚红色点缀衣身,结合独特的镂空设计,露出女性上身的优美轮廓,更显优雅气质美。宽松的版型包容性极强,能够迎合大众身材,轻松塑造慵懒随性范。最后再辅以柔软细腻的针织面料,打造绝对舒适的穿着体验。"} +{"content": "类型#裙*版型#显瘦*裙型#a字*裙腰型#高腰*裙衣门襟#系带*裙款式#不对称*裙款式#不规则", "summary": "第一眼看到就很喜欢的裙子,腰部时尚的系带设计,不只是很好的装饰,还能显腰身显瘦,高腰的a字版型,上身给人胸部以下都是腿的视觉感。裙摆不规则的设计,让腿部有加长的视觉效果,斜剪裁不对称裙摆烘托柔美气质。"} +{"content": "类型#裙*版型#显瘦*裙型#a字*裙长#长裙*裙款式#不规则", "summary": 
"BRAND的风格以时尚不失典雅著称,会出其不意打破常规,展现个性。这款半身长裙作为代表,以不规则的剪裁,为整体营造丰富层次,行走时带来轻盈灵动感,行走间飘逸生姿,诉说娴静柔美的气质。结合它a字版型的演绎,将女性的下半身完美遮盖,显瘦效果满分。"} +{"content": "类型#裙*材质#网纱*裙型#网纱裙*裙下摆#压褶*裙长#半身裙*裙款式#拼接*裙款式#不规则", "summary": "这款半身裙不同于其他网纱裙单一的版型,采用了不规则三层拼接,每一层的网纱都做了压褶处理,更好地展示了裙子的层次感,臀部有点大的妹子穿,可以遮住臀部的肉肉,亮点是腰部的腰封。腰封的也做了压褶处理,很好的修饰了腰身。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#牛仔布*风格#休闲*风格#潮*图案#刺绣*衣样式#外套*衣长#短款", "summary": "休闲款式的经典牛仔外套,清洗时髦的牛仔蓝简洁大气,短款衣身加上宽松版型,有着极其突出的显瘦气质。衣摆的毛边彰显出随性不羁的潮流魅力,而精致的刺绣图案更有着非常好的观赏性,帅气风格中透露几分软萌的少女心。"} +{"content": "类型#上衣*材质#针织*风格#复古*图案#条纹*图案#复古*图案#撞色*衣样式#毛衣", "summary": "让人眼前一亮的是很剑走的怪异时尚,撞色的元素作为时髦的主基调,你就能从平庸的风格之中脱颖而出。换上舒适的轻软针织,质感绝佳的毛衣分分钟就你的芳心。自带复古感的红蓝条纹,趣味间隔玩转色彩艺术!"} +{"content": "类型#上衣*风格#街头*风格#简约*风格#青春*衣样式#外套*衣领型#翻领*衣长#短款*衣门襟#系带", "summary": "微凉春日中,最清晰活泼的选择,少不了一件短款外套。这件翻领系带短款外套,衣身选用简洁大方的明线设计,更显十足简约风格。衣袖选择飘逸非凡的系带设计,长长系带更突显街头少女的灵动活泼。而经典大方的短款设计,显露腰身,更显靓丽青春。"} +{"content": "类型#上衣*版型#宽松*风格#休闲*图案#线条*图案#刺绣*衣样式#毛衣*衣领型#高领*衣袖型#喇叭袖", "summary": "这款毛衣属于非常宽松的版型,穿在身上很休闲,大气,包容感很强。小高领的设计,精致可爱不挑人。喇叭袖的设计,很好的修饰手臂的线条。刺绣花朵点缀袖子,举手投足间尽显迷人的魅力。"} +{"content": "类型#裙*版型#立体剪裁*图案#线条*裙腰型#高腰*裙款式#收腰", "summary": "开口可以显得脖颈修长,显得脸小,收腰的立体剪裁可以打造高腰线,视觉上显得身材比例更好。领口圆润的弧度上身之后让脸部线条看起来更加好看。坠感很好的裙摆不会太过沉闷,让你一秒变身大长腿。"} +{"content": "类型#上衣*材质#棉*材质#针织*颜色#纯色*图案#纯色*衣样式#开衫", "summary": "这款纯色的开衫采用的是优质针织面料,亲肤柔软,绝对适合宝宝稚嫩的小皮肤。全棉内里设计,守护宝宝整个冬天的温暖,俏皮的彩虹图案设计加上朴素的平针造型。让整件开衫变的生动有趣,带给孩子一个快乐的穿着体验。"} +{"content": "类型#上衣*版型#显瘦*风格#简约*风格#ol*风格#职场*图案#拼色*衣样式#衬衫*衣领型#翻领*衣门襟#系带*衣门襟#单排扣*衣门襟#双排扣*衣款式#抽绳", "summary": "这款衬衫简约素雅的风格加上高级的颜色,适合职场ol的日常搭配,精致的翻领搭配上双排扣的门襟,相比一般的单排扣显得更有设计感,扣子的拼色细节,精致又减龄。腰间内嵌抽绳,前中系带,收放自如,而且可以凸显腰线,非常显瘦、显高。这个抽绳系带的设计还增加了俏皮感,使衬衫显得不那么正式。"} +{"content": "类型#裙*材质#水洗*颜色#黑白*图案#印花*裙型#百褶*裙下摆#压褶", "summary": "这款裙子上有黑白沾染水洗留下的自然印记,看起来就像是泼墨的印花图案,具备年代感,一眼看过去就觉得舒服。而裙摆带点微微的百褶压褶处理,尽享垂感,行走间,飘逸动人。"} +{"content": "类型#裙*颜色#红色*风格#知性*图案#电影*裙下摆#压褶", "summary": "自带女主光环的一套,仿佛带入了90年代电影里。醋酸手感细腻温柔融入了藕粉拼红色更是微妙,请给春天来点“洋气”裙摆的压褶恰到刚刚好,层次感也凸显了出来,配温柔的藕粉基本款上衣更显得人知性温柔。两款无论是单穿还是组合都不失特色~"} +{"content": "类型#裙*图案#刺绣*裙型#a字*裙腰型#高腰*裙长#半身裙*裙领型#v领*裙袖型#喇叭袖", "summary": 
"干练而展现高雅气质的时尚两件套。温婉而动人的纯白色上衣,大v字领口喇叭袖设计,百搭而彰显优雅气息。搭配的高腰a字半身裙,深黑色的面料和精美的刺绣工艺,雕琢出精致冷艳的视觉美感。"} +{"content": "类型#裙*风格#性感*裙下摆#开叉*裙下摆#荷叶边*裙长#连衣裙*裙领型#v领", "summary": "一款时髦简单的新颖连衣裙,甄选优质高级的舒适面料,上身显得尤为亲肤和自然,同时自带大牌感的光泽感,微微显露轻奢气息。加以衣身的精致v领点缀,带出小女人的性感气息。同时甜美荷叶边点缀袖口和裙摆,加以开衩的设计,平添几分优雅气息,更添层次美感。"} +{"content": "类型#裙*材质#蚕丝*图案#条纹*图案#印花*裙长#连衣裙*裙款式#腰带*裙款式#抽绳", "summary": "这款抽绳连衣裙采用蚕丝的材质,非常的轻盈,运用竖条纹的印花方式,能给人视觉上的冲击,很好的拉长身体的比例。抽绳腰带的设计,更是将女性甜美气息更好展现出来,非常的不错。"} +{"content": "类型#上衣*风格#简约*风格#青春*风格#潮*风格#性感*图案#条纹*图案#撞色*衣样式#衬衫*衣领型#圆领*衣袖长#五分袖*衣款式#拼接", "summary": "这款衬衫采用经典的条纹拼接,不仅丰富了整体层次感,还显出了女性独特的气质。实用圆领款式,包边设计,均匀工整车线,凸显服装的时尚,新潮。撞色条纹拼接袖口,青春感十足,简约不张扬,低调却很有自我格调。俏皮的五分袖设计,视觉看起来性感而又不失端庄,别具一格的袖子设计,凸显出了服装的独特性。底摆做工精致,细条纹收边,不易变形,穿着舒适。整体给人感觉亮丽青春。"} +{"content": "类型#上衣*风格#简约*风格#青春*风格#潮*风格#性感*图案#条纹*图案#撞色*衣样式#衬衫*衣领型#圆领*衣袖长#五分袖*衣款式#拼接", "summary": "这款衬衫采用经典的条纹拼接,不仅丰富了整体层次感,还显出了女性独特的气质。实用圆领款式,包边设计,均匀工整车线,凸显服装的时尚,新潮。撞色条纹拼接袖口,青春感十足,简约不张扬,低调却很有自我格调。俏皮的五分袖设计,视觉看起来性感而又不失端庄,别具一格的袖子设计,凸显出了服装的独特性。底摆做工精致,细条纹收边,不易变形,穿着舒适。整体给人感觉亮丽青春。"} +{"content": "类型#上衣*版型#立体剪裁*材质#棉*衣样式#衬衫*衣领型#翻领", "summary": "经典衬衫版型,遵循布料肌理。立体剪裁,以翻领明门襟的经典造型、配合曲摆的现代人性化裁减,相得益彰,舒适的面料搭配精致缝纫线使成衣领型自然舒展、缝线部位平服工整、牢固耐磨,整体简单素面。面料是棉质的,手感舒适耐磨,单穿或者外搭都非常好看。"} +{"content": "类型#裙*颜色#深蓝色*裙款式#亮片*裙款式#收腰", "summary": "重工亮片裙,静谧的夜空,深蓝底,亮片蝴蝶,华丽丽的,穿上给人一种神秘的奢华感,更是流露出无以复加的梦幻气息,上身仿若,两根细细的肩带道出专属于女人的似水柔情,给人清爽感受的同时传递着打动人心的美感。收腰放摆的经典廓型,美好修饰女性的身材曲线!裙摆在透视纱的笼罩下,上身有一种朦胧梦幻缥缈的美丽。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*图案#格子*裙型#大裙摆*裙型#鱼尾裙*裙长#半身裙*裙款式#不规则", "summary": "这一款半身裙不规则裁剪的设计,穿着新颖别致自然出彩。不挑人的合体微宽松版型,包容性很好,藏肉于无形穿着显瘦。特别是迷人的格子装饰,丰富视觉自然减龄。鱼尾大摆,随风摇曳韵味十足。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*颜色#灰色*风格#街头*风格#清新*图案#条纹*衣样式#卫衣*衣长#短款*衣款式#抽绳*衣款式#连帽", "summary": "这款卫衣是经典的灰色款,色调清新柔和,上身更显肤色白皙!宽松的短款设计,对身材的包容性更大,上身遮肉显瘦。舒适的连帽设计,领口采用气眼抽绳的设计,上身实力减龄。衣身下摆和袖口处,采用彩色亮丽的条纹装饰,更显时髦率性的街头范儿!"} +{"content": "类型#裙*裙下摆#开叉*裙长#连衣裙*裙袖型#插肩袖*裙款式#拼接", "summary": "这款连衣裙别出心裁的与通透的梭织拼接,打造出轻盈飘逸宛如仙女般的气场,着实是惊艳四方。半高的领型,能更好修饰颈脖曲线,插肩袖型自带闲适风范,上身倍感轻松舒适,开叉的裙摆,若有若无的透出纤细的小腿,引人无限遐想,好不诱惑,行走间更显灵动十足。"} +{"content": 
"类型#上衣*版型#宽松*风格#简约*图案#字母*图案#文字*图案#线条*图案#刺绣*衣样式#衬衫*衣领型#圆领*衣门襟#系带", "summary": "衬衫采用经典圆领设计,优美的领部弧形线条,唯美动人。衣身正面设计字母绣花标贴,小设计大亮点,提升衬衣层次感。宽松的衣袖,以可拆卸的圆环系带装饰,造型新颖独特。简约直筒下摆,不挑人穿着,包容性佳。"} +{"content": "类型#裙*材质#蕾丝*风格#青春*图案#蝴蝶结*图案#蕾丝*裙长#连衣裙*裙领型#v领*裙衣门襟#系带*裙款式#拼接*裙款式#抽褶*裙款式#不规则", "summary": "v领设计的这款连衣裙,再于前襟加入褶皱流行元素,打造出层次丰富的视觉效果,同时也更衬精致小巧脸型。采用钩花蕾丝拼接而成的后背,上身更是女人味十足。不规则裁剪的荷叶裙摆,以及腰部的蝴蝶结系带,灵动间又带点俏皮感。"} +{"content": "类型#裙*风格#淑女*风格#复古*风格#文艺*图案#格子*图案#复古*裙型#大裙摆*裙腰型#高腰*裙长#长裙", "summary": "裙身延用了经典的格纹图案,设计风格非常复古文艺,层次感十足。运用了高腰的设计,凸显身材比例,塑造迷人腰肢。蓬松飘逸的大摆长裙,仙气十足,穿着优雅得体,上身舒适感很好。彰显淑女气质,十分温柔甜美。"} +{"content": "类型#裤*材质#雪纺*风格#性感*图案#线条*裤长#连体裤*裤款式#松紧带*裤款式#飘带*裤腰型#高腰*裤腰型#松紧腰", "summary": "很性感的一件雪纺露背连体裤,给人的感觉就是仙仙的,很有灵动感!整个版型可以说是很有设计感,小高腰的设计提拉腰线,显得上下身的比例就很好看。加上腰部又是做了橡筋松紧的设计,不会限制腰的围度,穿脱起来也是很方便的。后背是做了恰到好处的露背设计,但是这个露背又不会显得太夸张,还加了一个飘带设计,更灵动飘逸,整个背部线条会显得更迷人,小性感。"} +{"content": "类型#裙*版型#显瘦*图案#条纹*裙长#连衣裙*裙领型#翻领*裙袖型#衬衫袖*裙衣门襟#系带*裙款式#衬衫式*裙款式#收腰", "summary": "衬衫式的设计让这款连衣裙展现出一种利落大方的感觉,尤其是搭配了气质小翻领和衬衫袖的设计,更加的稳重、自信,展现出都市女性的独立、干练。竖条纹的设计,不仅能够起到视觉显瘦的效果,也让裙子看起来更有立体感。腰间的系带既收腰显瘦,又提升气质。"} +{"content": "类型#上衣*版型#宽松*风格#复古*图案#格子*图案#复古*衣样式#卫衣*衣样式#毛衣", "summary": "此款阔腿裤选用优质面料,顺滑挺括,垂感度佳。宽松的裤腿设计,不,轻松修饰纤细笔直双腿。格纹元素的装点,赋予整体复古韵味,优雅大气。腰部花形穿绳,可随意调整大小,兼具实用度与美观度。无论是搭配毛衣还是卫衣,都不在话下,轻松化身时尚icon。"} +{"content": "类型#上衣*版型#显瘦*颜色#灰色*风格#青春*衣样式#外套*衣领型#半高领*衣长#短款", "summary": "小半高领今年很流行,保暖气质,很有特色的是从上到下共五层不同宽窄的竖条状,上面采用修身,下面越宽条衬出女人的大方优雅,别致款!墨灰色颜色特别适合搭配长款大衣整体有气场短款外套也可以"} +{"content": "类型#裙*颜色#黑色*风格#高贵*风格#性感*图案#线条*裙型#鱼尾裙*裙长#连衣裙*裙领型#立领*裙款式#拼接", "summary": "经典的黑色连衣裙,同时象征女性端庄优雅的品质。恰到好处的领口拼接设计,气质性感。立领的设计,时髦摩登,搭配透视的小斗篷,修饰手臂线条,又美观大方。下摆的鱼尾拼接设计,凸显女性高贵的气质,每个细节都彰显着女性妩媚动人性感十足的气质。"} +{"content": "类型#裤*材质#亚麻*颜色#米白色*风格#职场*裤长#长裤*裤型#阔腿裤", "summary": "米白色的亚麻长裤设计,米白是亚麻的精致代表色,非常的美观和大气,几乎是完美的百搭色,自带典雅气质,亚麻更是舒适和透气的最好材质。收腰设计具有时装的装饰美感,也能很好的修饰身材曲线,金属扣带来质感碰撞,阔腿裤具有职场美感,也有率性气质。"} +{"content": "类型#裤*版型#宽松*颜色#黑色*风格#简约*风格#休闲*风格#潮*图案#字母*图案#文字*图案#刺绣*裤腰型#松紧腰*裤口#小脚", "summary": 
"宽松舒适的黑色休闲裤与呆萌的校服类似,有了个性的扣环织带、刺绣字母边线的助攻,尽显时髦前卫的潮流范。简洁的束脚设计,营造了裤管的空荡感,让你的腿型更加纤细修长。贴合身形的松紧腰,分割精细,凸显纤细身形,更显休闲感。简约的斜袋设计,方便携带小物件,插兜街拍更有型。"} +{"content": "类型#裙*图案#碎花*裙长#连衣裙*裙衣门襟#系带*裙款式#露肩*裙款式#抽褶*裙款式#收腰", "summary": "精致大方的时尚连衣裙,运用了露肩的设计,露肩配合双层的荷叶摆,展现小女人的柔美姿态。腰部采用了褶皱的收腰设计,勾勒出俊美优雅的动感身姿。配合领口的飘逸系带,更显真挚的时尚魅力。唯美大方的碎花点缀,焕发着典雅的气息。"} +{"content": "类型#裙*颜色#粉色*图案#条纹*裙腰型#高腰*裙长#连衣裙*裙领型#v领", "summary": "连衣裙用粉色做基调,充满少女的活泼感,衬托肤色白皙细腻。v领领口设计可以修饰脸型显脸小,又展露精致锁骨,散发迷人魅力。高腰设计在视觉上拉长了腰线,凸显大长腿显人更高挑。裙身竖条纹设计,丰富视觉效果,展现女性柔美气质。"} +{"content": "类型#裙*裙下摆#花边*裙长#半身裙*裙款式#口袋*裙款式#抽褶*裙款式#对称", "summary": "设计师用花边的设计,在半身裙的后面做了两个夸张的对称口袋,设计很特别。用褶皱花边最大程度上打造视觉上的甜美感觉,并没有显得很突兀。"} +{"content": "类型#上衣*版型#宽松*材质#棉*颜色#蓝色*图案#线条*衣样式#衬衫*衣样式#衫*衣领型#翻领*衣袖型#落肩袖", "summary": "今年大火衬衫,这件衬衫的配色格外的独特,蓝色的色调配上淡淡的棕色,上身之后了撞衫的尴尬。上宽松的版型,柔化了肩部与手臂上的线条,显得纤瘦娇小。小翻领的设计,更好的修饰了颈部的线条。全棉的材质穿着更透气舒适。"} +{"content": "类型#裙*版型#显瘦*风格#休闲*风格#青春*图案#线条*裙型#a字*裙长#连衣裙*裙款式#抽褶", "summary": "玛玛绨连衣裙a字版型的裙式设计,少女感十足,洋溢着满满的青春俏皮范儿,穿着巧妙的减龄。a字显瘦的效果也十分的出色。的裙摆,隐藏住调皮的赘肉,衬托出更纤细修长的腿部线条。较高的包容度也方便不同身型的女生穿着。裙身随意的褶皱,更为整体增添了一丝休闲感。百搭又减龄的款式,轻松的凹凸你的造型。"} +{"content": "类型#裙*材质#网纱*风格#清新*图案#渐变*图案#线条*裙下摆#花边*裙长#半身裙*裙领型#立领*裙袖型#喇叭袖*裙衣门襟#系带*裙款式#拼接*裙款式#木耳边*裙款式#抽褶*裙款式#不规则", "summary": "木耳边半立领,增添褶皱即视感,塑造花边效果,俏皮清新甜美可爱,进一步拉伸颈部线条,提升你的气质。袖口设计为系带结合,塑造喇叭袖造型,展现少女气息,轻松减龄。搭配同色系半身裙,网纱面料拼接,不规则裙摆,更显飘逸与大气,随着你的走姿翩翩起舞,引人注目。整体设计为同色系的两色搭配,形成渐变效果,拒绝单一丰富色调,靓丽清新,打造森系少女形象。"} +{"content": "类型#裙*材质#蕾丝*图案#刺绣*图案#蕾丝*裙型#a字*裙款式#收腰", "summary": "采用利落的剪裁工艺,将胸腰曲线展现的十分立体,收腰的设计,展示出你纤细傲人的身材。a字裙摆自然展开,弥漫出优雅的气息。袖子部分采用刺绣的设计,增添浪漫唯美的气息,饱满圆润的珍珠扣装饰,泛出莹润的雅致光泽,蕾丝装点添出一份小女人的柔美气息。"} +{"content": "类型#上衣*图案#线条*衣样式#外套*衣领型#翻领", "summary": "小外套使用翻领设计,剪裁简洁,给人干净利落的感觉,展现女性少有的帅气。制作工艺匠心独运,线条流畅,将优雅和时尚结合起来,衬托女性气质。款型设计贴身,修饰女性身材。材质优良上档次,做工精细展现不凡品味,适合多种年龄层、多种场合穿搭。"} +{"content": "类型#上衣*图案#线条*衣样式#外套*衣领型#翻领", "summary": "在简单利落的外套上融入利落的翻领设计,呈现出穿着者独有的率性气息,剪裁自然流畅,领边规整而又精致的走线。从视觉上拉高了颈部的线条,结合立体干练的版型,上身就能体现无懈可击的时尚调性。"} +{"content": "类型#上衣*颜色#黑白*图案#线条*衣样式#卫衣*衣领型#v领*衣领型#高领*衣门襟#套头", "summary": 
"这款卫衣是一件不挑身材的版型,黑白两色基础的色系都非常百搭,卫衣套头的,前面扣子敞开v领的效果,修饰颈部的线条,扣上就是高领的效果,无论是单穿或者是做内搭,都非常好看。"} +{"content": "类型#裙*版型#显瘦*图案#线条*裙款式#勾花镂空*裙款式#收腰", "summary": "以修身收腰的版型勾勒出优雅的裙装,展现出纤细的腰身和修长的腿部线条。在清爽镂空的面料上加入别致的钉钻点缀,平添几分俏丽感,搭配清雅淡雅的色调,赋予衣衫浪漫的气质。"} +{"content": "类型#裙*风格#复古*图案#复古*图案#刺绣*裙长#短裙*裙款式#盘扣*裙款式#收腰", "summary": "复古的刺绣工艺,经过现代时尚的演绎,展现出女性柔美气质,同时增强时尚气场。中短裙的版型设计,飘逸的裙摆,小个子也可以穿出大长腿的既视感。精致的后领盘扣设计,古典雅致,衬托出女性独有的韵味。收腰的版型剪裁,勾勒出曼妙身姿曲线。"} +{"content": "类型#裤*风格#青春*风格#性感*裤长#九分裤*裤型#直筒裤*裤腰型#高腰", "summary": "这款休闲裤走的是青春时尚的风格路线,尽显出你的与众不同的个性,体现出你的帅气气场,不失自信的一面。采用了直筒的版型,带来舒适自在的体验感。配合高腰的腰型设计,尽显出纤瘦性感的一面,九分的裤长细节,突显出时尚个性的一面。"} +{"content": "类型#裤*版型#显瘦*风格#简约*裤长#九分裤*裤款式#口袋*裤款式#抽褶*裤腰型#松紧腰", "summary": "经受的强大的考验才会被人们所信赖的BRAND,以其出色的设计理念吸引你的眼球。这条运动裤看着简简单单,实际上却是满满的心机小细节,整体简约而不简单,九分裤的设计,显瘦的同时还能拉长身材比例,让你穿上运动裤也能分分钟变大长腿,细节处的口袋和褶皱松紧裤腰,走向干净利索,彰显高超技术,精致logo凸显质感。"} +{"content": "类型#裤*版型#显瘦*风格#简约*裤长#九分裤*裤款式#口袋*裤款式#抽褶*裤腰型#松紧腰", "summary": "简约不简单的一款裤装,后身松紧收腰设计,时髦显瘦展现优美比例,释放女性姣好身形;前后多口袋点缀,实用方便不乏俏皮,同时还让潮女插兜街拍更有型。匠心的九分的设计,隐约露出一丝打破你的一本正经,无意间让你吸睛无限;不甘于平平无奇,耳目一新的褶皱细节,更显前卫和别具腔调。"} +{"content": "类型#上衣*材质#牛仔布*材质#水洗*颜色#金色*风格#复古*图案#复古*衣样式#外套*衣领型#一字领*衣长#短款*衣门襟#拉链*衣款式#拉链", "summary": "这款一字肩做旧水洗牛仔外套,妥妥的时尚又百搭的初春单品。短款的设计,同时还拉长身材比例。复古的旧金色拉链和好玩的主题四合扣,给衣身注入了新鲜的活力,不管怎么搭配都很赞。"} +{"content": "类型#上衣*风格#性感*图案#创意*图案#印花*衣样式#衬衫*衣领型#v领", "summary": "萌趣十足的印花点缀在衬衣之中,形成一种特别的视觉效果来提升你的魅力,展现柔美又很温和的一面让人发现你的与众不同。v领的设计效果凸显颈部白皙与精致,可以在扣饰开合的作用下将性感的味道加以提升,穿出百变的创意理念。"} +{"content": "类型#上衣*版型#宽松*材质#针织*风格#复古*风格#文艺*风格#潮*风格#性感*图案#复古*衣样式#开衫*衣领型#v领*衣长#短款*衣袖型#落肩袖*衣袖型#收口*衣款式#纽扣*衣款式#罗纹", "summary": "针织开衫体现出的是温婉大方的东方古典美韵,加上小v领的版型却具有了性感潮流的个性。前襟的单排树脂纽扣门襟,穿脱方便装饰感强,突显复古典雅的美感。紧密的编织设计,具有文艺复古气息,宽松的落肩短款版型,适合多种身材穿搭,领口袖口的罗纹包边收口,做工细腻美观大方。"} +{"content": "类型#裤*版型#宽松*裤长#长裤*裤型#直筒裤*裤款式#不对称*裤腰型#松紧腰", "summary": "这样一款颜值与质感皆在的长裤,面料是精选的顺滑面料,具有一定的光泽度。比较宽松的直筒裤型,做了松紧腰的设计,对各种腿型都很友好,更是让你轻松驾驭。侧边加入了双色的织带,一根是较暗的深藏蓝织带,另一根是哑光的涤棉织带,两边还是特别的不对称式。"} +{"content": "类型#裙*风格#复古*风格#性感*图案#格子*图案#复古*裙型#鱼尾裙*裙长#连衣裙*裙领型#v领", "summary": 
"这款连衣裙穿着轻便而舒适,具有良好的弹性和恢复性能,面料不起皱不起球,穿起来挺括有型。格子底纹加上配色显得非常的时髦复古,前后v领的领口设计性感又优雅,腰间上提的款式独特新颖。微微鱼尾的裙摆款式显得非常的优雅气质。"} +{"content": "类型#裤*图案#线条*裤长#短裤*裤长#五分裤*裤型#直筒裤*裤款式#口袋*裤腰型#高腰", "summary": "纹提花直筒五分短裤,口袋和下摆运用面料的反面,在图案和颜色上打破了直筒五分短裤给人一本正经的感觉,穿起来更加夺取眼球。直筒裤包容性强,不仅可修饰大腿线条,高腰版型更有效拉伸整体比例。"} +{"content": "类型#上衣*版型#宽松*风格#知性*风格#休闲*衣样式#衬衫*衣款式#拼接*衣款式#不规则", "summary": "足以当做连衣裙来穿着的一款衬衫,带有几分知性风格。宽松的版型剪裁,气质休闲且包容性强大。而丰富衣身的不规则拼接,更是成为了格外吸睛醒目的存在,搭载着肩膀处新颖的斜排扣设计,增加整个衣身的亮点,丰富视感个性的时尚感。"} +{"content": "类型#裙*材质#蕾丝*风格#性感*图案#蕾丝*裙下摆#花边*裙长#连衣裙*裙领型#一字领*裙袖型#喇叭袖", "summary": "这一款连衣裙喇叭袖的设计,看起来具有十足的仙气,加上花边下摆的设计,行走之间显得很有美感。时尚的一字肩设计,美肩微露特别性感。蕾丝装饰,妩媚动人风情浪漫。穿上让人深深着迷无法自拔,时尚精致。"} +{"content": "类型#上衣*材质#蕾丝*图案#条纹*图案#蕾丝*衣样式#衬衫*衣领型#翻领*衣款式#拼接*衣款式#纽扣*衣款式#吊带", "summary": "这款衬衫上衣采用了蓝白条纹的图案,再加上它一侧拼接了蕾丝吊带,打破了传统衬衫的设计款式,在设计上更是凸显出它与众不同之处,带来一丝甜美气质。采用了翻领的设计,加上单排纽扣的衣门襟设计,简洁时尚,方便穿脱。"} +{"content": "类型#裙*版型#显瘦*风格#复古*风格#简约*图案#条纹*图案#植物*图案#复古*图案#刺绣*裙长#连衣裙*裙袖长#短袖*裙款式#收腰", "summary": "非常具有细节设计感的连衣裙,灰白色的条纹设计,色调经典又美观,带来恬静的贵族风格,上身横纹简约大气,下身的竖纹设计,更加显瘦和纤细。肩部的两朵小刺绣花卉,显得精致典雅,具有点睛之效,喇叭短袖设计,复古又很时尚,收腰更加突出造型。"} +{"content": "类型#上衣*版型#显瘦*风格#简约*图案#线条*衣样式#针织衫*衣领型#圆领*衣长#短款*衣袖长#七分袖", "summary": "简约百搭的针织衫特别有范,胸前火烈鸟图案充满了个性和活力,精致优雅带来满满的俏皮感。经典圆领设计修饰颈脖,显得简洁大方,短款修身的版型从视觉上拉长身形,构造曼妙身姿,七分袖长度露出纤细修长的手腕线条,举手投足之间绽放出女性的独特魅力。"} +{"content": "类型#裙*材质#蚕丝*裙长#连衣裙*裙衣门襟#拉链*裙款式#拼接*裙款式#拉链*裙款式#木耳边", "summary": "这款拼接的真丝连衣裙采用木耳花边的设计,穿着之后甜美俏皮又灵动。后背隐形拉链的设计,细腻不容易被察觉增加了整体的美观效果。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*颜色#深蓝色*风格#清新*裤长#七分裤*裤型#阔腿裤*裤腰型#高腰", "summary": "纯白和深蓝两色设计的牛仔裤,纯白色为牛仔裤注入清新气质,极简色调清新典雅,点亮了牛仔裤,特深蓝色更加内敛显瘦,一深一浅都极具精致美感。高腰设计突出身材曲线,带来很好的一体连贯美,纤细的腰部曲线展露无余,阔腿七分更添率性,典型的现代美。"} +{"content": "类型#裙*材质#牛仔布*颜色#蓝色*风格#街头*风格#英伦*风格#休闲*风格#青春*风格#清新*图案#线条*裙型#背带裙*裙型#牛仔裙*裙型#直筒裙*裙长#连衣裙*裙衣门襟#排扣", "summary": "这款很有青春气息的连衣裙采用背带的版型设计,剪裁出更具有型利落的线条感,直筒的造型着身更是舒适休闲,结合排扣的装点,衬托出满满的英伦洋气街头感,加上小清新韵味十足的牛仔蓝色,轻松增加了百搭效果。小背带的设计又能让活力气息脱颖而出。"} +{"content": "类型#上衣*衣样式#外套*衣领型#v领*衣门襟#系带", "summary": "这款背心裙精选优质的面料,不仅手感舒适,穿在身上还尽显出高级质感,非常时髦大方。v领的设计也是一大亮点,衬显这款外套更加的吸睛优雅,凸显气质。腰部系带的设计还可以很好的勾勒腰线,展现好身材。"} +{"content": 
"类型#裙*风格#知性*风格#清新*图案#条纹*裙长#连衣裙*裙款式#不规则", "summary": "连衣裙一直是女性们出街凹造型的神器,它兼具着时尚与百搭的特性,让很多美眉们爱不释手。这款连衣裙,下摆处采用不规则的设计,时尚而带有层次感,浪漫飘逸,同时为整体增加了设计感,让你行走间展示女神般的魅力。融入条纹的设计,清新雅静,不仅丰富了整体的视觉效果,同时还能拉长身形,散发出女性优雅知性的时尚气质。"} +{"content": "类型#裙*风格#文艺*风格#清新*图案#蝴蝶结*裙长#连衣裙*裙款式#腰带*裙款式#吊带", "summary": "连衣裙采用麂皮绒面料制成,细腻顺滑有光泽,穿着自然舒适。麂皮绒经过设计师精心剪裁和缝纫,结合清新的素色印染,展现出简洁大方的版型,边缘整齐走线流畅,不易脱线经久耐穿。除此以外,吊带式的设计和腰间的蝴蝶结腰带相呼应,洋溢出浓浓的青春活力气息,打造出小清新的文艺女青年形象。"} +{"content": "类型#上衣*风格#韩版*风格#简约*风格#潮*衣样式#衬衫", "summary": "90后潮搭韩版潮牌衬衫,面料厚实透气性好显成熟稳重气质。看看用这个全新的方式来诠释潮流,穿搭很轻易让你打造出秋冬型男造型。既简约又帅气哦。"} +{"content": "类型#上衣*材质#棉*颜色#黑白*风格#清新*图案#条纹*图案#印花*衣样式#卫衣*衣领型#v领*衣长#中长款", "summary": "纯棉的面料上简单的配色,以大v领口修饰小巧脸型,不幼稚也过于成熟,黑白条纹构成的清新配色洋溢出满满的青春活力,充满了阳光的味道。一别传统卫衣,中长款膝的长度少女的甜美,胸口的印花成环形点缀,唤醒童趣,可爱炸裂。"} +{"content": "类型#裤*材质#牛仔布*风格#街头*风格#性感*图案#线条*裤款式#破洞*裤腰型#高腰", "summary": "在街头破洞元素一直是彰显个性自我与众不同的代表,这条牛仔裤的膝盖为破洞处理,露出腿部肌肤,展现性感魅力,同时也流露出不羁的态度。为了拉长腿部线条,裤子采用的是高腰版型设计,它转化了身体的比例。腿部还加有金属别针装饰,增加了裤子的时髦前卫感。"} +{"content": "类型#裙*颜色#粉色*风格#复古*风格#宫廷*风格#清新*图案#碎花*图案#复古*裙下摆#荷叶边*裙袖型#喇叭袖", "summary": "以淡雅的粉色作为底色,清新的碎花作为点缀,两者互相衬,满满的仙女气息。荷叶边蔓延至裙身,喇叭袖与荷叶边的搭配,宫廷复古轻松重现眼前。"} +{"content": "类型#裤*版型#宽松*材质#牛仔布*风格#运动*风格#休闲*风格#潮*图案#字母*图案#文字*图案#线条*图案#印花*图案#撞色", "summary": "设计师匠心独运将字母印花,巧妙点缀于上半身,远看如同翅膀,颇具新鲜趣味性。黑红、白红撞色设计,制造强烈视觉冲击,倍潮流。圆领套头款式,以基础简洁剪裁碰撞繁复图案,尤为时尚大气。裁剪细腻,线条明快流畅,宽松版型上身舒适,兼备休闲风和运动风。时尚百搭,可搭配休闲裤、运动裤、牛仔裤等。"} +{"content": "类型#上衣*材质#棉*材质#纤维*风格#清新*图案#印花*衣样式#外套", "summary": "在炎热的夏天,小清新的绿意与粉嫩的少女色搭配最是能拂去因天气带来的烦躁,首先在视觉上就是一片清爽,特别设计了多款印花,双面不同印花设计,无需在意正反面,带给你全新的使用体验。它在材质选择上外套为天然纯棉,内里为聚酯纤维,集柔软亲肤,透气吸湿于一体,在温度不下的夏天为你营造干爽舒适的睡眠氛围,让你在夏天也可好睡眠。"} +{"content": "类型#裙*材质#牛仔布*材质#水洗*颜色#浅色*颜色#深色*图案#拼色*图案#撞色*裙型#牛仔裙*裙下摆#毛边*裙款式#拼接*裙款式#破洞", "summary": "裤型很棒。裤脚口做了毛边的设计颜色了比较正的浅色水洗牛仔色,右腿膝盖的位置做了假破洞的设计,破洞是不露肉的设计,在里面拼接了一块深色牛仔布,撞色的拼色给人独特的视觉效果,对于腿型不那么好看的姑娘来说,也是可以放心入的。牛仔棉布面料,舒适度好。颜色是水洗的刚刚好的蓝。"} +{"content": "类型#裤*版型#宽松*版型#显瘦*裤腰型#高腰*裤口#小脚", "summary": "这一款裤子高腰设计,提升腰线自然显高。略微宽松的版式,上身轻松收身显瘦,惬意舒适中自然显大方。小脚裤型,精致优雅自然出彩。精挑细选的棉布材质,亲肤细腻穿着柔软,搭配出街,自然减龄。"} +{"content": "类型#裙*材质#雪纺*裙型#蛋糕*裙下摆#花边*裙款式#抽褶", "summary": 
"雪纺工艺,更加的突出了质感,不论是前胸的褶皱花边,还是三层波浪式蛋糕裙下摆,都强调了这款裙子的独特,穿起来像优雅的小仙子。质地柔软细滑,搭配漂亮的项链装饰穿在身上回头率高哦。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*颜色#浅蓝色*风格#清新*风格#潮*裤款式#口袋*裤款式#纽扣*裤款式#拉链*裤口#毛边*裤口#小脚", "summary": "一件百搭时尚的牛仔裤,一年四季常备的款式,茵曼的这款是浅蓝色的款式,带有一点小清新视感,很有气质。修身小脚的裤型,能修饰腿型,瘦腿美腿,打造一双纤细修长的大长腿,让你更加高挑纤瘦。裤脚用毛边装饰,增添潮流帅气感,凸显细节。门襟用经典拉链纽扣,两边实用立体口袋,方便插手凹造型,实用又美观。"} +{"content": "类型#裙*风格#性感*裙型#直筒裙*裙袖型#泡泡袖*裙款式#抽褶", "summary": "裙子采用了舒适的直筒廓形设计,版型上适当的松量,利落不松垮,不会显得瘦的过分,也不会显得曲线美不够好,简直就是藏肉的一把好手。袖子采用了别致的抽褶设计,形成了微微的泡泡袖,为整体造型增添了不少的个性感。光是百搭易穿就足够令人疯狂。"} +{"content": "类型#裙*裙长#半身裙", "summary": "黑裙背后的黑纱长至脚踝,与前面到膝盖的半裙形成对比,为原本单一的版型增添了层次感。黑纱的轻柔与飘逸,让你在气场全开的同时,又多了一份柔情与浪漫,让小礼裙瞬间变得独一无二。"} +{"content": "类型#裙*裙长#半身裙", "summary": "拥有宽大裤脚的一条休闲裤,穿在身上似裙子又似裤子。既有半裙的内敛优雅,又有裤装的潇洒惬意,宽宽大大的样式能很好的隐藏腿上的肉肉,拉长身高,看起来又瘦又高。"} +{"content": "类型#裙*裙长#半身裙", "summary": "这款半身裙腰部采用了章贴设计,设计使半身裙更具吸睛亮点,打破裙子单调沉闷感,吸睛更时尚。而且元素层次感丰富,使整体更显时髦。"} +{"content": "类型#裙*裙长#半身裙", "summary": "半身裙备受美眉的喜爱,它可以搭配多种风格。此款半身裙最突出的亮点是腰封设计,采用腰封装饰,让裙子看起来更有设计感和时髦感,让半身裙变的不再单调,而是更加丰富和内涵。同时腰封设计可以更好的拉高腰线,让腿部看起来更加修长。因此,腰封设计是此款半身裙的点睛之笔。"} +{"content": "类型#裙*裙长#半身裙", "summary": "半身裙在裙摆处添加双色织带点缀,与裙身底色形成鲜明对比,打破了单一色调带来的枯燥乏味感,为整体造型注入一丝活泼学院味道。"} +{"content": "类型#裙*裙长#半身裙", "summary": "这款的设计感在意左右两边长短不同,断层的落差感,尤其上身后,能很明显的感受到细节的质感。这款半裙的版型比较修饰身材,能把身材曲线勾勒的很美,腰头的位置剪裁平整,上身很有型。"} +{"content": "类型#裙*裙长#半身裙", "summary": "每一条半裙上面的颜色位置不是固定排列的,定做的,不止颜色很特别,就连版型也是如此,下摆是呈360半圆弧度。关于360度旋转,纱裙为半透明的效果,更为仙气。为了方便日常穿着,还在里面加了一件单独的独立内衬,也就是带橡皮筋的安全短裤,可脱卸的设计,所以实际上是两件套,穿着更加方便。"} +{"content": "类型#裙*裙长#半身裙", "summary": "精致的鹅黄半身裙,搭配上美观的花纹款式,穿上身后耀眼夺目,张扬属于自己的个性魅力,春天的到来,刚好这款透气的裙子适合小仙女。"} +{"content": "类型#裙*裙长#半身裙", "summary": "真正好的版型一上身就能感受它带来的惊喜,r就有这个能力。设计师在腰节处做了多道省位,包括间距,大小,长度,都是经过调整达到zui佳。"} +{"content": "类型#裙*裙长#半身裙", "summary": "一件半裙,如同一枚的郁金香花朵,优雅中含苞欲放的温润与羞涩,包容着整个身形,修饰这双腿的曲线,有着浑然天成般自然,无论是走路还是坐立,都是自然雅致的模样。"} +{"content": "类型#裙*裙长#半身裙", "summary": "春夏天谁的衣柜里面没几条半裙?女孩子在夏天怎能,搭配上很是需要,百搭而且还不容易出错。但半身裙款式、穿法基本上都差别,怎么才能脱颖而出是关键,这重点就在挑选的款式上了。"} +{"content": "类型#裙*裙长#半身裙", "summary": 
"这款半身裙,选用的是优质的高品质面料,做工精湛,面料的质地细腻,手感柔软舒适,纹理清晰美观,穿着结实耐磨,内衬安全裤的设计长度适中,穿着舒适透气。"} +{"content": "类型#裙*裙长#半身裙", "summary": "半裙的腰头是借用旗袍的领型为设计,像穿上旗袍的高雅,散发古典韵味。裙身像孩童的画一样的贴布绣设计,充满童真的减龄感。既有优雅女人味又有少女感的一条半身裙。"} +{"content": "类型#裙*裙长#半身裙", "summary": "这款精心设计的半身裙,能在瞬间呈现出优雅迷人的美感,造就出匀称的身材曲线,迷人至极洒脱的个性的气息惹尽人们的喜爱,随时随地展显出优雅大牌气息,而且有很好的显廋效果,气质非凡。"} +{"content": "类型#裙*裙长#半身裙", "summary": "来自于tao的女童半身裙,采用松紧的腰头设计,恰到好处的弹力不仅能紧贴于宝宝腰部,营造出舒适的穿着感,而且还有助于宝宝在日常穿脱时,更加的省时省力。裙身上精美的图案点缀,充满了时尚又俏皮的气息,宝宝穿着更能彰显天真的个性和满满的活力哦。"} +{"content": "类型#裙*裙长#半身裙", "summary": "这件半裙的裙身是用细腻的网布,轻盈有质感不显臃肿,加上做了袭击的袭击的烫金工艺,整体更有细节感,还有加了同色系里布的设计,让整体的舒适度更好。"} +{"content": "类型#裙*裙长#半身裙", "summary": "层次设计半身裙,上面一层天丝麻裙摆活片设计,从侧面和下摆露出里面的丝裙摆,灵动又不失含,营造半露的迷人风情,让你穿出个性与时尚。"} +{"content": "类型#裙*裙长#半身裙", "summary": "非常百搭的一款半身裙时尚又富有气质,选取的面料非常细腻平实,而且有很好的抗皱感性,轻薄的面料也能给人带来很好的穿着体验,能够让人在行走中自带飘逸感。"} +{"content": "类型#裙*裙长#半身裙", "summary": "雪花半身裙,亮色纱线交错,清晰的编制肌理,低调而又不沉闷,因为面料本身比较别致,所以没有做其他装饰设计,简简单单就很耐看。"} +{"content": "类型#裙*裙长#半身裙", "summary": "此款半身裙采用的是精致的粗呢小香风编织面料,从细节中传递出一种优雅的气质。凹凸不平的表面,丰富了裙身的肌理感,奢华感油然而生。"} +{"content": "类型#裙*裙长#半身裙", "summary": "这款百搭时尚的仙女半身裙,整体设计非常的飘逸随性,穿上之后每个女孩子都能瞬间变成小仙女啦。料子非常的轻盈,透气性也很好,穿到夏天也很舒适。"} +{"content": "类型#裙*图案#印花*裙长#连衣裙*裙领型#立领*裙款式#盘扣", "summary": "这款改良旗袍连衣裙与我们平日里所见的连衣裙大有不同哦,尤其是将如此栩栩如生的印花图案点缀在衣衣上,瞬间柔和了旗袍裙带来的传统正式感,平添了几分女性的趣味性与时尚感;还有那简洁的立领设计以及盘扣的点缀,已然成为旗袍连衣裙的标配,瞬间流露出一丝中式风韵味。"} +{"content": "类型#裤*版型#显瘦*风格#简约*风格#民族风*图案#线条*裤长#连体裤*裤型#阔腿裤", "summary": "民族风连体裤,线条简约透彻,拥有自己冷静的一套处事。绚烂色彩结合阔腿裤剪裁,露出修身的线条,显得十分刻板端正,不。下半身的阔腿裤走特立独行的法则,避免腿型分明的尴尬。将比例很好的隐藏,可以气场。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*图案#条纹*图案#格子*衣样式#衬衫*衣领型#polo领*衣门襟#单排扣*衣款式#拼接", "summary": "polo领BRAND单排扣的经典衬衫版型搭配略宽松的oversize版型,打破呆板,看起来更加轻松随意。在经典的法式蓝白竖条纹基础上增加格子拼接元素,匠心独具。还能从视觉上更加显瘦,而精致细腻的做工将衬衫的质感又提升一个level。"} +{"content": "类型#上衣*颜色#黑色*颜色#灰色*风格#复古*风格#文艺*图案#格子*图案#复古*衣样式#外套*衣款式#拼接*衣款式#口袋", "summary": "柔软的外套肩膀与胸口口袋处拼接着皮革材质,使表面呈现出丰富多变的质感;黑色与灰色相间的格纹之间,点缀着细小的花朵图案,颇具复古文艺气息的同时,又不会过于死板。"} +{"content": "类型#裙*材质#蚕丝*图案#印花*裙长#连衣裙*裙款式#拼接*裙款式#木耳*裙款式#抽褶", "summary": 
"oz家的这款真丝连衣裙,舒适质感搭配层次丰富的裙摆拼接,丰富而不会觉得繁琐,视觉上尽显优雅的气质!腰部拼接的木耳褶边,加以细碎的褶皱尽显俏皮和可爱感!裙身上的印花,是一种很有艺术韵味的花朵,素雅好搭配的色彩,上身大气又尽显温柔感!"} +{"content": "类型#裙*版型#显瘦*裙长#连衣裙*裙衣长#中长款*裙领型#v领*裙衣门襟#系带", "summary": "这件连衣裙的腰部带有侧边系带的装饰,十分美观也能够很好的勾勒腰部曲线。常见小v领修饰脖领曲线微露迷人锁骨,展现出女性独特的魅力。整体中长款修身的版型巧妙的包裹着身体,得体,拥有十足的魅力。"} +{"content": "类型#裤*版型#宽松*风格#街头*风格#清新*裤型#直筒裤*裤款式#破洞*裤口#毛边", "summary": "带着一点小清新的裤子在设计上一定会有着自己的独特魅力。街头元素风格的破洞设计增加了毛边的做旧,看着也变得不会那么单调,给人一种非常时尚的前卫风格。宽松的直筒版型可以很好的掩饰腿部缺陷,适合大多数人穿着,裤腿则是有着做旧的毛边,折叠起来也非常有个性,整体都是属于非常耐看的款式。"} +{"content": "类型#裙*材质#雪纺*颜色#黄色*风格#清新*图案#碎花*裙长#连衣裙*裙袖长#五分袖", "summary": "这是款自带小清新感觉的连衣裙,穿起来很是减龄,清爽的黄色小碎花在雪纺的渲染下,格外具有层次感。色彩如同水墨画的晕染,那般富有想象力和动感,五分袖的设计格外端庄优雅,彰显出女性由内而外的自信与美丽。"} +{"content": "类型#裤*版型#宽松*材质#棉*颜色#黑色*风格#休闲*图案#线条*裤长#七分裤*裤腰型#高腰", "summary": "这款休闲裤是经典的黑色款,非常的实穿百搭,出街实力吸睛!采用纯棉的面料设计,上身质感更舒适。高腰的款式优化身材比例,七分的长度设计,更显双腿修长。宽松的裤身版型,更好的修饰臀部和大腿的线条。上身后彰显休闲时尚的气质感!"} +{"content": "类型#上衣*版型#显瘦*材质#羊毛*图案#印花*衣样式#毛衣*衣领型#圆领*衣袖长#长袖*衣门襟#套头", "summary": "来自BRAND,黑豹嵌花毛衣。精选100%羊毛材质打造,软糯轻薄,穿着透气。简洁小圆领长袖套头款式设计,略微修身的版型作为内搭或是外穿皆出彩。个性黑豹印花图案装饰,彰显霸气设计细节。"} +{"content": "类型#裙*版型#宽松*颜色#黑色*风格#性感", "summary": "candie’s是拉夏贝尔旗下主打少女风的时装品牌,它旗下的这款宽松打底黑色吊带裙,以宽松剪裁构筑版型,助力于甜美少女完成舒适自在的衣着造型。辅以兼具性感与清纯气息的吊带裙样式,尽显轻熟少女妩媚气息。"} +{"content": "类型#裤*版型#显瘦*材质#棉*颜色#白色*颜色#黑色*风格#复古*图案#复古*裤款式#拼接*裤款式#抽绳*裤口#小脚", "summary": "这款复古束脚裤,经典时尚的纯黑色为底色,侧边的白色拼接,给整件裤子增添了一分活力感。精选优质棉质面料,手感舒适,纹理细致,耐洗耐磨。裤脚的罗纹缩口设计,防风有型又显瘦。腰部抽绳设计,可调节松紧度,穿着舒适,无束缚感。"} +{"content": "类型#上衣*版型#宽松*颜色#军绿色*衣样式#风衣*衣领型#翻领*衣领型#小立领*衣长#常规*衣款式#腰带", "summary": "这款过膝长款风衣,采用经典军绿设计,搭配双排口点缀,立刻凸显优雅时尚范儿,袢加上精致肩章,层次感分明,给整体大大加分,形版型则不会显得臃肿。再加上宽松腰带的加持,瘦高的身材立体一秒体现。小立领设计,打破了风衣常规的翻领,勾勒出脖颈曲线。"} +{"content": "类型#裙*材质#雪纺*图案#几何*图案#印花*裙型#百褶*裙下摆#弧形*裙下摆#垂坠*裙长#连衣裙", "summary": "这款连衣裙甄选了柔软的雪纺面料打造,垂坠有型,穿着很舒适。弧形的领口,恰到好处的露出领口,浓浓的女人味。几何印花的造型,超级有女神范,自然百褶的元素,唯美浪漫。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*颜色#白色*颜色#黑色*颜色#黑白*风格#英伦*风格#复古*风格#文艺*图案#格子*图案#复古*衣样式#衬衫*衣领型#v领*衣门襟#系带*衣款式#拼接*衣款式#荷叶边", "summary": 
"这件宽松款式的的格纹衬衫,特别选用了细密的黑白格纹,经典的黑白格纹可以是“格纹”,暗黑系的配色加入反光的白色纹路形成的小格子,带来了许多活泼跳跃的感觉,又营造出英伦复古的怀旧文艺气息,这种格纹不仅时尚还暗藏显瘦心机。拼接的荷叶边,廓形立体飘逸灵动,垂于胸前更显浪漫甜美。领口黑色拼接系带,打造出v领视觉,精致又洋气。"} +{"content": "类型#裙*材质#棉*风格#文艺*图案#格子*裙型#包臀裙*裙型#鱼尾裙*裙下摆#荷叶边", "summary": "藏青色的格纹图案,将整件包臀裙饰以文艺复古风的设计,100%的纯棉面料,舒适亲肤,贴合身形的同时。前身立体感的荷叶边点缀,结合下身的鱼尾裙下摆,更为凸显浪漫情怀。"} +{"content": "类型#裙*版型#显瘦*颜色#红色*风格#简约*图案#条纹*图案#刺绣*图案#撞色*裙型#直筒裙*裙长#连衣裙*裙领型#圆领*裙款式#拼接*裙款式#不规则", "summary": "富有设计感的一款连衣裙,用红色与条纹的撞色不规则拼接,凸显新颖有个性,吸睛亮眼,不仅丰富视觉感受,更层次鲜明。侧边还有一个刺绣的点缀,精美可爱,让衣身更加时髦靓丽,充满个性帅气。连衣裙的版型是修身直筒的,搭配简约的圆领,方便好穿,也很百搭显瘦,不挑身形。"} +{"content": "类型#裙*版型#显瘦*颜色#白色*风格#休闲*风格#清新*裙型#背带裙*裙腰型#自然腰", "summary": "休闲感十足的一款背带裙,是少女的专属。修身的版型,勾勒出婀娜多姿的黄金曲线。自然腰的打造,彰显出优雅大方的少女感,清新的本白色仿佛在诉说着一个简单的故事,雅致而又十分的显韵味,简单呈现出都市时尚摩登感。"} +{"content": "类型#裙*风格#日系*风格#文艺*风格#知性*风格#性感*图案#格子*裙下摆#压褶*裙长#连衣裙*裙款式#拼接*裙款式#勾花镂空*裙款式#不规则", "summary": "镂空的挂脖领设计个性且十分吸睛,精巧别致的露出性感的锁骨,却不失文艺的气息,洋溢出日系风格的时尚感。不规则的压褶处理,结合衣身与衣袖的双层拼接,不显单调且充满了层次感,还可以遮肉,举手投足间凸显灵动柔美感。裙摆的大胆设计使原本中规中矩的连衣裙瞬间变得时髦又有趣,洋溢出随意风的惬意感。凹凸有致的立体格子装饰,彰显精致感,迸发着时尚亲和力,尽显知性优雅的气息。使整体透露出日系风格的甜美。"} +{"content": "类型#裙*风格#性感*图案#刺绣*裙型#大裙摆*裙长#连衣裙*裙衣长#中长款*裙袖型#喇叭袖*裙款式#勾花镂空*裙款式#收腰", "summary": "一款洋气而性感的镂空连衣裙,整体采用了镂空的刺绣面料,给肌肤带来舒适的清爽感,透视也增加了性感的韵味;自然散开的喇叭袖,微露肌肤性感而优雅;收腰大摆的中长款版型,上身尽显高挑身姿。"} +{"content": "类型#裤*颜色#白色*颜色#黑色*风格#通勤*风格#ol", "summary": "这是一款ol通勤风的西装裤。设计师选用经典黑色,显得腿很直哦!裤脚卷起来露出白色的设计,特别有感觉,打造个性时尚。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*材质#棉*颜色#黑色*风格#复古*图案#条纹*图案#复古*衣样式#衬衫*衣长#中长款*衣款式#露肩", "summary": "issdm这款竖条纹露肩衬衫带来满满复古BRAND风。经典的竖条纹元素时尚减龄更具年轻风采。足量纯棉面料质感绵软舒适穿着更亲肤透气。宽松慵懒的版型轮廓鲜明生动方便自由舒展身躯更展现年轻活力。酷劲十足的纯黑色调优雅高级不做作。个性十足的穿衣肩带设计带来一丝潮酷十足的哥特式风范气场十足。中长款版型显高显瘦更有气质。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*颜色#白色*裙下摆#花边*裙领型#娃娃领*裙款式#拼接*裙款式#飘带", "summary": "白色花边拼接娃娃领,配合飘带设计,妥妥的心机,乖巧的风格,减龄指数飙升;宽松的裙型,自然随身且十分显瘦;裙摆,宽摆设计,增加了整体层次感,既活泼又提气精神。"} +{"content": "类型#裤*颜色#粉色*风格#简约*风格#休闲*图案#拼色", "summary": "的这款拼色休闲女鞋,简约而自然,充满个性化。采用拼色粉色底+经典系带款式,是此单品的一大重要亮点,穿起来既年轻又有活力,特别好看。外型讨巧,包裹性良好,可搭配各种裤型,漂亮又实用。轻盈鞋身设计,让你几乎忘记鞋子的存在,使步伐更加轻松。"} +{"content": "类型#裙*颜色#红色*图案#印花*裙下摆#荷叶边*裙领型#v领", "summary": 
"大气活泼的红色裙身,会是一抹很独特的存在;精致的v领,能优化颈部的曲线,还能在不经意间,看到迷人的事业线;腰间增加了荷叶边的点缀,既能优化腰部的曲线,还带着立体的美感;精美的印花,装饰着衣身,丰富了裙身的色彩,精美有韵味会吸睛;轻薄的材质,透气性保护肌肤。"} +{"content": "类型#上衣*颜色#白色*风格#简约*衣样式#衬衫*衣款式#荷叶边", "summary": "这款衬衫崇尚简约风格,一切从简但又不失别样质感。简单白色的运用,看起来毫无瑕疵,有种的气质。前襟和袖口的荷叶边设计,使整体散发着优雅清纯的韵味,时尚无比。"} +{"content": "类型#上衣*版型#宽松*材质#棉*颜色#纯色*风格#休闲*风格#清新*图案#纯色*衣样式#开衫", "summary": "拥有春天般清新的纯色开衫,在春季怎么能少。基础的纯色系渲染,在搭配上能与衣橱所有的衣物组合,打造宝贝帅气的模样。宽松随性的版型,穿着休闲又日常,精选柔软的棉面料,让宝贝在春季穿着刚刚好,清爽又透气,享受舒适的穿着感。"} +{"content": "类型#裙*版型#h*风格#简约*图案#印花*裙长#连衣裙*裙领型#圆领", "summary": "经典的h型版型,打造出精致的连衣裙。秀气简约的印花,让你更具名媛韵味。简洁大气的圆领设计,更能修饰优美脖颈和脸型。尽情彰显女性优雅的魅力。暗色系的颜色,低调又有内涵。连衣裙的面料也很舒适,给你亲肤新体验。"} +{"content": "类型#上衣*颜色#纯色*图案#纯色*图案#线条*图案#撞色*衣样式#风衣*衣款式#口袋", "summary": "简洁大气的风衣廓形,自带着飘洒的自由属性,清爽的没有冗杂的线条,只用纯色与材质彰显内心的纯粹气质。两道斜插口袋轻巧在两侧,可以舒适自在地安放双手。正中点缀着一只撞色小扣,轻巧透出一点玩趣的少女个性,打破单一色调点亮整体。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*风格#休闲*图案#字母*图案#文字*图案#刺绣*裙型#牛仔裙", "summary": "时髦帅气的字母刺绣元素设计,很能突显女性青春活力的感觉。宽松休闲的长款牛仔裙款式,显瘦又遮肉,随性又让人轻松自如哦!百搭又个性十足!"} +{"content": "类型#上衣*版型#宽松*材质#网纱*颜色#白色*风格#复古*风格#性感*图案#斑马纹*图案#复古*图案#创意*衣样式#卫衣*衣款式#拼接", "summary": "这款由黑白色打造的卫衣,打破传统卫衣的局限性,衣袖处采用网纱拼接,复古优雅中充满着性感魅惑。宽窄不一的斑马纹,打破世俗的旧看法,创意无限让人浮想联翩。上身的效果宽松舒适,却能勾勒出前凸后翘的好身材,尤显做工精致。"} +{"content": "类型#裤*材质#棉*材质#牛仔布*材质#混纺*风格#复古*风格#简约*风格#休闲*图案#复古*图案#撞色*裤型#铅笔裤*裤款式#拼接", "summary": "这款铅笔裤采用棉质混纺面料,吸湿透气,手感厚实,穿着舒适。裤身两侧经典复古对称插袋,随意休闲,具有良好的实用性。设计师在简约的版型设计上添加了撞色拼接的元素,让简约的牛仔裤多了几分街头时尚腔调,同时又不失减少原本的复古调性。"} +{"content": "类型#裤*版型#宽松*材质#水洗*风格#运动*裤口#小脚", "summary": "水洗做旧工艺让整款裤子的色彩感得到很好的调试,让裤子不显平凡枯燥。而宽松的裤腿加上小脚的设计,增加了裤子穿着的舒适性还不显拖沓累赘,更加便于日常出行和运动。"} +{"content": "类型#裙*版型#宽松*材质#针织*风格#高贵*图案#线条*裙型#a字*裙型#鱼尾裙*裙领型#圆领*裙领型#高领*裙款式#钉珠*裙款式#抽褶*裙款式#收腰", "summary": "舒适的圆领,修饰颈部线条,时尚狐狸头针织图案,立体生动。高领保暖,褶皱弹力,鱼尾裙摆,充满浪漫风情又结实耐穿,风格精致、迷人而不失物料的轻薄、飘逸的特性。重工钉珠点缀,轻奢华丽的质感璀璨闪耀光泽。收腰a字版型收腰塑型效果好,上身高贵名媛范,宽松的设计不挑身材。"} +{"content": "类型#裙*版型#显瘦*图案#线条*裙下摆#开叉*裙长#半身裙*裙款式#不规则", "summary": "这款半身裙采用贴身版型设计,有一定的廓形度,上身舒适无束缚。正面不规则斜向小开叉,隐约露出腿部线条,非常显瘦。满的提花猫咪面料,设计感十足,为你赚足回头率。"} +{"content": "类型#上衣*材质#蕾丝*颜色#黑色*图案#创意*图案#撞色*图案#蕾丝*衣样式#外套*衣长#短款*衣门襟#拉链*衣款式#拼接*衣款式#螺纹*衣款式#拉链", 
"summary": "全新的时代,让我们摒弃千篇一律的设计。极具创意个性的黑色蕾丝短款外套,给了我们独特的视觉感受。黑色短款上衣能够拉高腰际线,修饰身形,塑造出大长腿女神形象。衣摆处的撞色螺纹拼接,彰显着新时尚趣味。而金属拉链又毫无突兀的期待装饰的作用,让衣身看起来更加时尚大气。"} +{"content": "类型#裤*材质#牛仔布*颜色#黑色*颜色#黄色*风格#复古*图案#字母*图案#文字*图案#复古", "summary": "来自BRAND的牛仔裤,依旧采用了复古的黑色牛仔布来塑造,并将黄色的车缝线密集缝制,让黑色的裤身添入了几分靓丽感。正面设计以简洁款式为主,背部则是经典的大m从腰头下一只贯穿至裤腿上,并将其印制出黄色的字母装饰,让经典得到颠覆,使得裤身潮感十足。"} +{"content": "类型#上衣*材质#天丝*衣样式#风衣*衣领型#翻领*衣款式#腰带", "summary": "BRAND这款天丝轻薄风衣采用了层次感的翻领设计,显得十分有个性,展现个人气质。腰间带有可拆腰带设计,方便人们秀出腰线,展现好身材。"} +{"content": "类型#上衣*材质#天丝*衣样式#风衣*衣领型#翻领*衣款式#腰带", "summary": "BRAND打造的一款比较适合初春搭配的薄款风衣,采用大翻领的设计,更加的潇洒,更能凸显你的气场。可以拆卸的腰带设计,随心搭出专属自己的风格,凸显你的魅力。采用的是天丝的面料,轻盈又爽滑,穿起来更加的舒适。"} +{"content": "类型#裤*颜色#绿色*风格#青春*风格#清新*风格#性感*图案#条纹*裤长#连体裤*裤型#阔腿裤", "summary": "连体裤是十分减龄又有范的单品,上身不用担心穿搭,又轻松显气质。这款连体裤是十分清新的绿色设计,淡淡的条纹极具气质范,缝的包边彰显精致的做工。裤子是无袖的设计,具有性感的气息,阔腿裤的版型搭配摇曳的荷叶边,更显俏皮和活力,也带来满满的青春范。"} +{"content": "类型#裙*风格#淑女*风格#英伦*风格#复古*风格#文艺*图案#格子*图案#复古*裙下摆#荷叶边*裙长#连衣裙*裙领型#娃娃领*裙衣门襟#系带*裙款式#抽绳", "summary": "最近时尚界大玩复古风潮,90年代的复古裙型,又再度流行起来。像这样一件极具文艺与复古味道的连衣裙,将双层荷叶边塑造娃娃领效果,甜美中透出怀旧的情调。格子图案的加入渲染英伦气息,让身上气质更显优雅干练。腰部的抽绳系带,让裙摆更显蓬松效果,展现淑女的一面。"} +{"content": "类型#裙*颜色#藏蓝色*风格#文艺*裙型#百褶*裙下摆#垂坠*裙款式#亮片", "summary": "对于爱好学院风的文艺少女来说,百褶裙简直就是的标志。百褶裙是学院风的经典单品,可以说是永不过时。这条百褶裙采用了文艺的藏蓝色,看起来温和低调充满了宁静的气息。简单的设计搭配亮片装饰让这条裙子穿起来更加少女。面料的原因让裙子拥有良好的垂坠感,穿在身上不会显得臃肿。这样一条裙子完全可以衣柜。"} +{"content": "类型#裙*颜色#黑白*风格#性感*图案#拼色*图案#线条*裙下摆#荷叶边*裙长#连衣裙*裙领型#一字领*裙款式#露肩*裙款式#收腰", "summary": "这是女神款一字领连衣裙,简洁性感的露肩一字领口设计令优美的肩颈线条一览无余。充满立体结构感的夸张袖口和下摆丰盈荷叶边裙摆,与收腰裁剪形成廓型上的对比,加以经典的黑白拼色,更是本季不容错过的主打元素。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*图案#印花*衣样式#衬衫*衣袖长#长袖*衣门襟#单排扣", "summary": "本款上衣整体采用较为宽松的直筒版型设计,藏肉显瘦,不挑身材,适合各种身形的人穿着。飘带领的领口设计,使得本款衬衫穿在身上看起来十分的甜美可爱。单排扣的衣门襟设计,又使得本款衬衫十分的经典大方。精美的印花图案装饰,使得本款衬衫不显得单调,上身给人一种独特的时尚魅力。长袖的设计,更加的贴合手臂曲线,上身更加的舒适贴身。"} +{"content": "类型#裙*材质#天丝*裙长#连衣裙*裙袖长#长袖", "summary": "这一款廓形简洁的长袖连衣裙,我们选用了优质的天丝面料,天丝材质细腻顺滑,手感柔软舒适,却不失筋骨,有着爽滑的触感,非常适合做春夏季的时装搭配。"} +{"content": "类型#上衣*版型#显瘦*风格#通勤*风格#运动*风格#休闲*衣样式#衬衫*衣领型#翻领", "summary": 
"简单配色和基础翻领设计让这款衬衫看起来大方利落,不加修饰反而更具穿搭性,日常休闲、运动出游甚至是通勤场合皆可穿着;且修身版型设计显高不挑人,驾驭轻松穿出绅士范;辅以袖口的搭扣让穿法灵活多变,时尚出街。"} +{"content": "类型#裙*颜色#纯色*图案#纯色*裙型#百褶*裙长#连衣裙*裙款式#木耳边*裙款式#抽褶", "summary": "有时候纯色布料看多了会造成视觉上的疲劳,但这款连衣裙却不会。虽然其主体部分只采用了一种面料,但在它的裙摆处融入了细腻褶皱工艺处理,所呈现出来的是层次丰富的百褶裙摆,它结合前襟的木耳边装饰,给连衣裙增添了浓浓的甜美气息。"} +{"content": "类型#裙*版型#宽松*材质#棉*材质#雪纺*图案#刺绣", "summary": "胸襟处彩色花朵刺绣,充满民族风情,透露一丝古典美感。优质雪纺面料配上宽松裙摆样式,不仅活动方便,还呈现飘逸灵动美感。做工整齐细腻,一丝不苟,彰显高端品质。优质纯棉内里,亲肤柔软,呵护宝宝的每一寸娇嫩肌肤。"} +{"content": "类型#裙*材质#针织*颜色#粉色*风格#简约*裙下摆#开叉*裙领型#圆领*裙领型#v领*裙衣门襟#系带*裙款式#亮丝*裙款式#拼接", "summary": "针织亮丝的材质,把人带入未来科技感的新领域。简约的拼接小圆领,区分层次感。藕粉色闪着微微的星星光泽,好似人鱼一般的梦幻迷人。开叉系带的袖口,摩登时尚。外搭的v领吊带裙,具有减重的视觉效果。"} +{"content": "类型#裙*风格#性感*裙型#包臀裙*裙下摆#荷叶边*裙腰型#高腰*裙款式#不规则", "summary": "高腰的版型,使双腿看起来更加修长,实现了身材比例的完美分割。包臀设计,展现出曼妙的身材曲线,散发出性感的女性气息。不规则的荷叶边设计是整条裙子的亮点,上身更是造型感十足。"} +{"content": "类型#裤*风格#通勤*风格#青春*图案#条纹*图案#印花*裤型#阔腿裤", "summary": "阔腿裤样式和竖条纹元素结合,更好的修饰身形,掩藏身材上的小缺点,显得身高腿长。作为近两年流行热点,阔腿裤利落大气的裤型更加适合通勤风格,增强女性职业感,气场强大。精致的印花蝴蝶穿梭在竖条纹间,增添几分灵动和有趣。"} +{"content": "类型#裙*版型#宽松*材质#蕾丝*颜色#黑色*风格#知性*风格#性感*图案#蕾丝*裙长#长裙", "summary": "这款宽松的长裙,穿着起来有着性感的感觉。精美的蕾丝花装饰衣身,既充满年轻女孩的活力,又展现着知性优雅的熟女风范。黑色调自带一分清冷气质,米色调轻松打造青春少女风,两款可满足你任何场合的需求。"} +{"content": "类型#裤*版型#宽松*材质#牛仔布*颜色#白色*风格#休闲*风格#青春*图案#条纹*图案#撞色*裤型#直筒裤*裤腰型#高腰", "summary": "这款很有青春气息的牛仔裤采用白色作为色调设计,撞色的明线的条纹勾勒其中,衬托出十足的活力气息,同时又营造了视觉上的立体感。宽松的直筒版型轮廓不挑身材,穿搭起来慵懒而休闲,自由驾驭感十足。高腰的版型剪裁着身更具舒适度,贴合不同人的需求。"} +{"content": "类型#上衣*材质#涤纶*风格#简约*风格#知性*图案#拼色*图案#线条*图案#撞色*衣样式#外套*衣领型#圆领*衣袖型#落肩袖", "summary": "raive这款粗花呢外套,采用涤纶材质打造,触感柔软舒适;整体采用杂色拼色的设计,色彩搭配和谐,温婉又不失活力;简约的低圆领,能够柔和脖颈线条,打造天鹅颈;慵懒型的落肩款式,彰显随性自如格调;撞色勾边处理,能够冲击视觉亮点,活跃整体氛围,打造优雅知性的名媛气质。"} +{"content": "类型#上衣*版型#显瘦*图案#线条*衣样式#卫衣*衣款式#抽绳", "summary": "七格格卫衣连衣裙在腰部处加入了可调节抽绳设计,使裙身的版型样式获得巧妙的提升与蜕变,层次感明显的同时很好的勾勒了腰部处的线条轮廓,缔造出纤细的视觉美感,傲人小蛮分钟。可调节抽绳的融入也带来了很大的实用性,以便于在上身穿着时可以根据自己的腰围进行适当灵活的调节,就算是微胖型的姑娘也可以轻松灵活的驾驭,既时髦又显瘦。"} +{"content": "类型#裙*版型#显瘦*材质#天丝*风格#青春*图案#格子*图案#线条*裙下摆#荷叶边*裙领型#v领*裙款式#不规则*裙款式#收腰", "summary": "格子是永不褪流行的经典元素,搭配天丝质感的吊带裙,更显别致优雅。v领设计增加精致感,能很好的修饰脸部线条。同色系带收腰提高腰线,勾勒曲线更显婀娜。不规则荷叶下摆增加层次感,修饰腿部线条,更显瘦显高挑。"} 
+{"content": "类型#上衣*风格#简约*风格#性感*图案#线条*衣样式#针织衫*衣领型#一字领*衣门襟#系带", "summary": "能够让你在一票极简风格中,领略到不一样美感的针织衫甚是独特!一字领带出流畅的设计线条,加入一点性感的小心思,就是肩部的系带穿绳,个性十足的同时,展示出性感的锁骨线,让你在简约中美美的迎来春天的穿搭!舒适贴身的款式,也是属于实穿又时髦的BRAND!"} +{"content": "类型#裙*裙下摆#开叉*裙领型#西装领", "summary": "一款很显女性风骨的单品,穿出不凡气质。优雅的西装领设计,经典时尚增添女性干练气场;裙摆式开叉下摆,带来更多活动空间,自由随性,穿出洒脱气质。"} +{"content": "类型#上衣*版型#显瘦*版型#立体剪裁*材质#水洗*风格#休闲*衣样式#衬衫*衣领型#翻领", "summary": "这款军事徽章休闲衬衫选用特色牛津纺面料制作,经过怀旧水洗工艺处理后,复刻出自然落色效果,更显时尚美观;翻领的设计让衣身更加具有时尚感,干练精神,更显年轻活力;而修身版型采用立体裁剪设计,提升时尚气质,彰显男士独特魅力。"} +{"content": "类型#裙*版型#显瘦*颜色#白色*图案#条纹*裙腰型#高腰*裙长#半身裙", "summary": "这款BRAND的半身裙,采用高腰修身的剪裁。其在视觉上巧妙的修饰着身材曲线,较短的裙摆露出修长的双腿,瞬间穿出大长腿的既视感。除此之外,其白色条纹的裙身点缀,具有较强的视觉冲击,在个性吸睛的同时,尽显不凡的穿搭品味。"} +{"content": "类型#上衣*版型#显瘦*风格#通勤*图案#花色*图案#印花*衣样式#衬衫", "summary": "这一款衬衫创造性的运用了印花元素,是整件衬衫最抢眼的地方,与其他衬衫不同,这款不论是在花色选择还是颜色搭配上都别具一格。前面的竖式剪裁更是特别,显瘦又前卫。上班族可以参考的方式将衬衫扎进半身裙中,时尚感浓浓的通勤风同样粉无数!"} +{"content": "类型#上衣*材质#棉*颜色#白色*颜色#蓝色*风格#淑女*风格#高贵*风格#清新*衣样式#衬衫*衣领型#翻领*衣袖型#泡泡袖", "summary": "这是一款专为女童打造的精致淑女范衬衫。拥有清新柔美的蓝色与白色两个色系,纯粹的色彩似将美好的梦想牢牢守护,同时凸显气质的高贵与优雅。采用纯棉面料并添加氨纶弹力成分,上身包容感更强,配合立体多片式剪裁,更显纤细窈窕的腰身曲线。小巧的翻领,易于衬托宝贝乖巧圆润的脸蛋,而压褶泡泡袖肩的设计,更是将甜美与浪漫尽情挥洒。"} +{"content": "类型#裙*版型#宽松*材质#棉*风格#复古*风格#清新*图案#复古*图案#刺绣*裙长#连衣裙*裙衣门襟#系带", "summary": "一款柔软透气的棉质连衣裙;具有复古韵味的绣花图案,巧妙的蔓延在袖部,精致唯美又不失趣味的设计感,为单调的裙身赋予了清新素雅的韵味;腰间可调的同色系系带,收紧则凸显纤腰穿出优雅范,松弛而系则显宽松穿出随性的魅力。"} +{"content": "类型#上衣*版型#宽松*颜色#纯色*风格#淑女*风格#简约*风格#清新*风格#职场*图案#纯色*衣样式#衬衫*衣款式#绑带*衣款式#收腰", "summary": "这款连衣裙采用衬衫设计风格,尽显职场简约干练气息,整体版型比较宽松,但是在腰部添加绑带设计,有收腰效果,勾勒完美身材曲线。衣身的纯色面料制作,简约大方,打造甜美淑女气质,轻薄面料自带垂感,清新时尚。"} +{"content": "类型#裙*风格#休闲*图案#条纹*裙下摆#开叉*裙长#半身裙*裙衣长#短款", "summary": "简洁的短款半身裙,在侧边做了两条条纹的装饰,让裙子变得休闲时髦起来,更具百搭性。裙摆的开叉设计,让你穿着更舒适,无束缚感。"} +{"content": "类型#上衣*图案#刺绣*衣样式#衬衫*衣门襟#系带", "summary": "古典风格的衬衣,选用素雅的颜色,聚集仙气。领口系带设计,翩翩东方少年的感觉。狐狸图案刺绣,打破沉闷,乐趣,下摆两边大的开叉,给腰身更多一点的自由空间。"} +{"content": "类型#裤*版型#宽松*颜色#纯色*风格#休闲*风格#潮*图案#纯色*裤长#短裤*裤款式#口袋", "summary": "这件韦恩泽维尔宽松休闲纯色短裤,本身在设计风格中就是非常的与众不同的,主要就是体现在裤子上的翻盖口袋装饰,让一条再简单不过的裤子展现出不一样的潮流。裤子采用的宽松的版型设计,让你不管有再粗的腿也能很好的穿着,采用的优质面料,透气吸汗,也是非常的亲肤。"} +{"content": 
"类型#上衣*风格#休闲*风格#潮*图案#印花*衣样式#开衫*衣样式#毛衣", "summary": "这是一款集柔软凉爽。不易产静电、起毛和起球为衣身的开衫,在我们印象中。提到开衫可能会联想到毛衣开衫,但是这款开衫是夏季的,透气又舒适。独特的印花底纹潮流时尚,流苏的装饰点缀个性新颖。蝙蝠版型设计休闲又时髦。"} +{"content": "类型#上衣*版型#显瘦*材质#棉*风格#简约*风格#性感*图案#线条*衣样式#针织衫*衣领型#v领*衣款式#罗纹", "summary": "这款来自massimodutti的针织衫,百分百全棉材质,自带亲肤触感,具有较好的弹力和透气性,结实耐穿。整体的版型简约大方,在修身的廓形下尽显高挑纤细的身形魅力。v领的领口直观展现颈部的线条和傲人的锁骨,性感十足。领口和下摆加入的罗纹束口,干练大气。"} +{"content": "类型#裙*风格#复古*图案#复古*图案#线条*图案#印花*裙下摆#荷叶边*裙长#长裙*裙袖型#喇叭袖*裙袖型#收口*裙衣门襟#系带", "summary": "一袭飘逸长裙再点缀甜美印花图案,温婉间不失清爽感,彰显了优雅气质。荷叶边收口的喇叭袖设计经典复古,体现了做工的精致繁复,增添了浪漫的风情。腰部的系带设计拉长了腿部线条,看起来更显高挑纤细。"} +{"content": "类型#裙*版型#显瘦*风格#清新*图案#植物*图案#印花*裙下摆#荷叶边*裙长#连衣裙*裙款式#收腰", "summary": "设计师将清新的花卉印花元素使用在裙子的设计当中,结合上荷叶边,赋予了连衣裙更多几分的甜美气质。收腰版型的设计非常贴心,可以避免裙子的腰身过于宽大,穿上之后,显瘦效果颇为不错,能够让你的身形看着更为窈窕。"} +{"content": "类型#上衣*风格#简约*衣样式#风衣*衣门襟#双排扣*衣款式#拼接*衣款式#绑带", "summary": "所有的时尚都离不开经典的延续。这款风衣的设计经典而简约,上身却自然流露出一份洒脱感。翻驳领挺括有型,加之衣片拼接立体感十足。双排扣开合飒爽帅气,开合穿着都有范儿。束腰绑带勾勒出纤细腰姿,随意一个整体更有美感。"} +{"content": "类型#裙*版型#显瘦*风格#民族风*图案#刺绣*裙长#长裙*裙款式#勾花镂空", "summary": "这条民族风的波西米亚长裙,有几处心机设计特别夺目。首先是精致的手工刺绣,传承了民族文化的美感而不失严密的针法,其次就是腰部镂空设计,既能遮肚又显瘦。"} +{"content": "类型#上衣*风格#潮*图案#字母*图案#文字*图案#印花*衣样式#外套*衣长#中长款", "summary": "在多风的季节,还是让孩子穿着巴拉巴拉中长款版型的女童外套吧,它不仅能很好的修饰身材,而且还很防风穿着不怕冷呢。看它身后那不同色彩的字母印花图案,在打破衣身单调性的同时,还给整体增添了一丝时尚气息,能轻松塑造具有潮流气息的萌娃形象哦。"} +{"content": "类型#裤*版型#显瘦*风格#性感*图案#线条*图案#刺绣*图案#撞色*裤款式#口袋*裤口#翻折", "summary": "修身版型,收裤脚设计,修正腿型衬出纤细柔美的腿部线条,精致显瘦。穿着时挽起裤脚,形成撞色的翻边观感,更具个性感。两侧口袋设计,实用美观。裤管花朵刺绣设计,增添几分神秘浪漫的气质。"} +{"content": "类型#裙*风格#性感*裙长#连衣裙*裙款式#立体装饰*裙款式#吊带", "summary": "推出的度假风的吊带连衣裙,采用的是深v的设计,很好的拉长你的脖颈,同时还能露出你优美的锁骨,显得超性感。吊带的款式设计,可以轻松凸显你的高挑感,看起来魅力十足。裙身上的立体装饰,更有设计感,让裙子更有仙气。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*颜色#纯色*风格#通勤*图案#纯色*图案#线条*衣样式#打底衫*衣领型#v领", "summary": "一件舒服好穿的打底可以说是衣橱里的许多单品的“救星”了。温柔的猪猪这款通勤感打底衫,采用v领设计,更加修饰肩颈线条;领口采用包边工艺,不易变形,更显质感;腰身微宽松版型,不限制身材,轻松掩盖不完美之处,更加显瘦;纯色设计,低调优雅。"} +{"content": "类型#裤*颜色#白色*颜色#黑白*图案#条纹*图案#线条*图案#撞色*裤长#九分裤*裤型#哈伦裤", "summary": 
"腰部的撞色设计将白色条纹缀在腰间,双层纹路不但增添了层次感还有着黑白色彩的对比显得简洁中有精致的设计感。九分裤长设计的哈伦版型有着彰显腿长的效果,看上去修长很有拔高身形的作用。上松下紧的设计让裤子完美地贴合腿部线条,勾勒出腿型的优势。醋酸纤维的面料使得裤身有适当的弹性,不论腿粗还是细都能驾驭好,有着自然的垂坠感。"} +{"content": "类型#上衣*材质#天丝*材质#针织*颜色#宝蓝色*风格#休闲*风格#高贵*图案#刺绣*衣样式#开衫*衣款式#口袋*衣款式#连帽", "summary": "针织开衫的实穿性能非常强,已然成为了都市女性衣橱必备的单品。这款开衫选用宝蓝色调,凸显出优雅且高贵的气质。添加了天丝材质,触感是爽滑而舒适的。采用连帽设计,又展现出了随性的休闲范。而大口袋的设计,不仅实用还提升了造型感。再加上小狗刺绣的点缀,还增添了趣味性和活力。"} +{"content": "类型#裙*材质#丝绒*风格#居家*风格#性感*裙长#连衣裙*裙领型#立领*裙款式#勾花镂空", "summary": "一款别致的两件套长衫连衣裙,多种穿搭,各具风格。肩部镂空的丝绒吊带裙,保留了传统中式立领,居家外穿皆适宜,传统中带着性感的小心思。"} +{"content": "类型#上衣*衣样式#雪纺衫*衣款式#绑带*衣款式#荷叶边", "summary": "这一款雪纺衫时尚的荷叶边装饰,随风摇曳轻盈出彩。精挑细选顺滑柔软的布料,泛着柔亮的光泽,穿着体验度不一般。加上领口绑带,精致美丽错落有致。整体看起来,款型不会显得单调,更显轻松随意感。"} +{"content": "类型#裤*颜色#黑白*风格#简约*风格#性感*图案#线条*图案#撞色*裤长#短裤", "summary": "长袖上衣加短裤的巧妙结合,瞬间让平淡无奇的造型立刻变得生动俏皮,特别的吸睛亮眼。此款黑白撞色上衣,显得非常有气质,简约中透露着时髦味道,既清雅又显气质,大v领设计,更为性感抚媚,且展露迷人纤细的颈部线条,波动着男士的心,且短裤的搭配,穿着干练脱俗,更加增添女人的柔情,洋溢着青春活力感。"} +{"content": "类型#上衣*材质#棉*颜色#白色*风格#休闲*图案#刺绣*衣样式#衬衫*衣领型#尖领*衣袖长#七分袖", "summary": "太平鸟与可口可乐推出的一款品,而且还是男士衬衫,尖领以及七分袖的设计定位于半休闲的场合。全身采用素净的白色,用料是纯棉质地,尽显柔和斯文的气质。而可口可乐的logo以白色刺绣的形式低调点缀于腰间。"} +{"content": "类型#裙*裙型#花苞裙*裙下摆#开叉*裙长#连衣裙*裙领型#v领", "summary": "一款甜美优雅气质连衣裙,选用一字带v领设计,展现的时尚魅力。另外选用花苞袖设计,丰富整体造型美感,更显甜美气质。搭配开叉裙摆设计,打造出一款与众不同的气质美衣,也就是此款连衣裙的亮点之处。"} +{"content": "类型#裙*风格#街头*裙下摆#花边*裙长#连衣裙*裙领型#立领*裙袖型#灯笼袖*裙款式#拼接*裙款式#钉珠", "summary": "镶边立领搭配精致的花边和桀骜的钉珠,将新时代年轻女孩的性情展现的淋漓尽致。拼接的半灯笼袖设计,不会显得手臂粗壮,还给整条裙子增添了活力。这条连衣裙,精致华美的同时又兼具桀骜不驯的街头气息,自由自在做自己,才是年轻人应有的态度。"} +{"content": "类型#上衣*风格#性感*衣样式#风衣*衣领型#翻领*衣门襟#双排扣*衣款式#露肩", "summary": "这款风衣双排扣的设计,延续了军装的那种英姿飒爽的感觉,加入了两侧露肩设计,展现优美性感的肩部曲线;简洁的翻领设计,凸显女性的大气干练气场,上身效果立体感强;同色系袢设计,勾勒出纤细腰身和迷人曲线。"} +{"content": "类型#裤*风格#复古*图案#蝴蝶结*图案#复古*图案#线条*裤型#阔腿裤*裤款式#松紧带", "summary": "设计师新颖的将金丝绒面料与阔腿裤的版型相结合,赋予了裤装几分复古的风情,垂顺的面料呈现出拉长腿部线条的视觉效果,更显高挑纤细身姿。松紧带设计,即穿脱更方便,又令其舒适没有束缚感。裤头蝴蝶结系带点缀,尽显女性闲适的自在感。"} +{"content": "类型#裙*颜色#黑白*风格#复古*风格#知性*图案#条纹*图案#蝴蝶结*图案#复古*裙长#连衣裙*裙款式#不规则*裙款式#收腰", "summary": "这款收腰连衣裙走的是端庄知性的风格路线,尽显出你的温柔温和的性情,非常的优美迷人。配合黑白条纹的设计,尽显出复古端庄的气质。不规则的裙摆,尽显出你的华丽大方。配合蝴蝶结的袖袢,体现出新鲜新颖的视觉效果,说明其品质可嘉。"} 
+{"content": "类型#上衣*版型#宽松*材质#棉*颜色#纯色*风格#文艺*风格#运动*风格#清新*图案#纯色*衣样式#polo", "summary": "仿佛是专门为了夏天而存在的一款polo衫,纯净耐看的纯色系列穿出男性的好气色,瞬间年轻好几岁不是问题。经典的版型延续了一贯的舒适性,上身宽松不紧勒肌肤,更显随性慵懒。柔软的纯棉材质轻松吸汗透气,即使是在运动后的状态也能完美应付。干净的纯白色更显清新文艺。"} +{"content": "类型#上衣*材质#蕾丝*图案#蕾丝*衣样式#西装*衣款式#拼接", "summary": "春暖花开,万物复苏。又到了西装发挥作用的时候,西服的硬朗是不可磨灭的。这款袖口蕾丝的皮西装,中和了中性的感觉,在帅气与女人之间随意切换,肌理感pu皮面料。袖口与拼接蕾丝可脱卸,复古风十足~"} +{"content": "类型#上衣*材质#蕾丝*图案#蕾丝*衣样式#西装*衣款式#拼接", "summary": "春暖花开,万物复苏。又到了西装发挥作用的时候,西服的硬朗是不可磨灭的。这款袖口蕾丝的皮西装,中和了中性的感觉,在帅气与女人之间随意切换,肌理感pu皮面料。袖口与拼接蕾丝可脱卸,复古风十足"} +{"content": "类型#上衣*材质#蕾丝*图案#蕾丝*衣样式#西装*衣款式#拼接", "summary": "春暖花开,万物复苏。又到了西装发挥作用的时候,西服的硬朗是不可磨灭的。这款袖口蕾丝的皮西装,中和了中性的感觉,在帅气与女人之间随意切换,肌理感pu皮面料。袖口与拼接蕾丝可脱卸,复古风十足。"} +{"content": "类型#上衣*版型#宽松*版型#显瘦*颜色#白色*颜色#绿色*风格#简约*风格#休闲*衣样式#衬衫*衣款式#不规则", "summary": "纯粹利落的经典衬衫款式,打造休闲时尚风,这款BRAND白色女士衬衫,简约的白色百搭又不乏大气美感。不规则的弧形下摆处理,让整体更具随性自然的休闲气息。宽松版型上身具有显瘦的效果,个性绿色贴布点缀,带来夏季清爽之感。"} +{"content": "类型#裙*版型#显瘦*材质#牛仔布*风格#复古*风格#性感*图案#复古*裙型#牛仔裙*裙型#包臀裙*裙下摆#开叉*裙下摆#毛边*裙长#半身裙", "summary": "牛仔半裙的出相信是每个mm都知道的,这是一款既复古又有范的牛仔半裙,全手工的漆点尽显个性和潇洒,为半裙增添满满的不羁气质。半裙是修身的包臀版型设计,视觉上尽显你的曼妙身材,也性感又抢眼。裙身有一道开叉设计,搭配下摆的毛边设计,更添率性气息。"} +{"content": "类型#上衣*衣样式#风衣*衣样式#外套*衣领型#翻领", "summary": "这件风衣外套在设计上极具突破性,不再是经典的风衣廓形,做了很多创新和改变。首先保留了翻领设计,但是不再是规矩的翻领,还有面料不再是挺阔的面料,加上下摆的百褶处理,穿着更显飘逸和灵动,没有一点点拘束感。"} +{"content": "类型#上衣*衣样式#风衣*衣样式#外套*衣领型#翻领", "summary": "风衣的干练和帅气,是其他外套无法给予的,上身随意但又不乏气场的这款风衣,简洁的翻领设计,流畅的剪裁尽显干练大气;同色系袢设计,勾勒纤细腰身展现出迷人的曲线;后背开叉和贴布点缀的亮点,增添设计细节又洒脱随性。"} +{"content": "类型#裤*版型#宽松*材质#亚麻*裤长#九分裤*裤长#连体裤*裤款式#勾花镂空*裤腰型#松紧腰", "summary": "这款连体裤版型宽松,对衣者的身材要求没有过多要求;九分裤设计则是小个子妹子们不可错过的显高武器,而腰间系绳设计,更是进一步提升了腰线;同时圆环镂空搭配系绳更方便调节腰部的松紧程度。亚麻面料上身舒适,是值得尝试的时尚单品。"} +{"content": "类型#裙*版型#宽松*版型#显瘦*裙领型#圆领*裙款式#勾花镂空*裙款式#纽扣", "summary": "让你孕期也能魅力依旧,经典的圆领,更具百搭感,宽松的镂空袖子设计,让裙子更立体,同时还能显瘦遮肉。精致的花瓣领设计,整体造型更具女人味,后背的一字纽扣设计,更适合孕妈穿着。"} +{"content": "类型#裤*版型#显瘦*材质#牛仔布*颜色#深蓝色*裤腰型#高腰", "summary": "misssixty的这条牛仔裤采用了时尚大气的烟灰色,改变了常规的深蓝色带来的审美疲劳。它是高腰裤的版型,腰身部位用一排扣子装饰,适合外穿把纤细的腰身展露出来。带有弹力的修身版型,让你即使是穿着牛仔裤也不会感到束缚感。"} +{"content": 
"类型#裤*材质#牛仔布*颜色#白色*风格#简约*图案#线条*裤长#短裤*裤型#阔腿裤*裤腰型#高腰*裤口#毛边", "summary": "设计剪裁立体有心意,穿着更加有型有范儿。设计师匠心独制,高腰阔腿设计修饰腿部线条为此款白色牛仔短裤添彩,另外做旧毛边工艺设计又将短裤点缀得恰到好处。简约大方的牛仔面料,使每个女孩子都能轻松驾驭,展现夏日里的好身材,释放女生独特魅力。"} +{"content": "类型#裙*版型#显瘦*材质#网纱*风格#性感*裙型#百褶*裙下摆#压褶*裙长#连衣裙*裙衣门襟#拉链*裙衣门襟#套头*裙款式#拼接*裙款式#拉链*裙款式#木耳边*裙款式#抽褶*裙款式#不规则", "summary": "内搭的连衣裙,几乎每个人都有一件。但是像这件这么性感妩媚的很少,没有拉链,套头就可以穿,网纱的垂感很好,因此是很显瘦的,而且大身网纱另外做了压褶,因此有了百褶的效果,不规则木耳边抽褶拼接,更添吸晴亮点。"} diff --git a/ptuning/AdvertiseGen/train.json b/ptuning/AdvertiseGen/train.json new file mode 100644 index 0000000000000000000000000000000000000000..45a137ea5034e4cd0ec37c25ce21a9cf2b4f396a Binary files /dev/null and b/ptuning/AdvertiseGen/train.json differ diff --git a/ptuning/README.md b/ptuning/README.md new file mode 100644 index 0000000000000000000000000000000000000000..2c828b31205a1a0425ed24185352cd2d90930dd6 --- /dev/null +++ b/ptuning/README.md @@ -0,0 +1,254 @@ +# ChatGLM-6B-PT +本仓库实现了对于 ChatGLM-6B 模型基于 [P-Tuning v2](https://github.com/THUDM/P-tuning-v2) 的微调。P-Tuning v2 将需要微调的参数量减少到原来的 0.1%,再通过模型量化、Gradient Checkpoint 等方法,最低只需要 7GB 显存即可运行。 + +下面以 [ADGEN](https://aclanthology.org/D19-1321.pdf) (广告生成) 数据集为例介绍代码的使用方法。 + +*Read this in [English](README_en.md).* + +## 软件依赖 +运行微调需要4.27.1版本的`transformers`。除 ChatGLM-6B 的依赖之外,还需要安装以下依赖 +``` +pip install rouge_chinese nltk jieba datasets +``` +## 使用方法 + +### 下载数据集 +ADGEN 数据集任务为根据输入(content)生成一段广告词(summary)。 + +```json +{ + "content": "类型#上衣*版型#宽松*版型#显瘦*图案#线条*衣样式#衬衫*衣袖型#泡泡袖*衣款式#抽绳", + "summary": "这件衬衫的款式非常的宽松,利落的线条可以很好的隐藏身材上的小缺点,穿在身上有着很好的显瘦效果。领口装饰了一个可爱的抽绳,漂亮的绳结展现出了十足的个性,配合时尚的泡泡袖型,尽显女性甜美可爱的气息。" +} +``` + +从 [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) 或者 [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/f/b3f119a008264b1cabd1/?dl=1) 下载处理好的 ADGEN 数据集,将解压后的 `AdvertiseGen` 目录放到本目录下。 + +### 训练 + +#### P-tuning v2 + +运行以下指令进行训练: +```shell +bash train.sh +``` +`train.sh` 中的 `PRE_SEQ_LEN` 和 `LR` 分别是 soft prompt 长度和训练的学习率,可以进行调节以取得最佳的效果。P-Tuning-v2 方法会冻结全部的模型参数,可通过调整 
`quantization_bit` 来设置原始模型的量化等级,不加此选项则为 FP16 精度加载。 + +在默认配置 `quantization_bit=4`、`per_device_train_batch_size=1`、`gradient_accumulation_steps=16` 下,INT4 的模型参数被冻结,一次训练迭代会以 1 的批处理大小进行 16 次累加的前后向传播,等效为 16 的总批处理大小,此时最低只需 6.7G 显存。若想在同等批处理大小下提升训练效率,可在二者乘积不变的情况下,加大 `per_device_train_batch_size` 的值,但也会带来更多的显存消耗,请根据实际情况酌情调整。 + +如果你想要[从本地加载模型](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B),可以将 `train.sh` 中的 `THUDM/chatglm-6b` 改为你本地的模型路径。 + +#### Finetune + +如果需要进行全参数的 Finetune,需要安装 [Deepspeed](https://github.com/microsoft/DeepSpeed),然后运行以下指令: + +```shell +bash ds_train_finetune.sh +``` + +### 推理 + +将 `evaluate.sh` 中的 `CHECKPOINT` 更改为训练时保存的 checkpoint 名称,运行以下指令进行模型推理和评测: +```shell +bash evaluate.sh +``` +**[2023/04/10更新]** 在 P-tuning v2 训练时模型只保存 PrefixEncoder 部分的参数,所以在推理时需要同时加载原 ChatGLM-6B 模型以及 PrefixEncoder 的权重,因此需要指定参数(已更新 `evaluate.sh`) : + +```shell +--model_name_or_path THUDM/chatglm-6b +--ptuning_checkpoint $CHECKPOINT_PATH +``` + +仍然兼容旧版全参保存的 Checkpoint,只需要跟之前一样设定 `model_name_or_path`: + +```shell +--model_name_or_path $CHECKPOINT_PATH +``` + +评测指标为中文 Rouge score 和 BLEU-4。生成的结果保存在 +`./output/adgen-chatglm-6b-pt-8-1e-2/generated_predictions.txt`。 + +### 例子 +#### 示例1 +* Input: 类型#上衣\*材质#牛仔布\*颜色#白色\*风格#简约\*图案#刺绣\*衣样式#外套\*衣款式#破洞 +* Label: 简约而不简单的牛仔外套,白色的衣身十分百搭。衣身多处有做旧破洞设计,打破单调乏味,增加一丝造型看点。衣身后背处有趣味刺绣装饰,丰富层次感,彰显别样时尚。 +* Output[微调前]: 这件上衣的材质是牛仔布,颜色是白色,风格是简约,图案是刺绣,衣样式是外套,衣款式是破洞。 +* Output[微调后]: 这是一款简约的牛仔外套,破洞设计,将牛仔布破洞,带来一种随意与个性。破洞的牛仔外套,展现出时尚气息,带来一种休闲感。同时,刺绣图案,让整件外套更加立体。 + +#### 示例2 + +* Input: 类型#裙\*版型#显瘦\*风格#文艺\*风格#简约\*图案#印花\*图案#撞色\*裙下摆#压褶\*裙长#连衣裙\*裙领型#圆领 +* Label: 文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。 +* Output[微调前]: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 
撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 +* Output[微调后]: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 + +### 评估结果 + +| | Finetune | P-tuning v2 | LoRA | +| ------------- | ----------- | ----- | ------------- | +| BLEU-4 | 8.01 | 8.10 | 7.62 | +| Rouge-1 | 31.23 | 31.12 | 30.60 | +| Rouge-2 | 7.36 | 7.11 | 6.96 | +| Rouge-l | 25.08 | 24.97 | 24.80 | +| Training Loss | 3.00 | 3.74 | 3.32 | + + + +#### 实验设置 + + ``` +max_source_length=64 +max_target_length=64 +max_steps=3000 + ``` + +##### P-tuning v2 + +``` +pre_seq_len=128 +learning_rate=2e-2 +quantization_bit=4 +per_device_train_batch_size=16 +gradient_accumulation_steps=1 +``` + +##### Finetune + +``` +learning_rate=1e-4 +fp16 +num_gpus=4 +per_device_train_batch_size=4 +gradient_accumulation_steps=1 +``` + +##### LoRA + +实现采用的是 [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) + +``` +learning_rate=5e-4 +per_device_train_batch_size=16 +gradient_accumulation_steps=1 +``` + + + +## 模型部署 +首先载入Tokenizer: + +```python +import os +import torch +from transformers import AutoConfig, AutoModel, AutoTokenizer + +# 载入Tokenizer +tokenizer = AutoTokenizer.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True) +``` + +1. 
如果需要加载的是新 Checkpoint(只包含 PrefixEncoder 参数): + +```python +config = AutoConfig.from_pretrained("THUDM/chatglm-6b", trust_remote_code=True, pre_seq_len=128) +model = AutoModel.from_pretrained("THUDM/chatglm-6b", config=config, trust_remote_code=True) +prefix_state_dict = torch.load(os.path.join(CHECKPOINT_PATH, "pytorch_model.bin")) +new_prefix_state_dict = {} +for k, v in prefix_state_dict.items(): + if k.startswith("transformer.prefix_encoder."): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v +model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) +``` +注意你可能需要将 `pre_seq_len` 改成你训练时的实际值。如果你是[从本地加载模型](https://github.com/THUDM/ChatGLM-6B#%E4%BB%8E%E6%9C%AC%E5%9C%B0%E5%8A%A0%E8%BD%BD%E6%A8%A1%E5%9E%8B)的话,需要将 `THUDM/chatglm-6b` 改成本地的模型路径(注意不是checkpoint路径)。 + +2. 如果需要加载的是旧 Checkpoint(包含 ChatGLM-6B 以及 PrefixEncoder 参数),或者进行的是全参数微调,则直接加载整个 Checkpoint: + +```python +model = AutoModel.from_pretrained(CHECKPOINT_PATH, trust_remote_code=True) +``` + +之后根据需求可以进行量化,也可以直接使用: + +```python +# Comment out the following line if you don't use quantization +model = model.quantize(4) +model = model.half().cuda() +model.transformer.prefix_encoder.float() +model = model.eval() + +response, history = model.chat(tokenizer, "你好", history=[]) +``` + +**[23/04/19]** 你也可以直接运行支持加载 P-Tuning v2 checkpoint 的 [web demo](./web_demo.py) +```shell +bash web_demo.sh +``` +可能需要修改 [web_demo.sh](./web_demo.sh) 的内容以符合你实际的 checkpoint 情况。 + +## 使用自己的数据集 +修改 `train.sh` 和 `evaluate.sh` 中的 `train_file`、`validation_file`和`test_file`为你自己的 JSON 格式数据集路径,并将 `prompt_column` 和 `response_column` 改为 JSON 文件中输入文本和输出文本对应的 KEY。可能还需要增大 `max_source_length` 和 `max_target_length` 来匹配你自己的数据集中的最大输入输出长度。 + +## 对话数据集 + +如需要使用多轮对话数据对模型进行微调,可以提供聊天历史,例如 + +```json +{ + "prompt": "是的。上下水管都好的", + "response": "那就要检查线路了,一般风扇继电器是由电脑控制吸合的,如果电路存在断路,或者电脑坏了的话会出现继电器不吸合的情况!", + "history": [ + [ + "长城h3风扇不转。继电器好的。保险丝好的传感器新的风扇也新的这是为什么。就是继电器缺一个信号线", + "用电脑能读数据流吗?水温多少" + ], + [ + "95", + 
"上下水管温差怎么样啊?空气是不是都排干净了呢?" + ] + ] +} +``` + +训练时需要指定 `--history_column` 为数据中聊天历史的 key(在此例子中是 `history`),将自动把聊天历史拼接,例如: + +- Input + + ``` + [Round 0] + 问:长城h3风扇不转。继电器好的。保险丝好的传感器新的风扇也新的这是为什么。就是继电器缺一个信号线 + 答:用电脑能读数据流吗?水温多少 + [Round 1] + 问:95 + 答:上下水管温差怎么样啊?空气是不是都排干净了呢? + [Round 2] + 问:是的。上下水管都好的 + 答: + ``` + +- Label + + ``` + 那就要检查线路了,一般风扇继电器是由电脑控制吸合的,如果电路存在断路,或者电脑坏了的话会出现继电器不吸合的情况! + ``` + +要注意超过输入长度 `max_source_length` 的内容会被截。 + +可以参考以下指令: + +```shell +bash train_chat.sh +``` + +## 引用 + +``` +@inproceedings{liu2022p, + title={P-tuning: Prompt tuning can be comparable to fine-tuning across scales and tasks}, + author={Liu, Xiao and Ji, Kaixuan and Fu, Yicheng and Tam, Weng and Du, Zhengxiao and Yang, Zhilin and Tang, Jie}, + booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, + pages={61--68}, + year={2022} +} +``` + + + diff --git a/ptuning/README_en.md b/ptuning/README_en.md new file mode 100644 index 0000000000000000000000000000000000000000..9282da32c467eb17316c05b65a5522f99d149340 --- /dev/null +++ b/ptuning/README_en.md @@ -0,0 +1,115 @@ +# ChatGLM-6B-PT +This repository implements tuning of the ChatGLM-6B model based on [P-Tuning v2](https://github.com/THUDM/P-tuning-v2). P-Tuning v2 reduces the amount of parameters that need to be optimized to 0.1% of the full fine-tuning, and then through model quantization, Gradient Checkpoint and other methods, it only needs a minimum of 7GB of video memory to run. + +The following uses the [ADGEN](https://aclanthology.org/D19-1321.pdf) (advertising generation) dataset as an example to introduce how to use the code. + +## Software dependencies +Running p-tuning requires version 4.27.1 of `transformers`. 
In addition to the dependencies of ChatGLM-6B, the following dependencies are required +``` +pip install rouge_chinese nltk jieba datasets +``` +## Instructions + +### Download the dataset +The task of the ADGEN dataset is to generate an advertisement word (summary) based on the input (content). + +```json +{ + "content": "类型#上衣*版型#宽松*版型#显瘦*图案#线条*衣样式#衬衫*衣袖型#泡泡袖*衣款式#抽绳", + "summary": "这件衬衫的款式非常的宽松,利落的线条可以很好的隐藏身材上的小缺点,穿在身上有着很好的显瘦效果。领口装饰了一个可爱的抽绳,漂亮的绳结展现出了十足的个性,配合时尚的泡泡袖型,尽显女性甜美可爱的气息。" +} +``` + +From [Google Drive](https://drive.google.com/file/d/13_vf0xRTQsyneRKdD1bZIr93vBGOczrk/view?usp=sharing) or [Tsinghua Cloud](https://cloud.tsinghua.edu.cn/f/b3f119a008264b1cabd1/?dl=1) Download the processed ADGEN dataset, and put the decompressed `AdvertiseGen` directory into this directory. + +### Training +Run the following commands for training: +```shell +bash train.sh +``` +`PRE_SEQ_LEN` and `LR` in `train.sh` are soft prompt length and training learning rate respectively, which can be adjusted to achieve the best results. The P-Tuning-v2 method will freeze all model parameters, and the quantization level of the original model can be adjusted by adjusting `quantization_bit`. If this option is not added, it will be loaded with FP16 precision. + +Under the default configuration of `per_device_train_batch_size=1`, `gradient_accumulation_steps=16`, the model parameters of INT4 are frozen, and a training iteration will perform 16 cumulative forward and backward propagations with a batch size of 1, which is equivalent to the total batch size of 16, and only 6.7G GPU memory is required at this time with `quantization_bit=4`. If you want to improve the training efficiency under the same batch size, you can increase the value of `per_device_train_batch_size` while keeping the product of the two unchanged, but it will also bring more GPU memory consumption, please adjust it according to the actual situation. 
+ +### Inference + +Change `CHECKPOINT` in `evaluate.sh` to the checkpoint name saved during training, and run the following commands for model inference and evaluation: +```shell +bash evaluate.sh +``` + +The evaluation indicators are Chinese Rouge score and BLEU-4. The generated results are saved in +`./output/adgen-chatglm-6b-pt-8-1e-2/generated_predictions.txt`. + +### Example +#### Example 1 +* Input: 类型#上衣\*材质#牛仔布\*颜色#白色\*风格#简约\*图案#刺绣\*衣样式#外套\*衣款式#破洞 +* Label: 简约而不简单的牛仔外套,白色的衣身十分百搭。衣身多处有做旧破洞设计,打破单调乏味,增加一丝造型看点。衣身后背处有趣味刺绣装饰,丰富层次感,彰显别样时尚。 +* Output[微调前]: 这件上衣的材质是牛仔布,颜色是白色,风格是简约,图案是刺绣,衣样式是外套,衣款式是破洞。 +* Output[微调后]: 这是一款简约的牛仔外套,破洞设计,将牛仔布破洞,带来一种随意与个性。破洞的牛仔外套,展现出时尚气息,带来一种休闲感。同时,刺绣图案,让整件外套更加立体。 + +#### Example 2 + +* Input: 类型#裙\*版型#显瘦\*风格#文艺\*风格#简约\*图案#印花\*图案#撞色\*裙下摆#压褶\*裙长#连衣裙\*裙领型#圆领 +* Label: 文艺个性的印花连衣裙,藏青色底蕴,低调又大气,撞色太阳花分布整个裙身,绚丽而美好,带来时尚减龄的气质。基础款的舒适圆领,简约不失大方,勾勒精致脸庞。领后是一粒包布扣固定,穿脱十分方便。前片立体的打褶设计,搭配后片压褶的做工,增添层次和空间感,显瘦又有型。 +* Output[微调前]: 类型#裙*版型#显瘦*风格#文艺*风格#简约*图案#印花*图案#撞色*裙下摆#压褶*裙长#连衣裙*裙领型#圆领 1\. 连衣裙:简约风格,裙长为膝盖以上,裙领型为圆领。2\. 裙下摆:压褶设计,使裙摆呈现出流畅的褶皱效果。3\. 裙领型:裙领型为圆领,使穿上连衣裙后更加有型。4\. 版型:采用显瘦设计,让连衣裙看起来更加苗条。5\. 风格:文艺风格,让连衣裙更加有内涵和品味。6\. 图案:印花设计,在连衣裙上印有独特的图案。7\. 
撞色:采用撞色设计,让连衣裙在色彩上更加鲜明、富有层次感。 +* Output[微调后]: 这是一款文艺范的连衣裙,以印花为元素,采用简约的印花,既能够突出文艺气质,又能够展现简约风。在印花的同时又有领子和裙摆的压褶设计,更加凸显文艺气质。简约而不会过于单调,搭配出街,穿着十分舒适。 + +### evaluation result + +| | P-tuning v2 | LoRA | +| ------- | ----------- | ----- | +| BLEU-4 | 7.71 | 6.13 | +| Rouge-1 | 31.35 | 28.36 | +| Rouge-2 | 7.19 | 4.38 | +| Rouge-l | 25.17 | 17.54 | + +#### Experiment Settings + + ``` +max_source_length=64 +max_target_length=64 +per_device_train_batch_size=1 +gradient_accumulation_steps=16 +max_steps=3000 + ``` + +##### P-tuning v2 + +``` +pre_seq_len=128 +learning_rate=2e-2 +quantization_bit=4 +``` + +##### LoRA + +``` +learning_rate=5e-4 +``` + +The implementation uses [simple_thu_chatglm6b](https://github.com/yuanzhoulvpi2017/zero_nlp/tree/main/simple_thu_chatglm6b) + + + +## Model Deployment +Replace `THUDM/chatglm-6b` in the corresponding demo or code with the path of the checkpoint after P-Tuning(in the example, `./output/adgen-chatglm-6b-pt-8-1e-2/ checkpoint-3000`). Note that the current fine-tuning does not support multiple rounds of data, so only the responses from the first round of the conversation are fine-tuned. + +## Use your own dataset +Modify `train_file`, `validation_file` and `test_file` in `train.sh` and `evaluate.sh` to your own JSON format dataset paths, and change `prompt_column` and `response_column` to the keys in the JSON file corresponding to input text and output text. 
+ +## TODO +* [ ] Support for chat data +* [ ] Support for full finetuning + +## quoting + +``` +@inproceedings{liu2022p, + title={P-tuning: Prompt tuning can be comparable to fine-tuning across scales and tasks}, + author={Liu, Xiao and Ji, Kaixuan and Fu, Yicheng and Tam, Weng and Du, Zhengxiao and Yang, Zhilin and Tang, Jie}, + booktitle={Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)}, + pages={61--68}, + year={2022} +} +``` \ No newline at end of file diff --git a/ptuning/__pycache__/arguments.cpython-39.pyc b/ptuning/__pycache__/arguments.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d9ee02ca26e4e914484d2e85dd8c262b74a4f61 Binary files /dev/null and b/ptuning/__pycache__/arguments.cpython-39.pyc differ diff --git a/ptuning/__pycache__/trainer.cpython-39.pyc b/ptuning/__pycache__/trainer.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6684646dd6a6bef65f853b370f121ef866032d1d Binary files /dev/null and b/ptuning/__pycache__/trainer.cpython-39.pyc differ diff --git a/ptuning/__pycache__/trainer_seq2seq.cpython-39.pyc b/ptuning/__pycache__/trainer_seq2seq.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..394d3f9f6073b95e83065153907e1535223c5e6c Binary files /dev/null and b/ptuning/__pycache__/trainer_seq2seq.cpython-39.pyc differ diff --git a/ptuning/arguments.py b/ptuning/arguments.py new file mode 100644 index 0000000000000000000000000000000000000000..fda1f3522261f50768984402d9ac691557ea63f3 --- /dev/null +++ b/ptuning/arguments.py @@ -0,0 +1,224 @@ +from dataclasses import dataclass, field +from typing import Optional + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
+ """ + + model_name_or_path: str = field( + metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} + ) + ptuning_checkpoint: str = field( + default=None, metadata={"help": "Path to p-tuning v2 checkpoints"} + ) + config_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} + ) + tokenizer_name: Optional[str] = field( + default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} + ) + cache_dir: Optional[str] = field( + default=None, + metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, + ) + use_fast_tokenizer: bool = field( + default=True, + metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, + ) + model_revision: str = field( + default="main", + metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, + ) + use_auth_token: bool = field( + default=False, + metadata={ + "help": ( + "Will use the token generated when running `huggingface-cli login` (necessary to use this script " + "with private models)." + ) + }, + ) + resize_position_embeddings: Optional[bool] = field( + default=None, + metadata={ + "help": ( + "Whether to automatically resize the position embeddings if `max_source_length` exceeds " + "the model's position embeddings." + ) + }, + ) + quantization_bit: Optional[int] = field( + default=None + ) + pre_seq_len: Optional[int] = field( + default=None + ) + prefix_projection: bool = field( + default=False + ) + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. 
+ """ + + lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."}) + + dataset_name: Optional[str] = field( + default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} + ) + dataset_config_name: Optional[str] = field( + default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} + ) + prompt_column: Optional[str] = field( + default=None, + metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, + ) + response_column: Optional[str] = field( + default=None, + metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."}, + ) + history_column: Optional[str] = field( + default=None, + metadata={"help": "The name of the column in the datasets containing the history of chat."}, + ) + train_file: Optional[str] = field( + default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} + ) + validation_file: Optional[str] = field( + default=None, + metadata={ + "help": ( + "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." + ) + }, + ) + test_file: Optional[str] = field( + default=None, + metadata={ + "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." + }, + ) + overwrite_cache: bool = field( + default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={"help": "The number of processes to use for the preprocessing."}, + ) + max_source_length: Optional[int] = field( + default=1024, + metadata={ + "help": ( + "The maximum total input sequence length after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." 
+ ) + }, + ) + max_target_length: Optional[int] = field( + default=128, + metadata={ + "help": ( + "The maximum total sequence length for target text after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded." + ) + }, + ) + val_max_target_length: Optional[int] = field( + default=None, + metadata={ + "help": ( + "The maximum total sequence length for validation target text after tokenization. Sequences longer " + "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`." + "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " + "during ``evaluate`` and ``predict``." + ) + }, + ) + pad_to_max_length: bool = field( + default=False, + metadata={ + "help": ( + "Whether to pad all samples to model maximum sentence length. " + "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " + "efficient on GPU but very bad for TPU." + ) + }, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of training examples to this " + "value if set." + ) + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of evaluation examples to this " + "value if set." + ) + }, + ) + max_predict_samples: Optional[int] = field( + default=None, + metadata={ + "help": ( + "For debugging purposes or quicker training, truncate the number of prediction examples to this " + "value if set." + ) + }, + ) + num_beams: Optional[int] = field( + default=None, + metadata={ + "help": ( + "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " + "which is used during ``evaluate`` and ``predict``." 
+ ) + }, + ) + ignore_pad_token_for_loss: bool = field( + default=True, + metadata={ + "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." + }, + ) + source_prefix: Optional[str] = field( + default="", metadata={"help": "A prefix to add before every source text (useful for T5 models)."} + ) + + forced_bos_token: Optional[str] = field( + default=None, + metadata={ + "help": ( + "The token to force as the first generated token after the decoder_start_token_id." + "Useful for multilingual models like mBART where the first generated token" + "needs to be the target language token (Usually it is the target language token)" + ) + }, + ) + + + + def __post_init__(self): + if self.dataset_name is None and self.train_file is None and self.validation_file is None and self.test_file is None: + raise ValueError("Need either a dataset name or a training/validation/test file.") + else: + if self.train_file is not None: + extension = self.train_file.split(".")[-1] + assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." + if self.validation_file is not None: + extension = self.validation_file.split(".")[-1] + assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." 
+ if self.val_max_target_length is None: + self.val_max_target_length = self.max_target_length + diff --git a/ptuning/deepspeed.json b/ptuning/deepspeed.json new file mode 100644 index 0000000000000000000000000000000000000000..8e4550902c187e202b56bab025eb891f0bc06c84 --- /dev/null +++ b/ptuning/deepspeed.json @@ -0,0 +1,21 @@ +{ + "train_micro_batch_size_per_gpu": "auto", + "zero_allow_untested_optimizer": true, + "fp16": { + "enabled": "auto", + "loss_scale": 0, + "initial_scale_power": 16, + "loss_scale_window": 1000, + "hysteresis": 2, + "min_loss_scale": 1 + }, + "zero_optimization": { + "stage": 2, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients" : true + } +} \ No newline at end of file diff --git a/ptuning/ds_train_finetune.sh b/ptuning/ds_train_finetune.sh new file mode 100644 index 0000000000000000000000000000000000000000..531a8004dbed00819aa767c420cdc483e7c0abed --- /dev/null +++ b/ptuning/ds_train_finetune.sh @@ -0,0 +1,28 @@ + +LR=1e-4 + +MASTER_PORT=$(shuf -n 1 -i 10000-65535) + +deepspeed --num_gpus=4 --master_port $MASTER_PORT main.py \ + --deepspeed deepspeed.json \ + --do_train \ + --train_file AdvertiseGen/train.json \ + --test_file AdvertiseGen/dev.json \ + --prompt_column content \ + --response_column summary \ + --overwrite_cache \ + --model_name_or_path THUDM/chatglm-6b \ + --output_dir ./output/adgen-chatglm-6b-ft-$LR \ + --overwrite_output_dir \ + --max_source_length 64 \ + --max_target_length 64 \ + --per_device_train_batch_size 4 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 1 \ + --predict_with_generate \ + --max_steps 5000 \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate $LR \ + --fp16 + diff --git a/ptuning/evaluate.sh b/ptuning/evaluate.sh new file mode 100644 index 0000000000000000000000000000000000000000..2f652a65e4c24622a4748f0ced2340f6f998d127 --- /dev/null +++ 
b/ptuning/evaluate.sh @@ -0,0 +1,21 @@ +PRE_SEQ_LEN=128 +CHECKPOINT=adgen-chatglm-6b-pt-128-2e-2 +STEP=100 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_predict \ + --validation_file AdvertiseGen/dev.json \ + --test_file AdvertiseGen/dev.json \ + --overwrite_cache \ + --prompt_column content \ + --response_column summary \ + --model_name_or_path /home/wangyan/project/hft/uptest \ + --ptuning_checkpoint ./output/$CHECKPOINT/checkpoint-$STEP \ + --output_dir ./output/$CHECKPOINT \ + --overwrite_output_dir \ + --max_source_length 64 \ + --max_target_length 64 \ + --per_device_eval_batch_size 1 \ + --predict_with_generate \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 diff --git a/ptuning/evaluate_finetune.sh b/ptuning/evaluate_finetune.sh new file mode 100644 index 0000000000000000000000000000000000000000..e275c3cbbec9ee65ad5e4a958a0ea52c248964c4 --- /dev/null +++ b/ptuning/evaluate_finetune.sh @@ -0,0 +1,18 @@ +CHECKPOINT=adgen-chatglm-6b-ft-1e-4 +STEP=3000 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_predict \ + --validation_file AdvertiseGen/dev.json \ + --test_file AdvertiseGen/dev.json \ + --overwrite_cache \ + --prompt_column content \ + --response_column summary \ + --model_name_or_path ./output/$CHECKPOINT/checkpoint-$STEP \ + --output_dir ./output/$CHECKPOINT \ + --overwrite_output_dir \ + --max_source_length 256 \ + --max_target_length 256 \ + --per_device_eval_batch_size 1 \ + --predict_with_generate \ + --fp16_full_eval diff --git a/ptuning/main.py b/ptuning/main.py new file mode 100644 index 0000000000000000000000000000000000000000..43ecdf814352cf388bd9cb319b9938d33737d59d --- /dev/null +++ b/ptuning/main.py @@ -0,0 +1,431 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Fine-tuning the library models for sequence to sequence. +""" +# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. + +import logging +import os +import sys +import json + +import numpy as np +from datasets import load_dataset +import jieba +from rouge_chinese import Rouge +from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction +import torch + +import transformers +from transformers import ( + AutoConfig, + AutoModel, + AutoTokenizer, + DataCollatorForSeq2Seq, + HfArgumentParser, + Seq2SeqTrainingArguments, + set_seed, +) +from trainer_seq2seq import Seq2SeqTrainer + +from arguments import ModelArguments, DataTrainingArguments + +logger = logging.getLogger(__name__) + +def main(): + + parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments.
+ model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + # Setup logging + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + handlers=[logging.StreamHandler(sys.stdout)], + ) + + if training_args.should_log: + # The default of training_args.log_level is passive, so we set log level at info here to have that default. + transformers.utils.logging.set_verbosity_info() + + log_level = training_args.get_process_log_level() + logger.setLevel(log_level) + # datasets.utils.logging.set_verbosity(log_level) + transformers.utils.logging.set_verbosity(log_level) + transformers.utils.logging.enable_default_handler() + transformers.utils.logging.enable_explicit_format() + + # Log on each process the small summary: + logger.warning( + f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" + ) + logger.info(f"Training/evaluation parameters {training_args}") + + # Set seed before initializing model.
+ set_seed(training_args.seed) + + # Load dataset + data_files = {} + if data_args.train_file is not None: + data_files["train"] = data_args.train_file + extension = data_args.train_file.split(".")[-1] + if data_args.validation_file is not None: + data_files["validation"] = data_args.validation_file + extension = data_args.validation_file.split(".")[-1] + if data_args.test_file is not None: + data_files["test"] = data_args.test_file + extension = data_args.test_file.split(".")[-1] + + raw_datasets = load_dataset( + extension, + data_files=data_files, + cache_dir=model_args.cache_dir, + use_auth_token=True if model_args.use_auth_token else None, + ) + + # Load pretrained model and tokenizer + config = AutoConfig.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + config.pre_seq_len = model_args.pre_seq_len + config.prefix_projection = model_args.prefix_projection + + tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, trust_remote_code=True) + + if model_args.ptuning_checkpoint is not None: + # Evaluation + # Loading extra state dict of prefix encoder + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) + prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin")) + new_prefix_state_dict = {} + for k, v in prefix_state_dict.items(): + if k.startswith("transformer.prefix_encoder."): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v + model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) + else: + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) + + if model_args.quantization_bit is not None: + print(f"Quantized to {model_args.quantization_bit} bit") + model = model.quantize(model_args.quantization_bit) + if model_args.pre_seq_len is not None: + # P-tuning v2 + model = model.half() + model.transformer.prefix_encoder.float() + else: + # Finetune + 
model = model.float() + + prefix = data_args.source_prefix if data_args.source_prefix is not None else "" + + # Preprocessing the datasets. + # We need to tokenize inputs and targets. + if training_args.do_train: + column_names = raw_datasets["train"].column_names + elif training_args.do_eval: + column_names = raw_datasets["validation"].column_names + elif training_args.do_predict: + column_names = raw_datasets["test"].column_names + else: + logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") + return + + # Get the column names for input/target. + prompt_column = data_args.prompt_column + response_column = data_args.response_column + history_column = data_args.history_column + + # Temporarily set max_target_length for training. + max_target_length = data_args.max_target_length + + def preprocess_function_eval(examples): + inputs, targets = [], [] + for i in range(len(examples[prompt_column])): + if examples[prompt_column][i] and examples[response_column][i]: + query = examples[prompt_column][i] + if history_column is None or len(examples[history_column][i]) == 0: + prompt = query + else: + prompt = "" + history = examples[history_column][i] + for turn_idx, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + inputs.append(prompt) + targets.append(examples[response_column][i]) + + inputs = [prefix + inp for inp in inputs] + model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, truncation=True, padding=True) + labels = tokenizer(text_target=targets, max_length=max_target_length, truncation=True) + + if data_args.ignore_pad_token_for_loss: + labels["input_ids"] = [ + [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] + ] + model_inputs["labels"] = labels["input_ids"] + + return model_inputs + + def 
preprocess_function_train(examples): + max_seq_length = data_args.max_source_length + data_args.max_target_length + + model_inputs = { + "input_ids": [], + "labels": [], + } + for i in range(len(examples[prompt_column])): + if examples[prompt_column][i] and examples[response_column][i]: + query, answer = examples[prompt_column][i], examples[response_column][i] + + if history_column is None: + prompt = query + else: + prompt = "" + history = examples[history_column][i] + for turn_idx, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(turn_idx, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + + prompt = prefix + prompt + a_ids = tokenizer.encode(text=prompt, add_special_tokens=False) + b_ids = tokenizer.encode(text=answer, add_special_tokens=False) + + if len(a_ids) > data_args.max_source_length - 1: + a_ids = a_ids[: data_args.max_source_length - 1] + + if len(b_ids) > data_args.max_target_length - 2: + b_ids = b_ids[: data_args.max_target_length - 2] + + input_ids = tokenizer.build_inputs_with_special_tokens(a_ids, b_ids) + + context_length = input_ids.index(tokenizer.bos_token_id) + mask_position = context_length - 1 + labels = [-100] * context_length + input_ids[mask_position+1:] + + pad_len = max_seq_length - len(input_ids) + input_ids = input_ids + [tokenizer.pad_token_id] * pad_len + labels = labels + [tokenizer.pad_token_id] * pad_len + if data_args.ignore_pad_token_for_loss: + labels = [(l if l != tokenizer.pad_token_id else -100) for l in labels] + + model_inputs["input_ids"].append(input_ids) + model_inputs["labels"].append(labels) + + return model_inputs + + def print_dataset_example(example): + print("input_ids",example["input_ids"]) + print("inputs", tokenizer.decode(example["input_ids"])) + print("label_ids", example["labels"]) + print("labels", tokenizer.decode(example["labels"])) + + if training_args.do_train: + if "train" not in raw_datasets: + raise ValueError("--do_train 
requires a train dataset") + train_dataset = raw_datasets["train"] + if data_args.max_train_samples is not None: + max_train_samples = min(len(train_dataset), data_args.max_train_samples) + train_dataset = train_dataset.select(range(max_train_samples)) + with training_args.main_process_first(desc="train dataset map pre-processing"): + train_dataset = train_dataset.map( + preprocess_function_train, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on train dataset", + ) + print_dataset_example(train_dataset[0]) + + if training_args.do_eval: + max_target_length = data_args.val_max_target_length + if "validation" not in raw_datasets: + raise ValueError("--do_eval requires a validation dataset") + eval_dataset = raw_datasets["validation"] + if data_args.max_eval_samples is not None: + max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) + eval_dataset = eval_dataset.select(range(max_eval_samples)) + with training_args.main_process_first(desc="validation dataset map pre-processing"): + eval_dataset = eval_dataset.map( + preprocess_function_eval, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on validation dataset", + ) + print_dataset_example(eval_dataset[0]) + + if training_args.do_predict: + max_target_length = data_args.val_max_target_length + if "test" not in raw_datasets: + raise ValueError("--do_predict requires a test dataset") + predict_dataset = raw_datasets["test"] + if data_args.max_predict_samples is not None: + max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) + predict_dataset = predict_dataset.select(range(max_predict_samples)) + with training_args.main_process_first(desc="prediction dataset map pre-processing"): + predict_dataset = predict_dataset.map( + 
preprocess_function_eval, + batched=True, + num_proc=data_args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=not data_args.overwrite_cache, + desc="Running tokenizer on prediction dataset", + ) + print_dataset_example(predict_dataset[0]) + + # Data collator + label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id + data_collator = DataCollatorForSeq2Seq( + tokenizer, + model=model, + label_pad_token_id=label_pad_token_id, + pad_to_multiple_of=None, + padding=False + ) + + # Metric + def compute_metrics(eval_preds): + preds, labels = eval_preds + if isinstance(preds, tuple): + preds = preds[0] + decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) + if data_args.ignore_pad_token_for_loss: + # Replace -100 in the labels as we can't decode them. + labels = np.where(labels != -100, labels, tokenizer.pad_token_id) + decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) + + score_dict = { + "rouge-1": [], + "rouge-2": [], + "rouge-l": [], + "bleu-4": [] + } + for pred, label in zip(decoded_preds, decoded_labels): + hypothesis = list(jieba.cut(pred)) + reference = list(jieba.cut(label)) + rouge = Rouge() + scores = rouge.get_scores(' '.join(hypothesis) , ' '.join(reference)) + result = scores[0] + + for k, v in result.items(): + score_dict[k].append(round(v["f"] * 100, 4)) + bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3) + score_dict["bleu-4"].append(round(bleu_score * 100, 4)) + + for k, v in score_dict.items(): + score_dict[k] = float(np.mean(v)) + return score_dict + + # Override the decoding parameters of Seq2SeqTrainer + training_args.generation_max_length = ( + training_args.generation_max_length + if training_args.generation_max_length is not None + else data_args.val_max_target_length + ) + training_args.generation_num_beams = ( + data_args.num_beams if data_args.num_beams is not None else 
training_args.generation_num_beams + ) + # Initialize our Trainer + trainer = Seq2SeqTrainer( + model=model, + args=training_args, + train_dataset=train_dataset if training_args.do_train else None, + eval_dataset=eval_dataset if training_args.do_eval else None, + tokenizer=tokenizer, + data_collator=data_collator, + compute_metrics=compute_metrics if training_args.predict_with_generate else None, + save_prefixencoder=model_args.pre_seq_len is not None + ) + + # Training + if training_args.do_train: + checkpoint = None + if training_args.resume_from_checkpoint is not None: + checkpoint = training_args.resume_from_checkpoint + # elif last_checkpoint is not None: + # checkpoint = last_checkpoint + model.gradient_checkpointing_enable() + model.enable_input_require_grads() + train_result = trainer.train(resume_from_checkpoint=checkpoint) + # trainer.save_model() # Saves the tokenizer too for easy upload + + metrics = train_result.metrics + max_train_samples = ( + data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) + ) + metrics["train_samples"] = min(max_train_samples, len(train_dataset)) + + trainer.log_metrics("train", metrics) + trainer.save_metrics("train", metrics) + trainer.save_state() + + # Evaluation + results = {} + if training_args.do_eval: + logger.info("*** Evaluate ***") + metrics = trainer.evaluate(metric_key_prefix="eval", do_sample=True, top_p=0.7, max_length=512, temperature=0.95) + max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) + metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) + + trainer.log_metrics("eval", metrics) + trainer.save_metrics("eval", metrics) + + if training_args.do_predict: + logger.info("*** Predict ***") + + predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict", max_length=512, do_sample=True, top_p=0.7, temperature=0.95) + metrics = predict_results.metrics + max_predict_samples = ( + 
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) + ) + metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) + + trainer.log_metrics("predict", metrics) + trainer.save_metrics("predict", metrics) + + if trainer.is_world_process_zero(): + if training_args.predict_with_generate: + predictions = tokenizer.batch_decode( + predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + predictions = [pred.strip() for pred in predictions] + labels = tokenizer.batch_decode( + predict_results.label_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True + ) + labels = [label.strip() for label in labels] + output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt") + with open(output_prediction_file, "w", encoding="utf-8") as writer: + for p, l in zip(predictions, labels): + res = json.dumps({"labels": l, "predict": p}, ensure_ascii=False) + writer.write(f"{res}\n") + return results + + +def _mp_fn(index): + # For xla_spawn (TPUs) + main() + + +if __name__ == "__main__": + main() diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/all_results.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/all_results.json new file mode 100644 index 0000000000000000000000000000000000000000..ff385825a821d968d0264e6f03e2c2f9388f236a --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/all_results.json @@ -0,0 +1,8 @@ +{ + "epoch": 0.01, + "train_loss": 4.456654052734375, + "train_runtime": 395.1154, + "train_samples": 114599, + "train_samples_per_second": 4.049, + "train_steps_per_second": 0.253 +} \ No newline at end of file diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/config.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/config.json new file mode 100644 index 0000000000000000000000000000000000000000..06341c327f5b1ce5c3e53e96c56be7de127a2c1f --- /dev/null +++ 
b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/config.json @@ -0,0 +1,31 @@ +{ + "_name_or_path": "/home/wangyan/project/hft/uptest", + "architectures": [ + "ChatGLMForConditionalGeneration" + ], + "auto_map": { + "AutoConfig": "configuration_chatglm.ChatGLMConfig", + "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration" + }, + "bos_token_id": 130004, + "eos_token_id": 130005, + "gmask_token_id": 130001, + "hidden_size": 4096, + "inner_hidden_size": 16384, + "layernorm_epsilon": 1e-05, + "mask_token_id": 130000, + "max_sequence_length": 2048, + "model_type": "chatglm", + "num_attention_heads": 32, + "num_layers": 28, + "pad_token_id": 3, + "position_encoding_2d": true, + "pre_seq_len": 128, + "prefix_projection": false, + "quantization_bit": 4, + "torch_dtype": "float16", + "transformers_version": "4.27.1", + "use_cache": true, + "vocab_size": 130528 +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/configuration_chatglm.py b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/configuration_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..78f3425d5f63ad43f31b092b8d62b44d28d52f15 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/configuration_chatglm.py @@ -0,0 +1,103 @@ +""" ChatGLM model configuration """ + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class ChatGLMConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`~ChatGLMModel`]. + It is used to instantiate an ChatGLM model according to the specified arguments, defining the model + architecture. 
Instantiating a configuration with the defaults will yield a similar configuration to that of + the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used + to control the model outputs. Read the documentation from [`PretrainedConfig`] + for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 150528): + Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`~ChatGLMModel`] or + [`~TFChatGLMModel`]. + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 28): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + inner_hidden_size (`int`, *optional*, defaults to 16384): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + max_sequence_length (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. + Typically set this to something large just in case (e.g., 512 or 1024 or 2048). + layernorm_epsilon (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether the model should return the last key/values attentions (not used by all models). 
+ Example: + + ```python + >>> from configuration_chatglm import ChatGLMConfig + >>> from modeling_chatglm import ChatGLMModel + + >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration + >>> configuration = ChatGLMConfig() + + >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration + >>> model = ChatGLMModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` +""" + model_type = "chatglm" + + def __init__( + self, + vocab_size=150528, + hidden_size=4096, + num_layers=28, + num_attention_heads=32, + layernorm_epsilon=1e-5, + use_cache=False, + bos_token_id=150004, + eos_token_id=150005, + mask_token_id=150000, + gmask_token_id=150001, + pad_token_id=0, + max_sequence_length=2048, + inner_hidden_size=16384, + position_encoding_2d=True, + quantization_bit=0, + pre_seq_len=None, + prefix_projection=False, + **kwargs + ): + self.num_layers = num_layers + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.max_sequence_length = max_sequence_length + self.layernorm_epsilon = layernorm_epsilon + self.inner_hidden_size = inner_hidden_size + self.use_cache = use_cache + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.mask_token_id = mask_token_id + self.gmask_token_id = gmask_token_id + self.position_encoding_2d = position_encoding_2d + self.quantization_bit = quantization_bit + self.pre_seq_len = pre_seq_len + self.prefix_projection = prefix_projection + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs + ) diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/generation_config.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e6191613b8cca2cd0d91cc92e90f2a353388ec3e --- 
/dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 130004, + "eos_token_id": 130005, + "pad_token_id": 3, + "transformers_version": "4.27.1" +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/ice_text.model b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/ice_text.model new file mode 100644 index 0000000000000000000000000000000000000000..0dcfe31e02ad0767e0c80a469340bf97f58e777a --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/ice_text.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e974d9a69c242ce014c88c2b26089270f6198f3c0b700a887666cd3e816f17e +size 2706249 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/modeling_chatglm.py b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/modeling_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..fc291119053cdf164b697ad022735f916fb3e8b1 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/modeling_chatglm.py @@ -0,0 +1,1435 @@ +""" PyTorch ChatGLM model. 
""" + +import math +import copy +import os +import warnings +import re +import sys + +import torch +import torch.utils.checkpoint +import torch.nn.functional as F +from torch import nn +from torch.nn import CrossEntropyLoss, LayerNorm +from torch.nn.utils import skip_init +from typing import Optional, Tuple, Union, List, Callable, Dict, Any + +from transformers.utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + BaseModelOutputWithPastAndCrossAttentions, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging +from transformers.generation.logits_process import LogitsProcessor +from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput + +from .configuration_chatglm import ChatGLMConfig + +# flags required to enable jit fusion kernels + +if sys.platform != 'darwin': + torch._C._jit_set_profiling_mode(False) + torch._C._jit_set_profiling_executor(False) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B" +_CONFIG_FOR_DOC = "ChatGLM6BConfig" + +CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "THUDM/chatglm-6b", + # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm +] + + +class InvalidScoreLogitsProcessor(LogitsProcessor): + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + if torch.isnan(scores).any() or torch.isinf(scores).any(): + scores.zero_() + scores[..., 5] = 5e4 + return scores + + +def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + 
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + except 
        AssertionError as e:  # (continuation of the try/except in load_tf_weights, above this chunk)
            # Attach both shapes to the assertion so shape mismatches are debuggable.
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model


class PrefixEncoder(torch.nn.Module):
    """
    The torch.nn model to encode the prefix (for P-Tuning v2 prompt tuning).
    Input shape: (batch-size, prefix-length)
    Output shape: (batch-size, prefix-length, 2*layers*hidden)
    """

    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        if self.prefix_projection:
            # Use a two-layer MLP to encode the prefix
            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size)
            self.trans = torch.nn.Sequential(
                torch.nn.Linear(config.hidden_size, config.hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2)
            )
        else:
            # Direct embedding lookup: one vector per prefix position covering key+value
            # for every layer (hence the factor num_layers * 2).
            self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.hidden_size * 2)

    def forward(self, prefix: torch.Tensor):
        if self.prefix_projection:
            prefix_tokens = self.embedding(prefix)
            past_key_values = self.trans(prefix_tokens)
        else:
            past_key_values = self.embedding(prefix)
        return past_key_values


@torch.jit.script
def gelu_impl(x):
    """OpenAI's gelu implementation (tanh approximation of GELU)."""
    return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x *
                                       (1.0 + 0.044715 * x * x)))


def gelu(x):
    """Thin wrapper so callers don't depend on the jit-scripted symbol directly."""
    return gelu_impl(x)


class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding (RoPE) with cached cos/sin tables."""

    def __init__(self, dim, base=10000, precision=torch.half, learnable=False):
        super().__init__()
        # Standard RoPE inverse frequencies: base^(-2i/dim) for i in [0, dim/2).
        inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim))
        inv_freq = inv_freq.half()
        self.learnable = learnable
        if learnable:
            self.inv_freq = torch.nn.Parameter(inv_freq)
            self.max_seq_len_cached = None
        else:
            self.register_buffer('inv_freq', inv_freq)
            self.max_seq_len_cached = None
        self.cos_cached = None
        self.sin_cached = None
        self.precision = precision

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
                              error_msgs):
        # inv_freq is deterministic from the constructor; ignore any checkpointed value.
        pass

    def forward(self, x, seq_dim=1, seq_len=None):
        if seq_len is None:
            seq_len = x.shape[seq_dim]
        # Recompute (and, for the non-learnable case, cache) cos/sin when the
        # requested length exceeds the cached table.
        if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached):
            self.max_seq_len_cached = None if self.learnable else seq_len
            t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype)
            freqs = torch.einsum('i,j->ij', t, self.inv_freq)
            # Different from paper, but it uses a different permutation in order to obtain the same calculation
            emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
            if self.precision == torch.bfloat16:
                emb = emb.float()

            # [sx, 1 (b * np), hn]
            cos_cached = emb.cos()[:, None, :]
            sin_cached = emb.sin()[:, None, :]
            if self.precision == torch.bfloat16:
                cos_cached = cos_cached.bfloat16()
                sin_cached = sin_cached.bfloat16()
            if self.learnable:
                # Learnable tables are not cached (they change during training).
                return cos_cached, sin_cached
            self.cos_cached, self.sin_cached = cos_cached, sin_cached
        return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...]

    def _apply(self, fn):
        # Keep the cached tables in sync with .to()/.cuda()/.half() etc.
        if self.cos_cached is not None:
            self.cos_cached = fn(self.cos_cached)
        if self.sin_cached is not None:
            self.sin_cached = fn(self.sin_cached)
        return super()._apply(fn)


def rotate_half(x):
    """Rotate the two halves of the last dimension: (x1, x2) -> (-x2, x1)."""
    x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:]
    return torch.cat((-x2, x1), dim=x1.ndim - 1)  # dim=-1 triggers a bug in earlier torch versions


@torch.jit.script
def apply_rotary_pos_emb_index(q, k, cos, sin, position_id):
    """Apply RoPE to q and k, gathering cos/sin rows by per-token position ids."""
    # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn]
    cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \
        F.embedding(position_id, sin.squeeze(1)).unsqueeze(2)
    q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin)
    return q, k


def attention_fn(
        self,
        query_layer,
        key_layer,
        value_layer,
        attention_mask,
        hidden_size_per_partition,
        layer_id,
        layer_past=None,
        scaling_attention_score=True,
        use_cache=False,
):
    """Core attention. Tensors are sequence-first: [sq, b, np, hn].

    Returns (context_layer, present, attention_probs); `present` is the
    (key, value) cache tuple when use_cache else None.
    """
    if layer_past is not None:
        # Prepend cached keys/values along the sequence dimension (dim 0).
        past_key, past_value = layer_past[0], layer_past[1]
        key_layer = torch.cat((past_key, key_layer), dim=0)
        value_layer = torch.cat((past_value, value_layer), dim=0)

    # seqlen, batch, num_attention_heads, hidden_size_per_attention_head
    seq_len, b, nh, hidden_size = key_layer.shape

    if use_cache:
        present = (key_layer, value_layer)
    else:
        present = None

    # Per-layer scaling coefficient (layer_id + 1); divided out of the query here
    # and multiplied back after the (float) softmax for numerical stability in fp16.
    query_key_layer_scaling_coeff = float(layer_id + 1)
    if scaling_attention_score:
        query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff)

    # ===================================
    # Raw attention scores.
    # [b, np, s, s]
    # ===================================

    # [b, np, sq, sk]
    output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0))

    # [sq, b, np, hn] -> [sq, b * np, hn]
    query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1)
    # [sk, b, np, hn] -> [sk, b * np, hn]
    key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1)

    # Dummy input for baddbmm; with beta=0.0 its values are never read, only its
    # dtype/device matter.
    matmul_result = torch.zeros(
        1, 1, 1,
        dtype=query_layer.dtype,
        device=query_layer.device,
    )

    matmul_result = torch.baddbmm(
        matmul_result,
        query_layer.transpose(0, 1),  # [b * np, sq, hn]
        key_layer.transpose(0, 1).transpose(1, 2),  # [b * np, hn, sk]
        beta=0.0,
        alpha=1.0,
    )

    # change view to [b, np, sq, sk]
    attention_scores = matmul_result.view(*output_size)

    if self.scale_mask_softmax:
        self.scale_mask_softmax.scale = query_key_layer_scaling_coeff
        attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous())
    else:
        if not (attention_mask == 0).all():
            # if auto-regressive, skip
            attention_scores.masked_fill_(attention_mask, -10000.0)
        dtype = attention_scores.dtype
        # Softmax in fp32 (with the layer scaling restored) then cast back.
        attention_scores = attention_scores.float()
        attention_scores = attention_scores * query_key_layer_scaling_coeff

        attention_probs = F.softmax(attention_scores, dim=-1)

        attention_probs = attention_probs.type(dtype)

    # =========================
    # Context layer. [sq, b, hp]
    # =========================

    # value_layer -> context layer.
    # [sk, b, np, hn] --> [b, np, sq, hn]

    # context layer shape: [b, np, sq, hn]
    output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3))

    # change view [sk, b * np, hn]
    value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1)

    # change view [b * np, sq, sk]
    attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)

    # matmul: [b * np, sq, hn]
    context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1))

    # change view [b, np, sq, hn]
    context_layer = context_layer.view(*output_size)

    # [b, np, sq, hn] --> [sq, b, np, hn]
    context_layer = context_layer.permute(2, 0, 1, 3).contiguous()

    # [sq, b, np, hn] --> [sq, b, hp]
    new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,)
    context_layer = context_layer.view(*new_context_layer_shape)

    outputs = (context_layer, present, attention_probs)

    return outputs


def default_init(cls, *args, **kwargs):
    """Normal construction; the alternative (skip_init) allocates uninitialized weights."""
    return cls(*args, **kwargs)


class SelfAttention(torch.nn.Module):
    """Multi-head self-attention with (optionally 2D) rotary position encoding."""

    def __init__(self, hidden_size, num_attention_heads,
                 layer_id, hidden_size_per_attention_head=None, bias=True,
                 params_dtype=torch.float, position_encoding_2d=True, empty_init=True):
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        super(SelfAttention, self).__init__()

        self.layer_id = layer_id
        self.hidden_size = hidden_size
        self.hidden_size_per_partition = hidden_size
        self.num_attention_heads = num_attention_heads
        self.num_attention_heads_per_partition = num_attention_heads
        self.position_encoding_2d = position_encoding_2d
        # With 2D encoding each of the two position channels rotates half the
        # head dimension, hence the extra // 2.
        self.rotary_emb = RotaryEmbedding(
            self.hidden_size // (self.num_attention_heads * 2)
            if position_encoding_2d
            else self.hidden_size // self.num_attention_heads,
            base=10000,
            precision=torch.half,
            learnable=False,
        )

        self.scale_mask_softmax = None

        if hidden_size_per_attention_head is None:
            self.hidden_size_per_attention_head = hidden_size // num_attention_heads
        else:
            self.hidden_size_per_attention_head = hidden_size_per_attention_head

        self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head

        # Strided linear layer.
        self.query_key_value = init_method(
            torch.nn.Linear,
            hidden_size,
            3 * self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

        self.dense = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    @staticmethod
    def attention_mask_func(attention_scores, attention_mask):
        # In-place fill of masked positions with a large negative value.
        attention_scores.masked_fill_(attention_mask, -10000.0)
        return attention_scores

    def split_tensor_along_last_dim(self, tensor, num_partitions,
                                    contiguous_split_chunks=False):
        """Split a tensor along its last dimension.
        Arguments:
            tensor: input tensor.
            num_partitions: number of partitions to split the tensor
            contiguous_split_chunks: If True, make each chunk contiguous
                in memory.
        """
        # Get the size and dimension.
        last_dim = tensor.dim() - 1
        last_dim_size = tensor.size()[last_dim] // num_partitions
        # Split.
        tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
        # Note: torch.split does not create contiguous tensors by default.
        if contiguous_split_chunks:
            return tuple(chunk.contiguous() for chunk in tensor_list)

        return tensor_list

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]
        """

        # [seq_len, batch, 3 * hidden_size]
        mixed_raw_layer = self.query_key_value(hidden_states)

        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
        new_tensor_shape = mixed_raw_layer.size()[:-1] + (
            self.num_attention_heads_per_partition,
            3 * self.hidden_size_per_attention_head,
        )
        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)

        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)

        if self.position_encoding_2d:
            # 2D RoPE: half of each head encodes absolute position, the other
            # half encodes the block position (GLM's two-channel scheme).
            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
            # position_ids: [b, 2, sq] -> two [sq, b] index tensors.
            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
                position_ids[:, 1, :].transpose(0, 1).contiguous()
            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
        else:
            position_ids = position_ids.transpose(0, 1)
            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)

        # [seq_len, batch,
        # hidden_size]
        context_layer, present, attention_probs = attention_fn(
            self=self,
            query_layer=query_layer,
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask,
            hidden_size_per_partition=self.hidden_size_per_partition,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache
        )

        # Output projection back to hidden_size.
        output = self.dense(context_layer)

        outputs = (output, present)

        if output_attentions:
            outputs += (attention_probs,)

        return outputs  # output, present, attention_probs


class GEGLU(torch.nn.Module):
    """Gated GELU: split the last dim in two, gate the first half with gelu(second half)."""

    def __init__(self):
        super().__init__()
        self.activation_fn = F.gelu

    def forward(self, x):
        # dim=-1 breaks in jit for pt<1.10
        x1, x2 = x.chunk(2, dim=(x.ndim - 1))
        return x1 * self.activation_fn(x2)


class GLU(torch.nn.Module):
    """Feed-forward block: hidden -> inner (default 4h) -> activation -> hidden."""

    def __init__(self, hidden_size, inner_hidden_size=None,
                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):
        super(GLU, self).__init__()
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        self.layer_id = layer_id
        self.activation_func = activation_func

        # Project to 4h.
        self.hidden_size = hidden_size
        if inner_hidden_size is None:
            inner_hidden_size = 4 * hidden_size
        self.inner_hidden_size = inner_hidden_size
        self.dense_h_to_4h = init_method(
            torch.nn.Linear,
            self.hidden_size,
            self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )
        # Project back to h.
        self.dense_4h_to_h = init_method(
            torch.nn.Linear,
            self.inner_hidden_size,
            self.hidden_size,
            bias=bias,
            dtype=params_dtype,
        )

    def forward(self, hidden_states):
        """
        hidden_states: [seq_len, batch, hidden_size]
        """

        # [seq_len, batch, inner_hidden_size]
        intermediate_parallel = self.dense_h_to_4h(hidden_states)

        intermediate_parallel = self.activation_func(intermediate_parallel)

        output = self.dense_4h_to_h(intermediate_parallel)

        return output


class GLMBlock(torch.nn.Module):
    """A single GLM transformer layer: LN -> self-attention -> LN -> GLU MLP."""

    def __init__(
            self,
            hidden_size,
            num_attention_heads,
            layernorm_epsilon,
            layer_id,
            inner_hidden_size=None,
            hidden_size_per_attention_head=None,
            layernorm=LayerNorm,
            use_bias=True,
            params_dtype=torch.float,
            num_layers=28,
            position_encoding_2d=True,
            empty_init=True
    ):
        super(GLMBlock, self).__init__()
        # Set output layer initialization if not provided.

        self.layer_id = layer_id

        # Layernorm on the input data.
        self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon)

        self.position_encoding_2d = position_encoding_2d

        # Self attention.
        self.attention = SelfAttention(
            hidden_size,
            num_attention_heads,
            layer_id,
            hidden_size_per_attention_head=hidden_size_per_attention_head,
            bias=use_bias,
            params_dtype=params_dtype,
            position_encoding_2d=self.position_encoding_2d,
            empty_init=empty_init
        )

        # Layernorm on the input data.
+ self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) + + self.num_layers = num_layers + + # GLU + self.mlp = GLU( + hidden_size, + inner_hidden_size=inner_hidden_size, + bias=use_bias, + layer_id=layer_id, + params_dtype=params_dtype, + empty_init=empty_init + ) + + def forward( + self, + hidden_states: torch.Tensor, + position_ids, + attention_mask: torch.Tensor, + layer_id, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + """ + hidden_states: [seq_len, batch, hidden_size] + attention_mask: [(1, 1), seq_len, seq_len] + """ + + # Layer norm at the begining of the transformer layer. + # [seq_len, batch, hidden_size] + attention_input = self.input_layernorm(hidden_states) + + # Self attention. + attention_outputs = self.attention( + attention_input, + position_ids, + attention_mask=attention_mask, + layer_id=layer_id, + layer_past=layer_past, + use_cache=use_cache, + output_attentions=output_attentions + ) + + attention_output = attention_outputs[0] + + outputs = attention_outputs[1:] + + # Residual connection. + alpha = (2 * self.num_layers) ** 0.5 + hidden_states = attention_input * alpha + attention_output + + mlp_input = self.post_attention_layernorm(hidden_states) + + # MLP. + mlp_output = self.mlp(mlp_input) + + # Second residual connection. + output = mlp_input * alpha + mlp_output + + if use_cache: + outputs = (output,) + outputs + else: + outputs = (output,) + outputs[1:] + + return outputs # hidden_states, present, attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. 
+ """ + + is_parallelizable = False + supports_gradient_checkpointing = True + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLMBlock"] + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + def get_masks(self, input_ids, device): + batch_size, seq_length = input_ids.shape + context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids] + attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device) + attention_mask.tril_() + for i, context_length in enumerate(context_lengths): + attention_mask[i, :, :context_length] = 1 + attention_mask.unsqueeze_(1) + attention_mask = (attention_mask < 0.5).bool() + + return attention_mask + + def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None): + batch_size, seq_length = input_ids.shape + if use_gmasks is None: + use_gmasks = [False] * batch_size + context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids] + if self.position_encoding_2d: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + for i, context_length in enumerate(context_lengths): + position_ids[i, context_length:] = mask_positions[i] + block_position_ids = [torch.cat(( + torch.zeros(context_length, dtype=torch.long, device=device), + torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1 + )) for context_length in context_lengths] + block_position_ids = torch.stack(block_position_ids, dim=0) + position_ids = torch.stack((position_ids, block_position_ids), dim=1) + else: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + for i, context_length in enumerate(context_lengths): + if not use_gmasks[i]: + position_ids[context_length:] = mask_positions[i] + + return position_ids + 
    def _set_gradient_checkpointing(self, module, value=False):
        # Propagate the gradient-checkpointing flag to the backbone module.
        if isinstance(module, ChatGLMModel):
            module.gradient_checkpointing = value


CHATGLM_6B_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.

    Parameters:
        config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CHATGLM_6B_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`ChatGLM6BTokenizer`].
            See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
    CHATGLM_6B_START_DOCSTRING,
)
class ChatGLMModel(ChatGLMPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as an decoder the model needs to be initialized with the
    `is_decoder` argument of the configuration set to `True`.
    To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder`
    argument and `add_cross_attention` set to `True`; an
    `encoder_hidden_states` is then expected as an input to the forward pass.
    """

    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        # recording parameters
        self.max_sequence_length = config.max_sequence_length
        self.hidden_size = config.hidden_size
        self.params_dtype = torch.half
        self.num_attention_heads = config.num_attention_heads
        self.vocab_size = config.vocab_size
        self.num_layers = config.num_layers
        self.layernorm_epsilon = config.layernorm_epsilon
        self.inner_hidden_size = config.inner_hidden_size
        self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads
        self.position_encoding_2d = config.position_encoding_2d
        self.pre_seq_len = config.pre_seq_len
        self.prefix_projection = config.prefix_projection

        self.word_embeddings = init_method(
            torch.nn.Embedding,
            num_embeddings=self.vocab_size, embedding_dim=self.hidden_size,
            dtype=self.params_dtype
        )
        self.gradient_checkpointing = False

        def get_layer(layer_id):
            # One GLMBlock per layer, all sharing the recorded hyper-parameters.
            return GLMBlock(
                self.hidden_size,
                self.num_attention_heads,
                self.layernorm_epsilon,
                layer_id,
                inner_hidden_size=self.inner_hidden_size,
                hidden_size_per_attention_head=self.hidden_size_per_attention_head,
                layernorm=LayerNorm,
                use_bias=True,
                params_dtype=self.params_dtype,
                position_encoding_2d=self.position_encoding_2d,
                empty_init=empty_init
            )

        self.layers = torch.nn.ModuleList(
            [get_layer(layer_id) for layer_id in range(self.num_layers)]
        )

        # Final layer norm before output.
        self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon)

        if self.pre_seq_len is not None:
            # P-Tuning v2: freeze the backbone and train only the prefix encoder.
            for param in self.parameters():
                param.requires_grad = False
            self.prefix_tokens = torch.arange(self.pre_seq_len).long()
            self.prefix_encoder = PrefixEncoder(config)
            self.dropout = torch.nn.Dropout(0.1)

        # total_params = sum(p.numel() for p in self.parameters())
        # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad)
        # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params))

    def get_input_embeddings(self):
        return self.word_embeddings

    def set_input_embeddings(self, new_embeddings: torch.Tensor):
        self.word_embeddings = new_embeddings

    def get_prompt(self, batch_size, device, dtype=torch.half):
        """Expand the learned prefix into per-layer (key, value) past tensors."""
        prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device)
        past_key_values = self.prefix_encoder(prefix_tokens).type(dtype)
        past_key_values = past_key_values.view(
            batch_size,
            self.pre_seq_len,
            self.num_layers * 2,
            self.num_attention_heads,
            self.hidden_size // self.num_attention_heads
        )
        # seq_len, b, nh, hidden_size
        past_key_values = self.dropout(past_key_values)
        # Reorder to layer-major and split into num_layers (key, value) pairs.
        past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2)
        # past_key_values = [(v[0], v[1]) for v in past_key_values]
        return past_key_values

    @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
            self,
            input_ids: Optional[torch.LongTensor] = None,
            position_ids: Optional[torch.LongTensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
            inputs_embeds: Optional[torch.LongTensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if past_key_values is None:
            if self.pre_seq_len is not None:
                # NOTE(review): this path dereferences input_ids; it assumes
                # input_ids is provided when prefix tuning is active — confirm.
                past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
                                                  dtype=inputs_embeds.dtype)
            else:
                past_key_values = tuple([None] * len(self.layers))

        if attention_mask is None:
            attention_mask = self.get_masks(
                input_ids,
                device=input_ids.device
            )

        if position_ids is None:
            MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
            seqs = input_ids.tolist()

            # Locate the [MASK]/[gMASK] token per sequence (gMASK preferred).
            mask_positions, use_gmasks = [], []
            for seq in seqs:
                mask_token = gMASK if gMASK in seq else MASK
                use_gmask = mask_token == gMASK
                mask_positions.append(seq.index(mask_token))
                use_gmasks.append(use_gmask)

            position_ids = self.get_position_ids(
                input_ids,
                mask_positions=mask_positions,
                device=input_ids.device,
                use_gmasks=use_gmasks
            )

        if self.pre_seq_len is not None and attention_mask is not None:
            # Prefix positions are always visible: prepend an all-False mask slab.
            prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
                attention_mask.device)
            prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)

        # [seq_len, batch, hidden_size]
        hidden_states = inputs_embeds.transpose(0, 1)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        if attention_mask is None:
            attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
        else:
            attention_mask = attention_mask.to(hidden_states.device)

        for i, layer in enumerate(self.layers):

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_past = past_key_values[i]

            if self.gradient_checkpointing and self.training:
                layer_ret = torch.utils.checkpoint.checkpoint(
                    layer,
                    hidden_states,
                    position_ids,
                    attention_mask,
                    torch.tensor(i),
                    layer_past,
                    use_cache,
                    output_attentions
                )
            else:
                layer_ret = layer(
                    hidden_states,
                    position_ids=position_ids,
                    attention_mask=attention_mask,
                    layer_id=torch.tensor(i),
                    layer_past=layer_past,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            hidden_states = layer_ret[0]

            if use_cache:
                presents = presents + (layer_ret[1],)

            if output_attentions:
                # Attention probs index depends on whether a present was returned.
                all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)

        # Final layer norm.
        hidden_states = self.final_layernorm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
    """ChatGLM backbone plus a tied-free LM head for conditional generation / chat."""

    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init

        # self.hidden_size = config.hidden_size
        # self.params_dtype = torch.half
        # self.vocab_size = config.vocab_size
        self.max_sequence_length = config.max_sequence_length

        self.position_encoding_2d = config.position_encoding_2d

        self.transformer = ChatGLMModel(config, empty_init=empty_init)

        self.lm_head = init_method(
            nn.Linear,
            config.hidden_size,
            config.vocab_size,
            bias=False,
            dtype=torch.half
        )

        self.config = config

        self.quantized = False

        if self.config.quantization_bit:
            self.quantize(self.config.quantization_bit, empty_init=True)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def _update_model_kwargs_for_generation(
            self,
            outputs: ModelOutput,
            model_kwargs: Dict[str, Any],
            is_encoder_decoder: bool = False,
            standardize_cache_format: bool = False,
    ) -> Dict[str, Any]:
        """Extend cache, attention mask and position ids by one step during generation."""
        # update past_key_values
        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
            outputs, standardize_cache_format=standardize_cache_format
        )

        # update attention mask
        if "attention_mask" in model_kwargs:
            attention_mask = model_kwargs["attention_mask"]
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                # Grow the mask one column (new key) and one row (new query);
                # the new token attends to everything (False = not masked).
                attention_mask = torch.cat(
                    [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
                new_attention_mask = attention_mask[:, :, -1:].clone()
                new_attention_mask[..., -1] = False
                model_kwargs["attention_mask"] = torch.cat(
                    [attention_mask, new_attention_mask], dim=2
                )

        # update position ids
        if "position_ids" in model_kwargs:
            position_ids = model_kwargs["position_ids"]
            new_position_id = position_ids[..., -1:].clone()
            # Only the block-position channel (index 1) advances during decoding.
            new_position_id[:, 1, :] += 1
            model_kwargs["position_ids"] = torch.cat(
                [position_ids, new_position_id], dim=-1
            )

        return model_kwargs

    def prepare_inputs_for_generation(
            self,
            input_ids: torch.LongTensor,
            past: Optional[torch.Tensor] = None,
            past_key_values: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            **kwargs
    ) -> dict:
        batch_size, seq_length = input_ids.shape
        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
        seqs = input_ids.tolist()
        mask_positions, use_gmasks = [], []
        for seq in seqs:
            mask_token = gMASK if gMASK in seq else MASK
            use_gmask = mask_token == gMASK
            mask_positions.append(seq.index(mask_token))
            use_gmasks.append(use_gmask)

        # only last token for input_ids if past is not None
        if past is not None or past_key_values is not None:
            last_token = input_ids[:, -1].unsqueeze(-1)
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                attention_mask = attention_mask[:, :, -1:]
            else:
                attention_mask = None
            if position_ids is not None:
                position_ids = position_ids[..., -1:]
            else:
                context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
                if self.position_encoding_2d:
                    # [mask_position, generated-length] per batch row, shape [b, 2, 1].
                    position_ids = torch.tensor(
                        [[mask_position, seq_length - context_length] for mask_position, context_length in
                         zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
                else:
                    position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
                                                device=input_ids.device).unsqueeze(-1)

            if past is None:
                past = past_key_values
            return {
                "input_ids": last_token,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }
        else:
            if attention_mask is not None and attention_mask.dtype != torch.bool:
                logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
                attention_mask = None
            if attention_mask is None:
                attention_mask = self.get_masks(
                    input_ids,
                    device=input_ids.device
                )
            if position_ids is None:
                position_ids = self.get_position_ids(
                    input_ids,
                    device=input_ids.device,
                    mask_positions=mask_positions,
                    use_gmasks=use_gmasks
                )

            return {
                "input_ids": input_ids,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }

    def forward(
            self,
            input_ids: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            labels: Optional[torch.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ):
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        # [sq, b, hidden] -> logits [b, sq, vocab].
        lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()

        loss = None
if labels is not None: + lm_logits = lm_logits.to(torch.float32) + + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss(ignore_index=-100) + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + lm_logits = lm_logits.to(hidden_states.dtype) + loss = loss.to(hidden_states.dtype) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor + ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + + Output shares the same memory storage as `past`. 
+ """ + return tuple( + ( + layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)), + layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)), + ) + for layer_past in past + ) + + def process_response(self, response): + response = response.strip() + response = response.replace("[[训练时间]]", "2023年") + punkts = [ + [",", ","], + ["!", "!"], + [":", ":"], + [";", ";"], + ["\?", "?"], + ] + for item in punkts: + response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response) + response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response) + return response + + @torch.no_grad() + def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1, + do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs): + if history is None: + history = [] + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(InvalidScoreLogitsProcessor()) + gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, "logits_processor": logits_processor, **kwargs} + if not history: + prompt = query + else: + prompt = "" + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + inputs = tokenizer([prompt], return_tensors="pt") + inputs = inputs.to(self.device) + outputs = self.generate(**inputs, **gen_kwargs) + outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] + response = tokenizer.decode(outputs) + response = self.process_response(response) + history = history + [(query, response)] + return response, history + + @torch.no_grad() + def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, + do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs): + if 
@torch.no_grad()
def stream_generate(
        self,
        input_ids,
        generation_config: Optional[GenerationConfig] = None,
        logits_processor: Optional[LogitsProcessorList] = None,
        stopping_criteria: Optional[StoppingCriteriaList] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
        **kwargs,
):
    """Greedy/sampling generation loop that yields the growing ``input_ids``
    after every decoded token, enabling token-by-token streaming output.

    Args:
        input_ids: Prompt ids of shape (batch, seq).
        generation_config: Optional override of ``self.generation_config``;
            deep-copied so the caller's config is never mutated.
        logits_processor / stopping_criteria / prefix_allowed_tokens_fn:
            Standard HF generation hooks.
        **kwargs: Extra generation parameters, merged into the config via
            ``generation_config.update``; leftovers become model kwargs.

    Yields:
        torch.Tensor: the full sequence so far (prompt + generated tokens).
    """
    batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1]

    if generation_config is None:
        generation_config = self.generation_config
    generation_config = copy.deepcopy(generation_config)
    model_kwargs = generation_config.update(**kwargs)
    bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id

    # Normalize eos to a list so the finish check below can iterate it.
    if isinstance(eos_token_id, int):
        eos_token_id = [eos_token_id]

    has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
    if has_default_max_length and generation_config.max_new_tokens is None:
        warnings.warn(
            f"Using `max_length`'s default ({generation_config.max_length}) to control the generation length. "
            "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we"
            " recommend using `max_new_tokens` to control the maximum length of the generation.",
            UserWarning,
        )
    elif generation_config.max_new_tokens is not None:
        # max_new_tokens wins over max_length.
        generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
        if not has_default_max_length:
            # NOTE(review): logging's warn() does not accept a warning
            # category; the trailing UserWarning argument is treated as a
            # %-format arg and is effectively ignored — confirm intent.
            logger.warn(
                f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
                f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
                "Please refer to the documentation for more information. "
                "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)",
                UserWarning,
            )

    if input_ids_seq_length >= generation_config.max_length:
        input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
        logger.warning(
            f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
            f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
            " increasing `max_new_tokens`."
        )

    # 2. Set generation parameters if not already defined
    logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
    stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()

    logits_processor = self._get_logits_processor(
        generation_config=generation_config,
        input_ids_seq_length=input_ids_seq_length,
        encoder_input_ids=input_ids,
        prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
        logits_processor=logits_processor,
    )

    stopping_criteria = self._get_stopping_criteria(
        generation_config=generation_config, stopping_criteria=stopping_criteria
    )
    logits_warper = self._get_logits_warper(generation_config)

    # 1 = still generating, 0 = finished, one flag per batch row.
    unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1)
    scores = None
    while True:
        model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs)
        # forward pass to get next token
        outputs = self(
            **model_inputs,
            return_dict=True,
            output_attentions=False,
            output_hidden_states=False,
        )

        next_token_logits = outputs.logits[:, -1, :]

        # pre-process distribution
        next_token_scores = logits_processor(input_ids, next_token_logits)
        next_token_scores = logits_warper(input_ids, next_token_scores)

        # sample (argmax over softmax probabilities when do_sample is off)
        probs = nn.functional.softmax(next_token_scores, dim=-1)
        if generation_config.do_sample:
            next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1)
        else:
            next_tokens = torch.argmax(probs, dim=-1)

        # update generated ids, model inputs, and length for next step
        input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1)
        model_kwargs = self._update_model_kwargs_for_generation(
            outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder
        )
        # NOTE(review): with more than one eos id this sum is zero only when
        # the token equals *every* eos id, so multi-eos termination relies on
        # stopping_criteria instead — confirm this matches upstream behavior.
        unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long())

        # stop when each sentence is finished, or if we exceed the maximum length
        if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, scores):
            break
        yield input_ids
logging + +from typing import List +from functools import partial + +logger = logging.get_logger(__name__) + +try: + from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up + + class Kernel: + def __init__(self, code: bytes, function_names: List[str]): + self.code = code + self._function_names = function_names + self._cmodule = LazyKernelCModule(self.code) + + for name in self._function_names: + setattr(self, name, KernelFunction(self._cmodule, name)) + + quantization_code = "$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/
dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzG
VH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPU
bPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ" + + kernels = Kernel( + bz2.decompress(base64.b64decode(quantization_code)), + [ + "int4WeightCompression", + "int4WeightExtractionFloat", + 
"int4WeightExtractionHalf", + "int8WeightExtractionFloat", + "int8WeightExtractionHalf", + ], + ) +except Exception as exception: + kernels = None + logger.warning("Failed to load cpm_kernels:" + str(exception)) + + +class W8A16Linear(torch.autograd.Function): + @staticmethod + def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width): + ctx.inp_shape = inp.size() + ctx.weight_bit_width = weight_bit_width + out_features = quant_w.size(0) + inp = inp.contiguous().view(-1, inp.size(-1)) + weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width) + ctx.weight_shape = weight.size() + output = inp.mm(weight.t()) + ctx.save_for_backward(inp, quant_w, scale_w) + return output.view(*(ctx.inp_shape[:-1] + (out_features,))) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + inp, quant_w, scale_w = ctx.saved_tensors + weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width) + grad_output = grad_output.contiguous().view(-1, weight.size(0)) + grad_input = grad_output.mm(weight) + grad_weight = grad_output.t().mm(inp) + return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None + + +def compress_int4_weight(weight: torch.Tensor): # (n, m) + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + assert m % 2 == 0 + m = m // 2 + out = torch.empty(n, m, dtype=torch.int8, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + kernels.int4WeightCompression( + gridDim, + blockDim, + 0, + stream, + [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)], + ) + return out + + +def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int): + if source_bit_width == 8: + func = kernels.int8WeightExtractionHalf + elif source_bit_width == 4: + func = kernels.int4WeightExtractionHalf + else: + 
assert False, "Unsupported bit-width" + + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + func( + gridDim, + blockDim, + 0, + stream, + [ + ctypes.c_void_p(weight.data_ptr()), + ctypes.c_void_p(scale_list.data_ptr()), + ctypes.c_void_p(out.data_ptr()), + ctypes.c_int32(n), + ctypes.c_int32(m), + ], + ) + return out + + +class QuantizedLinear(Linear): + def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs): + super(QuantizedLinear, self).__init__(*args, **kwargs) + self.weight_bit_width = weight_bit_width + + shape = self.weight.shape + del self.weight + + if weight_tensor is None or empty_init: + self.weight = torch.empty( + shape[0], shape[1] * weight_bit_width // 8, dtype=torch.int8, device=kwargs["device"] + ) + self.weight_scale = torch.empty(shape[0], dtype=kwargs["dtype"], device=kwargs["device"]) + else: + self.weight_scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half() + self.weight = torch.round(weight_tensor / self.weight_scale[:, None]).to(torch.int8) + if weight_bit_width == 4: + self.weight = compress_int4_weight(self.weight) + + self.weight = Parameter(self.weight.to(kwargs["device"]), requires_grad=False) + self.weight_scale = Parameter(self.weight_scale.to(kwargs["device"]), requires_grad=False) + if bias_tensor is not None: + self.bias = Parameter(bias_tensor.to(kwargs["device"]), requires_grad=False) + else: + self.bias = None + + def forward(self, input): + output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width) + if self.bias is not None: + output = output + self.bias + return output + + +def quantize(model, weight_bit_width, empty_init=False, **kwargs): + """Replace fp16 linear with 
quantized linear""" + + for layer in model.layers: + layer.attention.query_key_value = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.attention.query_key_value.weight.to(torch.cuda.current_device()), + bias_tensor=layer.attention.query_key_value.bias, + in_features=layer.attention.query_key_value.in_features, + out_features=layer.attention.query_key_value.out_features, + bias=True, + dtype=torch.half, + device=layer.attention.query_key_value.weight.device, + empty_init=empty_init + ) + layer.attention.dense = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.attention.dense.weight.to(torch.cuda.current_device()), + bias_tensor=layer.attention.dense.bias, + in_features=layer.attention.dense.in_features, + out_features=layer.attention.dense.out_features, + bias=True, + dtype=torch.half, + device=layer.attention.dense.weight.device, + empty_init=empty_init + ) + layer.mlp.dense_h_to_4h = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.mlp.dense_h_to_4h.weight.to(torch.cuda.current_device()), + bias_tensor=layer.mlp.dense_h_to_4h.bias, + in_features=layer.mlp.dense_h_to_4h.in_features, + out_features=layer.mlp.dense_h_to_4h.out_features, + bias=True, + dtype=torch.half, + device=layer.mlp.dense_h_to_4h.weight.device, + empty_init=empty_init + ) + layer.mlp.dense_4h_to_h = QuantizedLinear( + weight_bit_width=weight_bit_width, + weight_tensor=layer.mlp.dense_4h_to_h.weight.to(torch.cuda.current_device()), + bias_tensor=layer.mlp.dense_4h_to_h.bias, + in_features=layer.mlp.dense_4h_to_h.in_features, + out_features=layer.mlp.dense_4h_to_h.out_features, + bias=True, + dtype=torch.half, + device=layer.mlp.dense_4h_to_h.weight.device, + empty_init=empty_init + ) + return model diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/rng_state.pth b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/rng_state.pth new file mode 100644 index 
0000000000000000000000000000000000000000..b8bc4d6665791b6d04910a7fa2198159d30d9c49 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0807d8b9b5da8a50ac37dc742c51f2fd14818229529350c25105e80232d0c12 +size 14575 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/scheduler.pt b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..6253cc10c5e62c9e59549de13b77dff1709919ee --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e8a7ee269617f79917695125e8a223db714e608f9db41b8e07a0b863380f2395 +size 627 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/special_tokens_map.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..1f897c919b758e64c56eb1a7b34b39b569040086 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/special_tokens_map.json @@ -0,0 +1,7 @@ +{ + "bos_token": "", + "eos_token": "", + "mask_token": "[MASK]", + "pad_token": "", + "unk_token": "" +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenization_chatglm.py b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenization_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..1d4f0ba532543b6dbdacdd83d30324b7a6abfad3 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenization_chatglm.py @@ -0,0 +1,430 @@ +"""Tokenization classes for ChatGLM.""" +from typing import List, Optional, Union +import os + +from transformers.tokenization_utils import PreTrainedTokenizer +from transformers.utils import logging, PaddingStrategy +from 
class SPTokenizer:
    """SentencePiece-backed tokenizer for ChatGLM.

    Text-token ids produced by the underlying sentencepiece model are offset
    by ``num_image_tokens``, reserving ids [0, num_image_tokens) for
    ``<image_N>`` placeholder tokens.

    NOTE(review): several angle-bracket string literals (``<n>``,
    ``<image_{}>``, the special-token names) were destroyed by tag-stripping
    in the checked-in copy; they are restored here from the upstream
    THUDM/chatglm-6b tokenizer — confirm against the original file.
    """

    def __init__(
            self,
            vocab_file,
            num_image_tokens=20000,
            max_blank_length=80,
            byte_fallback=True,
    ):
        assert vocab_file is not None
        self.vocab_file = vocab_file
        self.num_image_tokens = num_image_tokens
        self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "<unused_0>", "<sop>", "<eop>", "<ENC>", "<dBLOCK>"]
        self.max_blank_length = max_blank_length
        self.byte_fallback = byte_fallback
        self.text_tokenizer = TextTokenizer(vocab_file)

    def _get_text_tokenizer(self):
        return self.text_tokenizer

    @staticmethod
    def get_blank_token(length: int):
        """Token standing for a run of ``length`` consecutive spaces."""
        assert length >= 2
        return f"<|blank_{length}|>"

    @staticmethod
    def get_tab_token():
        """Token standing for a tab character."""
        return f"<|tab|>"

    @property
    def num_text_tokens(self):
        return self.text_tokenizer.num_tokens

    @property
    def num_tokens(self):
        return self.num_image_tokens + self.num_text_tokens

    @staticmethod
    def _encode_whitespaces(text: str, max_len: int = 80):
        """Replace tabs and runs of 2..max_len spaces with explicit tokens."""
        text = text.replace("\t", SPTokenizer.get_tab_token())
        # Longest runs first so shorter blank tokens never split a long run.
        for i in range(max_len, 1, -1):
            text = text.replace(" " * i, SPTokenizer.get_blank_token(i))
        return text

    def _preprocess(self, text: str, linebreak=True, whitespaces=True):
        """Encode newlines and whitespace runs as explicit tokens."""
        if linebreak:
            text = text.replace("\n", "<n>")
        if whitespaces:
            text = self._encode_whitespaces(text, max_len=self.max_blank_length)
        return text

    def encode(
            self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[int]:
        """
        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            # Prefix with <n> so sentencepiece does not prepend its own
            # dummy space; the two leading tokens are stripped below.
            text = "<n>" + text
        tmp = self._get_text_tokenizer().encode(text)
        # Shift past the reserved image-token id range.
        tokens = [x + self.num_image_tokens for x in tmp]
        return tokens if add_dummy_prefix else tokens[2:]

    def decode(self, text_ids: List[int]) -> str:
        """Map offset ids back to text, expanding <n>/<|tab|>/<|blank_k|>."""
        ids = [int(_id) - self.num_image_tokens for _id in text_ids]
        # Drop image-token ids: they have no sentencepiece counterpart.
        ids = [_id for _id in ids if _id >= 0]
        text = self._get_text_tokenizer().decode(ids)
        text = text.replace("<n>", "\n")
        text = text.replace(SPTokenizer.get_tab_token(), "\t")
        for i in range(2, self.max_blank_length + 1):
            text = text.replace(self.get_blank_token(i), " " * i)
        return text

    def tokenize(
            self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[str]:
        """
        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            text = "<n>" + text
        tokens = self._get_text_tokenizer().tokenize(text)
        return tokens if add_dummy_prefix else tokens[2:]

    def __getitem__(self, x: Union[int, str]):
        """Bidirectional id<->token lookup covering the image-token range."""
        if isinstance(x, int):
            if x < self.num_image_tokens:
                return "<image_{}>".format(x)
            else:
                return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
        elif isinstance(x, str):
            # len("<image_") == 7, hence the x[7:-1] slice for the number.
            if x.startswith("<image_") and x.endswith(">") and x[7:-1].isdigit():
                return int(x[7:-1])
            else:
                return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
        else:
            raise ValueError("The key should be str or int.")
num_image_tokens=num_image_tokens) + + """ Initialisation """ + + @property + def gmask_token_id(self) -> Optional[int]: + if self.gmask_token is None: + return None + return self.convert_tokens_to_ids(self.gmask_token) + + @property + def end_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been + set. + """ + if self.end_token is None: + return None + return self.convert_tokens_to_ids(self.end_token) + + @property + def vocab_size(self): + """ Returns vocab size """ + return self.sp_tokenizer.num_tokens + + def get_vocab(self): + """ Returns vocab as a dict """ + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def preprocess_text(self, inputs): + if self.remove_space: + outputs = " ".join(inputs.strip().split()) + else: + outputs = inputs + + if self.do_lower_case: + outputs = outputs.lower() + + return outputs + + def _tokenize(self, text, **kwargs): + """ Returns a tokenized string. """ + text = self.preprocess_text(text) + + seq = self.sp_tokenizer.tokenize(text) + + return seq + + def _decode( + self, + token_ids: Union[int, List[int]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = True, + **kwargs + ) -> str: + if isinstance(token_ids, int): + token_ids = [token_ids] + if len(token_ids) == 0: + return "" + if self.pad_token_id in token_ids: # remove pad + token_ids = list(filter((self.pad_token_id).__ne__, token_ids)) + return self.sp_tokenizer.decode(token_ids) + + def _convert_token_to_id(self, token): + """ Converts a token (str) in an id using the vocab. 
""" + return self.sp_tokenizer[token] + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.sp_tokenizer[index] + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, self.vocab_files_names["vocab_file"] + ) + else: + vocab_file = save_directory + + with open(self.vocab_file, 'rb') as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + gmask_id = self.sp_tokenizer[self.gmask_token] + eos_id = self.sp_tokenizer[self.eos_token] + token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]] + if token_ids_1 is not None: + token_ids_0 = token_ids_0 + token_ids_1 + [eos_id] + return token_ids_0 + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + `>= 7.5` (Volta). 
+ return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + bos_token_id = self.sp_tokenizer[self.bos_token] + mask_token_id = self.sp_tokenizer[self.mask_token] + gmask_token_id = self.sp_tokenizer[self.gmask_token] + assert self.padding_side == "left" + + required_input = encoded_inputs[self.model_input_names[0]] + seq_length = len(required_input) + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if max_length is not None: + if "attention_mask" not in encoded_inputs: + if bos_token_id in required_input: + context_length = required_input.index(bos_token_id) + else: + context_length = seq_length + attention_mask = np.ones((1, seq_length, seq_length)) + attention_mask = np.tril(attention_mask) + attention_mask[:, :, :context_length] = 1 + attention_mask = np.bool_(attention_mask < 0.5) + encoded_inputs["attention_mask"] = attention_mask + + if "position_ids" not in encoded_inputs: + if bos_token_id in required_input: + context_length = required_input.index(bos_token_id) + else: + context_length = seq_length + position_ids = np.arange(seq_length, dtype=np.int64) + mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id + if mask_token in required_input: + mask_position = required_input.index(mask_token) + position_ids[context_length:] = mask_position + block_position_ids = np.concatenate( + [np.zeros(context_length, dtype=np.int64), + np.arange(1, seq_length - context_length + 1, dtype=np.int64)]) + encoded_inputs["position_ids"] = np.stack([position_ids, 
block_position_ids], axis=0) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"], + pad_width=[(0, 0), (difference, 0), (difference, 0)], + mode='constant', constant_values=True) + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + if "position_ids" in encoded_inputs: + encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"], + pad_width=[(0, 0), (difference, 0)]) + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + + return encoded_inputs diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenizer_config.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..f3f8e1c935cc40c270ff6ac75c05b4208533688a --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenizer_config.json @@ -0,0 +1,22 @@ +{ + "auto_map": { + "AutoTokenizer": [ + "tokenization_chatglm.ChatGLMTokenizer", + null + ] + }, + "bos_token": "", + "do_lower_case": false, + "end_token": "", + "eos_token": "", + "gmask_token": "[gMASK]", + "mask_token": "[MASK]", + "model_max_length": 1000000000000000019884624838656, + "num_image_tokens": 0, + "pad_token": "", + "padding_side": "left", + "remove_space": false, + "special_tokens_map_file": null, + "tokenizer_class": "ChatGLMTokenizer", + "unk_token": "" +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/trainer_state.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/trainer_state.json new file mode 100644 index 
0000000000000000000000000000000000000000..ef5a41d0e6a10a111e1ed23a65195201c7d60cc4 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/trainer_state.json @@ -0,0 +1,76 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.013961727414724387, + "global_step": 100, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0, + "learning_rate": 0.018000000000000002, + "loss": 5.1828, + "step": 10 + }, + { + "epoch": 0.0, + "learning_rate": 0.016, + "loss": 4.5979, + "step": 20 + }, + { + "epoch": 0.0, + "learning_rate": 0.013999999999999999, + "loss": 4.5608, + "step": 30 + }, + { + "epoch": 0.01, + "learning_rate": 0.012, + "loss": 4.375, + "step": 40 + }, + { + "epoch": 0.01, + "learning_rate": 0.01, + "loss": 4.4015, + "step": 50 + }, + { + "epoch": 0.01, + "learning_rate": 0.008, + "loss": 4.2929, + "step": 60 + }, + { + "epoch": 0.01, + "learning_rate": 0.006, + "loss": 4.3323, + "step": 70 + }, + { + "epoch": 0.01, + "learning_rate": 0.004, + "loss": 4.2657, + "step": 80 + }, + { + "epoch": 0.01, + "learning_rate": 0.002, + "loss": 4.2733, + "step": 90 + }, + { + "epoch": 0.01, + "learning_rate": 0.0, + "loss": 4.2842, + "step": 100 + } + ], + "max_steps": 100, + "num_train_epochs": 1, + "total_flos": 3466572123340800.0, + "trial_name": null, + "trial_params": null +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/training_args.bin b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..09a2bca30693fb288fb455aee1deca55c7e989e5 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69a6c8c7bd73e62303e866b1cd895083f806eb4826833c1bcdd3e9c460c635e1 +size 3707 diff --git 
a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/config.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/config.json new file mode 100644 index 0000000000000000000000000000000000000000..06341c327f5b1ce5c3e53e96c56be7de127a2c1f --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/config.json @@ -0,0 +1,31 @@ +{ + "_name_or_path": "/home/wangyan/project/hft/uptest", + "architectures": [ + "ChatGLMForConditionalGeneration" + ], + "auto_map": { + "AutoConfig": "configuration_chatglm.ChatGLMConfig", + "AutoModel": "modeling_chatglm.ChatGLMForConditionalGeneration", + "AutoModelForSeq2SeqLM": "modeling_chatglm.ChatGLMForConditionalGeneration" + }, + "bos_token_id": 130004, + "eos_token_id": 130005, + "gmask_token_id": 130001, + "hidden_size": 4096, + "inner_hidden_size": 16384, + "layernorm_epsilon": 1e-05, + "mask_token_id": 130000, + "max_sequence_length": 2048, + "model_type": "chatglm", + "num_attention_heads": 32, + "num_layers": 28, + "pad_token_id": 3, + "position_encoding_2d": true, + "pre_seq_len": 128, + "prefix_projection": false, + "quantization_bit": 4, + "torch_dtype": "float16", + "transformers_version": "4.27.1", + "use_cache": true, + "vocab_size": 130528 +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/configuration_chatglm.py b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/configuration_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..78f3425d5f63ad43f31b092b8d62b44d28d52f15 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/configuration_chatglm.py @@ -0,0 +1,103 @@ +""" ChatGLM model configuration """ + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +logger = logging.get_logger(__name__) + + +class ChatGLMConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`~ChatGLMModel`]. 
+ It is used to instantiate an ChatGLM model according to the specified arguments, defining the model + architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of + the ChatGLM-6B [THUDM/ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used + to control the model outputs. Read the documentation from [`PretrainedConfig`] + for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 150528): + Vocabulary size of the ChatGLM-6B model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`~ChatGLMModel`] or + [`~TFChatGLMModel`]. + hidden_size (`int`, *optional*, defaults to 4096): + Dimension of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 28): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 32): + Number of attention heads for each attention layer in the Transformer encoder. + inner_hidden_size (`int`, *optional*, defaults to 16384): + Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + max_sequence_length (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. + Typically set this to something large just in case (e.g., 512 or 1024 or 2048). + layernorm_epsilon (`float`, *optional*, defaults to 1e-5): + The epsilon used by the layer normalization layers. + use_cache (`bool`, *optional*, defaults to `True`): + Whether the model should return the last key/values attentions (not used by all models). 
+ Example: + + ```python + >>> from configuration_chatglm import ChatGLMConfig + >>> from modeling_chatglm import ChatGLMModel + + >>> # Initializing a ChatGLM-6B THUDM/ChatGLM-6B style configuration + >>> configuration = ChatGLMConfig() + + >>> # Initializing a model from the THUDM/ChatGLM-6B style configuration + >>> model = ChatGLMModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` +""" + model_type = "chatglm" + + def __init__( + self, + vocab_size=150528, + hidden_size=4096, + num_layers=28, + num_attention_heads=32, + layernorm_epsilon=1e-5, + use_cache=False, + bos_token_id=150004, + eos_token_id=150005, + mask_token_id=150000, + gmask_token_id=150001, + pad_token_id=0, + max_sequence_length=2048, + inner_hidden_size=16384, + position_encoding_2d=True, + quantization_bit=0, + pre_seq_len=None, + prefix_projection=False, + **kwargs + ): + self.num_layers = num_layers + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_attention_heads = num_attention_heads + self.max_sequence_length = max_sequence_length + self.layernorm_epsilon = layernorm_epsilon + self.inner_hidden_size = inner_hidden_size + self.use_cache = use_cache + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + self.pad_token_id = pad_token_id + self.mask_token_id = mask_token_id + self.gmask_token_id = gmask_token_id + self.position_encoding_2d = position_encoding_2d + self.quantization_bit = quantization_bit + self.pre_seq_len = pre_seq_len + self.prefix_projection = prefix_projection + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs + ) diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/generation_config.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/generation_config.json new file mode 100644 index 0000000000000000000000000000000000000000..e6191613b8cca2cd0d91cc92e90f2a353388ec3e --- /dev/null 
+++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/generation_config.json @@ -0,0 +1,7 @@ +{ + "_from_model_config": true, + "bos_token_id": 130004, + "eos_token_id": 130005, + "pad_token_id": 3, + "transformers_version": "4.27.1" +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/ice_text.model b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/ice_text.model new file mode 100644 index 0000000000000000000000000000000000000000..0dcfe31e02ad0767e0c80a469340bf97f58e777a --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/ice_text.model @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e974d9a69c242ce014c88c2b26089270f6198f3c0b700a887666cd3e816f17e +size 2706249 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/modeling_chatglm.py b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/modeling_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..fc291119053cdf164b697ad022735f916fb3e8b1 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/modeling_chatglm.py @@ -0,0 +1,1435 @@ +""" PyTorch ChatGLM model. 
""" + +import math +import copy +import os +import warnings +import re +import sys + +import torch +import torch.utils.checkpoint +import torch.nn.functional as F +from torch import nn +from torch.nn import CrossEntropyLoss, LayerNorm +from torch.nn.utils import skip_init +from typing import Optional, Tuple, Union, List, Callable, Dict, Any + +from transformers.utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, +) +from transformers.modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, + BaseModelOutputWithPastAndCrossAttentions, +) +from transformers.modeling_utils import PreTrainedModel +from transformers.utils import logging +from transformers.generation.logits_process import LogitsProcessor +from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput + +from .configuration_chatglm import ChatGLMConfig + +# flags required to enable jit fusion kernels + +if sys.platform != 'darwin': + torch._C._jit_set_profiling_mode(False) + torch._C._jit_set_profiling_executor(False) + torch._C._jit_override_can_fuse_on_cpu(True) + torch._C._jit_override_can_fuse_on_gpu(True) + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "THUDM/ChatGLM-6B" +_CONFIG_FOR_DOC = "ChatGLM6BConfig" + +CHATGLM_6B_PRETRAINED_MODEL_ARCHIVE_LIST = [ + "THUDM/chatglm-6b", + # See all ChatGLM-6B models at https://huggingface.co/models?filter=chatglm +] + + +class InvalidScoreLogitsProcessor(LogitsProcessor): + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + if torch.isnan(scores).any() or torch.isinf(scores).any(): + scores.zero_() + scores[..., 5] = 5e4 + return scores + + +def load_tf_weights_in_chatglm_6b(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import re + + import numpy as np + import tensorflow as tf + except ImportError: + logger.error( + 
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name.split("/") + # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v + # which are not required for using pretrained model + if any( + n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] + for n in name + ): + logger.info(f"Skipping {'/'.join(name)}") + continue + pointer = model + for m_name in name: + if re.fullmatch(r"[A-Za-z]+_\d+", m_name): + scope_names = re.split(r"_(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "kernel" or scope_names[0] == "gamma": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "output_bias" or scope_names[0] == "beta": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "output_weights": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "squad": + pointer = getattr(pointer, "classifier") + else: + try: + pointer = getattr(pointer, scope_names[0]) + except AttributeError: + logger.info(f"Skipping {'/'.join(name)}") + continue + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + if m_name[-11:] == "_embeddings": + pointer = getattr(pointer, "weight") + elif m_name == "kernel": + array = np.transpose(array) + try: + assert ( + pointer.shape == array.shape + ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" + except 
AssertionError as e: + e.args += (pointer.shape, array.shape) + raise + logger.info(f"Initialize PyTorch weight {name}") + pointer.data = torch.from_numpy(array) + return model + + +class PrefixEncoder(torch.nn.Module): + """ + The torch.nn model to encode the prefix + Input shape: (batch-size, prefix-length) + Output shape: (batch-size, prefix-length, 2*layers*hidden) + """ + + def __init__(self, config): + super().__init__() + self.prefix_projection = config.prefix_projection + if self.prefix_projection: + # Use a two-layer MLP to encode the prefix + self.embedding = torch.nn.Embedding(config.pre_seq_len, config.hidden_size) + self.trans = torch.nn.Sequential( + torch.nn.Linear(config.hidden_size, config.hidden_size), + torch.nn.Tanh(), + torch.nn.Linear(config.hidden_size, config.num_layers * config.hidden_size * 2) + ) + else: + self.embedding = torch.nn.Embedding(config.pre_seq_len, config.num_layers * config.hidden_size * 2) + + def forward(self, prefix: torch.Tensor): + if self.prefix_projection: + prefix_tokens = self.embedding(prefix) + past_key_values = self.trans(prefix_tokens) + else: + past_key_values = self.embedding(prefix) + return past_key_values + + +@torch.jit.script +def gelu_impl(x): + """OpenAI's gelu implementation.""" + return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * + (1.0 + 0.044715 * x * x))) + + +def gelu(x): + return gelu_impl(x) + + +class RotaryEmbedding(torch.nn.Module): + def __init__(self, dim, base=10000, precision=torch.half, learnable=False): + super().__init__() + inv_freq = 1. 
/ (base ** (torch.arange(0, dim, 2).float() / dim)) + inv_freq = inv_freq.half() + self.learnable = learnable + if learnable: + self.inv_freq = torch.nn.Parameter(inv_freq) + self.max_seq_len_cached = None + else: + self.register_buffer('inv_freq', inv_freq) + self.max_seq_len_cached = None + self.cos_cached = None + self.sin_cached = None + self.precision = precision + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, + error_msgs): + pass + + def forward(self, x, seq_dim=1, seq_len=None): + if seq_len is None: + seq_len = x.shape[seq_dim] + if self.max_seq_len_cached is None or (seq_len > self.max_seq_len_cached): + self.max_seq_len_cached = None if self.learnable else seq_len + t = torch.arange(seq_len, device=x.device, dtype=self.inv_freq.dtype) + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1).to(x.device) + if self.precision == torch.bfloat16: + emb = emb.float() + + # [sx, 1 (b * np), hn] + cos_cached = emb.cos()[:, None, :] + sin_cached = emb.sin()[:, None, :] + if self.precision == torch.bfloat16: + cos_cached = cos_cached.bfloat16() + sin_cached = sin_cached.bfloat16() + if self.learnable: + return cos_cached, sin_cached + self.cos_cached, self.sin_cached = cos_cached, sin_cached + return self.cos_cached[:seq_len, ...], self.sin_cached[:seq_len, ...] 
+ + def _apply(self, fn): + if self.cos_cached is not None: + self.cos_cached = fn(self.cos_cached) + if self.sin_cached is not None: + self.sin_cached = fn(self.sin_cached) + return super()._apply(fn) + + +def rotate_half(x): + x1, x2 = x[..., :x.shape[-1] // 2], x[..., x.shape[-1] // 2:] + return torch.cat((-x2, x1), dim=x1.ndim - 1) # dim=-1 triggers a bug in earlier torch versions + + +@torch.jit.script +def apply_rotary_pos_emb_index(q, k, cos, sin, position_id): + # position_id: [sq, b], q, k: [sq, b, np, hn], cos: [sq, 1, hn] -> [sq, b, 1, hn] + cos, sin = F.embedding(position_id, cos.squeeze(1)).unsqueeze(2), \ + F.embedding(position_id, sin.squeeze(1)).unsqueeze(2) + q, k = (q * cos) + (rotate_half(q) * sin), (k * cos) + (rotate_half(k) * sin) + return q, k + + +def attention_fn( + self, + query_layer, + key_layer, + value_layer, + attention_mask, + hidden_size_per_partition, + layer_id, + layer_past=None, + scaling_attention_score=True, + use_cache=False, +): + if layer_past is not None: + past_key, past_value = layer_past[0], layer_past[1] + key_layer = torch.cat((past_key, key_layer), dim=0) + value_layer = torch.cat((past_value, value_layer), dim=0) + + # seqlen, batch, num_attention_heads, hidden_size_per_attention_head + seq_len, b, nh, hidden_size = key_layer.shape + + if use_cache: + present = (key_layer, value_layer) + else: + present = None + + query_key_layer_scaling_coeff = float(layer_id + 1) + if scaling_attention_score: + query_layer = query_layer / (math.sqrt(hidden_size) * query_key_layer_scaling_coeff) + + # =================================== + # Raw attention scores. 
[b, np, s, s] + # =================================== + + # [b, np, sq, sk] + output_size = (query_layer.size(1), query_layer.size(2), query_layer.size(0), key_layer.size(0)) + + # [sq, b, np, hn] -> [sq, b * np, hn] + query_layer = query_layer.view(output_size[2], output_size[0] * output_size[1], -1) + # [sk, b, np, hn] -> [sk, b * np, hn] + key_layer = key_layer.view(output_size[3], output_size[0] * output_size[1], -1) + + matmul_result = torch.zeros( + 1, 1, 1, + dtype=query_layer.dtype, + device=query_layer.device, + ) + + matmul_result = torch.baddbmm( + matmul_result, + query_layer.transpose(0, 1), # [b * np, sq, hn] + key_layer.transpose(0, 1).transpose(1, 2), # [b * np, hn, sk] + beta=0.0, + alpha=1.0, + ) + + # change view to [b, np, sq, sk] + attention_scores = matmul_result.view(*output_size) + + if self.scale_mask_softmax: + self.scale_mask_softmax.scale = query_key_layer_scaling_coeff + attention_probs = self.scale_mask_softmax(attention_scores, attention_mask.contiguous()) + else: + if not (attention_mask == 0).all(): + # if auto-regressive, skip + attention_scores.masked_fill_(attention_mask, -10000.0) + dtype = attention_scores.dtype + attention_scores = attention_scores.float() + attention_scores = attention_scores * query_key_layer_scaling_coeff + + attention_probs = F.softmax(attention_scores, dim=-1) + + attention_probs = attention_probs.type(dtype) + + # ========================= + # Context layer. [sq, b, hp] + # ========================= + + # value_layer -> context layer. 
+ # [sk, b, np, hn] --> [b, np, sq, hn] + + # context layer shape: [b, np, sq, hn] + output_size = (value_layer.size(1), value_layer.size(2), query_layer.size(0), value_layer.size(3)) + + # change view [sk, b * np, hn] + value_layer = value_layer.view(value_layer.size(0), output_size[0] * output_size[1], -1) + + # change view [b * np, sq, sk] + attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1) + + # matmul: [b * np, sq, hn] + context_layer = torch.bmm(attention_probs, value_layer.transpose(0, 1)) + + # change view [b, np, sq, hn] + context_layer = context_layer.view(*output_size) + + # [b, np, sq, hn] --> [sq, b, np, hn] + context_layer = context_layer.permute(2, 0, 1, 3).contiguous() + + # [sq, b, np, hn] --> [sq, b, hp] + new_context_layer_shape = context_layer.size()[:-2] + (hidden_size_per_partition,) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, present, attention_probs) + + return outputs + + +def default_init(cls, *args, **kwargs): + return cls(*args, **kwargs) + + +class SelfAttention(torch.nn.Module): + def __init__(self, hidden_size, num_attention_heads, + layer_id, hidden_size_per_attention_head=None, bias=True, + params_dtype=torch.float, position_encoding_2d=True, empty_init=True): + if empty_init: + init_method = skip_init + else: + init_method = default_init + super(SelfAttention, self).__init__() + + self.layer_id = layer_id + self.hidden_size = hidden_size + self.hidden_size_per_partition = hidden_size + self.num_attention_heads = num_attention_heads + self.num_attention_heads_per_partition = num_attention_heads + self.position_encoding_2d = position_encoding_2d + self.rotary_emb = RotaryEmbedding( + self.hidden_size // (self.num_attention_heads * 2) + if position_encoding_2d + else self.hidden_size // self.num_attention_heads, + base=10000, + precision=torch.half, + learnable=False, + ) + + self.scale_mask_softmax = None + + if hidden_size_per_attention_head 
is None: + self.hidden_size_per_attention_head = hidden_size // num_attention_heads + else: + self.hidden_size_per_attention_head = hidden_size_per_attention_head + + self.inner_hidden_size = num_attention_heads * self.hidden_size_per_attention_head + + # Strided linear layer. + self.query_key_value = init_method( + torch.nn.Linear, + hidden_size, + 3 * self.inner_hidden_size, + bias=bias, + dtype=params_dtype, + ) + + self.dense = init_method( + torch.nn.Linear, + self.inner_hidden_size, + hidden_size, + bias=bias, + dtype=params_dtype, + ) + + @staticmethod + def attention_mask_func(attention_scores, attention_mask): + attention_scores.masked_fill_(attention_mask, -10000.0) + return attention_scores + + def split_tensor_along_last_dim(self, tensor, num_partitions, + contiguous_split_chunks=False): + """Split a tensor along its last dimension. + Arguments: + tensor: input tensor. + num_partitions: number of partitions to split the tensor + contiguous_split_chunks: If True, make each chunk contiguous + in memory. + """ + # Get the size and dimension. + last_dim = tensor.dim() - 1 + last_dim_size = tensor.size()[last_dim] // num_partitions + # Split. + tensor_list = torch.split(tensor, last_dim_size, dim=last_dim) + # Note: torch.split does not create contiguous tensors by default. 
        if contiguous_split_chunks:
            # torch.split returns views; force contiguous storage per chunk.
            return tuple(chunk.contiguous() for chunk in tensor_list)

        return tensor_list

    def forward(
            self,
            hidden_states: torch.Tensor,
            position_ids,
            attention_mask: torch.Tensor,
            layer_id,
            layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
            use_cache: bool = False,
            output_attentions: bool = False,
    ):
        """
        Self-attention with rotary position embeddings.

        hidden_states: [seq_len, batch, hidden_size]
        attention_mask: [(1, 1), seq_len, seq_len]

        Returns (output, present) plus attention_probs as a third element
        when output_attentions is set.
        """

        # Fused QKV projection: [seq_len, batch, 3 * hidden_size]
        mixed_raw_layer = self.query_key_value(hidden_states)

        # [seq_len, batch, 3 * hidden_size] --> [seq_len, batch, num_attention_heads, 3 * hidden_size_per_attention_head]
        new_tensor_shape = mixed_raw_layer.size()[:-1] + (
            self.num_attention_heads_per_partition,
            3 * self.hidden_size_per_attention_head,
        )
        mixed_raw_layer = mixed_raw_layer.view(*new_tensor_shape)

        # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
        (query_layer, key_layer, value_layer) = self.split_tensor_along_last_dim(mixed_raw_layer, 3)

        if self.position_encoding_2d:
            # 2D RoPE: the first half of each head dim is rotated by absolute
            # position, the second half by the within-block position.
            q1, q2 = query_layer.chunk(2, dim=(query_layer.ndim - 1))
            k1, k2 = key_layer.chunk(2, dim=(key_layer.ndim - 1))
            cos, sin = self.rotary_emb(q1, seq_len=position_ids.max() + 1)
            # position_ids assumed [batch, 2, seq_len] here -> two [seq_len, batch] index tensors.
            position_ids, block_position_ids = position_ids[:, 0, :].transpose(0, 1).contiguous(), \
                position_ids[:, 1, :].transpose(0, 1).contiguous()
            q1, k1 = apply_rotary_pos_emb_index(q1, k1, cos, sin, position_ids)
            q2, k2 = apply_rotary_pos_emb_index(q2, k2, cos, sin, block_position_ids)
            query_layer = torch.concat([q1, q2], dim=(q1.ndim - 1))
            key_layer = torch.concat([k1, k2], dim=(k1.ndim - 1))
        else:
            position_ids = position_ids.transpose(0, 1)
            cos, sin = self.rotary_emb(value_layer, seq_len=position_ids.max() + 1)
            # [seq_len, batch, num_attention_heads, hidden_size_per_attention_head]
            query_layer, key_layer = apply_rotary_pos_emb_index(query_layer, key_layer, cos, sin, position_ids)

        # [seq_len, batch, hidden_size]
        context_layer, present, attention_probs = attention_fn(
            self=self,
            query_layer=query_layer,
            key_layer=key_layer,
            value_layer=value_layer,
            attention_mask=attention_mask,
            hidden_size_per_partition=self.hidden_size_per_partition,
            layer_id=layer_id,
            layer_past=layer_past,
            use_cache=use_cache
        )

        output = self.dense(context_layer)

        outputs = (output, present)

        if output_attentions:
            outputs += (attention_probs,)

        return outputs  # output, present, attention_probs


class GEGLU(torch.nn.Module):
    """Gated GELU: split the last dim in half and gate one half with GELU of the other."""

    def __init__(self):
        super().__init__()
        self.activation_fn = F.gelu

    def forward(self, x):
        # dim=-1 breaks in jit for pt<1.10
        x1, x2 = x.chunk(2, dim=(x.ndim - 1))
        return x1 * self.activation_fn(x2)


class GLU(torch.nn.Module):
    """Feed-forward block: hidden_size -> inner_hidden_size -> activation -> hidden_size."""

    def __init__(self, hidden_size, inner_hidden_size=None,
                 layer_id=None, bias=True, activation_func=gelu, params_dtype=torch.float, empty_init=True):
        super(GLU, self).__init__()
        # skip_init defers weight materialization (weights come from the checkpoint);
        # default_init initializes eagerly.
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init
        self.layer_id = layer_id
        self.activation_func = activation_func

        # Project to 4h.
        self.hidden_size = hidden_size
        if inner_hidden_size is None:
            inner_hidden_size = 4 * hidden_size
        self.inner_hidden_size = inner_hidden_size
        self.dense_h_to_4h = init_method(
            torch.nn.Linear,
            self.hidden_size,
            self.inner_hidden_size,
            bias=bias,
            dtype=params_dtype,
        )
        # Project back to h.
+ self.dense_4h_to_h = init_method( + torch.nn.Linear, + self.inner_hidden_size, + self.hidden_size, + bias=bias, + dtype=params_dtype, + ) + + def forward(self, hidden_states): + """ + hidden_states: [seq_len, batch, hidden_size] + """ + + # [seq_len, batch, inner_hidden_size] + intermediate_parallel = self.dense_h_to_4h(hidden_states) + + intermediate_parallel = self.activation_func(intermediate_parallel) + + output = self.dense_4h_to_h(intermediate_parallel) + + return output + + +class GLMBlock(torch.nn.Module): + def __init__( + self, + hidden_size, + num_attention_heads, + layernorm_epsilon, + layer_id, + inner_hidden_size=None, + hidden_size_per_attention_head=None, + layernorm=LayerNorm, + use_bias=True, + params_dtype=torch.float, + num_layers=28, + position_encoding_2d=True, + empty_init=True + ): + super(GLMBlock, self).__init__() + # Set output layer initialization if not provided. + + self.layer_id = layer_id + + # Layernorm on the input data. + self.input_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) + + self.position_encoding_2d = position_encoding_2d + + # Self attention. + self.attention = SelfAttention( + hidden_size, + num_attention_heads, + layer_id, + hidden_size_per_attention_head=hidden_size_per_attention_head, + bias=use_bias, + params_dtype=params_dtype, + position_encoding_2d=self.position_encoding_2d, + empty_init=empty_init + ) + + # Layernorm on the input data. 
+ self.post_attention_layernorm = layernorm(hidden_size, eps=layernorm_epsilon) + + self.num_layers = num_layers + + # GLU + self.mlp = GLU( + hidden_size, + inner_hidden_size=inner_hidden_size, + bias=use_bias, + layer_id=layer_id, + params_dtype=params_dtype, + empty_init=empty_init + ) + + def forward( + self, + hidden_states: torch.Tensor, + position_ids, + attention_mask: torch.Tensor, + layer_id, + layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, + use_cache: bool = False, + output_attentions: bool = False, + ): + """ + hidden_states: [seq_len, batch, hidden_size] + attention_mask: [(1, 1), seq_len, seq_len] + """ + + # Layer norm at the begining of the transformer layer. + # [seq_len, batch, hidden_size] + attention_input = self.input_layernorm(hidden_states) + + # Self attention. + attention_outputs = self.attention( + attention_input, + position_ids, + attention_mask=attention_mask, + layer_id=layer_id, + layer_past=layer_past, + use_cache=use_cache, + output_attentions=output_attentions + ) + + attention_output = attention_outputs[0] + + outputs = attention_outputs[1:] + + # Residual connection. + alpha = (2 * self.num_layers) ** 0.5 + hidden_states = attention_input * alpha + attention_output + + mlp_input = self.post_attention_layernorm(hidden_states) + + # MLP. + mlp_output = self.mlp(mlp_input) + + # Second residual connection. + output = mlp_input * alpha + mlp_output + + if use_cache: + outputs = (output,) + outputs + else: + outputs = (output,) + outputs[1:] + + return outputs # hidden_states, present, attentions + + +class ChatGLMPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and + a simple interface for downloading and loading pretrained models. 
+ """ + + is_parallelizable = False + supports_gradient_checkpointing = True + config_class = ChatGLMConfig + base_model_prefix = "transformer" + _no_split_modules = ["GLMBlock"] + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module: nn.Module): + """Initialize the weights.""" + return + + def get_masks(self, input_ids, device): + batch_size, seq_length = input_ids.shape + context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids] + attention_mask = torch.ones((batch_size, seq_length, seq_length), device=device) + attention_mask.tril_() + for i, context_length in enumerate(context_lengths): + attention_mask[i, :, :context_length] = 1 + attention_mask.unsqueeze_(1) + attention_mask = (attention_mask < 0.5).bool() + + return attention_mask + + def get_position_ids(self, input_ids, mask_positions, device, use_gmasks=None): + batch_size, seq_length = input_ids.shape + if use_gmasks is None: + use_gmasks = [False] * batch_size + context_lengths = [seq.tolist().index(self.config.bos_token_id) for seq in input_ids] + if self.position_encoding_2d: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + for i, context_length in enumerate(context_lengths): + position_ids[i, context_length:] = mask_positions[i] + block_position_ids = [torch.cat(( + torch.zeros(context_length, dtype=torch.long, device=device), + torch.arange(seq_length - context_length, dtype=torch.long, device=device) + 1 + )) for context_length in context_lengths] + block_position_ids = torch.stack(block_position_ids, dim=0) + position_ids = torch.stack((position_ids, block_position_ids), dim=1) + else: + position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1) + for i, context_length in enumerate(context_lengths): + if not use_gmasks[i]: + position_ids[context_length:] = mask_positions[i] + + return position_ids + 
    def _set_gradient_checkpointing(self, module, value=False):
        # Toggle gradient checkpointing on the wrapped ChatGLMModel.
        if isinstance(module, ChatGLMModel):
            module.gradient_checkpointing = value


CHATGLM_6B_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general
    usage and behavior.

    Parameters:
        config ([`~ChatGLM6BConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CHATGLM_6B_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`ChatGLM6BTokenizer`].
            See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range `[0, config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert *input_ids* indices into associated vectors
            than the model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ChatGLM-6B Model transformer outputting raw hidden-states without any specific head on top.",
    CHATGLM_6B_START_DOCSTRING,
)
class ChatGLMModel(ChatGLMPreTrainedModel):
    """

    The model can behave as an encoder (with only self-attention) as well
    as a decoder, in which case a layer of cross-attention is added between
    the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani,
    Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the
    `is_decoder` argument of the configuration set to `True`.
+ To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` + argument and `add_cross_attention` set to `True`; an + `encoder_hidden_states` is then expected as an input to the forward pass. + """ + + def __init__(self, config: ChatGLMConfig, empty_init=True): + super().__init__(config) + if empty_init: + init_method = skip_init + else: + init_method = default_init + # recording parameters + self.max_sequence_length = config.max_sequence_length + self.hidden_size = config.hidden_size + self.params_dtype = torch.half + self.num_attention_heads = config.num_attention_heads + self.vocab_size = config.vocab_size + self.num_layers = config.num_layers + self.layernorm_epsilon = config.layernorm_epsilon + self.inner_hidden_size = config.inner_hidden_size + self.hidden_size_per_attention_head = self.hidden_size // self.num_attention_heads + self.position_encoding_2d = config.position_encoding_2d + self.pre_seq_len = config.pre_seq_len + self.prefix_projection = config.prefix_projection + + self.word_embeddings = init_method( + torch.nn.Embedding, + num_embeddings=self.vocab_size, embedding_dim=self.hidden_size, + dtype=self.params_dtype + ) + self.gradient_checkpointing = False + + def get_layer(layer_id): + return GLMBlock( + self.hidden_size, + self.num_attention_heads, + self.layernorm_epsilon, + layer_id, + inner_hidden_size=self.inner_hidden_size, + hidden_size_per_attention_head=self.hidden_size_per_attention_head, + layernorm=LayerNorm, + use_bias=True, + params_dtype=self.params_dtype, + position_encoding_2d=self.position_encoding_2d, + empty_init=empty_init + ) + + self.layers = torch.nn.ModuleList( + [get_layer(layer_id) for layer_id in range(self.num_layers)] + ) + + # Final layer norm before output. 
+ self.final_layernorm = LayerNorm(self.hidden_size, eps=self.layernorm_epsilon) + + if self.pre_seq_len is not None: + for param in self.parameters(): + param.requires_grad = False + self.prefix_tokens = torch.arange(self.pre_seq_len).long() + self.prefix_encoder = PrefixEncoder(config) + self.dropout = torch.nn.Dropout(0.1) + + # total_params = sum(p.numel() for p in self.parameters()) + # trainable_params = sum(p.numel() for p in self.parameters() if p.requires_grad) + # print("Using p-tuning v2: # trainable_params = {} / {}".format(trainable_params, total_params)) + + def get_input_embeddings(self): + return self.word_embeddings + + def set_input_embeddings(self, new_embeddings: torch.Tensor): + self.word_embeddings = new_embeddings + + def get_prompt(self, batch_size, device, dtype=torch.half): + prefix_tokens = self.prefix_tokens.unsqueeze(0).expand(batch_size, -1).to(device) + past_key_values = self.prefix_encoder(prefix_tokens).type(dtype) + past_key_values = past_key_values.view( + batch_size, + self.pre_seq_len, + self.num_layers * 2, + self.num_attention_heads, + self.hidden_size // self.num_attention_heads + ) + # seq_len, b, nh, hidden_size + past_key_values = self.dropout(past_key_values) + past_key_values = past_key_values.permute([2, 1, 0, 3, 4]).split(2) + # past_key_values = [(v[0], v[1]) for v in past_key_values] + return past_key_values + + @add_start_docstrings_to_model_forward(CHATGLM_6B_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None, + inputs_embeds: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + 
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPast]:

        # Resolve per-call flags against the config defaults.
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            batch_size, seq_length = input_ids.shape[:2]
        elif inputs_embeds is not None:
            batch_size, seq_length = inputs_embeds.shape[:2]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if past_key_values is None:
            if self.pre_seq_len is not None:
                # P-tuning v2: seed the cache with the learned prefix.
                # NOTE(review): this path indexes input_ids, so it assumes
                # input_ids was passed (not only inputs_embeds) — confirm.
                past_key_values = self.get_prompt(batch_size=input_ids.shape[0], device=input_ids.device,
                                                  dtype=inputs_embeds.dtype)
            else:
                past_key_values = tuple([None] * len(self.layers))

        if attention_mask is None:
            attention_mask = self.get_masks(
                input_ids,
                device=input_ids.device
            )


        if position_ids is None:
            MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
            seqs = input_ids.tolist()

            # Locate the [MASK]/[gMASK] token per sequence; gMASK wins when present.
            mask_positions, use_gmasks = [], []
            for seq in seqs:
                mask_token = gMASK if gMASK in seq else MASK
                use_gmask = mask_token == gMASK
                mask_positions.append(seq.index(mask_token))
                use_gmasks.append(use_gmask)


            position_ids = self.get_position_ids(
                input_ids,
                mask_positions=mask_positions,
                device=input_ids.device,
                use_gmasks=use_gmasks
            )

        if self.pre_seq_len is not None and attention_mask is not None:
            # Prepend an all-visible (False) mask segment for the prefix tokens.
            prefix_attention_mask = torch.ones(batch_size, 1, input_ids.size(-1), self.pre_seq_len).to(
                attention_mask.device)
            prefix_attention_mask = (prefix_attention_mask < 0.5).bool()
            attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=3)

        # [seq_len, batch, hidden_size]
        hidden_states = inputs_embeds.transpose(0, 1)

        presents = () if use_cache else None
        all_self_attentions = () if output_attentions else None
        all_hidden_states = () if output_hidden_states else None

        if attention_mask is None:
            attention_mask = torch.zeros(1, 1, device=input_ids.device).bool()
        else:
            attention_mask = attention_mask.to(hidden_states.device)

        for i, layer in enumerate(self.layers):

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_past = past_key_values[i]

            if self.gradient_checkpointing and self.training:
                # checkpoint() takes positional args only, hence the flat argument list.
                layer_ret = torch.utils.checkpoint.checkpoint(
                    layer,
                    hidden_states,
                    position_ids,
                    attention_mask,
                    torch.tensor(i),
                    layer_past,
                    use_cache,
                    output_attentions
                )
            else:
                layer_ret = layer(
                    hidden_states,
                    position_ids=position_ids,
                    attention_mask=attention_mask,
                    layer_id=torch.tensor(i),
                    layer_past=layer_past,
                    use_cache=use_cache,
                    output_attentions=output_attentions
                )

            hidden_states = layer_ret[0]

            if use_cache:
                presents = presents + (layer_ret[1],)

            if output_attentions:
                # Attention probs are at index 2 when a present was also returned.
                all_self_attentions = all_self_attentions + (layer_ret[2 if use_cache else 1],)

        # Final layer norm.
        hidden_states = self.final_layernorm(hidden_states)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)

        return BaseModelOutputWithPast(
            last_hidden_state=hidden_states,
            past_key_values=presents,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
        )


class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
    """ChatGLM-6B with a language-modeling head and chat/stream generation helpers."""

    def __init__(self, config: ChatGLMConfig, empty_init=True):
        super().__init__(config)
        if empty_init:
            init_method = skip_init
        else:
            init_method = default_init

        # self.hidden_size = config.hidden_size
        # self.params_dtype = torch.half
        # self.vocab_size = config.vocab_size
        self.max_sequence_length = config.max_sequence_length

        self.position_encoding_2d = config.position_encoding_2d

        self.transformer = ChatGLMModel(config, empty_init=empty_init)

        self.lm_head = init_method(
            nn.Linear,
            config.hidden_size,
            config.vocab_size,
            bias=False,
            dtype=torch.half
        )

        self.config = config

        self.quantized = False

        if self.config.quantization_bit:
            self.quantize(self.config.quantization_bit, empty_init=True)

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def _update_model_kwargs_for_generation(
            self,
            outputs: ModelOutput,
            model_kwargs: Dict[str, Any],
            is_encoder_decoder: bool = False,
            standardize_cache_format: bool = False,
    ) -> Dict[str, Any]:
        """Extend the cached mask and position ids by one step after each token."""
        # update past_key_values
        model_kwargs["past_key_values"] = self._extract_past_from_model_output(
            outputs, standardize_cache_format=standardize_cache_format
        )

        # update attention mask
        if "attention_mask" in model_kwargs:
            attention_mask = model_kwargs["attention_mask"]
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                # Grow the mask one column (new key) and one row (new query);
                # the new row's last element is False, i.e. attendable.
                attention_mask = torch.cat(
                    [attention_mask, attention_mask.new_ones((*attention_mask.shape[:3], 1))], dim=3)
                new_attention_mask = attention_mask[:, :, -1:].clone()
                new_attention_mask[..., -1] = False
                model_kwargs["attention_mask"] = torch.cat(
                    [attention_mask, new_attention_mask], dim=2
                )

        # update position ids
        if "position_ids" in model_kwargs:
            position_ids = model_kwargs["position_ids"]
            # Repeat the last 2D position id, bumping only the block position
            # (row 1); the absolute position (row 0) stays at the mask position.
            new_position_id = position_ids[..., -1:].clone()
            new_position_id[:, 1, :] += 1
            model_kwargs["position_ids"] = torch.cat(
                [position_ids, new_position_id], dim=-1
            )

        return model_kwargs

    def prepare_inputs_for_generation(
            self,
            input_ids: torch.LongTensor,
            past: Optional[torch.Tensor] = None,
            past_key_values: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            **kwargs
    ) -> dict: 
        """Build step inputs: full prompt on the first step, last token afterwards."""
        batch_size, seq_length = input_ids.shape
        MASK, gMASK = self.config.mask_token_id, self.config.gmask_token_id
        seqs = input_ids.tolist()
        mask_positions, use_gmasks = [], []
        for seq in seqs:
            mask_token = gMASK if gMASK in seq else MASK
            use_gmask = mask_token == gMASK
            mask_positions.append(seq.index(mask_token))
            use_gmasks.append(use_gmask)

        # only last token for input_ids if past is not None
        if past is not None or past_key_values is not None:
            last_token = input_ids[:, -1].unsqueeze(-1)
            if attention_mask is not None and attention_mask.dtype == torch.bool:
                attention_mask = attention_mask[:, :, -1:]
            else:
                attention_mask = None
            if position_ids is not None:
                position_ids = position_ids[..., -1:]
            else:
                context_lengths = [seq.index(self.config.bos_token_id) for seq in seqs]
                if self.position_encoding_2d:
                    # [abs position = mask position, block position = tokens generated so far]
                    position_ids = torch.tensor(
                        [[mask_position, seq_length - context_length] for mask_position, context_length in
                         zip(mask_positions, context_lengths)], dtype=torch.long, device=input_ids.device).unsqueeze(-1)
                else:
                    position_ids = torch.tensor([mask_position for mask_position in mask_positions], dtype=torch.long,
                                                device=input_ids.device).unsqueeze(-1)

            if past is None:
                past = past_key_values
            return {
                "input_ids": last_token,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }
        else:
            if attention_mask is not None and attention_mask.dtype != torch.bool:
                logger.warning_once(f"The dtype of attention mask ({attention_mask.dtype}) is not bool")
                attention_mask = None
            if attention_mask is None:
                attention_mask = self.get_masks(
                    input_ids,
                    device=input_ids.device
                )
            if position_ids is None:
                position_ids = self.get_position_ids(
                    input_ids,
                    device=input_ids.device,
                    mask_positions=mask_positions,
                    use_gmasks=use_gmasks
                )

            return {
                "input_ids": input_ids,
                "past_key_values": past,
                "position_ids": position_ids,
                "attention_mask": attention_mask
            }

    def forward(
            self,
            input_ids: Optional[torch.Tensor] = None,
            position_ids: Optional[torch.Tensor] = None,
            attention_mask: Optional[torch.Tensor] = None,
            past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
            inputs_embeds: Optional[torch.Tensor] = None,
            labels: Optional[torch.Tensor] = None,
            use_cache: Optional[bool] = None,
            output_attentions: Optional[bool] = None,
            output_hidden_states: Optional[bool] = None,
            return_dict: Optional[bool] = None,
    ):
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids=input_ids,
            position_ids=position_ids,
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = transformer_outputs[0]

        # Transformer output is [seq_len, batch, hidden]; logits become [batch, seq_len, vocab].
        lm_logits = self.lm_head(hidden_states).permute(1, 0, 2).contiguous()

        loss = None

        if labels is not None:
            # Compute the loss in fp32 for numerical stability under fp16 weights.
            lm_logits = lm_logits.to(torch.float32)

            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss(ignore_index=-100)
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

            lm_logits = lm_logits.to(hidden_states.dtype)
            loss = loss.to(hidden_states.dtype)

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @staticmethod
    def _reorder_cache(
            past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
    ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
        """
        This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
        [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
        beam_idx at every generation step.

        Output shares the same memory storage as `past`.
+ """ + return tuple( + ( + layer_past[0].index_select(1, beam_idx.to(layer_past[0].device)), + layer_past[1].index_select(1, beam_idx.to(layer_past[1].device)), + ) + for layer_past in past + ) + + def process_response(self, response): + response = response.strip() + response = response.replace("[[训练时间]]", "2023年") + punkts = [ + [",", ","], + ["!", "!"], + [":", ":"], + [";", ";"], + ["\?", "?"], + ] + for item in punkts: + response = re.sub(r"([\u4e00-\u9fff])%s" % item[0], r"\1%s" % item[1], response) + response = re.sub(r"%s([\u4e00-\u9fff])" % item[0], r"%s\1" % item[1], response) + return response + + @torch.no_grad() + def chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, num_beams=1, + do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs): + if history is None: + history = [] + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(InvalidScoreLogitsProcessor()) + gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, "logits_processor": logits_processor, **kwargs} + if not history: + prompt = query + else: + prompt = "" + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + inputs = tokenizer([prompt], return_tensors="pt") + inputs = inputs.to(self.device) + outputs = self.generate(**inputs, **gen_kwargs) + outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] + response = tokenizer.decode(outputs) + response = self.process_response(response) + history = history + [(query, response)] + return response, history + + @torch.no_grad() + def stream_chat(self, tokenizer, query: str, history: List[Tuple[str, str]] = None, max_length: int = 2048, + do_sample=True, top_p=0.7, temperature=0.95, logits_processor=None, **kwargs): + if 
history is None: + history = [] + if logits_processor is None: + logits_processor = LogitsProcessorList() + logits_processor.append(InvalidScoreLogitsProcessor()) + gen_kwargs = {"max_length": max_length, "do_sample": do_sample, "top_p": top_p, + "temperature": temperature, "logits_processor": logits_processor, **kwargs} + if not history: + prompt = query + else: + prompt = "" + for i, (old_query, response) in enumerate(history): + prompt += "[Round {}]\n问:{}\n答:{}\n".format(i, old_query, response) + prompt += "[Round {}]\n问:{}\n答:".format(len(history), query) + inputs = tokenizer([prompt], return_tensors="pt") + inputs = inputs.to(self.device) + for outputs in self.stream_generate(**inputs, **gen_kwargs): + outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):] + response = tokenizer.decode(outputs) + response = self.process_response(response) + new_history = history + [(query, response)] + yield response, new_history + + @torch.no_grad() + def stream_generate( + self, + input_ids, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, + **kwargs, + ): + batch_size, input_ids_seq_length = input_ids.shape[0], input_ids.shape[-1] + + if generation_config is None: + generation_config = self.generation_config + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) + bos_token_id, eos_token_id = generation_config.bos_token_id, generation_config.eos_token_id + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None: + warnings.warn( + f"Using `max_length`'s default ({generation_config.max_length}) to control the generation 
length. " + "This behaviour is deprecated and will be removed from the config in v5 of Transformers -- we" + " recommend using `max_new_tokens` to control the maximum length of the generation.", + UserWarning, + ) + elif generation_config.max_new_tokens is not None: + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + if not has_default_max_length: + logger.warn( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)", + UserWarning, + ) + + if input_ids_seq_length >= generation_config.max_length: + input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" + logger.warning( + f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing `max_new_tokens`." + ) + + # 2. 
Set generation parameters if not already defined + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + encoder_input_ids=input_ids, + prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, + logits_processor=logits_processor, + ) + + stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + logits_warper = self._get_logits_warper(generation_config) + + unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) + scores = None + while True: + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=False, + output_hidden_states=False, + ) + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + next_token_scores = logits_warper(input_ids, next_token_scores) + + # sample + probs = nn.functional.softmax(next_token_scores, dim=-1) + if generation_config.do_sample: + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + else: + next_tokens = torch.argmax(probs, dim=-1) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + unfinished_sequences = unfinished_sequences.mul((sum(next_tokens != i for i in eos_token_id)).long()) + + # stop when each sentence is finished, or if we exceed the maximum length + if unfinished_sequences.max() == 0 or stopping_criteria(input_ids, 
def quantize(self, bits: int, empty_init=False, **kwargs):
    """Quantize the model's transformer weights to `bits`-bit precision in place.

    Args:
        bits: Target weight bit width (e.g. 4 or 8). A value of 0 is a no-op.
        empty_init: Forwarded to the quantization routine; skips materializing
            real weights when they will be loaded afterwards.
        **kwargs: Extra options forwarded to `quantization.quantize`.

    Returns:
        The model itself (for chaining), except on the `bits == 0` path which
        returns None, mirroring the original behavior.
    """
    if bits == 0:
        # 0 bits means "leave the model untouched"; note this path yields None.
        return

    from .quantization import quantize as _quantize_transformer

    if self.quantized:
        logger.info("Already quantized.")
    else:
        self.quantized = True
        self.config.quantization_bit = bits
        self.transformer = _quantize_transformer(
            self.transformer, bits, empty_init=empty_init, **kwargs
        )
    return self
class Kernel:
    """Bundle of lazily compiled CUDA kernels.

    Wraps a cpm_kernels `LazyKernelCModule` built from `code` and exposes one
    callable attribute (a `KernelFunction`) per name in `function_names`.
    """

    def __init__(self, code: bytes, function_names: List[str]):
        self.code = code
        self._function_names = function_names
        self._cmodule = LazyKernelCModule(self.code)
        # Bind each exported kernel as an attribute so callers can write
        # `kernels.int4WeightCompression(...)` directly.
        for fn_name in self._function_names:
            setattr(self, fn_name, KernelFunction(self._cmodule, fn_name))
aZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7c
OWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5XK9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+Ihc
zrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ" + + kernels = Kernel( + bz2.decompress(base64.b64decode(quantization_code)), + [ + "int4WeightCompression", + "int4WeightExtractionFloat", + 
class W8A16Linear(torch.autograd.Function):
    """Linear layer over a quantized weight matrix.

    The weight is stored as int8/int4 codes plus a per-row scale; each call
    dequantizes it to half precision via `extract_weight_to_half` before the
    matmul, for both the forward and the backward pass.
    """

    @staticmethod
    def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width):
        # Remember the caller's shape so the output can be restored to it.
        ctx.inp_shape = inp.size()
        ctx.weight_bit_width = weight_bit_width
        out_features = quant_w.size(0)
        flat_inp = inp.contiguous().view(-1, inp.size(-1))
        weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width)
        ctx.weight_shape = weight.size()
        # Save the quantized form, not the dequantized weight, to keep memory low.
        ctx.save_for_backward(flat_inp, quant_w, scale_w)
        result = flat_inp.mm(weight.t())
        return result.view(*(ctx.inp_shape[:-1] + (out_features,)))

    @staticmethod
    def backward(ctx, grad_output: torch.Tensor):
        flat_inp, quant_w, scale_w = ctx.saved_tensors
        # Dequantize again for the gradient matmuls.
        weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width)
        flat_grad = grad_output.contiguous().view(-1, weight.size(0))
        grad_input = flat_grad.mm(weight)
        grad_weight = flat_grad.t().mm(flat_inp)
        # No gradients for the scale tensor or the bit-width argument.
        return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None
def quantize(model, weight_bit_width, empty_init=False, **kwargs):
    """Replace fp16 linear with quantized linear.

    Swaps the four Linear submodules of every transformer layer
    (attention.query_key_value, attention.dense, mlp.dense_h_to_4h,
    mlp.dense_4h_to_h) for `QuantizedLinear` replacements holding
    `weight_bit_width`-bit weights.

    Args:
        model: Model whose `.layers` hold attention/mlp Linear submodules.
        weight_bit_width: Target weight bit width (4 or 8).
        empty_init: Forwarded to `QuantizedLinear`; skip real weight
            materialization when weights will be loaded afterwards.
        **kwargs: Accepted for interface compatibility; as in the original
            call sites, these are not forwarded to `QuantizedLinear`.

    Returns:
        The same `model`, mutated in place.
    """

    def _quantized_copy(linear):
        # Build a QuantizedLinear mirroring one existing fp16 Linear module.
        return QuantizedLinear(
            weight_bit_width=weight_bit_width,
            weight_tensor=linear.weight.to(torch.cuda.current_device()),
            bias_tensor=linear.bias,
            in_features=linear.in_features,
            out_features=linear.out_features,
            bias=True,
            dtype=torch.half,
            device=linear.weight.device,
            empty_init=empty_init,
        )

    # Previously this body repeated the constructor call four times verbatim;
    # the helper keeps the four replacements in lockstep.
    for layer in model.layers:
        layer.attention.query_key_value = _quantized_copy(layer.attention.query_key_value)
        layer.attention.dense = _quantized_copy(layer.attention.dense)
        layer.mlp.dense_h_to_4h = _quantized_copy(layer.mlp.dense_h_to_4h)
        layer.mlp.dense_4h_to_h = _quantized_copy(layer.mlp.dense_4h_to_h)
    return model
0000000000000000000000000000000000000000..cc3ebd81066fe789a2871ffce852b9fe1d599590 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/rng_state.pth @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21167843eea23c7bb13414b3a4505f43cf15f12d9e863a31b2ac887e3b2902d5 +size 14575 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/scheduler.pt b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/scheduler.pt new file mode 100644 index 0000000000000000000000000000000000000000..b8d22a4aba762af71ae8ed065b0110c04194c55c --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/scheduler.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b1b3555a90d88e426c39c52ddb55243e6c71e4454bd8b74c88bee0f30cea0d88 +size 627 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/special_tokens_map.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/special_tokens_map.json new file mode 100644 index 0000000000000000000000000000000000000000..1f897c919b758e64c56eb1a7b34b39b569040086 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/special_tokens_map.json @@ -0,0 +1,7 @@ +{ + "bos_token": "", + "eos_token": "", + "mask_token": "[MASK]", + "pad_token": "", + "unk_token": "" +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/tokenization_chatglm.py b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/tokenization_chatglm.py new file mode 100644 index 0000000000000000000000000000000000000000..1d4f0ba532543b6dbdacdd83d30324b7a6abfad3 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/tokenization_chatglm.py @@ -0,0 +1,430 @@ +"""Tokenization classes for ChatGLM.""" +from typing import List, Optional, Union +import os + +from transformers.tokenization_utils import PreTrainedTokenizer +from transformers.utils import logging, PaddingStrategy +from 
class TextTokenizer:
    """Thin convenience wrapper around a SentencePiece model file."""

    def __init__(self, model_path):
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(model_path)
        self.num_tokens = self.sp.vocab_size()

    def encode(self, text):
        """Encode `text` into a list of piece ids."""
        return self.sp.EncodeAsIds(text)

    def decode(self, ids: List[int]):
        """Decode a list of piece ids back into text."""
        return self.sp.DecodeIds(ids)

    def tokenize(self, text):
        """Split `text` into sentencepiece pieces (strings)."""
        return self.sp.EncodeAsPieces(text)

    def convert_tokens_to_ids(self, tokens):
        """Map each piece string to its id."""
        return list(map(self.sp.PieceToId, tokens))

    def convert_token_to_id(self, token):
        """Map a single piece string to its id."""
        return self.sp.PieceToId(token)

    def convert_id_to_token(self, idx):
        """Map a single id back to its piece string."""
        return self.sp.IdToPiece(idx)

    def __len__(self):
        return self.num_tokens
    def decode(self, text_ids: List[int]) -> str:
        """Decode ids produced by `encode` back into text.

        Shifts ids down by `num_image_tokens` (dropping any that fall below
        zero, i.e. image-token ids), runs the SentencePiece decoder, then
        reverses the whitespace/linebreak encoding done by `_preprocess`.
        """
        # Undo the image-token offset applied in `encode`; negative results
        # are image tokens and are dropped before text decoding.
        ids = [int(_id) - self.num_image_tokens for _id in text_ids]
        ids = [_id for _id in ids if _id >= 0]
        text = self._get_text_tokenizer().decode(ids)
        # NOTE(review): the first literal below is empty in this copy of the
        # file and `str.replace("", ...)` would insert "\n" between every
        # character — it looks like a special token (e.g. "<n>") was stripped
        # during extraction. Verify against the upstream source.
        text = text.replace("", "\n")
        text = text.replace(SPTokenizer.get_tab_token(), "\t")
        # Expand <|blank_i|> markers back into runs of i spaces.
        for i in range(2, self.max_blank_length + 1):
            text = text.replace(self.get_blank_token(i), " " * i)
        return text
    def __getitem__(self, x: Union[int, str]):
        """Two-way lookup: id -> token string, or token string -> id.

        Ids below `num_image_tokens` are image tokens; text-token ids are
        offset by `num_image_tokens` relative to the SentencePiece vocab.
        """
        if isinstance(x, int):
            if x < self.num_image_tokens:
                # NOTE(review): this format string is empty in this copy of
                # the file; upstream it is presumably something like
                # "<image_{}>" — confirm against the original source.
                return "".format(x)
            else:
                return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
        elif isinstance(x, str):
            # NOTE(review): the startswith("") prefix is also garbled here
            # (always True as written); the `x[7:-1]` slice suggests the real
            # prefix is 7 characters, e.g. "<image_". Verify upstream.
            if x.startswith("") and x[7:-1].isdigit():
                return int(x[7:-1])
            else:
                return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
        else:
            raise ValueError("The key should be str or int.")
num_image_tokens=num_image_tokens) + + """ Initialisation """ + + @property + def gmask_token_id(self) -> Optional[int]: + if self.gmask_token is None: + return None + return self.convert_tokens_to_ids(self.gmask_token) + + @property + def end_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been + set. + """ + if self.end_token is None: + return None + return self.convert_tokens_to_ids(self.end_token) + + @property + def vocab_size(self): + """ Returns vocab size """ + return self.sp_tokenizer.num_tokens + + def get_vocab(self): + """ Returns vocab as a dict """ + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def preprocess_text(self, inputs): + if self.remove_space: + outputs = " ".join(inputs.strip().split()) + else: + outputs = inputs + + if self.do_lower_case: + outputs = outputs.lower() + + return outputs + + def _tokenize(self, text, **kwargs): + """ Returns a tokenized string. """ + text = self.preprocess_text(text) + + seq = self.sp_tokenizer.tokenize(text) + + return seq + + def _decode( + self, + token_ids: Union[int, List[int]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = True, + **kwargs + ) -> str: + if isinstance(token_ids, int): + token_ids = [token_ids] + if len(token_ids) == 0: + return "" + if self.pad_token_id in token_ids: # remove pad + token_ids = list(filter((self.pad_token_id).__ne__, token_ids)) + return self.sp_tokenizer.decode(token_ids) + + def _convert_token_to_id(self, token): + """ Converts a token (str) in an id using the vocab. 
""" + return self.sp_tokenizer[token] + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.sp_tokenizer[index] + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, self.vocab_files_names["vocab_file"] + ) + else: + vocab_file = save_directory + + with open(self.vocab_file, 'rb') as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """
        Pad encoded inputs (on left/right and up to predefined length or max length in the batch)

        Besides padding, this override also builds the ChatGLM-specific
        `attention_mask` (causal over the generated part, full over the
        context) and 2D `position_ids` when they are absent.

        Args:
            encoded_inputs:
                Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
            max_length: maximum length of the returned list and optionally padding length (see below).
                Will truncate by taking into account the special tokens.
            padding_strategy: PaddingStrategy to use for padding.

                - PaddingStrategy.LONGEST Pad to the longest sequence in the batch
                - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
                - PaddingStrategy.DO_NOT_PAD: Do not pad
                The tokenizer padding sides are defined in self.padding_side:

                - 'left': pads on the left of the sequences
                - 'right': pads on the right of the sequences
            pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
                This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
                `>= 7.5` (Volta).
            return_attention_mask:
                (optional) Set to False to avoid returning attention mask (default: set to model specifics)
        """
        # Load from model defaults
        bos_token_id = self.sp_tokenizer[self.bos_token]
        mask_token_id = self.sp_tokenizer[self.mask_token]
        gmask_token_id = self.sp_tokenizer[self.gmask_token]
        # ChatGLM only supports left padding (see the np.pad calls below).
        assert self.padding_side == "left"

        required_input = encoded_inputs[self.model_input_names[0]]
        seq_length = len(required_input)

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        # Round max_length up to the next multiple of pad_to_multiple_of.
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length

        # Initialize attention mask if not present.
        if max_length is not None:
            if "attention_mask" not in encoded_inputs:
                # Everything before the BOS token is "context" and is fully
                # visible; the rest gets a causal (lower-triangular) mask.
                if bos_token_id in required_input:
                    context_length = required_input.index(bos_token_id)
                else:
                    context_length = seq_length
                attention_mask = np.ones((1, seq_length, seq_length))
                attention_mask = np.tril(attention_mask)
                attention_mask[:, :, :context_length] = 1
                # Convention: True marks positions that are masked OUT.
                attention_mask = np.bool_(attention_mask < 0.5)
                encoded_inputs["attention_mask"] = attention_mask

            if "position_ids" not in encoded_inputs:
                if bos_token_id in required_input:
                    context_length = required_input.index(bos_token_id)
                else:
                    context_length = seq_length
                position_ids = np.arange(seq_length, dtype=np.int64)
                # Prefer [MASK] over [gMASK] when both could apply.
                mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id
                if mask_token in required_input:
                    # All generated positions share the mask token's position
                    # (2D positional encoding, first dimension).
                    mask_position = required_input.index(mask_token)
                    position_ids[context_length:] = mask_position
                # Second dimension: 0 over the context, then 1..n over the
                # generated span.
                block_position_ids = np.concatenate(
                    [np.zeros(context_length, dtype=np.int64),
                     np.arange(1, seq_length - context_length + 1, dtype=np.int64)])
                encoded_inputs["position_ids"] = np.stack([position_ids, block_position_ids], axis=0)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            # Left-pad every produced field by `difference` positions.
            if "attention_mask" in encoded_inputs:
                encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"],
                                                          pad_width=[(0, 0), (difference, 0), (difference, 0)],
                                                          mode='constant', constant_values=True)
            if "token_type_ids" in encoded_inputs:
                encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
                    "token_type_ids"
                ]
            if "special_tokens_mask" in encoded_inputs:
                encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
            if "position_ids" in encoded_inputs:
                encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"],
                                                        pad_width=[(0, 0), (difference, 0)])
            encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input

        return encoded_inputs
0000000000000000000000000000000000000000..cf83e3820eb1f0292e9f8b75663fba20ab300a8e --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/trainer_state.json @@ -0,0 +1,46 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.0069808637073621935, + "global_step": 50, + "is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0, + "learning_rate": 0.018000000000000002, + "loss": 5.1828, + "step": 10 + }, + { + "epoch": 0.0, + "learning_rate": 0.016, + "loss": 4.5979, + "step": 20 + }, + { + "epoch": 0.0, + "learning_rate": 0.013999999999999999, + "loss": 4.5608, + "step": 30 + }, + { + "epoch": 0.01, + "learning_rate": 0.012, + "loss": 4.375, + "step": 40 + }, + { + "epoch": 0.01, + "learning_rate": 0.01, + "loss": 4.4015, + "step": 50 + } + ], + "max_steps": 100, + "num_train_epochs": 1, + "total_flos": 1733286061670400.0, + "trial_name": null, + "trial_params": null +} diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/training_args.bin b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/training_args.bin new file mode 100644 index 0000000000000000000000000000000000000000..09a2bca30693fb288fb455aee1deca55c7e989e5 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/training_args.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69a6c8c7bd73e62303e866b1cd895083f806eb4826833c1bcdd3e9c460c635e1 +size 3707 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19/1682066185.300249/events.out.tfevents.1682066185.10-254-135-19 b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19/1682066185.300249/events.out.tfevents.1682066185.10-254-135-19 new file mode 100644 index 0000000000000000000000000000000000000000..635dcb2eb0f665633a9b48addfee655e1be4b17d --- /dev/null +++ 
b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19/1682066185.300249/events.out.tfevents.1682066185.10-254-135-19 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a0b32e76e264574c9496e912e7c0d162baa8b805393582180e438b1d2908268 +size 5806 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19/events.out.tfevents.1682066185.10-254-135-19 b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19/events.out.tfevents.1682066185.10-254-135-19 new file mode 100644 index 0000000000000000000000000000000000000000..ab60e93db7e3da7ee83ebc71afe77f24ad778800 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19/events.out.tfevents.1682066185.10-254-135-19 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d233106b889f9e10595ea0a1d6338837807a70e308624de9242b0635711b1058 +size 6214 diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/train_results.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/train_results.json new file mode 100644 index 0000000000000000000000000000000000000000..ff385825a821d968d0264e6f03e2c2f9388f236a --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/train_results.json @@ -0,0 +1,8 @@ +{ + "epoch": 0.01, + "train_loss": 4.456654052734375, + "train_runtime": 395.1154, + "train_samples": 114599, + "train_samples_per_second": 4.049, + "train_steps_per_second": 0.253 +} \ No newline at end of file diff --git a/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/trainer_state.json b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/trainer_state.json new file mode 100644 index 0000000000000000000000000000000000000000..2d1325ef4857896603e101e5f2894107507efb58 --- /dev/null +++ b/ptuning/output/adgen-chatglm-6b-pt-128-2e-2/trainer_state.json @@ -0,0 +1,85 @@ +{ + "best_metric": null, + "best_model_checkpoint": null, + "epoch": 0.013961727414724387, + "global_step": 100, + 
"is_hyper_param_search": false, + "is_local_process_zero": true, + "is_world_process_zero": true, + "log_history": [ + { + "epoch": 0.0, + "learning_rate": 0.018000000000000002, + "loss": 5.1828, + "step": 10 + }, + { + "epoch": 0.0, + "learning_rate": 0.016, + "loss": 4.5979, + "step": 20 + }, + { + "epoch": 0.0, + "learning_rate": 0.013999999999999999, + "loss": 4.5608, + "step": 30 + }, + { + "epoch": 0.01, + "learning_rate": 0.012, + "loss": 4.375, + "step": 40 + }, + { + "epoch": 0.01, + "learning_rate": 0.01, + "loss": 4.4015, + "step": 50 + }, + { + "epoch": 0.01, + "learning_rate": 0.008, + "loss": 4.2929, + "step": 60 + }, + { + "epoch": 0.01, + "learning_rate": 0.006, + "loss": 4.3323, + "step": 70 + }, + { + "epoch": 0.01, + "learning_rate": 0.004, + "loss": 4.2657, + "step": 80 + }, + { + "epoch": 0.01, + "learning_rate": 0.002, + "loss": 4.2733, + "step": 90 + }, + { + "epoch": 0.01, + "learning_rate": 0.0, + "loss": 4.2842, + "step": 100 + }, + { + "epoch": 0.01, + "step": 100, + "total_flos": 3466572123340800.0, + "train_loss": 4.456654052734375, + "train_runtime": 395.1154, + "train_samples_per_second": 4.049, + "train_steps_per_second": 0.253 + } + ], + "max_steps": 100, + "num_train_epochs": 1, + "total_flos": 3466572123340800.0, + "trial_name": null, + "trial_params": null +} diff --git a/ptuning/train.sh b/ptuning/train.sh new file mode 100644 index 0000000000000000000000000000000000000000..f99b7f6365f55213bc86324de86455906205f9dd --- /dev/null +++ b/ptuning/train.sh @@ -0,0 +1,26 @@ +PRE_SEQ_LEN=128 +LR=2e-2 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_train \ + --train_file AdvertiseGen/train.json \ + --validation_file AdvertiseGen/dev.json \ + --prompt_column content \ + --response_column summary \ + --overwrite_cache \ + --model_name_or_path /home/wangyan/project/hft/uptest \ + --output_dir output/adgen-chatglm-6b-pt-$PRE_SEQ_LEN-$LR \ + --overwrite_output_dir \ + --max_source_length 64 \ + --max_target_length 64 \ + 
--per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 16 \ + --predict_with_generate \ + --max_steps 100 \ + --logging_steps 10 \ + --save_steps 50 \ + --learning_rate $LR \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 + diff --git a/ptuning/train_chat.sh b/ptuning/train_chat.sh new file mode 100644 index 0000000000000000000000000000000000000000..b0f5cdc241ef94f039c93df483ee76cb8668ce2a --- /dev/null +++ b/ptuning/train_chat.sh @@ -0,0 +1,27 @@ +PRE_SEQ_LEN=8 +LR=1e-2 + +CUDA_VISIBLE_DEVICES=0 python3 main.py \ + --do_train \ + --train_file $CHAT_TRAIN_DATA \ + --validation_file $CHAT_VAL_DATA \ + --prompt_column prompt \ + --response_column response \ + --history_column history \ + --overwrite_cache \ + --model_name_or_path THUDM/chatglm-6b \ + --output_dir $CHECKPOINT_NAME \ + --overwrite_output_dir \ + --max_source_length 256 \ + --max_target_length 256 \ + --per_device_train_batch_size 1 \ + --per_device_eval_batch_size 1 \ + --gradient_accumulation_steps 16 \ + --predict_with_generate \ + --max_steps 3000 \ + --logging_steps 10 \ + --save_steps 1000 \ + --learning_rate $LR \ + --pre_seq_len $PRE_SEQ_LEN \ + --quantization_bit 4 + diff --git a/ptuning/trainer.py b/ptuning/trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..63101bc9d3dfb65ff5a444c7c151b8d4d241f2c9 --- /dev/null +++ b/ptuning/trainer.py @@ -0,0 +1,3830 @@ +# coding=utf-8 +# Copyright 2020-present the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +""" +The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. +""" + +import contextlib +import functools +import glob +import inspect +import math +import os +import random +import re +import shutil +import sys +import time +import warnings +from collections.abc import Mapping +from distutils.util import strtobool +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +from tqdm.auto import tqdm + + +# Integrations must be imported before ML frameworks: +# isort: off +from transformers.integrations import ( + default_hp_search_backend, + get_reporting_integration_callbacks, + hp_params, + is_fairscale_available, + is_optuna_available, + is_ray_tune_available, + is_sigopt_available, + is_wandb_available, + run_hp_search_optuna, + run_hp_search_ray, + run_hp_search_sigopt, + run_hp_search_wandb, +) + +# isort: on + +import numpy as np +import torch +import torch.distributed as dist +from huggingface_hub import Repository, create_repo +from packaging import version +from torch import nn +from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler +from torch.utils.data.distributed import DistributedSampler + +from transformers import __version__ +from transformers.configuration_utils import PretrainedConfig +from transformers.data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator +from transformers.debug_utils import DebugOption, DebugUnderflowOverflow +from transformers.deepspeed import deepspeed_init, is_deepspeed_zero3_enabled +from transformers.dependency_versions_check import dep_version_check +from transformers.modelcard import TrainingSummary +from transformers.modeling_utils import PreTrainedModel, load_sharded_checkpoint, unwrap_model +from transformers.models.auto.modeling_auto import 
MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_MAPPING_NAMES +from transformers.optimization import Adafactor, get_scheduler +from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS, is_torch_greater_or_equal_than_1_10, is_torch_less_than_1_11 +from transformers.tokenization_utils_base import PreTrainedTokenizerBase +from transformers.trainer_callback import ( + CallbackHandler, + DefaultFlowCallback, + PrinterCallback, + ProgressCallback, + TrainerCallback, + TrainerControl, + TrainerState, +) +from transformers.trainer_pt_utils import ( + DistributedLengthGroupedSampler, + DistributedSamplerWithLoop, + DistributedTensorGatherer, + IterableDatasetShard, + LabelSmoother, + LengthGroupedSampler, + SequentialDistributedSampler, + ShardSampler, + distributed_broadcast_scalars, + distributed_concat, + find_batch_size, + get_module_class_from_name, + get_parameter_names, + nested_concat, + nested_detach, + nested_numpify, + nested_truncate, + nested_xla_mesh_reduce, + reissue_pt_warnings, +) +from transformers.trainer_utils import ( + PREFIX_CHECKPOINT_DIR, + BestRun, + EvalLoopOutput, + EvalPrediction, + FSDPOption, + HPSearchBackend, + HubStrategy, + IntervalStrategy, + PredictionOutput, + RemoveColumnsCollator, + ShardedDDPOption, + TrainerMemoryTracker, + TrainOutput, + default_compute_objective, + default_hp_space, + denumpify_detensorize, + enable_full_determinism, + find_executable_batch_size, + get_last_checkpoint, + has_length, + number_of_arguments, + seed_worker, + set_seed, + speed_metrics, +) +from transformers.training_args import OptimizerNames, ParallelMode, TrainingArguments +from transformers.utils import ( + CONFIG_NAME, + WEIGHTS_INDEX_NAME, + WEIGHTS_NAME, + can_return_loss, + find_labels, + get_full_repo_name, + is_accelerate_available, + is_apex_available, + is_datasets_available, + is_in_notebook, + is_ipex_available, + is_sagemaker_dp_enabled, + is_sagemaker_mp_enabled, + is_torch_compile_available, + is_torch_neuroncore_available, + 
is_torch_tpu_available, + logging, +) +from transformers.utils.generic import ContextManagers + + +_is_native_cpu_amp_available = is_torch_greater_or_equal_than_1_10 + +DEFAULT_CALLBACKS = [DefaultFlowCallback] +DEFAULT_PROGRESS_CALLBACK = ProgressCallback + +if is_in_notebook(): + from transformers.utils.notebook import NotebookProgressCallback + + DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback + +if is_apex_available(): + from apex import amp + +if is_datasets_available(): + import datasets + +if is_torch_tpu_available(check_device=False): + import torch_xla.core.xla_model as xm + import torch_xla.debug.metrics as met + import torch_xla.distributed.parallel_loader as pl + +if is_fairscale_available(): + dep_version_check("fairscale") + import fairscale + from fairscale.nn.data_parallel import FullyShardedDataParallel as FullyShardedDDP + from fairscale.nn.data_parallel import ShardedDataParallel as ShardedDDP + from fairscale.nn.wrap import auto_wrap + from fairscale.optim import OSS + from fairscale.optim.grad_scaler import ShardedGradScaler + + +if is_sagemaker_mp_enabled(): + import smdistributed.modelparallel.torch as smp + from smdistributed.modelparallel import __version__ as SMP_VERSION + + IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10") + + from transformers.trainer_pt_utils import smp_forward_backward, smp_forward_only, smp_gather, smp_nested_concat +else: + IS_SAGEMAKER_MP_POST_1_10 = False + + +skip_first_batches = None +if is_accelerate_available(): + from accelerate import __version__ as accelerate_version + + if version.parse(accelerate_version) >= version.parse("0.16"): + from accelerate import skip_first_batches + + +if TYPE_CHECKING: + import optuna + +logger = logging.get_logger(__name__) + + +# Name of the files used for checkpointing +TRAINING_ARGS_NAME = "training_args.bin" +TRAINER_STATE_NAME = "trainer_state.json" +OPTIMIZER_NAME = "optimizer.pt" +SCHEDULER_NAME = "scheduler.pt" +SCALER_NAME = 
"scaler.pt" + + +class Trainer: + """ + Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. + + Args: + model ([`PreTrainedModel`] or `torch.nn.Module`, *optional*): + The model to train, evaluate or use for predictions. If not provided, a `model_init` must be passed. + + + + [`Trainer`] is optimized to work with the [`PreTrainedModel`] provided by the library. You can still use + your own models defined as `torch.nn.Module` as long as they work the same way as the 🤗 Transformers + models. + + + + args ([`TrainingArguments`], *optional*): + The arguments to tweak for training. Will default to a basic instance of [`TrainingArguments`] with the + `output_dir` set to a directory named *tmp_trainer* in the current directory if not provided. + data_collator (`DataCollator`, *optional*): + The function to use to form a batch from a list of elements of `train_dataset` or `eval_dataset`. Will + default to [`default_data_collator`] if no `tokenizer` is provided, an instance of + [`DataCollatorWithPadding`] otherwise. + train_dataset (`torch.utils.data.Dataset` or `torch.utils.data.IterableDataset`, *optional*): + The dataset to use for training. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. + + Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a + distributed fashion, your iterable dataset should either use a internal attribute `generator` that is a + `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will + manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally + sets the seed of the RNGs used. + eval_dataset (Union[`torch.utils.data.Dataset`, Dict[str, `torch.utils.data.Dataset`]), *optional*): + The dataset to use for evaluation. 
If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. If it is a dictionary, it will evaluate on each + dataset prepending the dictionary key to the metric name. + tokenizer ([`PreTrainedTokenizerBase`], *optional*): + The tokenizer used to preprocess the data. If provided, will be used to automatically pad the inputs to the + maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an + interrupted training or reuse the fine-tuned model. + model_init (`Callable[[], PreTrainedModel]`, *optional*): + A function that instantiates the model to be used. If provided, each call to [`~Trainer.train`] will start + from a new instance of the model as given by this function. + + The function may have zero argument, or a single one containing the optuna/Ray Tune/SigOpt trial object, to + be able to choose different architectures according to hyper parameters (such as layer count, sizes of + inner layers, dropout probabilities etc). + compute_metrics (`Callable[[EvalPrediction], Dict]`, *optional*): + The function that will be used to compute metrics at evaluation. Must take a [`EvalPrediction`] and return + a dictionary string to metric values. + callbacks (List of [`TrainerCallback`], *optional*): + A list of callbacks to customize the training loop. Will add those to the list of default callbacks + detailed in [here](callback). + + If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method. + optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*): A tuple + containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on your model + and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`. 
+ preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`, *optional*): + A function that preprocess the logits right before caching them at each evaluation step. Must take two + tensors, the logits and the labels, and return the logits once processed as desired. The modifications made + by this function will be reflected in the predictions received by `compute_metrics`. + + Note that the labels (second parameter) will be `None` if the dataset does not have them. + + Important attributes: + + - **model** -- Always points to the core model. If using a transformers model, it will be a [`PreTrainedModel`] + subclass. + - **model_wrapped** -- Always points to the most external model in case one or more other modules wrap the + original model. This is the model that should be used for the forward pass. For example, under `DeepSpeed`, + the inner model is wrapped in `DeepSpeed` and then again in `torch.nn.DistributedDataParallel`. If the inner + model hasn't been wrapped, then `self.model_wrapped` is the same as `self.model`. + - **is_model_parallel** -- Whether or not a model has been switched to a model parallel mode (different from + data parallelism, this means some of the model layers are split on different GPUs). + - **place_model_on_device** -- Whether or not to automatically place the model on the device - it will be set + to `False` if model parallel or deepspeed is used, or if the default + `TrainingArguments.place_model_on_device` is overridden to return `False` . + - **is_in_train** -- Whether or not a model is currently running `train` (e.g. 
when `evaluate` is called while + in `train`) + + """ + + from transformers.trainer_pt_utils import _get_learning_rate, log_metrics, metrics_format, save_metrics, save_state + + def __init__( + self, + model: Union[PreTrainedModel, nn.Module] = None, + args: TrainingArguments = None, + data_collator: Optional[DataCollator] = None, + train_dataset: Optional[Dataset] = None, + eval_dataset: Optional[Union[Dataset, Dict[str, Dataset]]] = None, + tokenizer: Optional[PreTrainedTokenizerBase] = None, + model_init: Optional[Callable[[], PreTrainedModel]] = None, + compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, + callbacks: Optional[List[TrainerCallback]] = None, + optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), + preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None, + save_prefixencoder: bool = False, + ): + self.save_prefixencoder = save_prefixencoder + if args is None: + output_dir = "tmp_trainer" + logger.info(f"No `TrainingArguments` passed, using `output_dir={output_dir}`.") + args = TrainingArguments(output_dir=output_dir) + self.args = args + # Seed must be set before instantiating the model when using model + enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed) + self.hp_name = None + self.deepspeed = None + self.is_in_train = False + + # memory metrics - must set up as early as possible + self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics) + self._memory_tracker.start() + + # set the correct log level depending on the node + log_level = args.get_process_log_level() + logging.set_verbosity(log_level) + + # force device and distributed setup init explicitly + args._setup_devices + + if model is None: + if model_init is not None: + self.model_init = model_init + model = self.call_model_init() + else: + raise RuntimeError("`Trainer` requires either a `model` or `model_init` argument") + 
else: + if model_init is not None: + warnings.warn( + "`Trainer` requires either a `model` or `model_init` argument, but not both. `model_init` will" + " overwrite your model when calling the `train` method. This will become a fatal error in the next" + " release.", + FutureWarning, + ) + self.model_init = model_init + + if model.__class__.__name__ in MODEL_MAPPING_NAMES: + raise ValueError( + f"The model you have picked ({model.__class__.__name__}) cannot be used as is for training: it only " + "computes hidden states and does not accept any labels. You should choose a model with a head " + "suitable for your task like any of the `AutoModelForXxx` listed at " + "https://huggingface.co/docs/transformers/model_doc/auto." + ) + + if hasattr(model, "is_parallelizable") and model.is_parallelizable and model.model_parallel: + self.is_model_parallel = True + else: + self.is_model_parallel = False + + # At this stage the model is already loaded + if getattr(model, "is_loaded_in_8bit", False): + if getattr(model, "_is_int8_training_enabled", False): + logger.info( + "The model is loaded in 8-bit precision. To train this model you need to add additional modules" + " inside the model such as adapters using `peft` library and freeze the model weights. Please" + " check " + " the examples in https://github.com/huggingface/peft for more details." + ) + else: + raise ValueError( + "The model you want to train is loaded in 8-bit precision. if you want to fine-tune an 8-bit" + " model, please make sure that you have installed `bitsandbytes>=0.37.0`. " + ) + + # Setup Sharded DDP training + self.sharded_ddp = None + if len(args.sharded_ddp) > 0: + if args.deepspeed: + raise ValueError( + "Using --sharded_ddp xxx together with --deepspeed is not possible, deactivate one of those flags." + ) + if len(args.fsdp) > 0: + raise ValueError( + "Using --sharded_ddp xxx together with --fsdp is not possible, deactivate one of those flags." 
+ ) + + if args.local_rank == -1: + raise ValueError("Using sharded DDP only works in distributed training.") + elif not is_fairscale_available(): + raise ImportError("Sharded DDP training requires fairscale: `pip install fairscale`.") + elif ShardedDDPOption.SIMPLE not in args.sharded_ddp and FullyShardedDDP is None: + raise ImportError( + "Sharded DDP in a mode other than simple training requires fairscale version >= 0.3, found " + f"{fairscale.__version__}. Upgrade your fairscale library: `pip install --upgrade fairscale`." + ) + elif ShardedDDPOption.SIMPLE in args.sharded_ddp: + self.sharded_ddp = ShardedDDPOption.SIMPLE + elif ShardedDDPOption.ZERO_DP_2 in args.sharded_ddp: + self.sharded_ddp = ShardedDDPOption.ZERO_DP_2 + elif ShardedDDPOption.ZERO_DP_3 in args.sharded_ddp: + self.sharded_ddp = ShardedDDPOption.ZERO_DP_3 + + self.fsdp = None + if len(args.fsdp) > 0: + if args.deepspeed: + raise ValueError( + "Using --fsdp xxx together with --deepspeed is not possible, deactivate one of those flags." + ) + if not args.fsdp_config["xla"] and args.local_rank == -1: + raise ValueError("Using fsdp only works in distributed training.") + + # dep_version_check("torch>=1.12.0") + # Would have to update setup.py with torch>=1.12.0 + # which isn't ideally given that it will force people not using FSDP to also use torch>=1.12.0 + # below is the current alternative. 
+ if version.parse(version.parse(torch.__version__).base_version) < version.parse("1.12.0"): + raise ValueError("FSDP requires PyTorch >= 1.12.0") + + from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch, ShardingStrategy + + if FSDPOption.FULL_SHARD in args.fsdp: + self.fsdp = ShardingStrategy.FULL_SHARD + elif FSDPOption.SHARD_GRAD_OP in args.fsdp: + self.fsdp = ShardingStrategy.SHARD_GRAD_OP + elif FSDPOption.NO_SHARD in args.fsdp: + self.fsdp = ShardingStrategy.NO_SHARD + + self.backward_prefetch = BackwardPrefetch.BACKWARD_PRE + if "backward_prefetch" in self.args.fsdp_config and "backward_pos" not in self.backward_prefetch: + self.backward_prefetch = BackwardPrefetch.BACKWARD_POST + + self.forword_prefetch = False + if self.args.fsdp_config.get("forword_prefect", False): + self.forword_prefetch = True + + self.limit_all_gathers = False + if self.args.fsdp_config.get("limit_all_gathers", False): + self.limit_all_gathers = True + + # one place to sort out whether to place the model on device or not + # postpone switching model to cuda when: + # 1. MP - since we are trying to fit a much bigger than 1 gpu model + # 2. fp16-enabled DeepSpeed loads the model in half the size and it doesn't need .to() anyway, + # and we only use deepspeed for training at the moment + # 3. full bf16 or fp16 eval - since the model needs to be cast to the right dtype first + # 4. Sharded DDP - same as MP + # 5. 
FSDP - same as MP + self.place_model_on_device = args.place_model_on_device + if ( + self.is_model_parallel + or args.deepspeed + or ((args.fp16_full_eval or args.bf16_full_eval) and not args.do_train) + or (self.sharded_ddp in [ShardedDDPOption.ZERO_DP_2, ShardedDDPOption.ZERO_DP_3]) + or (self.fsdp is not None) + ): + self.place_model_on_device = False + + default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer) + self.data_collator = data_collator if data_collator is not None else default_collator + self.train_dataset = train_dataset + self.eval_dataset = eval_dataset + self.tokenizer = tokenizer + + if self.place_model_on_device and not getattr(model, "is_loaded_in_8bit", False): + self._move_model_to_device(model, args.device) + + # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs + if self.is_model_parallel: + self.args._n_gpu = 1 + + # later use `self.model is self.model_wrapped` to check if it's wrapped or not + self.model_wrapped = model + self.model = model + + self.compute_metrics = compute_metrics + self.preprocess_logits_for_metrics = preprocess_logits_for_metrics + self.optimizer, self.lr_scheduler = optimizers + if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): + raise RuntimeError( + "Passing a `model_init` is incompatible with providing the `optimizers` argument. " + "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." 
+ ) + if is_torch_tpu_available() and self.optimizer is not None: + for param in self.model.parameters(): + model_device = param.device + break + for param_group in self.optimizer.param_groups: + if len(param_group["params"]) > 0: + optimizer_device = param_group["params"][0].device + break + if model_device != optimizer_device: + raise ValueError( + "The model and the optimizer parameters are not on the same device, which probably means you" + " created an optimizer around your model **before** putting on the device and passing it to the" + " `Trainer`. Make sure the lines `import torch_xla.core.xla_model as xm` and" + " `model.to(xm.xla_device())` is performed before the optimizer creation in your script." + ) + if ((self.sharded_ddp is not None) or args.deepspeed or (self.fsdp is not None)) and ( + self.optimizer is not None or self.lr_scheduler is not None + ): + raise RuntimeError( + "Passing `optimizers` is not allowed if Fairscale, Deepspeed or PyTorch FSDP is enabled." + "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." + ) + default_callbacks = DEFAULT_CALLBACKS + get_reporting_integration_callbacks(self.args.report_to) + callbacks = default_callbacks if callbacks is None else default_callbacks + callbacks + self.callback_handler = CallbackHandler( + callbacks, self.model, self.tokenizer, self.optimizer, self.lr_scheduler + ) + self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) + + # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. + self._loggers_initialized = False + + # Create clone of distant repo and output directory if needed + if self.args.push_to_hub: + self.init_git_repo(at_init=True) + # In case of pull, we need to make sure every process has the latest. 
+ if is_torch_tpu_available(): + xm.rendezvous("init git repo") + elif args.local_rank != -1: + dist.barrier() + + if self.args.should_save: + os.makedirs(self.args.output_dir, exist_ok=True) + + if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): + raise ValueError("The `data_collator` should be a simple callable (function, class with `__call__`).") + + if args.max_steps > 0: + logger.info("max_steps is given, it will override any value given in num_train_epochs") + + if train_dataset is not None and not has_length(train_dataset) and args.max_steps <= 0: + raise ValueError("train_dataset does not implement __len__, max_steps has to be specified") + + if ( + train_dataset is not None + and isinstance(train_dataset, torch.utils.data.IterableDataset) + and args.group_by_length + ): + raise ValueError("the `--group_by_length` option is only available for `Dataset`, not `IterableDataset") + + self._signature_columns = None + + # Mixed precision setup + self.use_apex = False + self.use_cuda_amp = False + self.use_cpu_amp = False + + # Mixed precision setup for SageMaker Model Parallel + if is_sagemaker_mp_enabled(): + # BF16 + model parallelism in SageMaker: currently not supported, raise an error + if args.bf16: + raise ValueError("SageMaker Model Parallelism does not support BF16 yet. Please use FP16 instead ") + + if IS_SAGEMAKER_MP_POST_1_10: + # When there's mismatch between SMP config and trainer argument, use SMP config as truth + if args.fp16 != smp.state.cfg.fp16: + logger.warning( + f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}," + f"but FP16 provided in trainer argument is {args.fp16}," + f"setting to {smp.state.cfg.fp16}" + ) + args.fp16 = smp.state.cfg.fp16 + else: + # smp < 1.10 does not support fp16 in trainer. 
+ if hasattr(smp.state.cfg, "fp16"): + logger.warning( + f"FP16 provided in SM_HP_MP_PARAMETERS is {smp.state.cfg.fp16}, " + "but SageMaker Model Parallelism < 1.10 does not support FP16 in trainer." + ) + + if args.fp16 or args.bf16: + if args.half_precision_backend == "auto": + if args.device == torch.device("cpu"): + if args.fp16: + raise ValueError("Tried to use `fp16` but it is not supported on cpu") + elif _is_native_cpu_amp_available: + args.half_precision_backend = "cpu_amp" + else: + raise ValueError("Tried to use cpu amp but native cpu amp is not available") + else: + args.half_precision_backend = "cuda_amp" + + logger.info(f"Using {args.half_precision_backend} half precision backend") + + self.do_grad_scaling = False + if (args.fp16 or args.bf16) and not (args.deepspeed or is_sagemaker_mp_enabled() or is_torch_tpu_available()): + # deepspeed and SageMaker Model Parallel manage their own half precision + if args.half_precision_backend == "cuda_amp": + self.use_cuda_amp = True + self.amp_dtype = torch.float16 if args.fp16 else torch.bfloat16 + # bf16 does not need grad scaling + self.do_grad_scaling = self.amp_dtype == torch.float16 + if self.do_grad_scaling: + if self.sharded_ddp is not None: + self.scaler = ShardedGradScaler() + elif self.fsdp is not None: + from torch.distributed.fsdp.sharded_grad_scaler import ( + ShardedGradScaler as FSDPShardedGradScaler, + ) + + self.scaler = FSDPShardedGradScaler() + elif is_torch_tpu_available(): + from torch_xla.amp import GradScaler + + self.scaler = GradScaler() + else: + self.scaler = torch.cuda.amp.GradScaler() + elif args.half_precision_backend == "cpu_amp": + self.use_cpu_amp = True + self.amp_dtype = torch.bfloat16 + else: + if not is_apex_available(): + raise ImportError( + "Using FP16 with APEX but APEX is not installed, please refer to" + " https://www.github.com/nvidia/apex." 
    def add_callback(self, callback):
        """
        Add a callback to the current list of [`~transformer.TrainerCallback`].

        Args:
            callback (`type` or [`~transformer.TrainerCallback`]):
                A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
                first case, will instantiate a member of that class.
        """
        self.callback_handler.add_callback(callback)

    def pop_callback(self, callback):
        """
        Remove a callback from the current list of [`~transformer.TrainerCallback`] and returns it.

        If the callback is not found, returns `None` (and no error is raised).

        Args:
            callback (`type` or [`~transformer.TrainerCallback`]):
                A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
                first case, will pop the first member of that class found in the list of callbacks.

        Returns:
            [`~transformer.TrainerCallback`]: The callback removed, if found.
        """
        return self.callback_handler.pop_callback(callback)

    def remove_callback(self, callback):
        """
        Remove a callback from the current list of [`~transformer.TrainerCallback`].

        Args:
            callback (`type` or [`~transformer.TrainerCallback`]):
                A [`~transformer.TrainerCallback`] class or an instance of a [`~transformer.TrainerCallback`]. In the
                first case, will remove the first member of that class found in the list of callbacks.
        """
        self.callback_handler.remove_callback(callback)

    def _move_model_to_device(self, model, device):
        """Move `model` to `device`; on TPU, re-tie weights afterwards (XLA transfer unties them)."""
        model = model.to(device)
        # Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
        if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
            model.tie_weights()

    def _set_signature_columns_if_needed(self):
        """Lazily cache the column names `model.forward` accepts (plus label columns) in `self._signature_columns`."""
        if self._signature_columns is None:
            # Inspect model forward signature to keep only the arguments it accepts.
            signature = inspect.signature(self.model.forward)
            self._signature_columns = list(signature.parameters.keys())
            # Labels may be named label or label_ids, the default data collator handles that.
            self._signature_columns += list(set(["label", "label_ids"] + self.label_names))

    def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None):
        """
        Drop dataset columns `model.forward` does not accept.

        No-op when `args.remove_unused_columns` is False. `description` only labels the log message
        (e.g. "training", "evaluation").
        """
        if not self.args.remove_unused_columns:
            return dataset
        self._set_signature_columns_if_needed()
        signature_columns = self._signature_columns

        ignored_columns = list(set(dataset.column_names) - set(signature_columns))
        if len(ignored_columns) > 0:
            dset_description = "" if description is None else f"in the {description} set"
            logger.info(
                f"The following columns {dset_description} don't have a corresponding argument in "
                f"`{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}."
                f" If {', '.join(ignored_columns)} are not expected by `{self.model.__class__.__name__}.forward`, "
                " you can safely ignore this message."
            )

        columns = [k for k in signature_columns if k in dataset.column_names]

        if version.parse(datasets.__version__) < version.parse("1.4.0"):
            # Old `datasets` cannot drop columns; restrict the output format in place instead.
            dataset.set_format(
                type=dataset.format["type"], columns=columns, format_kwargs=dataset.format["format_kwargs"]
            )
            return dataset
        else:
            return dataset.remove_columns(ignored_columns)

    def _get_collator_with_removed_columns(
        self, data_collator: Callable, description: Optional[str] = None
    ) -> Callable:
        """Wrap the data collator in a callable removing unused columns."""
        if not self.args.remove_unused_columns:
            return data_collator
        self._set_signature_columns_if_needed()
        signature_columns = self._signature_columns

        remove_columns_collator = RemoveColumnsCollator(
            data_collator=data_collator,
            signature_columns=signature_columns,
            logger=logger,
            description=description,
            model_name=self.model.__class__.__name__,
        )
        return remove_columns_collator
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        """
        Build the sampler for the training dataloader: length-grouped and/or distributed depending on
        `args.group_by_length` and `args.world_size`. Returns `None` for sized-less datasets.
        """
        if self.train_dataset is None or not has_length(self.train_dataset):
            return None

        generator = None
        if self.args.world_size <= 1:
            generator = torch.Generator()
            # for backwards compatibility, we generate a seed here (which is sampled from a generator seeded with
            # `args.seed`) if data_seed isn't provided.
            # Further on in this method, we default to `args.seed` instead.
            if self.args.data_seed is None:
                seed = int(torch.empty((), dtype=torch.int64).random_().item())
            else:
                seed = self.args.data_seed
            generator.manual_seed(seed)

        # Distributed samplers below take an integer seed (not the generator used in the single-process case).
        seed = self.args.data_seed if self.args.data_seed is not None else self.args.seed

        # Build the sampler.
        if self.args.group_by_length:
            # Length-grouped sampling needs per-example lengths; read them from the dataset column when available.
            if is_datasets_available() and isinstance(self.train_dataset, datasets.Dataset):
                lengths = (
                    self.train_dataset[self.args.length_column_name]
                    if self.args.length_column_name in self.train_dataset.column_names
                    else None
                )
            else:
                lengths = None
            model_input_name = self.tokenizer.model_input_names[0] if self.tokenizer is not None else None
            if self.args.world_size <= 1:
                return LengthGroupedSampler(
                    self.args.train_batch_size * self.args.gradient_accumulation_steps,
                    dataset=self.train_dataset,
                    lengths=lengths,
                    model_input_name=model_input_name,
                    generator=generator,
                )
            else:
                return DistributedLengthGroupedSampler(
                    self.args.train_batch_size * self.args.gradient_accumulation_steps,
                    dataset=self.train_dataset,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    lengths=lengths,
                    model_input_name=model_input_name,
                    seed=seed,
                )

        else:
            if self.args.world_size <= 1:
                return RandomSampler(self.train_dataset, generator=generator)
            elif (
                self.args.parallel_mode in [ParallelMode.TPU, ParallelMode.SAGEMAKER_MODEL_PARALLEL]
                and not self.args.dataloader_drop_last
            ):
                # Use a loop for TPUs when drop_last is False to have all batches have the same size.
                return DistributedSamplerWithLoop(
                    self.train_dataset,
                    batch_size=self.args.per_device_train_batch_size,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    seed=seed,
                )
            else:
                return DistributedSampler(
                    self.train_dataset,
                    num_replicas=self.args.world_size,
                    rank=self.args.process_index,
                    seed=seed,
                )

    def get_train_dataloader(self) -> DataLoader:
        """
        Returns the training [`~torch.utils.data.DataLoader`].

        Will use no sampler if `train_dataset` does not implement `__len__`, a random sampler (adapted to distributed
        training if necessary) otherwise.

        Subclass and override this method if you want to inject some custom behavior.
        """
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")

        train_dataset = self.train_dataset
        data_collator = self.data_collator
        # Column pruning happens on the dataset when it is a `datasets.Dataset`, otherwise on the collator.
        if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
            train_dataset = self._remove_unused_columns(train_dataset, description="training")
        else:
            data_collator = self._get_collator_with_removed_columns(data_collator, description="training")

        if isinstance(train_dataset, torch.utils.data.IterableDataset):
            # Iterable datasets cannot take a sampler; shard them manually across processes instead.
            if self.args.world_size > 1:
                train_dataset = IterableDatasetShard(
                    train_dataset,
                    batch_size=self._train_batch_size,
                    drop_last=self.args.dataloader_drop_last,
                    num_processes=self.args.world_size,
                    process_index=self.args.process_index,
                )

            return DataLoader(
                train_dataset,
                batch_size=self._train_batch_size,
                collate_fn=data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )

        train_sampler = self._get_train_sampler()

        return DataLoader(
            train_dataset,
            batch_size=self._train_batch_size,
            sampler=train_sampler,
            collate_fn=data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
            worker_init_fn=seed_worker,
        )

    def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.Sampler]:
        """Build the (sequential) sampler used for evaluation/prediction dataloaders."""
        # Deprecated code
        if self.args.use_legacy_prediction_loop:
            if is_torch_tpu_available():
                return SequentialDistributedSampler(
                    eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()
                )
            elif is_sagemaker_mp_enabled():
                return SequentialDistributedSampler(
                    eval_dataset,
                    num_replicas=smp.dp_size(),
                    rank=smp.dp_rank(),
                    batch_size=self.args.per_device_eval_batch_size,
                )
            elif self.args.local_rank != -1:
                return SequentialDistributedSampler(eval_dataset)
            else:
                return SequentialSampler(eval_dataset)

        if self.args.world_size <= 1:
            return SequentialSampler(eval_dataset)
        else:
            return ShardSampler(
                eval_dataset,
                batch_size=self.args.per_device_eval_batch_size,
                num_processes=self.args.world_size,
                process_index=self.args.process_index,
            )
    def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader:
        """
        Returns the evaluation [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            eval_dataset (`torch.utils.data.Dataset`, *optional*):
                If provided, will override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns not accepted
                by the `model.forward()` method are automatically removed. It must implement `__len__`.
        """
        if eval_dataset is None and self.eval_dataset is None:
            raise ValueError("Trainer: evaluation requires an eval_dataset.")
        eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
        data_collator = self.data_collator

        # Column pruning happens on the dataset when it is a `datasets.Dataset`, otherwise on the collator.
        if is_datasets_available() and isinstance(eval_dataset, datasets.Dataset):
            eval_dataset = self._remove_unused_columns(eval_dataset, description="evaluation")
        else:
            data_collator = self._get_collator_with_removed_columns(data_collator, description="evaluation")

        if isinstance(eval_dataset, torch.utils.data.IterableDataset):
            # Iterable datasets cannot take a sampler; shard them manually across processes instead.
            if self.args.world_size > 1:
                eval_dataset = IterableDatasetShard(
                    eval_dataset,
                    batch_size=self.args.per_device_eval_batch_size,
                    drop_last=self.args.dataloader_drop_last,
                    num_processes=self.args.world_size,
                    process_index=self.args.process_index,
                )
            return DataLoader(
                eval_dataset,
                batch_size=self.args.eval_batch_size,
                collate_fn=data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )

        eval_sampler = self._get_eval_sampler(eval_dataset)

        return DataLoader(
            eval_dataset,
            sampler=eval_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader:
        """
        Returns the test [`~torch.utils.data.DataLoader`].

        Subclass and override this method if you want to inject some custom behavior.

        Args:
            test_dataset (`torch.utils.data.Dataset`, *optional*):
                The test dataset to use. If it is a [`~datasets.Dataset`], columns not accepted by the
                `model.forward()` method are automatically removed. It must implement `__len__`.
        """
        data_collator = self.data_collator

        # Column pruning happens on the dataset when it is a `datasets.Dataset`, otherwise on the collator.
        if is_datasets_available() and isinstance(test_dataset, datasets.Dataset):
            test_dataset = self._remove_unused_columns(test_dataset, description="test")
        else:
            data_collator = self._get_collator_with_removed_columns(data_collator, description="test")

        if isinstance(test_dataset, torch.utils.data.IterableDataset):
            # Iterable datasets cannot take a sampler; shard them manually across processes instead.
            if self.args.world_size > 1:
                test_dataset = IterableDatasetShard(
                    test_dataset,
                    batch_size=self.args.eval_batch_size,
                    drop_last=self.args.dataloader_drop_last,
                    num_processes=self.args.world_size,
                    process_index=self.args.process_index,
                )
            return DataLoader(
                test_dataset,
                batch_size=self.args.eval_batch_size,
                collate_fn=data_collator,
                num_workers=self.args.dataloader_num_workers,
                pin_memory=self.args.dataloader_pin_memory,
            )

        test_sampler = self._get_eval_sampler(test_dataset)

        # We use the same batch_size as for eval.
        return DataLoader(
            test_dataset,
            sampler=test_sampler,
            batch_size=self.args.eval_batch_size,
            collate_fn=data_collator,
            drop_last=self.args.dataloader_drop_last,
            num_workers=self.args.dataloader_num_workers,
            pin_memory=self.args.dataloader_pin_memory,
        )

    def create_optimizer_and_scheduler(self, num_training_steps: int):
        """
        Setup the optimizer and the learning rate scheduler.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method (or `create_optimizer` and/or
        `create_scheduler`) in a subclass.
        """
        self.create_optimizer()
        if IS_SAGEMAKER_MP_POST_1_10 and smp.state.cfg.fp16:
            # If smp >= 1.10 and fp16 is enabled, we unwrap the optimizer
            optimizer = self.optimizer.optimizer
        else:
            optimizer = self.optimizer
        self.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
    def create_optimizer(self):
        """
        Setup the optimizer.

        We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
        Trainer's init through `optimizers`, or subclass and override this method in a subclass.
        """
        opt_model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model

        if self.optimizer is None:
            # Two parameter groups: weight-decayed (everything in LayerNorm-excluded set minus biases) and not.
            decay_parameters = get_parameter_names(opt_model, ALL_LAYERNORM_LAYERS)
            decay_parameters = [name for name in decay_parameters if "bias" not in name]
            optimizer_grouped_parameters = [
                {
                    "params": [
                        p for n, p in opt_model.named_parameters() if (n in decay_parameters and p.requires_grad)
                    ],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [
                        p for n, p in opt_model.named_parameters() if (n not in decay_parameters and p.requires_grad)
                    ],
                    "weight_decay": 0.0,
                },
            ]

            optimizer_cls, optimizer_kwargs = Trainer.get_optimizer_cls_and_kwargs(self.args)

            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
                if optimizer_cls.__name__ == "Adam8bit":
                    import bitsandbytes

                    manager = bitsandbytes.optim.GlobalOptimManager.get_instance()

                    # Keep embedding weights in fp32 under 8-bit Adam; count how many parameters are skipped.
                    skipped = 0
                    for module in opt_model.modules():
                        if isinstance(module, nn.Embedding):
                            skipped += sum({p.data_ptr(): p.numel() for p in module.parameters()}.values())
                            print(f"skipped {module}: {skipped/2**20}M params")
                            manager.register_module_override(module, "weight", {"optim_bits": 32})
                            logger.debug(f"bitsandbytes: will optimize {module} in fp32")
                    print(f"skipped: {skipped/2**20}M params")

        if is_sagemaker_mp_enabled():
            self.optimizer = smp.DistributedOptimizer(self.optimizer)

        return self.optimizer

    @staticmethod
    def get_optimizer_cls_and_kwargs(args: TrainingArguments) -> Tuple[Any, Any]:
        """
        Returns the optimizer class and optimizer parameters based on the training arguments.

        Args:
            args (`transformers.training_args.TrainingArguments`):
                The training arguments for the training session.

        """

        # parse args.optim_args
        optim_args = {}
        if args.optim_args:
            # NOTE(review): `mapping.split("=")` raises if a value itself contains "=" — confirm whether
            # callers can pass such values before hardening with `split("=", 1)`.
            for mapping in args.optim_args.replace(" ", "").split(","):
                key, value = mapping.split("=")
                optim_args[key] = value

        optimizer_kwargs = {"lr": args.learning_rate}

        adam_kwargs = {
            "betas": (args.adam_beta1, args.adam_beta2),
            "eps": args.adam_epsilon,
        }
        if args.optim == OptimizerNames.ADAFACTOR:
            optimizer_cls = Adafactor
            optimizer_kwargs.update({"scale_parameter": False, "relative_step": False})
        elif args.optim == OptimizerNames.ADAMW_HF:
            from transformers.optimization import AdamW

            optimizer_cls = AdamW
            optimizer_kwargs.update(adam_kwargs)
        elif args.optim in [OptimizerNames.ADAMW_TORCH, OptimizerNames.ADAMW_TORCH_FUSED]:
            from torch.optim import AdamW

            optimizer_cls = AdamW
            optimizer_kwargs.update(adam_kwargs)
            if args.optim == OptimizerNames.ADAMW_TORCH_FUSED:
                optimizer_kwargs.update({"fused": True})
        elif args.optim == OptimizerNames.ADAMW_TORCH_XLA:
            try:
                from torch_xla.amp.syncfree import AdamW

                optimizer_cls = AdamW
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer failed to import syncfree AdamW from torch_xla.")
        elif args.optim == OptimizerNames.ADAMW_APEX_FUSED:
            try:
                from apex.optimizers import FusedAdam

                optimizer_cls = FusedAdam
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer tried to instantiate apex FusedAdam but apex is not installed!")
        elif args.optim == OptimizerNames.ADAMW_BNB:
            try:
                from bitsandbytes.optim import Adam8bit

                optimizer_cls = Adam8bit
                optimizer_kwargs.update(adam_kwargs)
            except ImportError:
                raise ValueError("Trainer tried to instantiate bnb Adam8bit but bnb is not installed!")
        elif args.optim == OptimizerNames.ADAMW_ANYPRECISION:
            try:
                from torchdistx.optimizers import AnyPrecisionAdamW

                optimizer_cls = AnyPrecisionAdamW
                optimizer_kwargs.update(adam_kwargs)

                # TODO Change dtypes back to M=FP32, Var = BF16, Kahan = False once they can be cast together in torchdistx.
                optimizer_kwargs.update(
                    {
                        "use_kahan_summation": strtobool(optim_args.get("use_kahan_summation", "False")),
                        "momentum_dtype": getattr(torch, optim_args.get("momentum_dtype", "float32")),
                        "variance_dtype": getattr(torch, optim_args.get("variance_dtype", "float32")),
                        "compensation_buffer_dtype": getattr(
                            torch, optim_args.get("compensation_buffer_dtype", "bfloat16")
                        ),
                    }
                )
            except ImportError:
                raise ValueError("Please install https://github.com/pytorch/torchdistx")
        elif args.optim == OptimizerNames.SGD:
            optimizer_cls = torch.optim.SGD
        elif args.optim == OptimizerNames.ADAGRAD:
            optimizer_cls = torch.optim.Adagrad
        else:
            raise ValueError(f"Trainer cannot instantiate unsupported optimizer: {args.optim}")
        return optimizer_cls, optimizer_kwargs

    def create_scheduler(self, num_training_steps: int, optimizer: torch.optim.Optimizer = None):
        """
        Setup the scheduler. The optimizer of the trainer must have been set up either before this method is called or
        passed as an argument.

        Args:
            num_training_steps (int): The number of training steps to do.
        """
        if self.lr_scheduler is None:
            self.lr_scheduler = get_scheduler(
                self.args.lr_scheduler_type,
                optimizer=self.optimizer if optimizer is None else optimizer,
                num_warmup_steps=self.args.get_warmup_steps(num_training_steps),
                num_training_steps=num_training_steps,
            )
        return self.lr_scheduler
    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a [`~torch.utils.data.DataLoader`] by accessing its dataset. When
        dataloader.dataset does not exist or has no length, estimates as best it can
        """
        try:
            dataset = dataloader.dataset
            # Special case for IterableDatasetShard, we need to dig deeper
            if isinstance(dataset, IterableDatasetShard):
                return len(dataloader.dataset.dataset)
            return len(dataloader.dataset)
        except (NameError, AttributeError, TypeError):  # no dataset or length, estimate by length of dataloader
            # NOTE(review): the fallback always uses the *train* per-device batch size, even when called on an
            # eval/test dataloader — confirm whether that estimate is acceptable for the callers.
            return len(dataloader) * self.args.per_device_train_batch_size

    def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]):
        """HP search setup code: copy the trial's sampled hyperparameters onto `self.args`."""
        self._trial = trial

        if self.hp_search_backend is None or trial is None:
            return
        # Each backend exposes the sampled parameters differently.
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            params = self.hp_space(trial)
        elif self.hp_search_backend == HPSearchBackend.RAY:
            params = trial
            params.pop("wandb", None)
        elif self.hp_search_backend == HPSearchBackend.SIGOPT:
            params = {k: int(v) if isinstance(v, str) else v for k, v in trial.assignments.items()}
        elif self.hp_search_backend == HPSearchBackend.WANDB:
            params = trial

        for key, value in params.items():
            if not hasattr(self.args, key):
                logger.warning(
                    f"Trying to set {key} in the hyperparameter search but there is no corresponding field in"
                    " `TrainingArguments`."
                )
                continue
            old_attr = getattr(self.args, key, None)
            # Casting value to the proper type
            # NOTE(review): `type(old_attr)(value)` makes bool("False") == True when `value` is a string —
            # confirm backends never deliver stringified booleans here.
            if old_attr is not None:
                value = type(old_attr)(value)
            setattr(self.args, key, value)
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            logger.info(f"Trial: {trial.params}")
        if self.hp_search_backend == HPSearchBackend.SIGOPT:
            logger.info(f"SigOpt Assignments: {trial.assignments}")
        if self.hp_search_backend == HPSearchBackend.WANDB:
            logger.info(f"W&B Sweep parameters: {trial}")
        if self.args.deepspeed:
            # Rebuild the deepspeed config to reflect the updated training parameters
            from transformers.deepspeed import HfTrainerDeepSpeedConfig

            self.args.hf_deepspeed_config = HfTrainerDeepSpeedConfig(self.args.deepspeed)
            self.args.hf_deepspeed_config.trainer_config_process(self.args)

    def _report_to_hp_search(self, trial: Union["optuna.Trial", Dict[str, Any]], step: int, metrics: Dict[str, float]):
        """Report the current objective to the active HP-search backend (optuna pruning / ray tune reporting)."""
        if self.hp_search_backend is None or trial is None:
            return
        self.objective = self.compute_objective(metrics.copy())
        if self.hp_search_backend == HPSearchBackend.OPTUNA:
            import optuna

            trial.report(self.objective, step)
            if trial.should_prune():
                # Let callbacks see the end of training before the trial is pruned.
                self.callback_handler.on_train_end(self.args, self.state, self.control)
                raise optuna.TrialPruned()
        elif self.hp_search_backend == HPSearchBackend.RAY:
            from ray import tune

            if self.control.should_save:
                self._tune_save_checkpoint()
            tune.report(objective=self.objective, **metrics)
    def _tune_save_checkpoint(self):
        """Save a full checkpoint (model, trainer state, optimizer, scheduler) inside a Ray Tune checkpoint dir."""
        from ray import tune

        if not self.use_tune_checkpoints:
            return
        with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir:
            output_dir = os.path.join(checkpoint_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}")
            self.save_model(output_dir, _internal_call=True)
            if self.args.should_save:
                self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))
                torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))

    def call_model_init(self, trial=None):
        """Instantiate a fresh model from `self.model_init`, passing the trial when the factory accepts one."""
        model_init_argcount = number_of_arguments(self.model_init)
        if model_init_argcount == 0:
            model = self.model_init()
        elif model_init_argcount == 1:
            model = self.model_init(trial)
        else:
            raise RuntimeError("model_init should have 0 or 1 argument.")

        if model is None:
            raise RuntimeError("model_init should not return None.")

        return model

    def torch_jit_model_eval(self, model, dataloader, training=False):
        """
        Try to JIT-trace `model` for inference using one batch from `dataloader`; on any tracing failure, log a
        warning and return the original model unchanged. Only runs when `training` is False.
        """
        if not training:
            if dataloader is None:
                logger.warning("failed to use PyTorch jit mode due to current dataloader is none.")
                return model
            example_batch = next(iter(dataloader))
            example_batch = self._prepare_inputs(example_batch)
            try:
                jit_model = model.eval()
                with ContextManagers([self.autocast_smart_context_manager(cache_enabled=False), torch.no_grad()]):
                    # torch >= 1.14 supports tracing with keyword inputs directly.
                    if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.14.0"):
                        if isinstance(example_batch, dict):
                            jit_model = torch.jit.trace(jit_model, example_kwarg_inputs=example_batch, strict=False)
                        else:
                            jit_model = torch.jit.trace(
                                jit_model,
                                example_kwarg_inputs={key: example_batch[key] for key in example_batch},
                                strict=False,
                            )
                    else:
                        # Older torch: build positional dummy tensors shaped like the example batch.
                        jit_inputs = []
                        for key in example_batch:
                            example_tensor = torch.ones_like(example_batch[key])
                            jit_inputs.append(example_tensor)
                        jit_inputs = tuple(jit_inputs)
                        jit_model = torch.jit.trace(jit_model, jit_inputs, strict=False)
                jit_model = torch.jit.freeze(jit_model)
                # Two warm-up calls so the traced graph is fully optimized before real use.
                with torch.no_grad():
                    jit_model(**example_batch)
                    jit_model(**example_batch)
                model = jit_model
                # The traced model bakes in the autocast behavior; disable the runtime amp wrappers.
                self.use_cpu_amp = False
                self.use_cuda_amp = False
            except (RuntimeError, TypeError, ValueError, NameError, IndexError) as e:
                logger.warning(f"failed to use PyTorch jit mode due to: {e}.")

        return model

    def ipex_optimize_model(self, model, training=False, dtype=torch.float32):
        """Apply Intel Extension for PyTorch (IPEX) O1 optimization to `model` (and the optimizer when training)."""
        if not is_ipex_available():
            raise ImportError(
                "Using IPEX but IPEX is not installed or IPEX's version does not match current PyTorch, please refer"
                " to https://github.com/intel/intel-extension-for-pytorch."
            )

        import intel_extension_for_pytorch as ipex

        if not training:
            model.eval()
            dtype = torch.bfloat16 if not self.is_in_train and self.args.bf16_full_eval else dtype
            # conv_bn_folding is disabled as it fails in symbolic tracing, resulting in ipex warnings
            model = ipex.optimize(model, dtype=dtype, level="O1", conv_bn_folding=False, inplace=not self.is_in_train)
        else:
            if not model.training:
                model.train()
            model, self.optimizer = ipex.optimize(
                model, dtype=dtype, optimizer=self.optimizer, inplace=True, level="O1"
            )

        return model
    def _wrap_model(self, model, training=True, dataloader=None):
        """
        Wrap `model` for the active execution mode (torch.compile, IPEX, SageMaker MP, DeepSpeed, apex AMP,
        DataParallel, Sharded DDP, FSDP, XLA FSDP, or DDP), in that precedence order. Returns the wrapped model;
        for inference (`training=False`) distributed wrappers are skipped.
        """
        if self.args.torch_compile:
            model = torch.compile(model, backend=self.args.torch_compile_backend, mode=self.args.torch_compile_mode)

        if self.args.use_ipex:
            dtype = torch.bfloat16 if self.use_cpu_amp else torch.float32
            model = self.ipex_optimize_model(model, training, dtype=dtype)

        if is_sagemaker_mp_enabled():
            # Wrapping the base model twice in a DistributedModel will raise an error.
            if isinstance(self.model_wrapped, smp.model.DistributedModel):
                return self.model_wrapped
            return smp.DistributedModel(model, backward_passes_per_step=self.args.gradient_accumulation_steps)

        # already initialized its own DDP and AMP
        if self.deepspeed:
            return self.deepspeed

        # train/eval could be run multiple-times - if already wrapped, don't re-wrap it again
        if unwrap_model(model) is not model:
            return model

        # Mixed precision training with apex (torch < 1.6)
        if self.use_apex and training:
            model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level)

        # Multi-gpu training (should be after apex fp16 initialization)
        if self.args.n_gpu > 1:
            model = nn.DataParallel(model)

        if self.args.jit_mode_eval:
            start_time = time.time()
            model = self.torch_jit_model_eval(model, dataloader, training)
            self.jit_compilation_time = round(time.time() - start_time, 4)

        # Note: in torch.distributed mode, there's no point in wrapping the model
        # inside a DistributedDataParallel as we'll be under `no_grad` anyways.
        if not training:
            return model

        # Distributed training (should be after apex fp16 initialization)
        if self.sharded_ddp is not None:
            # Sharded DDP!
            if self.sharded_ddp == ShardedDDPOption.SIMPLE:
                model = ShardedDDP(model, self.optimizer)
            else:
                mixed_precision = self.args.fp16 or self.args.bf16
                cpu_offload = ShardedDDPOption.OFFLOAD in self.args.sharded_ddp
                zero_3 = self.sharded_ddp == ShardedDDPOption.ZERO_DP_3
                # XXX: Breaking the self.model convention but I see no way around it for now.
                if ShardedDDPOption.AUTO_WRAP in self.args.sharded_ddp:
                    model = auto_wrap(model)
                self.model = model = FullyShardedDDP(
                    model,
                    mixed_precision=mixed_precision,
                    reshard_after_forward=zero_3,
                    cpu_offload=cpu_offload,
                ).to(self.args.device)
        # Distributed training using PyTorch FSDP
        elif self.fsdp is not None:
            if not self.args.fsdp_config["xla"]:
                # PyTorch FSDP!
                from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload, MixedPrecision
                from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
                from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy, transformer_auto_wrap_policy

                if FSDPOption.OFFLOAD in self.args.fsdp:
                    cpu_offload = CPUOffload(offload_params=True)
                else:
                    cpu_offload = CPUOffload(offload_params=False)

                auto_wrap_policy = None

                if FSDPOption.AUTO_WRAP in self.args.fsdp:
                    # Size-based wrapping takes precedence over class-name-based wrapping.
                    if self.args.fsdp_config["fsdp_min_num_params"] > 0:
                        auto_wrap_policy = functools.partial(
                            size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
                        )
                    elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
                        transformer_cls_to_wrap = set()
                        for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
                            transformer_cls = get_module_class_from_name(model, layer_class)
                            if transformer_cls is None:
                                raise Exception("Could not find the transformer layer class to wrap in the model.")
                            else:
                                transformer_cls_to_wrap.add(transformer_cls)
                        auto_wrap_policy = functools.partial(
                            transformer_auto_wrap_policy,
                            # Transformer layer class to wrap
                            transformer_layer_cls=transformer_cls_to_wrap,
                        )
                mixed_precision_policy = None
                dtype = None
                if self.args.fp16:
                    dtype = torch.float16
                elif self.args.bf16:
                    dtype = torch.bfloat16
                if dtype is not None:
                    mixed_precision_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                if type(model) != FSDP:
                    # XXX: Breaking the self.model convention but I see no way around it for now.
                    self.model = model = FSDP(
                        model,
                        sharding_strategy=self.fsdp,
                        cpu_offload=cpu_offload,
                        auto_wrap_policy=auto_wrap_policy,
                        mixed_precision=mixed_precision_policy,
                        device_id=self.args.device,
                        backward_prefetch=self.backward_prefetch,
                        # NOTE(review): `forword_prefetch` looks misspelled, but the attribute name must match
                        # where it is assigned in __init__ (not visible here) — confirm before renaming.
                        forward_prefetch=self.forword_prefetch,
                        limit_all_gathers=self.limit_all_gathers,
                    )
            else:
                try:
                    from torch_xla.distributed.fsdp import XlaFullyShardedDataParallel as FSDP
                    from torch_xla.distributed.fsdp import checkpoint_module
                    from torch_xla.distributed.fsdp.wrap import (
                        size_based_auto_wrap_policy,
                        transformer_auto_wrap_policy,
                    )
                except ImportError:
                    raise ImportError("Missing XLA FSDP related module; please make sure to use torch-xla >= 2.0.")
                auto_wrap_policy = None
                auto_wrapper_callable = None
                if self.args.fsdp_config["fsdp_min_num_params"] > 0:
                    auto_wrap_policy = functools.partial(
                        size_based_auto_wrap_policy, min_num_params=self.args.fsdp_config["fsdp_min_num_params"]
                    )
                elif self.args.fsdp_config.get("fsdp_transformer_layer_cls_to_wrap", None) is not None:
                    transformer_cls_to_wrap = set()
                    for layer_class in self.args.fsdp_config["fsdp_transformer_layer_cls_to_wrap"]:
                        transformer_cls = get_module_class_from_name(model, layer_class)
                        if transformer_cls is None:
                            raise Exception("Could not find the transformer layer class to wrap in the model.")
                        else:
                            transformer_cls_to_wrap.add(transformer_cls)
                    auto_wrap_policy = functools.partial(
                        transformer_auto_wrap_policy,
                        # Transformer layer class to wrap
                        transformer_layer_cls=transformer_cls_to_wrap,
                    )
                fsdp_kwargs = self.args.xla_fsdp_config
                if self.args.fsdp_config["xla_fsdp_grad_ckpt"]:
                    # Apply gradient checkpointing to auto-wrapped sub-modules if specified
                    def auto_wrapper_callable(m, *args, **kwargs):
                        return FSDP(checkpoint_module(m), *args, **kwargs)

                # Wrap the base model with an outer FSDP wrapper
                self.model = model = FSDP(
                    model,
                    auto_wrap_policy=auto_wrap_policy,
                    auto_wrapper_callable=auto_wrapper_callable,
                    **fsdp_kwargs,
                )

                # Patch `xm.optimizer_step` should not reduce gradients in this case,
                # as FSDP does not need gradient reduction over sharded parameters.
                def patched_optimizer_step(optimizer, barrier=False, optimizer_args={}):
                    loss = optimizer.step(**optimizer_args)
                    if barrier:
                        xm.mark_step()
                    return loss

                xm.optimizer_step = patched_optimizer_step
        elif is_sagemaker_dp_enabled():
            model = nn.parallel.DistributedDataParallel(
                model, device_ids=[int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))]
            )
        elif self.args.local_rank != -1:
            kwargs = {}
            if self.args.ddp_find_unused_parameters is not None:
                kwargs["find_unused_parameters"] = self.args.ddp_find_unused_parameters
            elif isinstance(model, PreTrainedModel):
                # find_unused_parameters breaks checkpointing as per
                # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021
                kwargs["find_unused_parameters"] = not model.is_gradient_checkpointing
            else:
                kwargs["find_unused_parameters"] = True

            if self.args.ddp_bucket_cap_mb is not None:
                kwargs["bucket_cap_mb"] = self.args.ddp_bucket_cap_mb
            if is_torch_neuroncore_available():
                return model
            model = nn.parallel.DistributedDataParallel(
                model,
                device_ids=[self.args.local_rank] if self.args._n_gpu != 0 else None,
                output_device=self.args.local_rank if self.args._n_gpu != 0 else None,
                **kwargs,
            )

        return model
                If present, training will resume from the model/optimizer/scheduler states loaded here.
            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`List[str]`, *optional*)
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs:
                Additional keyword arguments used to hide deprecated arguments
        """
        # `resume_from_checkpoint=False` is normalized to None (no resume); `True` is resolved
        # to the last checkpoint in args.output_dir further below.
        if resume_from_checkpoint is False:
            resume_from_checkpoint = None

        # memory metrics - must set up as early as possible
        self._memory_tracker.start()

        args = self.args

        self.is_in_train = True

        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
            self._move_model_to_device(self.model, args.device)

        # Deprecation shim: `model_path` was the pre-`resume_from_checkpoint` name of this argument.
        if "model_path" in kwargs:
            resume_from_checkpoint = kwargs.pop("model_path")
            warnings.warn(
                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
                "instead.",
                FutureWarning,
            )
        if len(kwargs) > 0:
            # NOTE(review): the message reads "received got" — grammatical slip in a runtime string,
            # left untouched here; worth fixing in a dedicated change.
            raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        self._train_batch_size = self.args.train_batch_size

        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None

        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            # `True` means "pick up the most recent checkpoint under args.output_dir".
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")

        # SageMaker MP and DeepSpeed load their checkpoints through their own code paths later on.
        if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None:
            self._load_from_checkpoint(resume_from_checkpoint)

        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model

        # `find_executable_batch_size` retries the loop with a smaller batch size on CUDA OOM
        # when args.auto_find_batch_size is set.
        inner_training_loop = find_executable_batch_size(
            self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
        )
        return inner_training_loop(
            args=args,
            resume_from_checkpoint=resume_from_checkpoint,
            trial=trial,
            ignore_keys_for_eval=ignore_keys_for_eval,
        )

    def _inner_training_loop(
        self, batch_size=None, args=None, resume_from_checkpoint=None, trial=None, ignore_keys_for_eval=None
    ):
        """Run the actual training loop at a given batch size (callable retried by auto_find_batch_size)."""
        self._train_batch_size = batch_size
        # Data loader and number of training steps
        train_dataloader = self.get_train_dataloader()

        # Setting up training control variables:
        # number of training epochs: num_train_epochs
        # number of training steps per epoch: num_update_steps_per_epoch
        # total number of training steps to execute: max_steps
        total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size

        len_dataloader = None
        if has_length(train_dataloader):
            len_dataloader = len(train_dataloader)
            num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps
            num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
            num_examples = self.num_examples(train_dataloader)
            if args.max_steps > 0:
                # max_steps wins over num_train_epochs; derive the epoch count from it.
                max_steps = args.max_steps
                num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(
                    args.max_steps % num_update_steps_per_epoch > 0
                )
                # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's
                # the best we can do.
                num_train_samples = args.max_steps * total_train_batch_size
            else:
                max_steps = math.ceil(args.num_train_epochs * num_update_steps_per_epoch)
                num_train_epochs = math.ceil(args.num_train_epochs)
                num_train_samples = self.num_examples(train_dataloader) * args.num_train_epochs
        elif args.max_steps > 0:  # Rely on max_steps when dataloader does not have a working size
            max_steps = args.max_steps
            # Setting a very large number of epochs so we go as many times as necessary over the iterator.
            num_train_epochs = sys.maxsize
            num_update_steps_per_epoch = max_steps
            num_examples = total_train_batch_size * args.max_steps
            num_train_samples = args.max_steps * total_train_batch_size
        else:
            raise ValueError(
                "args.max_steps must be set to a positive value if dataloader does not have a length, was"
                f" {args.max_steps}"
            )

        if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:
            if self.args.n_gpu > 1:
                # nn.DataParallel(model) replicates the model, creating new variables and module
                # references registered here no longer work on other gpus, breaking the module
                raise ValueError(
                    "Currently --debug underflow_overflow is not supported under DP. Please use DDP"
                    " (torch.distributed.launch)."
                )
            else:
                debug_overflow = DebugUnderflowOverflow(self.model)  # noqa

        # Optimizer creation is deferred until after model wrapping for sharded setups (sharded DDP
        # beyond SIMPLE, SageMaker MP, FSDP), since the optimizer must see the wrapped parameters.
        delay_optimizer_creation = (
            self.sharded_ddp is not None
            and self.sharded_ddp != ShardedDDPOption.SIMPLE
            or is_sagemaker_mp_enabled()
            or self.fsdp is not None
        )
        if args.deepspeed:
            # DeepSpeed builds model/optimizer/scheduler together and owns all three.
            deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
                self, num_training_steps=max_steps, resume_from_checkpoint=resume_from_checkpoint
            )
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            self.optimizer = optimizer
            self.lr_scheduler = lr_scheduler
        elif not delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        self.state = TrainerState()
        self.state.is_hyper_param_search = trial is not None

        # Activate gradient checkpointing if needed
        if args.gradient_checkpointing:
            self.model.gradient_checkpointing_enable()

        model = self._wrap_model(self.model_wrapped)

        if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:
            self._load_from_checkpoint(resume_from_checkpoint, model)

        # for the rest of this function `model` is the outside model, whether it was wrapped or not
        if model is not self.model:
            self.model_wrapped = model

        if delay_optimizer_creation:
            self.create_optimizer_and_scheduler(num_training_steps=max_steps)

        # Check if saved optimizer or scheduler states exist
        self._load_optimizer_and_scheduler(resume_from_checkpoint)

        # important: at this point:
        # self.model is the Transformers Model
        # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.

        # Train!
        logger.info("***** Running training *****")
        logger.info(f" Num examples = {num_examples}")
        logger.info(f" Num Epochs = {num_train_epochs}")
        logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
        logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}")
        logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
        logger.info(f" Total optimization steps = {max_steps}")
        logger.info(
            f" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}"
        )

        self.state.epoch = 0
        start_time = time.time()
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        steps_trained_progress_bar = None

        # Check if continuing training from a checkpoint
        if resume_from_checkpoint is not None and os.path.isfile(
            os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)
        ):
            self.state = TrainerState.load_from_json(os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))
            epochs_trained = self.state.global_step // num_update_steps_per_epoch
            if not args.ignore_data_skip:
                # steps_trained_in_current_epoch is counted in *batches*, hence the multiplication
                # by gradient_accumulation_steps.
                steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch)
                steps_trained_in_current_epoch *= args.gradient_accumulation_steps
            else:
                steps_trained_in_current_epoch = 0

            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(f" Continuing training from epoch {epochs_trained}")
            logger.info(f" Continuing training from global step {self.state.global_step}")
            if not args.ignore_data_skip:
                if skip_first_batches is None:
                    logger.info(
                        f" Will skip the first {epochs_trained} epochs then the first"
                        f" {steps_trained_in_current_epoch} batches in the first epoch. If this takes a lot of time,"
                        " you can install the latest version of Accelerate with `pip install -U accelerate`.You can"
                        " also add the `--ignore_data_skip` flag to your launch command, but you will resume the"
                        " training on data already seen by your model."
                    )
                else:
                    logger.info(
                        f" Will skip the first {epochs_trained} epochs then the first"
                        f" {steps_trained_in_current_epoch} batches in the first epoch."
                    )
            # The progress bar is only shown for the slow (no-Accelerate) skip path.
            if self.is_local_process_zero() and not args.disable_tqdm and skip_first_batches is None:
                steps_trained_progress_bar = tqdm(total=steps_trained_in_current_epoch)
                steps_trained_progress_bar.set_description("Skipping the first batches")

        # Update the references
        self.callback_handler.model = self.model
        self.callback_handler.optimizer = self.optimizer
        self.callback_handler.lr_scheduler = self.lr_scheduler
        self.callback_handler.train_dataloader = train_dataloader
        if self.hp_name is not None and self._trial is not None:
            # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial
            # parameter to Train when using DDP.
            self.state.trial_name = self.hp_name(self._trial)
        if trial is not None:
            assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial
            self.state.trial_params = hp_params(assignments)
        else:
            self.state.trial_params = None
        # This should be the same if the state has been saved but in case the training arguments changed, it's safer
        # to set this after the load.
        self.state.max_steps = max_steps
        self.state.num_train_epochs = num_train_epochs
        self.state.is_local_process_zero = self.is_local_process_zero()
        self.state.is_world_process_zero = self.is_world_process_zero()

        # tr_loss is a tensor to avoid synchronization of TPUs through .item()
        tr_loss = torch.tensor(0.0).to(args.device)
        # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses
        self._total_loss_scalar = 0.0
        self._globalstep_last_logged = self.state.global_step
        model.zero_grad()

        self.control = self.callback_handler.on_train_begin(args, self.state, self.control)

        # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.
        if not args.ignore_data_skip:
            for epoch in range(epochs_trained):
                is_random_sampler = hasattr(train_dataloader, "sampler") and isinstance(
                    train_dataloader.sampler, RandomSampler
                )
                if is_torch_less_than_1_11 or not is_random_sampler:
                    # We just need to begin an iteration to create the randomization of the sampler.
                    # That was before PyTorch 1.11 however...
                    for _ in train_dataloader:
                        break
                else:
                    # Otherwise we need to call the whooooole sampler cause there is some random operation added
                    # AT THE VERY END!
                    _ = list(train_dataloader.sampler)

        total_batched_samples = 0
        for epoch in range(epochs_trained, num_train_epochs):
            # Re-seed samplers/shards each epoch so shuffling differs between epochs.
            if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
                train_dataloader.sampler.set_epoch(epoch)
            elif hasattr(train_dataloader, "dataset") and isinstance(train_dataloader.dataset, IterableDatasetShard):
                train_dataloader.dataset.set_epoch(epoch)

            if is_torch_tpu_available():
                # TPU needs a per-device loader that feeds the right shard to each core.
                parallel_loader = pl.ParallelLoader(train_dataloader, [args.device]).per_device_loader(args.device)
                epoch_iterator = parallel_loader
            else:
                epoch_iterator = train_dataloader

            # Reset the past mems state at the beginning of each epoch if necessary.
            if args.past_index >= 0:
                self._past = None

            steps_in_epoch = (
                len(epoch_iterator)
                if len_dataloader is not None
                else args.max_steps * args.gradient_accumulation_steps
            )
            self.control = self.callback_handler.on_epoch_begin(args, self.state, self.control)

            # If we resume at an epoch boundary, restore the RNG state saved with the checkpoint.
            if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:
                self._load_rng_state(resume_from_checkpoint)

            rng_to_sync = False
            steps_skipped = 0
            if skip_first_batches is not None and steps_trained_in_current_epoch > 0:
                # Fast path (Accelerate available): skip already-trained batches without iterating them.
                epoch_iterator = skip_first_batches(epoch_iterator, steps_trained_in_current_epoch)
                steps_skipped = steps_trained_in_current_epoch
                steps_trained_in_current_epoch = 0
                rng_to_sync = True

            step = -1
            for step, inputs in enumerate(epoch_iterator):
                total_batched_samples += 1
                if rng_to_sync:
                    self._load_rng_state(resume_from_checkpoint)
                    rng_to_sync = False

                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    if steps_trained_progress_bar is not None:
                        steps_trained_progress_bar.update(1)
                    if steps_trained_in_current_epoch == 0:
                        self._load_rng_state(resume_from_checkpoint)
                    continue
                elif steps_trained_progress_bar is not None:
                    steps_trained_progress_bar.close()
                    steps_trained_progress_bar = None

                if step % args.gradient_accumulation_steps == 0:
                    self.control = self.callback_handler.on_step_begin(args, self.state, self.control)

                if (
                    (total_batched_samples % args.gradient_accumulation_steps != 0)
                    and args.local_rank != -1
                    and args._no_sync_in_gradient_accumulation
                ):
                    # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
                    with model.no_sync():
                        tr_loss_step = self.training_step(model, inputs)
                else:
                    tr_loss_step = self.training_step(model, inputs)

                if (
                    args.logging_nan_inf_filter
                    and not is_torch_tpu_available()
                    and (torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))
                ):
                    # if loss is nan or inf simply add the average of previous logged losses
                    tr_loss += tr_loss / (1 + self.state.global_step - self._globalstep_last_logged)
                else:
                    tr_loss += tr_loss_step

                self.current_flos += float(self.floating_point_ops(inputs))

                # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps
                if self.deepspeed:
                    self.deepspeed.step()

                if total_batched_samples % args.gradient_accumulation_steps == 0 or (
                    # last step in epoch but step is always smaller than gradient_accumulation_steps
                    steps_in_epoch <= args.gradient_accumulation_steps
                    and (step + 1) == steps_in_epoch
                ):
                    # Gradient clipping
                    if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:
                        # deepspeed does its own clipping

                        if self.do_grad_scaling:
                            # Reduce gradients first for XLA
                            if is_torch_tpu_available():
                                gradients = xm._fetch_gradients(self.optimizer)
                                xm.all_reduce("sum", gradients, scale=1.0 / xm.xrt_world_size())
                            # AMP: gradients need unscaling
                            self.scaler.unscale_(self.optimizer)

                        if is_sagemaker_mp_enabled() and args.fp16:
                            self.optimizer.clip_master_grads(args.max_grad_norm)
                        elif hasattr(self.optimizer, "clip_grad_norm"):
                            # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping
                            self.optimizer.clip_grad_norm(args.max_grad_norm)
                        elif hasattr(model, "clip_grad_norm_"):
                            # Some models (like FullyShardedDDP) have a specific way to do gradient clipping
                            model.clip_grad_norm_(args.max_grad_norm)
                        else:
                            # Revert to normal clipping otherwise, handling Apex or full precision
                            nn.utils.clip_grad_norm_(
                                amp.master_params(self.optimizer) if self.use_apex else model.parameters(),
                                args.max_grad_norm,
                            )

                    # Optimizer step
                    optimizer_was_run = True
                    if self.deepspeed:
                        pass  # called outside the loop
                    elif is_torch_tpu_available():
                        if self.do_grad_scaling:
                            self.scaler.step(self.optimizer)
                            self.scaler.update()
                        else:
                            xm.optimizer_step(self.optimizer)
                    elif self.do_grad_scaling:
                        # A decreased scale after .update() means the step was skipped due to inf/nan grads.
                        scale_before = self.scaler.get_scale()
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        scale_after = self.scaler.get_scale()
                        optimizer_was_run = scale_before <= scale_after
                    else:
                        self.optimizer.step()

                    if optimizer_was_run and not self.deepspeed:
                        self.lr_scheduler.step()

                    model.zero_grad()
                    self.state.global_step += 1
                    self.state.epoch = epoch + (step + 1 + steps_skipped) / steps_in_epoch
                    self.control = self.callback_handler.on_step_end(args, self.state, self.control)

                    self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)
                else:
                    self.control = self.callback_handler.on_substep_end(args, self.state, self.control)

                if self.control.should_epoch_stop or self.control.should_training_stop:
                    break
            if step < 0:
                # The epoch iterator yielded nothing at all.
                logger.warning(
                    "There seems to be not a single sample in your epoch_iterator, stopping training at step"
                    f" {self.state.global_step}! This is expected if you're using an IterableDataset and set"
                    f" num_steps ({max_steps}) higher than the number of available samples."
                )
                self.control.should_training_stop = True

            self.control = self.callback_handler.on_epoch_end(args, self.state, self.control)
            self._maybe_log_save_evaluate(tr_loss, model, trial, epoch, ignore_keys_for_eval)

            if DebugOption.TPU_METRICS_DEBUG in self.args.debug:
                if is_torch_tpu_available():
                    # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
                    xm.master_print(met.metrics_report())
                else:
                    logger.warning(
                        "You enabled PyTorch/XLA debug metrics but you don't have a TPU "
                        "configured. Check your training configuration if this is unexpected."
                    )
            if self.control.should_training_stop:
                break

        if args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of training
            delattr(self, "_past")

        logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n")
        if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:
            # Wait for everyone to get here so we are sure the model has been saved by process 0.
            if is_torch_tpu_available():
                xm.rendezvous("load_best_model_at_end")
            elif args.local_rank != -1:
                dist.barrier()
            elif is_sagemaker_mp_enabled():
                smp.barrier()

            self._load_best_model()

        # add remaining tr_loss
        self._total_loss_scalar += tr_loss.item()
        train_loss = self._total_loss_scalar / self.state.global_step

        metrics = speed_metrics("train", start_time, num_samples=num_train_samples, num_steps=self.state.max_steps)
        self.store_flos()
        metrics["total_flos"] = self.state.total_flos
        metrics["train_loss"] = train_loss

        self.is_in_train = False

        self._memory_tracker.stop_and_update_metrics(metrics)

        self.log(metrics)

        run_dir = self._get_output_dir(trial)
        checkpoints_sorted = self._sorted_checkpoints(use_mtime=False, output_dir=run_dir)

        # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint and process allowed to save.
        if self.args.should_save and self.state.best_model_checkpoint is not None and self.args.save_total_limit == 1:
            for checkpoint in checkpoints_sorted:
                if checkpoint != self.state.best_model_checkpoint:
                    logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit")
                    shutil.rmtree(checkpoint)

        self.control = self.callback_handler.on_train_end(args, self.state, self.control)

        return TrainOutput(self.state.global_step, train_loss, metrics)

    def _get_output_dir(self, trial):
        """Return the run directory: a per-trial subfolder during hyperparameter search, else args.output_dir."""
        if self.hp_search_backend is not None and trial is not None:
            if self.hp_search_backend == HPSearchBackend.OPTUNA:
                run_id = trial.number
            elif self.hp_search_backend == HPSearchBackend.RAY:
                from ray import tune

                run_id = tune.get_trial_id()
            elif self.hp_search_backend == HPSearchBackend.SIGOPT:
                run_id = trial.id
            elif self.hp_search_backend == HPSearchBackend.WANDB:
                import wandb

                run_id = wandb.run.id
            run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}"
            run_dir = os.path.join(self.args.output_dir, run_name)
        else:
            run_dir = self.args.output_dir
        return run_dir

    def _load_from_checkpoint(self, resume_from_checkpoint, model=None):
        """Load model weights from a checkpoint directory into `model` (defaults to self.model)."""
        if model is None:
            model = self.model

        # Require either a single weights file or a sharded-checkpoint index.
        if not os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)) and not os.path.isfile(
            os.path.join(resume_from_checkpoint, WEIGHTS_INDEX_NAME)
        ):
            raise ValueError(f"Can't find a valid checkpoint at {resume_from_checkpoint}")

        logger.info(f"Loading model from {resume_from_checkpoint}.")

        if os.path.isfile(os.path.join(resume_from_checkpoint, CONFIG_NAME)):
            # Warn on cross-version resumes — not fatal, but can cause subtle incompatibilities.
            config = PretrainedConfig.from_json_file(os.path.join(resume_from_checkpoint, CONFIG_NAME))
            checkpoint_version = config.transformers_version
            if checkpoint_version is not None and checkpoint_version != __version__:
                logger.warning(
                    f"You are resuming training from a checkpoint trained with {checkpoint_version} of "
                    f"Transformers but your current version is {__version__}. This is not recommended and could "
                    "yield to errors or unwanted behaviors."
                )

        if os.path.isfile(os.path.join(resume_from_checkpoint, WEIGHTS_NAME)):
            # If the model is on the GPU, it still works!
            if is_sagemaker_mp_enabled():
                if os.path.isfile(os.path.join(resume_from_checkpoint, "user_content.pt")):
                    # If the 'user_content.pt' file exists, load with the new smp api.
                    # Checkpoint must have been saved with the new smp api.
                    smp.resume_from_checkpoint(
                        path=resume_from_checkpoint, tag=WEIGHTS_NAME, partial=False, load_optimizer=False
                    )
                else:
                    # If the 'user_content.pt' file does NOT exist, load with the old smp api.
                    # Checkpoint must have been saved with the old smp api.
                    if hasattr(self.args, "fp16") and self.args.fp16 is True:
                        # NOTE(review): "suppported" typo lives in this runtime string; left byte-identical here.
                        logger.warning(
                            "Enabling FP16 and loading from smp < 1.10 checkpoint together is not suppported."
                        )
                    state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
                    # Required for smp to not auto-translate state_dict from hf to smp (is already smp).
                    state_dict["_smp_is_partial"] = False
                    load_result = model.load_state_dict(state_dict, strict=True)
                    # release memory
                    del state_dict
            else:
                # We load the model state dict on the CPU to avoid an OOM error.
                state_dict = torch.load(os.path.join(resume_from_checkpoint, WEIGHTS_NAME), map_location="cpu")
                # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
                # which takes *args instead of **kwargs
                load_result = model.load_state_dict(state_dict, False)
                # release memory
                del state_dict
            self._issue_warnings_after_load(load_result)
        else:
            # We load the sharded checkpoint
            load_result = load_sharded_checkpoint(model, resume_from_checkpoint, strict=is_sagemaker_mp_enabled())
            if not is_sagemaker_mp_enabled():
                self._issue_warnings_after_load(load_result)

    def _load_best_model(self):
        """Reload the best checkpoint (per `metric_for_best_model`) tracked in self.state."""
        logger.info(f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric}).")
        best_model_path = os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)
        model = self.model_wrapped if is_sagemaker_mp_enabled() else self.model
        if os.path.exists(best_model_path):
            if self.deepspeed:
                if self.model_wrapped is not None:
                    # this removes the pre-hooks from the previous engine
                    self.model_wrapped.destroy()
                    self.model_wrapped = None

                # temp hack until Deepspeed fixes the problem with resume from an existing engine that did some stepping
                deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(
                    self,
                    num_training_steps=self.args.max_steps,
                    resume_from_checkpoint=self.state.best_model_checkpoint,
                )
                self.model = deepspeed_engine.module
                self.model_wrapped = deepspeed_engine
                self.deepspeed = deepspeed_engine
                self.optimizer = optimizer
                self.lr_scheduler = lr_scheduler
            else:
                if is_sagemaker_mp_enabled():
                    if os.path.isfile(os.path.join(self.state.best_model_checkpoint, "user_content.pt")):
                        # If the 'user_content.pt' file exists, load with the new smp api.
                        # Checkpoint must have been saved with the new smp api.
                        smp.resume_from_checkpoint(
                            path=self.state.best_model_checkpoint,
                            tag=WEIGHTS_NAME,
                            partial=False,
                            load_optimizer=False,
                        )
                    else:
                        # If the 'user_content.pt' file does NOT exist, load with the old smp api.
                        # Checkpoint must have been saved with the old smp api.
                        state_dict = torch.load(best_model_path, map_location="cpu")
                        state_dict["_smp_is_partial"] = False
                        load_result = model.load_state_dict(state_dict, strict=True)
                else:
                    # We load the model state dict on the CPU to avoid an OOM error.
                    state_dict = torch.load(best_model_path, map_location="cpu")
                    # If the model is on the GPU, it still works!
                    # workaround for FSDP bug https://github.com/pytorch/pytorch/issues/82963
                    # which takes *args instead of **kwargs
                    load_result = model.load_state_dict(state_dict, False)
                if not is_sagemaker_mp_enabled():
                    self._issue_warnings_after_load(load_result)
        elif os.path.exists(os.path.join(self.state.best_model_checkpoint, WEIGHTS_INDEX_NAME)):
            load_result = load_sharded_checkpoint(
                model, self.state.best_model_checkpoint, strict=is_sagemaker_mp_enabled()
            )
            if not is_sagemaker_mp_enabled():
                self._issue_warnings_after_load(load_result)
        else:
            logger.warning(
                f"Could not locate the best model at {best_model_path}, if you are running a distributed training "
                "on multiple nodes, you should activate `--save_on_each_node`."
            )

    def _issue_warnings_after_load(self, load_result):
        """Warn about missing/unexpected keys from `load_state_dict`; re-tie weights when only tied keys are missing."""
        if len(load_result.missing_keys) != 0:
            if self.model._keys_to_ignore_on_save is not None and set(load_result.missing_keys) == set(
                self.model._keys_to_ignore_on_save
            ):
                # Missing keys are exactly the deliberately-unsaved (tied) weights: restore them by re-tying.
                self.model.tie_weights()
            else:
                logger.warning(f"There were missing keys in the checkpoint model loaded: {load_result.missing_keys}.")
        if len(load_result.unexpected_keys) != 0:
            logger.warning(
                f"There were unexpected keys in the checkpoint model loaded: {load_result.unexpected_keys}."
            )

    def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch, ignore_keys_for_eval):
        """Log, evaluate and/or save a checkpoint according to the current `self.control` flags."""
        if self.control.should_log:
            if is_torch_tpu_available():
                xm.mark_step()

            logs: Dict[str, float] = {}

            # all_gather + mean() to get average loss over all processes
            tr_loss_scalar = self._nested_gather(tr_loss).mean().item()

            # reset tr_loss to zero
            tr_loss -= tr_loss

            logs["loss"] = round(tr_loss_scalar / (self.state.global_step - self._globalstep_last_logged), 4)
            logs["learning_rate"] = self._get_learning_rate()

            self._total_loss_scalar += tr_loss_scalar
            self._globalstep_last_logged = self.state.global_step
            self.store_flos()

            self.log(logs)

        metrics = None
        if self.control.should_evaluate:
            # A dict of eval datasets means one evaluate() pass per named dataset.
            if isinstance(self.eval_dataset, dict):
                for eval_dataset_name, eval_dataset in self.eval_dataset.items():
                    metrics = self.evaluate(
                        eval_dataset=eval_dataset,
                        ignore_keys=ignore_keys_for_eval,
                        metric_key_prefix=f"eval_{eval_dataset_name}",
                    )
            else:
                metrics = self.evaluate(ignore_keys=ignore_keys_for_eval)
            self._report_to_hp_search(trial, self.state.global_step, metrics)

        if self.control.should_save:
            self._save_checkpoint(model, trial, metrics=metrics)
            self.control = self.callback_handler.on_save(self.args, self.state, self.control)

    def _load_rng_state(self, checkpoint):
        # Load RNG states from `checkpoint`
        if checkpoint is None:
            return

        if self.args.world_size > 1:
            # Each process restores its own per-rank RNG file.
            process_index = self.args.process_index
            rng_file = os.path.join(checkpoint, f"rng_state_{process_index}.pth")
            if not os.path.isfile(rng_file):
                logger.info(
                    f"Didn't find an RNG file for process {process_index}, if you are resuming a training that "
                    "wasn't launched in a distributed fashion, reproducibility is not guaranteed."
                )
                return
        else:
            rng_file = os.path.join(checkpoint, "rng_state.pth")
            if not os.path.isfile(rng_file):
                logger.info(
                    "Didn't find an RNG file, if you are resuming a training that was launched in a distributed "
                    "fashion, reproducibility is not guaranteed."
                )
                return

        # Restore every RNG stream that training touches: python, numpy, torch CPU, CUDA, XLA.
        checkpoint_rng_state = torch.load(rng_file)
        random.setstate(checkpoint_rng_state["python"])
        np.random.set_state(checkpoint_rng_state["numpy"])
        torch.random.set_rng_state(checkpoint_rng_state["cpu"])
        if torch.cuda.is_available():
            if self.args.local_rank != -1:
                torch.cuda.random.set_rng_state(checkpoint_rng_state["cuda"])
            else:
                # Non-distributed run: the saved state covers all visible GPUs; this can fail if the
                # GPU count changed between save and resume, hence the best-effort handling.
                try:
                    torch.cuda.random.set_rng_state_all(checkpoint_rng_state["cuda"])
                except Exception as e:
                    logger.info(
                        f"Didn't manage to set back the RNG states of the GPU because of the following error:\n {e}"
                        "\nThis won't yield the same results as if the training had not been interrupted."
                    )
        if is_torch_tpu_available():
            xm.set_rng_state(checkpoint_rng_state["xla"])

    def _save_checkpoint(self, model, trial, metrics=None):
        """Save model, optimizer, scheduler, scaler, Trainer state and RNG state into a checkpoint folder."""
        # In all cases, including ddp/dp/deepspeed, self.model is always a reference to the model we
        # want to save except FullyShardedDDP.
        # assert unwrap_model(model) is self.model, "internal model should be a reference to self.model"

        # Save model checkpoint
        checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}"

        if self.hp_search_backend is None and trial is None:
            self.store_flos()

        run_dir = self._get_output_dir(trial=trial)
        output_dir = os.path.join(run_dir, checkpoint_folder)
        self.save_model(output_dir, _internal_call=True)
        if self.deepspeed:
            # under zero3 model file itself doesn't get saved since it's bogus! Unless deepspeed
            # config `stage3_gather_16bit_weights_on_model_save` is True
            self.deepspeed.save_checkpoint(output_dir)

        # Save optimizer and scheduler
        if self.sharded_ddp == ShardedDDPOption.SIMPLE:
            # Gather the sharded optimizer state onto rank 0 before saving.
            self.optimizer.consolidate_state_dict()

        if is_torch_tpu_available():
            xm.rendezvous("saving_optimizer_states")
            xm.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
                reissue_pt_warnings(caught_warnings)
        elif is_sagemaker_mp_enabled():
            opt_state_dict = self.optimizer.local_state_dict(gather_if_shard=False)
            smp.barrier()
            if smp.rdp_rank() == 0 or smp.state.cfg.shard_optimizer_state:
                smp.save(
                    opt_state_dict,
                    os.path.join(output_dir, OPTIMIZER_NAME),
                    partial=True,
                    v3=smp.state.cfg.shard_optimizer_state,
                )
            if self.args.should_save:
                with warnings.catch_warnings(record=True) as caught_warnings:
                    torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
                reissue_pt_warnings(caught_warnings)
                if self.do_grad_scaling:
                    torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))
        elif self.args.should_save and not self.deepspeed:
            # deepspeed.save_checkpoint above saves model/optim/sched
            torch.save(self.optimizer.state_dict(), os.path.join(output_dir, OPTIMIZER_NAME))
            with warnings.catch_warnings(record=True) as caught_warnings:
                torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, SCHEDULER_NAME))
            reissue_pt_warnings(caught_warnings)
            if self.do_grad_scaling:
                torch.save(self.scaler.state_dict(), os.path.join(output_dir, SCALER_NAME))

        # Determine the new best metric / best model checkpoint
        if metrics is not None and self.args.metric_for_best_model is not None:
            metric_to_check = self.args.metric_for_best_model
            if not metric_to_check.startswith("eval_"):
                metric_to_check = f"eval_{metric_to_check}"
            metric_value = metrics[metric_to_check]

            operator = np.greater if self.args.greater_is_better else np.less
            if (
                self.state.best_metric is None
                or self.state.best_model_checkpoint is None
                or operator(metric_value, self.state.best_metric)
            ):
                self.state.best_metric = metric_value
                self.state.best_model_checkpoint = output_dir

        # Save the Trainer state
        if self.args.should_save:
            self.state.save_to_json(os.path.join(output_dir, TRAINER_STATE_NAME))

        # Save RNG state in non-distributed training
        rng_states = {
            "python": random.getstate(),
            "numpy": np.random.get_state(),
            "cpu": torch.random.get_rng_state(),
        }
        if torch.cuda.is_available():
            if self.args.local_rank == -1:
                # In non distributed, we save the global CUDA RNG state (will take care of DataParallel)
                rng_states["cuda"] = torch.cuda.random.get_rng_state_all()
            else:
                rng_states["cuda"] = torch.cuda.random.get_rng_state()

        if is_torch_tpu_available():
            rng_states["xla"] = xm.get_rng_state()

        # A process can arrive here before the process 0 has a chance to save the model, in which case output_dir may
        # not yet exist.
        os.makedirs(output_dir, exist_ok=True)

        if self.args.world_size <= 1:
            torch.save(rng_states, os.path.join(output_dir, "rng_state.pth"))
        else:
            torch.save(rng_states, os.path.join(output_dir, f"rng_state_{self.args.process_index}.pth"))

        if self.args.push_to_hub:
            self._push_from_checkpoint(output_dir)

        # Maybe delete some older checkpoints.
+ if self.args.should_save: + self._rotate_checkpoints(use_mtime=True, output_dir=run_dir) + + def _load_optimizer_and_scheduler(self, checkpoint): + """If optimizer and scheduler states exist, load them.""" + if checkpoint is None: + return + + if self.deepspeed: + # deepspeed loads optimizer/lr_scheduler together with the model in deepspeed_init + return + + checkpoint_file_exists = ( + glob.glob(os.path.join(checkpoint, OPTIMIZER_NAME) + "_*") + if is_sagemaker_mp_enabled() + else os.path.isfile(os.path.join(checkpoint, OPTIMIZER_NAME)) + ) + if checkpoint_file_exists and os.path.isfile(os.path.join(checkpoint, SCHEDULER_NAME)): + # Load in optimizer and scheduler states + if is_torch_tpu_available(): + # On TPU we have to take some extra precautions to properly load the states on the right device. + optimizer_state = torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location="cpu") + with warnings.catch_warnings(record=True) as caught_warnings: + lr_scheduler_state = torch.load(os.path.join(checkpoint, SCHEDULER_NAME), map_location="cpu") + reissue_pt_warnings(caught_warnings) + + xm.send_cpu_data_to_device(optimizer_state, self.args.device) + xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) + + self.optimizer.load_state_dict(optimizer_state) + self.lr_scheduler.load_state_dict(lr_scheduler_state) + else: + map_location = "cpu" if is_sagemaker_mp_enabled() else self.args.device + if is_sagemaker_mp_enabled(): + if os.path.isfile(os.path.join(checkpoint, "user_content.pt")): + # Optimizer checkpoint was saved with smp >= 1.10 + def opt_load_hook(mod, opt): + opt.load_state_dict(smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True)) + + else: + # Optimizer checkpoint was saved with smp < 1.10 + def opt_load_hook(mod, opt): + if IS_SAGEMAKER_MP_POST_1_10: + opt.load_state_dict( + smp.load(os.path.join(checkpoint, OPTIMIZER_NAME), partial=True, back_compat=True) + ) + else: + opt.load_state_dict(smp.load(os.path.join(checkpoint, 
OPTIMIZER_NAME), partial=True)) + + self.model_wrapped.register_post_step_hook(opt_load_hook) + else: + self.optimizer.load_state_dict( + torch.load(os.path.join(checkpoint, OPTIMIZER_NAME), map_location=map_location) + ) + with warnings.catch_warnings(record=True) as caught_warnings: + self.lr_scheduler.load_state_dict(torch.load(os.path.join(checkpoint, SCHEDULER_NAME))) + reissue_pt_warnings(caught_warnings) + if self.do_grad_scaling and os.path.isfile(os.path.join(checkpoint, SCALER_NAME)): + self.scaler.load_state_dict(torch.load(os.path.join(checkpoint, SCALER_NAME))) + + def hyperparameter_search( + self, + hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, + compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, + n_trials: int = 20, + direction: str = "minimize", + backend: Optional[Union["str", HPSearchBackend]] = None, + hp_name: Optional[Callable[["optuna.Trial"], str]] = None, + **kwargs, + ) -> BestRun: + """ + Launch an hyperparameter search using `optuna` or `Ray Tune` or `SigOpt`. The optimized quantity is determined + by `compute_objective`, which defaults to a function returning the evaluation loss when no metric is provided, + the sum of all metrics otherwise. + + + + To use this method, you need to have provided a `model_init` when initializing your [`Trainer`]: we need to + reinitialize the model at each new run. This is incompatible with the `optimizers` argument, so you need to + subclass [`Trainer`] and override the method [`~Trainer.create_optimizer_and_scheduler`] for custom + optimizer/scheduler. + + + + Args: + hp_space (`Callable[["optuna.Trial"], Dict[str, float]]`, *optional*): + A function that defines the hyperparameter search space. Will default to + [`~trainer_utils.default_hp_space_optuna`] or [`~trainer_utils.default_hp_space_ray`] or + [`~trainer_utils.default_hp_space_sigopt`] depending on your backend. 
+ compute_objective (`Callable[[Dict[str, float]], float]`, *optional*): + A function computing the objective to minimize or maximize from the metrics returned by the `evaluate` + method. Will default to [`~trainer_utils.default_compute_objective`]. + n_trials (`int`, *optional*, defaults to 100): + The number of trial runs to test. + direction (`str`, *optional*, defaults to `"minimize"`): + Whether to optimize greater or lower objects. Can be `"minimize"` or `"maximize"`, you should pick + `"minimize"` when optimizing the validation loss, `"maximize"` when optimizing one or several metrics. + backend (`str` or [`~training_utils.HPSearchBackend`], *optional*): + The backend to use for hyperparameter search. Will default to optuna or Ray Tune or SigOpt, depending + on which one is installed. If all are installed, will default to optuna. + hp_name (`Callable[["optuna.Trial"], str]]`, *optional*): + A function that defines the trial/run name. Will default to None. + kwargs (`Dict[str, Any]`, *optional*): + Additional keyword arguments passed along to `optuna.create_study` or `ray.tune.run`. For more + information see: + + - the documentation of + [optuna.create_study](https://optuna.readthedocs.io/en/stable/reference/generated/optuna.study.create_study.html) + - the documentation of [tune.run](https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run) + - the documentation of [sigopt](https://app.sigopt.com/docs/endpoints/experiments/create) + + Returns: + [`trainer_utils.BestRun`]: All the information about the best run. Experiment summary can be found in + `run_summary` attribute for Ray backend. + """ + if backend is None: + backend = default_hp_search_backend() + if backend is None: + raise RuntimeError( + "At least one of optuna or ray should be installed. " + "To install optuna run `pip install optuna`. " + "To install ray run `pip install ray[tune]`. " + "To install sigopt run `pip install sigopt`." 
+ ) + backend = HPSearchBackend(backend) + if backend == HPSearchBackend.OPTUNA and not is_optuna_available(): + raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.") + if backend == HPSearchBackend.RAY and not is_ray_tune_available(): + raise RuntimeError( + "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`." + ) + if backend == HPSearchBackend.SIGOPT and not is_sigopt_available(): + raise RuntimeError("You picked the sigopt backend, but it is not installed. Use `pip install sigopt`.") + if backend == HPSearchBackend.WANDB and not is_wandb_available(): + raise RuntimeError("You picked the wandb backend, but it is not installed. Use `pip install wandb`.") + self.hp_search_backend = backend + if self.model_init is None: + raise RuntimeError( + "To use hyperparameter search, you need to pass your model through a model_init function." + ) + + self.hp_space = default_hp_space[backend] if hp_space is None else hp_space + self.hp_name = hp_name + self.compute_objective = default_compute_objective if compute_objective is None else compute_objective + + backend_dict = { + HPSearchBackend.OPTUNA: run_hp_search_optuna, + HPSearchBackend.RAY: run_hp_search_ray, + HPSearchBackend.SIGOPT: run_hp_search_sigopt, + HPSearchBackend.WANDB: run_hp_search_wandb, + } + best_run = backend_dict[backend](self, n_trials, direction, **kwargs) + + self.hp_search_backend = None + return best_run + + def log(self, logs: Dict[str, float]) -> None: + """ + Log `logs` on the various objects watching training. + + Subclass and override this method to inject custom behavior. + + Args: + logs (`Dict[str, float]`): + The values to log. 
+ """ + if self.state.epoch is not None: + logs["epoch"] = round(self.state.epoch, 2) + + output = {**logs, **{"step": self.state.global_step}} + self.state.log_history.append(output) + self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) + + def _prepare_input(self, data: Union[torch.Tensor, Any]) -> Union[torch.Tensor, Any]: + """ + Prepares one `data` before feeding it to the model, be it a tensor or a nested list/dictionary of tensors. + """ + if isinstance(data, Mapping): + return type(data)({k: self._prepare_input(v) for k, v in data.items()}) + elif isinstance(data, (tuple, list)): + return type(data)(self._prepare_input(v) for v in data) + elif isinstance(data, torch.Tensor): + kwargs = {"device": self.args.device} + if self.deepspeed and (torch.is_floating_point(data) or torch.is_complex(data)): + # NLP models inputs are int/uint and those get adjusted to the right dtype of the + # embedding. Other models such as wav2vec2's inputs are already float and thus + # may need special handling to match the dtypes of the model + kwargs.update({"dtype": self.args.hf_deepspeed_config.dtype()}) + return data.to(**kwargs) + return data + + def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: + """ + Prepare `inputs` before feeding them to the model, converting them to tensors if they are not already and + handling potential state. + """ + inputs = self._prepare_input(inputs) + if len(inputs) == 0: + raise ValueError( + "The batch received was empty, your model won't be able to train on it. Double-check that your " + f"training dataset contains keys expected by the model: {','.join(self._signature_columns)}." + ) + if self.args.past_index >= 0 and self._past is not None: + inputs["mems"] = self._past + + return inputs + + def compute_loss_context_manager(self): + """ + A helper wrapper to group together context managers. 
+ """ + return self.autocast_smart_context_manager() + + def autocast_smart_context_manager(self, cache_enabled: Optional[bool] = True): + """ + A helper wrapper that creates an appropriate context manager for `autocast` while feeding it the desired + arguments, depending on the situation. + """ + if self.use_cuda_amp or self.use_cpu_amp: + if is_torch_greater_or_equal_than_1_10: + ctx_manager = ( + torch.cpu.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + if self.use_cpu_amp + else torch.cuda.amp.autocast(cache_enabled=cache_enabled, dtype=self.amp_dtype) + ) + else: + ctx_manager = torch.cuda.amp.autocast() + else: + ctx_manager = contextlib.nullcontext() if sys.version_info >= (3, 7) else contextlib.suppress() + + return ctx_manager + + def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: + """ + Perform a training step on a batch of inputs. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to train. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + + Return: + `torch.Tensor`: The tensor with training loss on this batch. 
+ """ + model.train() + inputs = self._prepare_inputs(inputs) + + if is_sagemaker_mp_enabled(): + loss_mb = smp_forward_backward(model, inputs, self.args.gradient_accumulation_steps) + return loss_mb.reduce_mean().detach().to(self.args.device) + + with self.compute_loss_context_manager(): + loss = self.compute_loss(model, inputs) + + if self.args.n_gpu > 1: + loss = loss.mean() # mean() to average on multi-gpu parallel training + + if self.args.gradient_accumulation_steps > 1 and not self.deepspeed: + # deepspeed handles loss scaling by gradient_accumulation_steps in its `backward` + loss = loss / self.args.gradient_accumulation_steps + + if self.do_grad_scaling: + self.scaler.scale(loss).backward() + elif self.use_apex: + with amp.scale_loss(loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + elif self.deepspeed: + # loss gets scaled under gradient_accumulation_steps in deepspeed + loss = self.deepspeed.backward(loss) + else: + loss.backward() + + return loss.detach() + + def compute_loss(self, model, inputs, return_outputs=False): + """ + How the loss is computed by Trainer. By default, all models return the loss in the first element. + + Subclass and override for custom behavior. + """ + if self.label_smoother is not None and "labels" in inputs: + labels = inputs.pop("labels") + else: + labels = None + outputs = model(**inputs) + # Save past state if it exists + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index] + + if labels is not None: + if unwrap_model(model)._get_name() in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES.values(): + loss = self.label_smoother(outputs, labels, shift_labels=True) + else: + loss = self.label_smoother(outputs, labels) + else: + if isinstance(outputs, dict) and "loss" not in outputs: + raise ValueError( + "The model did not return a loss from the inputs, only the following keys: " + f"{','.join(outputs.keys())}. 
For reference, the inputs it received are {','.join(inputs.keys())}." + ) + # We don't use .loss here since the model may return tuples instead of ModelOutput. + loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0] + + return (loss, outputs) if return_outputs else loss + + def is_local_process_zero(self) -> bool: + """ + Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several + machines) main process. + """ + return self.args.local_process_index == 0 + + def is_world_process_zero(self) -> bool: + """ + Whether or not this process is the global main process (when training in a distributed fashion on several + machines, this is only going to be `True` for one process). + """ + # Special case for SageMaker ModelParallel since there process_index is dp_process_index, not the global + # process index. + if is_sagemaker_mp_enabled(): + return smp.rank() == 0 + else: + return self.args.process_index == 0 + + def save_model(self, output_dir: Optional[str] = None, _internal_call: bool = False): + """ + Will save the model, so you can reload it using `from_pretrained()`. + + Will only save from the main process. + """ + + if output_dir is None: + output_dir = self.args.output_dir + + if is_torch_tpu_available(): + self._save_tpu(output_dir) + elif is_sagemaker_mp_enabled(): + # Calling the state_dict needs to be done on the wrapped model and on all processes. 
+ os.makedirs(output_dir, exist_ok=True) + state_dict = self.model_wrapped.state_dict() + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + if IS_SAGEMAKER_MP_POST_1_10: + # 'user_content.pt' indicates model state_dict saved with smp >= 1.10 + Path(os.path.join(output_dir, "user_content.pt")).touch() + elif ( + ShardedDDPOption.ZERO_DP_2 in self.args.sharded_ddp + or ShardedDDPOption.ZERO_DP_3 in self.args.sharded_ddp + or self.fsdp is not None + ): + state_dict = self.model.state_dict() + + if self.args.should_save: + self._save(output_dir, state_dict=state_dict) + elif self.deepspeed: + # this takes care of everything as long as we aren't under zero3 + if self.args.should_save: + self._save(output_dir) + + if is_deepspeed_zero3_enabled(): + # It's too complicated to try to override different places where the weights dump gets + # saved, so since under zero3 the file is bogus, simply delete it. The user should + # either user deepspeed checkpoint to resume or to recover full weights use + # zero_to_fp32.py stored in the checkpoint. + if self.args.should_save: + file = os.path.join(output_dir, WEIGHTS_NAME) + if os.path.isfile(file): + # logger.info(f"deepspeed zero3: removing {file}, see zero_to_fp32.py to recover weights") + os.remove(file) + + # now save the real model if stage3_gather_16bit_weights_on_model_save=True + # if false it will not be saved. + # This must be called on all ranks + if not self.deepspeed.save_16bit_model(output_dir, WEIGHTS_NAME): + logger.warning( + "deepspeed.save_16bit_model didn't save the model, since" + " stage3_gather_16bit_weights_on_model_save=false. Saving the full checkpoint instead, use" + " zero_to_fp32.py to recover weights" + ) + self.deepspeed.save_checkpoint(output_dir) + + elif self.args.should_save: + self._save(output_dir) + + # Push to the Hub when `save_model` is called by the user. 
+ if self.args.push_to_hub and not _internal_call: + self.push_to_hub(commit_message="Model save") + + def _save_tpu(self, output_dir: Optional[str] = None): + output_dir = output_dir if output_dir is not None else self.args.output_dir + logger.info(f"Saving model checkpoint to {output_dir}") + + if xm.is_master_ordinal(): + os.makedirs(output_dir, exist_ok=True) + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + # Save a trained model and configuration using `save_pretrained()`. + # They can then be reloaded using `from_pretrained()` + xm.rendezvous("saving_checkpoint") + if not isinstance(self.model, PreTrainedModel): + if isinstance(unwrap_model(self.model), PreTrainedModel): + unwrap_model(self.model).save_pretrained( + output_dir, + is_main_process=self.args.should_save, + state_dict=self.model.state_dict(), + save_function=xm.save, + ) + else: + logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") + state_dict = self.model.state_dict() + xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + else: + self.model.save_pretrained(output_dir, is_main_process=self.args.should_save, save_function=xm.save) + if self.tokenizer is not None and self.args.should_save: + self.tokenizer.save_pretrained(output_dir) + + def _save(self, output_dir: Optional[str] = None, state_dict=None): + # If we are executing this function, we are the process zero, so we don't check for that. + output_dir = output_dir if output_dir is not None else self.args.output_dir + os.makedirs(output_dir, exist_ok=True) + logger.info(f"Saving model checkpoint to {output_dir}") + # Save a trained model and configuration using `save_pretrained()`. 
+ # They can then be reloaded using `from_pretrained()` + if not isinstance(self.model, PreTrainedModel): + if isinstance(unwrap_model(self.model), PreTrainedModel): + if state_dict is None: + state_dict = self.model.state_dict() + unwrap_model(self.model).save_pretrained(output_dir, state_dict=filtered_state_dict) + else: + logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") + if state_dict is None: + state_dict = self.model.state_dict() + torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) + else: + if self.save_prefixencoder: + print("Saving PrefixEncoder") + state_dict = self.model.state_dict() + filtered_state_dict = {} + for k, v in self.model.named_parameters(): + if v.requires_grad: + filtered_state_dict[k] = state_dict[k] + self.model.save_pretrained(output_dir, state_dict=filtered_state_dict) + else: + print("Saving the whole model") + self.model.save_pretrained(output_dir, state_dict=state_dict) + if self.tokenizer is not None: + self.tokenizer.save_pretrained(output_dir) + + # Good practice: save your training arguments together with the trained model + torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME)) + + def store_flos(self): + # Storing the number of floating-point operations that went into the model + if self.args.local_rank != -1: + self.state.total_flos += ( + distributed_broadcast_scalars([self.current_flos], device=self.args.device).sum().item() + ) + self.current_flos = 0 + else: + self.state.total_flos += self.current_flos + self.current_flos = 0 + + def _sorted_checkpoints( + self, output_dir=None, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False + ) -> List[str]: + ordering_and_checkpoint_path = [] + + glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)] + + for path in glob_checkpoints: + if use_mtime: + ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) + else: + regex_match = 
re.match(f".*{checkpoint_prefix}-([0-9]+)", path) + if regex_match is not None and regex_match.groups() is not None: + ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) + + checkpoints_sorted = sorted(ordering_and_checkpoint_path) + checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] + # Make sure we don't delete the best model. + if self.state.best_model_checkpoint is not None: + best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) + for i in range(best_model_index, len(checkpoints_sorted) - 2): + checkpoints_sorted[i], checkpoints_sorted[i + 1] = checkpoints_sorted[i + 1], checkpoints_sorted[i] + return checkpoints_sorted + + def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None: + if self.args.save_total_limit is None or self.args.save_total_limit <= 0: + return + + # Check if we should delete older checkpoint(s) + checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime, output_dir=output_dir) + if len(checkpoints_sorted) <= self.args.save_total_limit: + return + + # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which + # we don't do to allow resuming. 
+ save_total_limit = self.args.save_total_limit + if ( + self.state.best_model_checkpoint is not None + and self.args.save_total_limit == 1 + and checkpoints_sorted[-1] != self.state.best_model_checkpoint + ): + save_total_limit = 2 + + number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit) + checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] + for checkpoint in checkpoints_to_be_deleted: + logger.info(f"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit") + shutil.rmtree(checkpoint, ignore_errors=True) + + def evaluate( + self, + eval_dataset: Optional[Dataset] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> Dict[str, float]: + """ + Run evaluation and returns metrics. + + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + + You can also subclass and override this method to inject custom behavior. + + Args: + eval_dataset (`Dataset`, *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is a [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` + method. + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is "eval" (default) + + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The + dictionary also contains the epoch number which comes from the training state. 
+ """ + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + eval_dataloader = self.get_eval_dataloader(eval_dataset) + start_time = time.time() + + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + output = eval_loop( + eval_dataloader, + description="Evaluation", + # No point gathering the predictions if there are no metrics, otherwise we defer to + # self.args.prediction_loss_only + prediction_loss_only=True if self.compute_metrics is None else None, + ignore_keys=ignore_keys, + metric_key_prefix=metric_key_prefix, + ) + + total_batch_size = self.args.eval_batch_size * self.args.world_size + if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: + start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) + + self.log(output.metrics) + + if DebugOption.TPU_METRICS_DEBUG in self.args.debug: + # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) + xm.master_print(met.metrics_report()) + + self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) + + self._memory_tracker.stop_and_update_metrics(output.metrics) + + return output.metrics + + def predict( + self, test_dataset: Dataset, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "test" + ) -> PredictionOutput: + """ + Run prediction and returns predictions and potential metrics. + + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. If it is an `datasets.Dataset`, columns not accepted by the + `model.forward()` method are automatically removed. 
Has to implement the method `__len__` + ignore_keys (`Lst[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"test"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "test_bleu" if the prefix is "test" (default) + + + + If your predictions or labels have different sequence length (for instance because you're doing dynamic padding + in a token classification task) the predictions will be padded (on the right) to allow for concatenation into + one array. The padding index is -100. + + + + Returns: *NamedTuple* A namedtuple with the following keys: + + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). + - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). 
+ """ + # memory metrics - must set up as early as possible + self._memory_tracker.start() + + test_dataloader = self.get_test_dataloader(test_dataset) + start_time = time.time() + + eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop + output = eval_loop( + test_dataloader, description="Prediction", ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix + ) + total_batch_size = self.args.eval_batch_size * self.args.world_size + if f"{metric_key_prefix}_jit_compilation_time" in output.metrics: + start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"] + output.metrics.update( + speed_metrics( + metric_key_prefix, + start_time, + num_samples=output.num_samples, + num_steps=math.ceil(output.num_samples / total_batch_size), + ) + ) + + self.control = self.callback_handler.on_predict(self.args, self.state, self.control, output.metrics) + self._memory_tracker.stop_and_update_metrics(output.metrics) + + return PredictionOutput(predictions=output.predictions, label_ids=output.label_ids, metrics=output.metrics) + + def evaluation_loop( + self, + dataloader: DataLoader, + description: str, + prediction_loss_only: Optional[bool] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + ) -> EvalLoopOutput: + """ + Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`. + + Works both with or without labels. 
+ """ + args = self.args + + prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only + + # if eval is called w/o train init deepspeed here + if args.deepspeed and not self.deepspeed: + # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval + # from the checkpoint eventually + deepspeed_engine, _, _ = deepspeed_init( + self, num_training_steps=0, resume_from_checkpoint=None, inference=True + ) + self.model = deepspeed_engine.module + self.model_wrapped = deepspeed_engine + self.deepspeed = deepspeed_engine + + model = self._wrap_model(self.model, training=False, dataloader=dataloader) + + # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called + # while ``train`` is running, cast it to the right dtype first and then put on device + if not self.is_in_train: + if args.fp16_full_eval: + model = model.to(dtype=torch.float16, device=args.device) + elif args.bf16_full_eval: + model = model.to(dtype=torch.bfloat16, device=args.device) + + batch_size = self.args.eval_batch_size + + logger.info(f"***** Running {description} *****") + if has_length(dataloader): + logger.info(f" Num examples = {self.num_examples(dataloader)}") + else: + logger.info(" Num examples: Unknown") + logger.info(f" Batch size = {batch_size}") + + model.eval() + + self.callback_handler.eval_dataloader = dataloader + # Do this before wrapping. 
+ eval_dataset = getattr(dataloader, "dataset", None) + + if is_torch_tpu_available(): + dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device) + + if args.past_index >= 0: + self._past = None + + # Initialize containers + # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps) + losses_host = None + preds_host = None + labels_host = None + inputs_host = None + + # losses/preds/labels on CPU (final containers) + all_losses = None + all_preds = None + all_labels = None + all_inputs = None + # Will be useful when we have an iterable dataset so don't know its length. + + observed_num_examples = 0 + # Main evaluation loop + for step, inputs in enumerate(dataloader): + # Update the observed num examples + observed_batch_size = find_batch_size(inputs) + if observed_batch_size is not None: + observed_num_examples += observed_batch_size + # For batch samplers, batch_size is not known by the dataloader in advance. + if batch_size is None: + batch_size = observed_batch_size + + # Prediction step + loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) + inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None + + if is_torch_tpu_available(): + xm.mark_step() + + # Update containers on host + if loss is not None: + losses = self._nested_gather(loss.repeat(batch_size)) + losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) + if labels is not None: + labels = self._pad_across_processes(labels) + labels = self._nested_gather(labels) + labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) + if inputs_decode is not None: + inputs_decode = self._pad_across_processes(inputs_decode) + inputs_decode = self._nested_gather(inputs_decode) + inputs_host = ( + inputs_decode + if inputs_host is None + else nested_concat(inputs_host, inputs_decode, 
padding_index=-100) + ) + if logits is not None: + logits = self._pad_across_processes(logits) + logits = self._nested_gather(logits) + if self.preprocess_logits_for_metrics is not None: + logits = self.preprocess_logits_for_metrics(logits, labels) + preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) + self.control = self.callback_handler.on_prediction_step(args, self.state, self.control) + + # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. + if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0: + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode + if all_inputs is None + else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = ( + labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + ) + + # Set back to None to begin a new accumulation + losses_host, preds_host, inputs_host, labels_host = None, None, None, None + + if args.past_index and hasattr(self, "_past"): + # Clean the state at the end of the evaluation loop + delattr(self, "_past") + + # Gather all remaining tensors and put them back on the CPU + if losses_host is not None: + losses = nested_numpify(losses_host) + all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0) + if preds_host is not None: + logits = nested_numpify(preds_host) + all_preds = logits if all_preds is None else nested_concat(all_preds, logits, 
padding_index=-100) + if inputs_host is not None: + inputs_decode = nested_numpify(inputs_host) + all_inputs = ( + inputs_decode if all_inputs is None else nested_concat(all_inputs, inputs_decode, padding_index=-100) + ) + if labels_host is not None: + labels = nested_numpify(labels_host) + all_labels = labels if all_labels is None else nested_concat(all_labels, labels, padding_index=-100) + + # Number of samples + if has_length(eval_dataset): + num_samples = len(eval_dataset) + # The instance check is weird and does not actually check for the type, but whether the dataset has the right + # methods. Therefore we need to make sure it also has the attribute. + elif isinstance(eval_dataset, IterableDatasetShard) and getattr(eval_dataset, "num_examples", 0) > 0: + num_samples = eval_dataset.num_examples + else: + if has_length(dataloader): + num_samples = self.num_examples(dataloader) + else: # both len(dataloader.dataset) and len(dataloader) fail + num_samples = observed_num_examples + if num_samples == 0 and observed_num_examples > 0: + num_samples = observed_num_examples + + # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of + # samplers has been rounded to a multiple of batch_size, so we truncate. + if all_losses is not None: + all_losses = all_losses[:num_samples] + if all_preds is not None: + all_preds = nested_truncate(all_preds, num_samples) + if all_labels is not None: + all_labels = nested_truncate(all_labels, num_samples) + if all_inputs is not None: + all_inputs = nested_truncate(all_inputs, num_samples) + + # Metrics! 
        # ---- tail of `evaluation_loop` (definition starts above this chunk) ----
        # Compute metrics only when we have both predictions and labels to feed them.
        if self.compute_metrics is not None and all_preds is not None and all_labels is not None:
            if args.include_inputs_for_metrics:
                metrics = self.compute_metrics(
                    EvalPrediction(predictions=all_preds, label_ids=all_labels, inputs=all_inputs)
                )
            else:
                metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        else:
            metrics = {}

        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)

        if all_losses is not None:
            metrics[f"{metric_key_prefix}_loss"] = all_losses.mean().item()
        if hasattr(self, "jit_compilation_time"):
            metrics[f"{metric_key_prefix}_jit_compilation_time"] = self.jit_compilation_time

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)

    def _nested_gather(self, tensors, name=None):
        """
        Gather `tensors` (a tensor or a nested list/tuple of tensors) across processes.

        Exactly one gather strategy is used, in priority order: XLA mesh reduce on TPU,
        SageMaker model-parallel gather, or `torch.distributed` concat when
        `local_rank != -1`. On a single process the input is returned unchanged.

        Note: despite what callers might expect, no numpy conversion happens here — see
        `_gather_and_numpify` for the gathering + numpify combination.

        Returns `None` when `tensors` is `None`.
        """
        if tensors is None:
            return
        if is_torch_tpu_available():
            if name is None:
                name = "nested_gather"
            tensors = nested_xla_mesh_reduce(tensors, name)
        elif is_sagemaker_mp_enabled():
            tensors = smp_gather(tensors)
        elif self.args.local_rank != -1:
            tensors = distributed_concat(tensors)
        return tensors

    # Copied from Accelerate.
    def _pad_across_processes(self, tensor, pad_index=-100):
        """
        Recursively pad the tensors in a nested list/tuple/dictionary of tensors from all devices to the same size so
        they can safely be gathered.

        Padding only applies to dimension 1 (the sequence dimension); tensors of rank < 2
        are returned untouched. New positions are filled with `pad_index`.
        """
        if isinstance(tensor, (list, tuple)):
            return type(tensor)(self._pad_across_processes(t, pad_index=pad_index) for t in tensor)
        elif isinstance(tensor, dict):
            return type(tensor)({k: self._pad_across_processes(v, pad_index=pad_index) for k, v in tensor.items()})
        elif not isinstance(tensor, torch.Tensor):
            raise TypeError(
                f"Can't pad the values of type {type(tensor)}, only of nested list/tuple/dicts of tensors."
            )

        if len(tensor.shape) < 2:
            return tensor
        # Gather all sizes so every process agrees on the target (maximum) dim-1 size.
        size = torch.tensor(tensor.shape, device=tensor.device)[None]
        sizes = self._nested_gather(size).cpu()

        max_size = max(s[1] for s in sizes)
        # When extracting XLA graphs for compilation, max_size is 0,
        # so use inequality (>=) rather than equality to avoid errors.
        if tensor.shape[1] >= max_size:
            return tensor

        # Then pad to the maximum size
        old_size = tensor.shape
        new_size = list(old_size)
        new_size[1] = max_size
        new_tensor = tensor.new_zeros(tuple(new_size)) + pad_index
        new_tensor[:, : old_size[1]] = tensor
        return new_tensor
+ + Return: + Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, + logits and labels (each being optional). + """ + has_labels = False if len(self.label_names) == 0 else all(inputs.get(k) is not None for k in self.label_names) + # For CLIP-like models capable of returning loss values. + # If `return_loss` is not specified or being `None` in `inputs`, we check if the default value of `return_loss` + # is `True` in `model.forward`. + return_loss = inputs.get("return_loss", None) + if return_loss is None: + return_loss = self.can_return_loss + loss_without_labels = True if len(self.label_names) == 0 and return_loss else False + + inputs = self._prepare_inputs(inputs) + if ignore_keys is None: + if hasattr(self.model, "config"): + ignore_keys = getattr(self.model.config, "keys_to_ignore_at_inference", []) + else: + ignore_keys = [] + + # labels may be popped when computing the loss (label smoothing for instance) so we grab them first. + if has_labels or loss_without_labels: + labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) + if len(labels) == 1: + labels = labels[0] + else: + labels = None + + with torch.no_grad(): + if is_sagemaker_mp_enabled(): + raw_outputs = smp_forward_only(model, inputs) + if has_labels or loss_without_labels: + if isinstance(raw_outputs, dict): + loss_mb = raw_outputs["loss"] + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys + ["loss"]) + else: + loss_mb = raw_outputs[0] + logits_mb = raw_outputs[1:] + + loss = loss_mb.reduce_mean().detach().cpu() + logits = smp_nested_concat(logits_mb) + else: + loss = None + if isinstance(raw_outputs, dict): + logits_mb = tuple(v for k, v in raw_outputs.items() if k not in ignore_keys) + else: + logits_mb = raw_outputs + logits = smp_nested_concat(logits_mb) + else: + if has_labels or loss_without_labels: + with self.compute_loss_context_manager(): + loss, outputs = self.compute_loss(model, inputs, 
return_outputs=True) + loss = loss.mean().detach() + + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys + ["loss"]) + else: + logits = outputs[1:] + else: + loss = None + with self.compute_loss_context_manager(): + outputs = model(**inputs) + if isinstance(outputs, dict): + logits = tuple(v for k, v in outputs.items() if k not in ignore_keys) + else: + logits = outputs + # TODO: this needs to be fixed and made cleaner later. + if self.args.past_index >= 0: + self._past = outputs[self.args.past_index - 1] + + if prediction_loss_only: + return (loss, None, None) + + logits = nested_detach(logits) + if len(logits) == 1: + logits = logits[0] + + return (loss, logits, labels) + + def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): + """ + For models that inherit from [`PreTrainedModel`], uses that method to compute the number of floating point + operations for every backward + forward pass. If using another model, either implement such a method in the + model or subclass and override this method. + + Args: + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + Returns: + `int`: The number of floating-point operations. + """ + if hasattr(self.model, "floating_point_ops"): + return self.model.floating_point_ops(inputs) + else: + return 0 + + def init_git_repo(self, at_init: bool = False): + """ + Initializes a git repo in `self.args.hub_model_id`. + + Args: + at_init (`bool`, *optional*, defaults to `False`): + Whether this function is called before any training or not. If `self.args.overwrite_output_dir` is + `True` and `at_init` is `True`, the path to the repo (which is `self.args.output_dir`) might be wiped + out. 
+ """ + if not self.is_world_process_zero(): + return + if self.args.hub_model_id is None: + repo_name = Path(self.args.output_dir).absolute().name + else: + repo_name = self.args.hub_model_id + if "/" not in repo_name: + repo_name = get_full_repo_name(repo_name, token=self.args.hub_token) + + # Make sure the repo exists. + create_repo(repo_name, token=self.args.hub_token, private=self.args.hub_private_repo, exist_ok=True) + try: + self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token) + except EnvironmentError: + if self.args.overwrite_output_dir and at_init: + # Try again after wiping output_dir + shutil.rmtree(self.args.output_dir) + self.repo = Repository(self.args.output_dir, clone_from=repo_name, token=self.args.hub_token) + else: + raise + + self.repo.git_pull() + + # By default, ignore the checkpoint folders + if ( + not os.path.exists(os.path.join(self.args.output_dir, ".gitignore")) + and self.args.hub_strategy != HubStrategy.ALL_CHECKPOINTS + ): + with open(os.path.join(self.args.output_dir, ".gitignore"), "w", encoding="utf-8") as writer: + writer.writelines(["checkpoint-*/"]) + + # Add "*.sagemaker" to .gitignore if using SageMaker + if os.environ.get("SM_TRAINING_ENV"): + self._add_sm_patterns_to_gitignore() + + self.push_in_progress = None + + def create_model_card( + self, + language: Optional[str] = None, + license: Optional[str] = None, + tags: Union[str, List[str], None] = None, + model_name: Optional[str] = None, + finetuned_from: Optional[str] = None, + tasks: Union[str, List[str], None] = None, + dataset_tags: Union[str, List[str], None] = None, + dataset: Union[str, List[str], None] = None, + dataset_args: Union[str, List[str], None] = None, + ): + """ + Creates a draft of a model card using the information available to the `Trainer`. + + Args: + language (`str`, *optional*): + The language of the model (if applicable) + license (`str`, *optional*): + The license of the model. 
Will default to the license of the pretrained model used, if the original + model given to the `Trainer` comes from a repo on the Hub. + tags (`str` or `List[str]`, *optional*): + Some tags to be included in the metadata of the model card. + model_name (`str`, *optional*): + The name of the model. + finetuned_from (`str`, *optional*): + The name of the model used to fine-tune this one (if applicable). Will default to the name of the repo + of the original model given to the `Trainer` (if it comes from the Hub). + tasks (`str` or `List[str]`, *optional*): + One or several task identifiers, to be included in the metadata of the model card. + dataset_tags (`str` or `List[str]`, *optional*): + One or several dataset tags, to be included in the metadata of the model card. + dataset (`str` or `List[str]`, *optional*): + One or several dataset identifiers, to be included in the metadata of the model card. + dataset_args (`str` or `List[str]`, *optional*): + One or several dataset arguments, to be included in the metadata of the model card. + """ + if not self.is_world_process_zero(): + return + + training_summary = TrainingSummary.from_trainer( + self, + language=language, + license=license, + tags=tags, + model_name=model_name, + finetuned_from=finetuned_from, + tasks=tasks, + dataset_tags=dataset_tags, + dataset=dataset, + dataset_args=dataset_args, + ) + model_card = training_summary.to_model_card() + with open(os.path.join(self.args.output_dir, "README.md"), "w") as f: + f.write(model_card) + + def _push_from_checkpoint(self, checkpoint_folder): + # Only push from one node. + if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END: + return + # If we haven't finished the last push, we don't do this one. 
    def _push_from_checkpoint(self, checkpoint_folder):
        """
        Push the latest checkpoint's model files to the Hub repo (asynchronously), without
        re-synchronizing all model weights: config/weights are copied from
        `checkpoint_folder` into the repo working dir, while tokenizer and training args
        are re-saved directly.
        """
        # Only push from one node.
        if not self.is_world_process_zero() or self.args.hub_strategy == HubStrategy.END:
            return
        # If we haven't finished the last push, we don't do this one.
        if self.push_in_progress is not None and not self.push_in_progress.is_done:
            return

        output_dir = self.args.output_dir
        # To avoid a new synchronization of all model weights, we just copy the file from the checkpoint folder
        modeling_files = [CONFIG_NAME, WEIGHTS_NAME]
        for modeling_file in modeling_files:
            if os.path.isfile(os.path.join(checkpoint_folder, modeling_file)):
                shutil.copy(os.path.join(checkpoint_folder, modeling_file), os.path.join(output_dir, modeling_file))
        # Saving the tokenizer is fast and we don't know how many files it may have spawned, so we resave it to be sure.
        if self.tokenizer is not None:
            self.tokenizer.save_pretrained(output_dir)
        # Same for the training arguments
        torch.save(self.args, os.path.join(output_dir, TRAINING_ARGS_NAME))

        try:
            if self.args.hub_strategy == HubStrategy.CHECKPOINT:
                # Temporarily move the checkpoint just saved for the push
                tmp_checkpoint = os.path.join(output_dir, "last-checkpoint")
                # We have to remove the "last-checkpoint" dir if it exists, otherwise the checkpoint is moved as a
                # subfolder.
                if os.path.isdir(tmp_checkpoint):
                    shutil.rmtree(tmp_checkpoint)
                shutil.move(checkpoint_folder, tmp_checkpoint)

            if self.args.save_strategy == IntervalStrategy.STEPS:
                commit_message = f"Training in progress, step {self.state.global_step}"
            else:
                commit_message = f"Training in progress, epoch {int(self.state.epoch)}"
            # Non-blocking push; the in-flight handle is checked at the top of the next call.
            _, self.push_in_progress = self.repo.push_to_hub(
                commit_message=commit_message, blocking=False, auto_lfs_prune=True
            )
        finally:
            if self.args.hub_strategy == HubStrategy.CHECKPOINT:
                # Move back the checkpoint to its place, even if the push failed.
                shutil.move(tmp_checkpoint, checkpoint_folder)

    def push_to_hub(self, commit_message: Optional[str] = "End of training", blocking: bool = True, **kwargs) -> str:
        """
        Upload *self.model* and *self.tokenizer* to the 🤗 model hub on the repo *self.args.hub_model_id*.

        Parameters:
            commit_message (`str`, *optional*, defaults to `"End of training"`):
                Message to commit while pushing.
            blocking (`bool`, *optional*, defaults to `True`):
                Whether the function should return only when the `git push` has finished.
            kwargs:
                Additional keyword arguments passed along to [`~Trainer.create_model_card`].

        Returns:
            The url of the commit of your model in the given repository if `blocking=False`, a tuple with the url of
            the commit and an object to track the progress of the commit if `blocking=True`
        """
        # If a user calls manually `push_to_hub` with `self.args.push_to_hub = False`, we try to create the repo but
        # it might fail.
        if not hasattr(self, "repo"):
            self.init_git_repo()

        model_name = kwargs.pop("model_name", None)
        if model_name is None and self.args.should_save:
            if self.args.hub_model_id is None:
                model_name = Path(self.args.output_dir).name
            else:
                model_name = self.args.hub_model_id.split("/")[-1]

        # Needs to be executed on all processes for TPU training, but will only save on the processed determined by
        # self.args.should_save.
        self.save_model(_internal_call=True)

        # Only push from one node.
        if not self.is_world_process_zero():
            return

        # Cancel any async push in progress if blocking=True. The commits will all be pushed together.
        if blocking and self.push_in_progress is not None and not self.push_in_progress.is_done:
            self.push_in_progress._process.kill()
            self.push_in_progress = None

        git_head_commit_url = self.repo.push_to_hub(
            commit_message=commit_message, blocking=blocking, auto_lfs_prune=True
        )
        # push separately the model card to be independent from the rest of the model
        if self.args.should_save:
            self.create_model_card(model_name=model_name, **kwargs)
            try:
                self.repo.push_to_hub(
                    commit_message="update model card README.md", blocking=blocking, auto_lfs_prune=True
                )
            except EnvironmentError as exc:
                # NOTE(review): the `$` before `{exc}` looks like a leftover from a JS-style
                # template and renders literally — confirm and drop it upstream if unintended.
                logger.error(f"Error pushing update to the model card. Please read logs and retry.\n${exc}")

        return git_head_commit_url
    #
    # Deprecated code
    #

    def prediction_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.

        Deprecated: kept for backward compatibility (see the "Deprecated code" section
        marker above); `evaluation_loop` is the current implementation. Unlike
        `evaluation_loop`, this requires a sized dataloader.
        """
        args = self.args

        if not has_length(dataloader):
            raise ValueError("dataloader must implement a working __len__")

        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only

        # if eval is called w/o train init deepspeed here
        if args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(self, num_training_steps=0, resume_from_checkpoint=None)
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
            # XXX: we don't need optim/sched for inference, but this needs to be sorted out, since
            # for example the Z3-optimizer is a must for zero3 to work even for inference - what we
            # don't need is the deepspeed basic optimizer which is self.optimizer.optimizer
            deepspeed_engine.optimizer.optimizer = None
            deepspeed_engine.lr_scheduler = None

        model = self._wrap_model(self.model, training=False, dataloader=dataloader)

        # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
        # while ``train`` is running, cast it to the right dtype first and then put on device
        if not self.is_in_train:
            if args.fp16_full_eval:
                model = model.to(dtype=torch.float16, device=args.device)
            elif args.bf16_full_eval:
                model = model.to(dtype=torch.bfloat16, device=args.device)

        batch_size = dataloader.batch_size
        num_examples = self.num_examples(dataloader)
        logger.info(f"***** Running {description} *****")
        logger.info(f"  Num examples = {num_examples}")
        logger.info(f"  Batch size = {batch_size}")
        # Per-accumulation-window device-side containers; flushed into the gatherers below.
        losses_host: torch.Tensor = None
        preds_host: Union[torch.Tensor, List[torch.Tensor]] = None
        labels_host: Union[torch.Tensor, List[torch.Tensor]] = None
        inputs_host: Union[torch.Tensor, List[torch.Tensor]] = None

        world_size = max(1, args.world_size)

        eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size)
        if not prediction_loss_only:
            # The actual number of eval_sample can be greater than num_examples in distributed settings (when we pass
            # a batch size to the sampler)
            make_multiple_of = None
            if hasattr(dataloader, "sampler") and isinstance(dataloader.sampler, SequentialDistributedSampler):
                make_multiple_of = dataloader.sampler.batch_size
            preds_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
            labels_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)
            inputs_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=make_multiple_of)

        model.eval()

        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [args.device]).per_device_loader(args.device)

        if args.past_index >= 0:
            self._past = None

        self.callback_handler.eval_dataloader = dataloader

        for step, inputs in enumerate(dataloader):
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            inputs_decode = self._prepare_input(inputs["input_ids"]) if args.include_inputs_for_metrics else None

            if loss is not None:
                # Per-example loss: repeat the batch mean so truncation by sample count works.
                losses = loss.repeat(batch_size)
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if logits is not None:
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100)
            if labels is not None:
                labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100)
            if inputs_decode is not None:
                inputs_host = (
                    inputs_decode
                    if inputs_host is None
                    else nested_concat(inputs_host, inputs_decode, padding_index=-100)
                )
            self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)

            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
                eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
                if not prediction_loss_only:
                    preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
                    labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
                    inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))

                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host, inputs_host = None, None, None, None

        if args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")

        # Gather all remaining tensors and put them back on the CPU
        eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses"))
        if not prediction_loss_only:
            preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds"))
            labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids"))
            inputs_gatherer.add_arrays(self._gather_and_numpify(inputs_host, "eval_inputs_ids"))

        eval_loss = eval_losses_gatherer.finalize()
        preds = preds_gatherer.finalize() if not prediction_loss_only else None
        label_ids = labels_gatherer.finalize() if not prediction_loss_only else None
        inputs_ids = inputs_gatherer.finalize() if not prediction_loss_only else None

        if self.compute_metrics is not None and preds is not None and label_ids is not None:
            if args.include_inputs_for_metrics:
                metrics = self.compute_metrics(
                    EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids)
                )
            else:
                metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}

        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = denumpify_detensorize(metrics)

        if eval_loss is not None:
            metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item()

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)

        return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples)
label_ids is not None: + if args.include_inputs_for_metrics: + metrics = self.compute_metrics( + EvalPrediction(predictions=preds, label_ids=label_ids, inputs=inputs_ids) + ) + else: + metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) + else: + metrics = {} + + # To be JSON-serializable, we need to remove numpy types or zero-d tensors + metrics = denumpify_detensorize(metrics) + + if eval_loss is not None: + metrics[f"{metric_key_prefix}_loss"] = eval_loss.mean().item() + + # Prefix all keys with metric_key_prefix + '_' + for key in list(metrics.keys()): + if not key.startswith(f"{metric_key_prefix}_"): + metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key) + + return EvalLoopOutput(predictions=preds, label_ids=label_ids, metrics=metrics, num_samples=num_examples) + + def _gather_and_numpify(self, tensors, name): + """ + Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before + concatenating them to `gathered` + """ + if tensors is None: + return + if is_torch_tpu_available(): + tensors = nested_xla_mesh_reduce(tensors, name) + elif is_sagemaker_mp_enabled(): + tensors = smp_gather(tensors) + elif self.args.local_rank != -1: + tensors = distributed_concat(tensors) + + return nested_numpify(tensors) + + def _add_sm_patterns_to_gitignore(self) -> None: + """Add SageMaker Checkpointing patterns to .gitignore file.""" + # Make sure we only do this on the main process + if not self.is_world_process_zero(): + return + + patterns = ["*.sagemaker-uploading", "*.sagemaker-uploaded"] + + # Get current .gitignore content + if os.path.exists(os.path.join(self.repo.local_dir, ".gitignore")): + with open(os.path.join(self.repo.local_dir, ".gitignore"), "r") as f: + current_content = f.read() + else: + current_content = "" + + # Add the patterns to .gitignore + content = current_content + for pattern in patterns: + if pattern not in content: + if content.endswith("\n"): + content += pattern + 
else: + content += f"\n{pattern}" + + # Write the .gitignore file if it has changed + if content != current_content: + with open(os.path.join(self.repo.local_dir, ".gitignore"), "w") as f: + logger.debug(f"Writing .gitignore file. Content: {content}") + f.write(content) + + self.repo.git_add(".gitignore") + + # avoid race condition with git status + time.sleep(0.5) + + if not self.repo.is_repo_clean(): + self.repo.git_commit("Add *.sagemaker patterns to .gitignore.") + self.repo.git_push() diff --git a/ptuning/trainer_seq2seq.py b/ptuning/trainer_seq2seq.py new file mode 100644 index 0000000000000000000000000000000000000000..19d5cf12a274944a3ea3ce689414eab72636e0bd --- /dev/null +++ b/ptuning/trainer_seq2seq.py @@ -0,0 +1,247 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, List, Optional, Tuple, Union + +import torch +from torch import nn +from torch.utils.data import Dataset + +from transformers.deepspeed import is_deepspeed_zero3_enabled +from trainer import Trainer +from transformers.trainer_utils import PredictionOutput +from transformers.utils import logging + + +logger = logging.get_logger(__name__) + + +class Seq2SeqTrainer(Trainer): + def evaluate( + self, + eval_dataset: Optional[Dataset] = None, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "eval", + **gen_kwargs + ) -> Dict[str, float]: + """ + Run evaluation and returns metrics. 
+ + The calling script will be responsible for providing a method to compute metrics, as they are task-dependent + (pass it to the init `compute_metrics` argument). + + You can also subclass and override this method to inject custom behavior. + + Args: + eval_dataset (`Dataset`, *optional*): + Pass a dataset if you wish to override `self.eval_dataset`. If it is an [`~datasets.Dataset`], columns + not accepted by the `model.forward()` method are automatically removed. It must implement the `__len__` + method. + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is `"eval"` (default) + max_length (`int`, *optional*): + The maximum target length to use when predicting with the generate method. + num_beams (`int`, *optional*): + Number of beams for beam search that will be used when predicting with the generate method. 1 means no + beam search. + gen_kwargs: + Additional `generate` specific kwargs. + + Returns: + A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The + dictionary also contains the epoch number which comes from the training state. 
+ """ + + gen_kwargs = gen_kwargs.copy() + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.args.generation_max_length + gen_kwargs["num_beams"] = ( + gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams + ) + self._gen_kwargs = gen_kwargs + + return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) + + def predict( + self, + test_dataset: Dataset, + ignore_keys: Optional[List[str]] = None, + metric_key_prefix: str = "test", + **gen_kwargs + ) -> PredictionOutput: + """ + Run prediction and returns predictions and potential metrics. + + Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method + will also return metrics, like in `evaluate()`. + + Args: + test_dataset (`Dataset`): + Dataset to run the predictions on. If it is a [`~datasets.Dataset`], columns not accepted by the + `model.forward()` method are automatically removed. Has to implement the method `__len__` + ignore_keys (`List[str]`, *optional*): + A list of keys in the output of your model (if it is a dictionary) that should be ignored when + gathering predictions. + metric_key_prefix (`str`, *optional*, defaults to `"eval"`): + An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named + "eval_bleu" if the prefix is `"eval"` (default) + max_length (`int`, *optional*): + The maximum target length to use when predicting with the generate method. + num_beams (`int`, *optional*): + Number of beams for beam search that will be used when predicting with the generate method. 1 means no + beam search. + gen_kwargs: + Additional `generate` specific kwargs. 
+ + + + If your predictions or labels have different sequence lengths (for instance because you're doing dynamic + padding in a token classification task) the predictions will be padded (on the right) to allow for + concatenation into one array. The padding index is -100. + + + + Returns: *NamedTuple* A namedtuple with the following keys: + + - predictions (`np.ndarray`): The predictions on `test_dataset`. + - label_ids (`np.ndarray`, *optional*): The labels (if the dataset contained some). + - metrics (`Dict[str, float]`, *optional*): The potential dictionary of metrics (if the dataset contained + labels). + """ + + gen_kwargs = gen_kwargs.copy() + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.args.generation_max_length + gen_kwargs["num_beams"] = ( + gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams + ) + self._gen_kwargs = gen_kwargs + + + return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix) + + def prediction_step( + self, + model: nn.Module, + inputs: Dict[str, Union[torch.Tensor, Any]], + prediction_loss_only: bool, + ignore_keys: Optional[List[str]] = None, + ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Perform an evaluation step on `model` using `inputs`. + + Subclass and override to inject custom behavior. + + Args: + model (`nn.Module`): + The model to evaluate. + inputs (`Dict[str, Union[torch.Tensor, Any]]`): + The inputs and targets of the model. + + The dictionary will be unpacked before being fed to the model. Most models expect the targets under the + argument `labels`. Check your model's documentation for all accepted arguments. + prediction_loss_only (`bool`): + Whether or not to return the loss only. 
+ + Return: + Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and + labels (each being optional). + """ + + if not self.args.predict_with_generate or prediction_loss_only: + return super().prediction_step( + model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys + ) + + has_labels = "labels" in inputs + inputs = self._prepare_inputs(inputs) + + # XXX: adapt synced_gpus for fairscale as well + gen_kwargs = self._gen_kwargs.copy() + if gen_kwargs.get("max_length") is None and gen_kwargs.get("max_new_tokens") is None: + gen_kwargs["max_length"] = self.model.config.max_length + gen_kwargs["num_beams"] = ( + gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.model.config.num_beams + ) + default_synced_gpus = True if is_deepspeed_zero3_enabled() else False + gen_kwargs["synced_gpus"] = ( + gen_kwargs["synced_gpus"] if gen_kwargs.get("synced_gpus") is not None else default_synced_gpus + ) + + if "attention_mask" in inputs: + gen_kwargs["attention_mask"] = inputs.get("attention_mask", None) + if "position_ids" in inputs: + gen_kwargs["position_ids"] = inputs.get("position_ids", None) + if "global_attention_mask" in inputs: + gen_kwargs["global_attention_mask"] = inputs.get("global_attention_mask", None) + + # prepare generation inputs + # some encoder-decoder models can have varying encoder's and thus + # varying model input names + if hasattr(self.model, "encoder") and self.model.encoder.main_input_name != self.model.main_input_name: + generation_inputs = inputs[self.model.encoder.main_input_name] + else: + generation_inputs = inputs[self.model.main_input_name] + + gen_kwargs["input_ids"] = generation_inputs + generated_tokens = self.model.generate(**gen_kwargs) + generated_tokens = generated_tokens[:, generation_inputs.size()[-1]:] + + # in case the batch is shorter than max length, the output should be padded + if gen_kwargs.get("max_length") is not None and 
generated_tokens.shape[-1] < gen_kwargs["max_length"]: + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"]) + elif gen_kwargs.get("max_new_tokens") is not None and generated_tokens.shape[-1] < ( + gen_kwargs["max_new_tokens"] + 1 + ): + generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_new_tokens"] + 1) + + loss = None + + if self.args.prediction_loss_only: + return (loss, None, None) + + if has_labels: + labels = inputs["labels"] + if gen_kwargs.get("max_length") is not None and labels.shape[-1] < gen_kwargs["max_length"]: + labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"]) + elif gen_kwargs.get("max_new_tokens") is not None and labels.shape[-1] < ( + gen_kwargs["max_new_tokens"] + 1 + ): + labels = self._pad_tensors_to_max_len(labels, (gen_kwargs["max_new_tokens"] + 1)) + else: + labels = None + + return (loss, generated_tokens, labels) + + def _pad_tensors_to_max_len(self, tensor, max_length): + if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"): + # If PAD token is not defined at least EOS token has to be defined + pad_token_id = ( + self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id + ) + else: + if self.model.config.pad_token_id is not None: + pad_token_id = self.model.config.pad_token_id + else: + raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors") + + padded_tensor = pad_token_id * torch.ones( + (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device + ) + padded_tensor[:, : tensor.shape[-1]] = tensor + return padded_tensor diff --git a/ptuning/wandb/debug-internal.log b/ptuning/wandb/debug-internal.log new file mode 120000 index 0000000000000000000000000000000000000000..67e6a4f5d01319921a711910e3f8f856e2e3c997 --- /dev/null +++ b/ptuning/wandb/debug-internal.log @@ -0,0 +1 @@ 
+run-20230421_163626-3b3qqmpu/logs/debug-internal.log \ No newline at end of file diff --git a/ptuning/wandb/debug.log b/ptuning/wandb/debug.log new file mode 120000 index 0000000000000000000000000000000000000000..8b8ad4a9db7108fa97a4297a44e34b591aa2bf09 --- /dev/null +++ b/ptuning/wandb/debug.log @@ -0,0 +1 @@ +run-20230421_163626-3b3qqmpu/logs/debug.log \ No newline at end of file diff --git a/ptuning/wandb/latest-run b/ptuning/wandb/latest-run new file mode 120000 index 0000000000000000000000000000000000000000..55d0e16c23876170be9f31104042b35f29ce408d --- /dev/null +++ b/ptuning/wandb/latest-run @@ -0,0 +1 @@ +run-20230421_163626-3b3qqmpu \ No newline at end of file diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml new file mode 100644 index 0000000000000000000000000000000000000000..16f6ba96f14199a8dd4a2577abf22153f896bc1e --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml @@ -0,0 +1,187 @@ +name: py3 +channels: + - defaults +dependencies: + - _libgcc_mutex=0.1=main + - _openmp_mutex=5.1=1_gnu + - ca-certificates=2023.01.10=h06a4308_0 + - certifi=2022.12.7=py39h06a4308_0 + - ld_impl_linux-64=2.38=h1181459_1 + - libffi=3.4.2=h6a678d5_6 + - libgcc-ng=11.2.0=h1234567_1 + - libgomp=11.2.0=h1234567_1 + - libstdcxx-ng=11.2.0=h1234567_1 + - ncurses=6.4=h6a678d5_0 + - openssl=1.1.1t=h7f8727e_0 + - pip=23.0.1=py39h06a4308_0 + - python=3.9.16=h7a1cb2a_2 + - readline=8.2=h5eee18b_0 + - setuptools=65.6.3=py39h06a4308_0 + - sqlite=3.41.1=h5eee18b_0 + - tk=8.6.12=h1ccaba5_0 + - tzdata=2022g=h04d1e81_0 + - wheel=0.38.4=py39h06a4308_0 + - xz=5.2.10=h5eee18b_1 + - zlib=1.2.13=h5eee18b_0 + - pip: + - absl-py==1.4.0 + - aiofiles==23.1.0 + - aiohttp==3.8.4 + - aiosignal==1.3.1 + - albumentations==1.3.0 + - altair==4.2.2 + - anyio==3.6.2 + - apex==0.1 + - async-timeout==4.0.2 + - attrs==22.2.0 + - bitarray==2.7.3 + - boto3==1.26.94 + - 
botocore==1.29.94 + - braceexpand==0.1.7 + - charset-normalizer==3.1.0 + - click==8.1.3 + - cmake==3.26.0 + - contourpy==1.0.7 + - cpm-kernels==1.0.11 + - cycler==0.11.0 + - dataclasses==0.6 + - datasets==2.10.1 + - deepspeed==0.8.2 + - dill==0.3.6 + - docker-pycreds==0.4.0 + - entrypoints==0.4 + - et-xmlfile==1.1.0 + - exifread-nocycle==3.0.1 + - fastapi==0.95.0 + - fasttext==0.9.2 + - ffmpy==0.3.0 + - filelock==3.10.0 + - fire==0.4.0 + - flask==2.2.3 + - fonttools==4.39.2 + - frozenlist==1.3.3 + - fsspec==2022.11.0 + - ftfy==6.1.1 + - gitdb==4.0.10 + - gitpython==3.1.31 + - gradio==3.22.1 + - h11==0.14.0 + - hjson==3.1.0 + - httpcore==0.16.3 + - httpx==0.23.3 + - huggingface-hub==0.13.2 + - icetk==0.0.4 + - idna==3.4 + - imageio==2.26.0 + - img2dataset==1.41.0 + - importlib-metadata==6.1.0 + - importlib-resources==5.12.0 + - itsdangerous==2.1.2 + - jieba==0.42.1 + - jinja2==3.1.2 + - jmespath==1.0.1 + - joblib==1.2.0 + - jsonschema==4.17.3 + - kiwisolver==1.4.4 + - langdetect==1.0.9 + - lazy-loader==0.1 + - linkify-it-py==2.0.0 + - lit==15.0.7 + - lsh==0.1.2 + - markdown-it-py==2.2.0 + - markupsafe==2.1.2 + - matplotlib==3.7.1 + - mdit-py-plugins==0.3.3 + - mdurl==0.1.2 + - mpmath==1.3.0 + - multidict==6.0.4 + - multiprocess==0.70.14 + - networkx==3.0 + - ninja==1.11.1 + - nltk==3.8.1 + - numpy==1.24.2 + - nvidia-cublas-cu11==11.10.3.66 + - nvidia-cuda-cupti-cu11==11.7.101 + - nvidia-cuda-nvrtc-cu11==11.7.99 + - nvidia-cuda-runtime-cu11==11.7.99 + - nvidia-cudnn-cu11==8.5.0.96 + - nvidia-cufft-cu11==10.9.0.58 + - nvidia-curand-cu11==10.2.10.91 + - nvidia-cusolver-cu11==11.4.0.1 + - nvidia-cusparse-cu11==11.7.4.91 + - nvidia-nccl-cu11==2.14.3 + - nvidia-nvtx-cu11==11.7.91 + - opencv-python-headless==4.7.0.72 + - openpyxl==3.1.2 + - orjson==3.8.7 + - packaging==23.0 + - pandas==1.5.3 + - pathtools==0.1.2 + - pillow==9.4.0 + - promise==2.3 + - protobuf==3.20.0 + - psutil==5.9.4 + - py-cpuinfo==9.0.0 + - py4j==0.10.9.2 + - pyarrow==7.0.0 + - pybind11==2.10.4 + - 
pydantic==1.10.6 + - pydub==0.25.1 + - pyparsing==3.0.9 + - pyrsistent==0.19.3 + - pysnooper==1.1.1 + - pyspark==3.2.0 + - python-dateutil==2.8.2 + - python-multipart==0.0.6 + - pytz==2022.7.1 + - pywavelets==1.4.1 + - pyyaml==6.0 + - qudida==0.0.4 + - regex==2022.10.31 + - requests==2.28.2 + - requests-file==1.5.1 + - responses==0.18.0 + - rfc3986==1.5.0 + - rouge-chinese==1.0.3 + - rouge-score==0.1.2 + - s3transfer==0.6.0 + - scikit-image==0.20.0 + - scikit-learn==1.2.2 + - scipy==1.9.1 + - sentencepiece==0.1.97 + - sentry-sdk==1.17.0 + - setproctitle==1.3.2 + - shortuuid==1.0.11 + - six==1.16.0 + - smmap==5.0.0 + - sniffio==1.3.0 + - starlette==0.26.1 + - swissarmytransformer==0.2.12 + - sympy==1.11.1 + - tensorboardx==2.6 + - termcolor==2.2.0 + - threadpoolctl==3.1.0 + - tifffile==2023.3.15 + - tldextract==3.4.0 + - tokenizers==0.13.2 + - toolz==0.12.0 + - torch==2.0.0 + - torchsnooper==0.8 + - torchvision==0.15.1 + - tqdm==4.65.0 + - transformers==4.27.1 + - triton==2.0.0 + - typing-extensions==4.5.0 + - uc-micro-py==1.0.1 + - unidecode==1.3.6 + - urllib3==1.26.15 + - uvicorn==0.21.1 + - wandb==0.12.21 + - wcwidth==0.2.6 + - webdataset==0.2.43 + - websockets==10.4 + - werkzeug==2.2.3 + - xxhash==3.2.0 + - yarl==1.8.2 + - zipp==3.15.0 +prefix: /home/wangyan/anaconda3/envs/py3 diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c75dfc664e1297896d8eaf30ef27760361fe650 --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml @@ -0,0 +1,629 @@ +wandb_version: 1 + +_name_or_path: + desc: null + value: /home/wangyan/project/hft/uptest +_wandb: + desc: null + value: + cli_version: 0.12.21 + framework: huggingface + huggingface_version: 4.27.1 + is_jupyter_run: false + is_kaggle_kernel: false + m: + - 1: train/global_step + 6: + - 3 + - 1: train/loss + 5: 1 + 6: + - 1 + - 1: 
train/learning_rate + 5: 1 + 6: + - 1 + - 1: train/epoch + 5: 1 + 6: + - 1 + - 1: train/train_runtime + 5: 1 + 6: + - 1 + - 1: train/train_samples_per_second + 5: 1 + 6: + - 1 + - 1: train/train_steps_per_second + 5: 1 + 6: + - 1 + - 1: train/total_flos + 5: 1 + 6: + - 1 + - 1: train/train_loss + 5: 1 + 6: + - 1 + python_version: 3.9.16 + start_time: 1682066186 + t: + 1: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + 2: + - 1 + - 5 + - 11 + - 49 + - 51 + - 53 + - 55 + 3: + - 7 + 4: 3.9.16 + 5: 0.12.21 + 6: 4.27.1 + 8: + - 5 +adafactor: + desc: null + value: false +adam_beta1: + desc: null + value: 0.9 +adam_beta2: + desc: null + value: 0.999 +adam_epsilon: + desc: null + value: 1.0e-08 +add_cross_attention: + desc: null + value: false +architectures: + desc: null + value: + - ChatGLMModel +auto_find_batch_size: + desc: null + value: false +auto_map: + desc: null + value: + AutoConfig: configuration_chatglm.ChatGLMConfig + AutoModel: modeling_chatglm.ChatGLMForConditionalGeneration + AutoModelForSeq2SeqLM: modeling_chatglm.ChatGLMForConditionalGeneration +bad_words_ids: + desc: null + value: null +begin_suppress_tokens: + desc: null + value: null +bf16: + desc: null + value: false +bf16_full_eval: + desc: null + value: false +bos_token_id: + desc: null + value: 130004 +chunk_size_feed_forward: + desc: null + value: 0 +cross_attention_hidden_size: + desc: null + value: null +data_seed: + desc: null + value: None +dataloader_drop_last: + desc: null + value: false +dataloader_num_workers: + desc: null + value: 0 +dataloader_pin_memory: + desc: null + value: true +ddp_bucket_cap_mb: + desc: null + value: None +ddp_find_unused_parameters: + desc: null + value: None +ddp_timeout: + desc: null + value: 1800 +debug: + desc: null + value: '[]' +decoder_start_token_id: + desc: null + value: null +deepspeed: + desc: null + value: None +disable_tqdm: + desc: null + value: false +diversity_penalty: + desc: null + value: 0.0 +do_eval: + desc: null + value: false +do_predict: + 
desc: null + value: false +do_sample: + desc: null + value: false +do_train: + desc: null + value: true +early_stopping: + desc: null + value: false +encoder_no_repeat_ngram_size: + desc: null + value: 0 +eos_token_id: + desc: null + value: 130005 +eval_accumulation_steps: + desc: null + value: None +eval_batch_size: + desc: null + value: 1 +eval_delay: + desc: null + value: 0 +eval_steps: + desc: null + value: None +evaluation_strategy: + desc: null + value: 'no' +exponential_decay_length_penalty: + desc: null + value: null +finetuning_task: + desc: null + value: null +forced_bos_token_id: + desc: null + value: null +forced_eos_token_id: + desc: null + value: null +fp16: + desc: null + value: false +fp16_backend: + desc: null + value: auto +fp16_full_eval: + desc: null + value: false +fp16_opt_level: + desc: null + value: O1 +fsdp: + desc: null + value: '[]' +fsdp_config: + desc: null + value: '{''fsdp_min_num_params'': 0, ''xla'': False, ''xla_fsdp_grad_ckpt'': False}' +fsdp_min_num_params: + desc: null + value: 0 +fsdp_transformer_layer_cls_to_wrap: + desc: null + value: None +full_determinism: + desc: null + value: false +generation_max_length: + desc: null + value: 64 +generation_num_beams: + desc: null + value: None +gmask_token_id: + desc: null + value: 130001 +gradient_accumulation_steps: + desc: null + value: 16 +gradient_checkpointing: + desc: null + value: false +greater_is_better: + desc: null + value: None +group_by_length: + desc: null + value: false +half_precision_backend: + desc: null + value: auto +hidden_size: + desc: null + value: 4096 +hub_model_id: + desc: null + value: None +hub_private_repo: + desc: null + value: false +hub_strategy: + desc: null + value: every_save +hub_token: + desc: null + value: +id2label: + desc: null + value: + '0': LABEL_0 + '1': LABEL_1 +ignore_data_skip: + desc: null + value: false +include_inputs_for_metrics: + desc: null + value: false +inner_hidden_size: + desc: null + value: 16384 +is_decoder: + desc: null + 
value: false +is_encoder_decoder: + desc: null + value: false +jit_mode_eval: + desc: null + value: false +label2id: + desc: null + value: + LABEL_0: 0 + LABEL_1: 1 +label_names: + desc: null + value: None +label_smoothing_factor: + desc: null + value: 0.0 +layernorm_epsilon: + desc: null + value: 1.0e-05 +learning_rate: + desc: null + value: 0.02 +length_column_name: + desc: null + value: length +length_penalty: + desc: null + value: 1.0 +load_best_model_at_end: + desc: null + value: false +local_rank: + desc: null + value: -1 +log_level: + desc: null + value: passive +log_level_replica: + desc: null + value: warning +log_on_each_node: + desc: null + value: true +logging_dir: + desc: null + value: output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19 +logging_first_step: + desc: null + value: false +logging_nan_inf_filter: + desc: null + value: true +logging_steps: + desc: null + value: 10 +logging_strategy: + desc: null + value: steps +lr_scheduler_type: + desc: null + value: linear +mask_token_id: + desc: null + value: 130000 +max_grad_norm: + desc: null + value: 1.0 +max_length: + desc: null + value: 20 +max_sequence_length: + desc: null + value: 2048 +max_steps: + desc: null + value: 100 +metric_for_best_model: + desc: null + value: None +min_length: + desc: null + value: 0 +model_type: + desc: null + value: chatglm +mp_parameters: + desc: null + value: '' +no_cuda: + desc: null + value: false +no_repeat_ngram_size: + desc: null + value: 0 +num_attention_heads: + desc: null + value: 32 +num_beam_groups: + desc: null + value: 1 +num_beams: + desc: null + value: 1 +num_layers: + desc: null + value: 28 +num_return_sequences: + desc: null + value: 1 +num_train_epochs: + desc: null + value: 3.0 +optim: + desc: null + value: adamw_hf +optim_args: + desc: null + value: None +output_attentions: + desc: null + value: false +output_dir: + desc: null + value: output/adgen-chatglm-6b-pt-128-2e-2 +output_hidden_states: + desc: null + value: false 
+output_scores: + desc: null + value: false +overwrite_output_dir: + desc: null + value: true +pad_token_id: + desc: null + value: 3 +past_index: + desc: null + value: -1 +per_device_eval_batch_size: + desc: null + value: 1 +per_device_train_batch_size: + desc: null + value: 1 +per_gpu_eval_batch_size: + desc: null + value: None +per_gpu_train_batch_size: + desc: null + value: None +position_encoding_2d: + desc: null + value: true +pre_seq_len: + desc: null + value: 128 +predict_with_generate: + desc: null + value: true +prediction_loss_only: + desc: null + value: false +prefix: + desc: null + value: null +prefix_projection: + desc: null + value: false +problem_type: + desc: null + value: null +pruned_heads: + desc: null + value: {} +push_to_hub: + desc: null + value: false +push_to_hub_model_id: + desc: null + value: None +push_to_hub_organization: + desc: null + value: None +push_to_hub_token: + desc: null + value: +quantization_bit: + desc: null + value: 4 +ray_scope: + desc: null + value: last +remove_invalid_values: + desc: null + value: false +remove_unused_columns: + desc: null + value: true +repetition_penalty: + desc: null + value: 1.0 +report_to: + desc: null + value: '[''tensorboard'', ''wandb'']' +resume_from_checkpoint: + desc: null + value: None +return_dict: + desc: null + value: true +return_dict_in_generate: + desc: null + value: false +run_name: + desc: null + value: output/adgen-chatglm-6b-pt-128-2e-2 +save_on_each_node: + desc: null + value: false +save_steps: + desc: null + value: 50 +save_strategy: + desc: null + value: steps +save_total_limit: + desc: null + value: None +seed: + desc: null + value: 42 +sep_token_id: + desc: null + value: null +sharded_ddp: + desc: null + value: '[]' +skip_memory_metrics: + desc: null + value: true +sortish_sampler: + desc: null + value: false +suppress_tokens: + desc: null + value: null +task_specific_params: + desc: null + value: null +temperature: + desc: null + value: 1.0 +tf32: + desc: null + value: None 
+tf_legacy_loss: + desc: null + value: false +tie_encoder_decoder: + desc: null + value: false +tie_word_embeddings: + desc: null + value: true +tokenizer_class: + desc: null + value: null +top_k: + desc: null + value: 50 +top_p: + desc: null + value: 1.0 +torch_compile: + desc: null + value: false +torch_compile_backend: + desc: null + value: None +torch_compile_mode: + desc: null + value: None +torch_dtype: + desc: null + value: float16 +torchdynamo: + desc: null + value: None +torchscript: + desc: null + value: false +tpu_metrics_debug: + desc: null + value: false +tpu_num_cores: + desc: null + value: None +train_batch_size: + desc: null + value: 1 +transformers_version: + desc: null + value: 4.27.1 +typical_p: + desc: null + value: 1.0 +use_bfloat16: + desc: null + value: false +use_cache: + desc: null + value: true +use_ipex: + desc: null + value: false +use_legacy_prediction_loop: + desc: null + value: false +use_mps_device: + desc: null + value: false +vocab_size: + desc: null + value: 130528 +warmup_ratio: + desc: null + value: 0.0 +warmup_steps: + desc: null + value: 0 +weight_decay: + desc: null + value: 0.0 +xpu_backend: + desc: null + value: None diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log new file mode 100644 index 0000000000000000000000000000000000000000..e03d8f5e549cb95469459c2425df079f00f9633d --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log @@ -0,0 +1,130 @@ + + 0%| | 0/100 [00:00> Configuration saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/config.json +[INFO|configuration_utils.py:362] 2023-04-21 16:39:47,552 >> Configuration saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/generation_config.json +[INFO|modeling_utils.py:1762] 2023-04-21 16:39:47,698 >> Model weights saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/pytorch_model.bin +[INFO|tokenization_utils_base.py:2163] 2023-04-21 16:39:47,699 
>> tokenizer config file saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/tokenizer_config.json +[INFO|tokenization_utils_base.py:2170] 2023-04-21 16:39:47,699 >> Special tokens file saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-50/special_tokens_map.json + + + + + + + + + 59%|██████████████████████████████████████████████████████████▍ | 59/100 [03:48<02:37, 3.85s/it] + + + + + + + + + + + 69%|████████████████████████████████████████████████████████████████████▎ | 69/100 [04:27<01:58, 3.84s/it] + + + + + + + + + + + 79%|██████████████████████████████████████████████████████████████████████████████▏ | 79/100 [05:05<01:20, 3.84s/it] + + + + + + + + + + + 89%|████████████████████████████████████████████████████████████████████████████████████████ | 89/100 [05:44<00:42, 3.84s/it] + + + + + + + + + + + 99%|██████████████████████████████████████████████████████████████████████████████████████████████████ | 99/100 [06:22<00:03, 3.84s/it] +{'loss': 4.2842, 'learning_rate': 0.0, 'epoch': 0.01} +100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [06:26<00:00, 3.84s/it][INFO|configuration_utils.py:457] 2023-04-21 16:42:59,948 >> Configuration saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/config.json +[INFO|configuration_utils.py:362] 2023-04-21 16:42:59,950 >> Configuration saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/generation_config.json +[INFO|modeling_utils.py:1762] 2023-04-21 16:43:00,095 >> Model weights saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/pytorch_model.bin +[INFO|tokenization_utils_base.py:2163] 2023-04-21 16:43:00,096 >> tokenizer config file saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/tokenizer_config.json +[INFO|tokenization_utils_base.py:2170] 2023-04-21 16:43:00,097 >> Special tokens file saved in output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-100/special_tokens_map.json +{'train_runtime': 395.1154, 
'train_samples_per_second': 4.049, 'train_steps_per_second': 0.253, 'train_loss': 4.456654052734375, 'epoch': 0.01} +***** train metrics ***** + epoch = 0.01 + train_loss = 4.4567 + train_runtime = 0:06:35.11 + train_samples = 114599 + train_samples_per_second = 4.049 + train_steps_per_second = 0.253 +100%|██████████████████████████████████████████████████████████████████████████████████████████████████| 100/100 [06:26<00:00, 3.87s/it] \ No newline at end of file diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/requirements.txt b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..8168a952ebe280823e83355e9f51e5c56f290986 --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/requirements.txt @@ -0,0 +1,164 @@ +absl-py==1.4.0 +aiofiles==23.1.0 +aiohttp==3.8.4 +aiosignal==1.3.1 +albumentations==1.3.0 +altair==4.2.2 +anyio==3.6.2 +apex==0.1 +async-timeout==4.0.2 +attrs==22.2.0 +bitarray==2.7.3 +boto3==1.26.94 +botocore==1.29.94 +braceexpand==0.1.7 +certifi==2022.12.7 +charset-normalizer==3.1.0 +click==8.1.3 +cmake==3.26.0 +contourpy==1.0.7 +cpm-kernels==1.0.11 +cycler==0.11.0 +dataclasses==0.6 +datasets==2.10.1 +deepspeed==0.8.2 +dill==0.3.6 +docker-pycreds==0.4.0 +entrypoints==0.4 +et-xmlfile==1.1.0 +exifread-nocycle==3.0.1 +fastapi==0.95.0 +fasttext==0.9.2 +ffmpy==0.3.0 +filelock==3.10.0 +fire==0.4.0 +flask==2.2.3 +fonttools==4.39.2 +frozenlist==1.3.3 +fsspec==2022.11.0 +ftfy==6.1.1 +gitdb==4.0.10 +gitpython==3.1.31 +gradio==3.22.1 +h11==0.14.0 +hjson==3.1.0 +httpcore==0.16.3 +httpx==0.23.3 +huggingface-hub==0.13.2 +icetk==0.0.4 +idna==3.4 +imageio==2.26.0 +img2dataset==1.41.0 +importlib-metadata==6.1.0 +importlib-resources==5.12.0 +itsdangerous==2.1.2 +jieba==0.42.1 +jinja2==3.1.2 +jmespath==1.0.1 +joblib==1.2.0 +jsonschema==4.17.3 +kiwisolver==1.4.4 +langdetect==1.0.9 +lazy-loader==0.1 +linkify-it-py==2.0.0 +lit==15.0.7 +lsh==0.1.2 +markdown-it-py==2.2.0 
+markupsafe==2.1.2 +matplotlib==3.7.1 +mdit-py-plugins==0.3.3 +mdurl==0.1.2 +mpmath==1.3.0 +multidict==6.0.4 +multiprocess==0.70.14 +networkx==3.0 +ninja==1.11.1 +nltk==3.8.1 +numpy==1.24.2 +nvidia-cublas-cu11==11.10.3.66 +nvidia-cuda-cupti-cu11==11.7.101 +nvidia-cuda-nvrtc-cu11==11.7.99 +nvidia-cuda-runtime-cu11==11.7.99 +nvidia-cudnn-cu11==8.5.0.96 +nvidia-cufft-cu11==10.9.0.58 +nvidia-curand-cu11==10.2.10.91 +nvidia-cusolver-cu11==11.4.0.1 +nvidia-cusparse-cu11==11.7.4.91 +nvidia-nccl-cu11==2.14.3 +nvidia-nvtx-cu11==11.7.91 +opencv-python-headless==4.7.0.72 +openpyxl==3.1.2 +orjson==3.8.7 +packaging==23.0 +pandas==1.5.3 +pathtools==0.1.2 +pillow==9.4.0 +pip==23.0.1 +promise==2.3 +protobuf==3.20.0 +psutil==5.9.4 +py-cpuinfo==9.0.0 +py4j==0.10.9.2 +pyarrow==7.0.0 +pybind11==2.10.4 +pydantic==1.10.6 +pydub==0.25.1 +pyparsing==3.0.9 +pyrsistent==0.19.3 +pysnooper==1.1.1 +pyspark==3.2.0 +python-dateutil==2.8.2 +python-multipart==0.0.6 +pytz==2022.7.1 +pywavelets==1.4.1 +pyyaml==6.0 +qudida==0.0.4 +regex==2022.10.31 +requests-file==1.5.1 +requests==2.28.2 +responses==0.18.0 +rfc3986==1.5.0 +rouge-chinese==1.0.3 +rouge-score==0.1.2 +s3transfer==0.6.0 +scikit-image==0.20.0 +scikit-learn==1.2.2 +scipy==1.9.1 +sentencepiece==0.1.97 +sentry-sdk==1.17.0 +setproctitle==1.3.2 +setuptools==65.6.3 +shortuuid==1.0.11 +six==1.16.0 +smmap==5.0.0 +sniffio==1.3.0 +starlette==0.26.1 +swissarmytransformer==0.2.12 +sympy==1.11.1 +tensorboardx==2.6 +termcolor==2.2.0 +threadpoolctl==3.1.0 +tifffile==2023.3.15 +tldextract==3.4.0 +tokenizers==0.13.2 +toolz==0.12.0 +torch==2.0.0 +torchsnooper==0.8 +torchvision==0.15.1 +tqdm==4.65.0 +transformers==4.27.1 +triton==2.0.0 +typing-extensions==4.5.0 +uc-micro-py==1.0.1 +unidecode==1.3.6 +urllib3==1.26.15 +uvicorn==0.21.1 +wandb==0.12.21 +wcwidth==0.2.6 +webdataset==0.2.43 +websockets==10.4 +werkzeug==2.2.3 +wheel==0.38.4 +xxhash==3.2.0 +yarl==1.8.2 +zipp==3.15.0 \ No newline at end of file diff --git 
a/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-metadata.json b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-metadata.json new file mode 100644 index 0000000000000000000000000000000000000000..1997713bccaf922c493520682e311f215bb422c5 --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-metadata.json @@ -0,0 +1,63 @@ +{ + "os": "Linux-5.15.0-60-generic-x86_64-with-glibc2.35", + "python": "3.9.16", + "heartbeatAt": "2023-04-21T08:36:30.595299", + "startedAt": "2023-04-21T08:36:26.362657", + "docker": null, + "gpu": "NVIDIA GeForce RTX 3090", + "gpu_count": 8, + "cpu_count": 96, + "cuda": null, + "args": [ + "--do_train", + "--train_file", + "AdvertiseGen/train.json", + "--validation_file", + "AdvertiseGen/dev.json", + "--prompt_column", + "content", + "--response_column", + "summary", + "--overwrite_cache", + "--model_name_or_path", + "/home/wangyan/project/hft/uptest", + "--output_dir", + "output/adgen-chatglm-6b-pt-128-2e-2", + "--overwrite_output_dir", + "--max_source_length", + "64", + "--max_target_length", + "64", + "--per_device_train_batch_size", + "1", + "--per_device_eval_batch_size", + "1", + "--gradient_accumulation_steps", + "16", + "--predict_with_generate", + "--max_steps", + "100", + "--logging_steps", + "10", + "--save_steps", + "50", + "--learning_rate", + "2e-2", + "--pre_seq_len", + "128", + "--quantization_bit", + "4" + ], + "state": "running", + "program": "/home/wangyan/project/hft/uptest/ptuning/main.py", + "codePath": "ptuning/main.py", + "git": { + "remote": "https://huggingface.co/lykeven/uptest", + "commit": "c20d69300cdd7e377680b5d0308f2ca796447024" + }, + "email": null, + "root": "/home/wangyan/project/hft/uptest", + "host": "10-254-135-19", + "username": "wangyan", + "executable": "/home/wangyan/anaconda3/envs/py3/bin/python3" +} diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json new file mode 
100644 index 0000000000000000000000000000000000000000..d2090b8456a96a1a3197b974038607e99392f593 --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json @@ -0,0 +1 @@ +{"train/loss": 4.2842, "train/learning_rate": 0.0, "train/epoch": 0.01, "train/global_step": 100, "_timestamp": 1682066580, "_runtime": 394, "_step": 10, "train/train_runtime": 395.1154, "train/train_samples_per_second": 4.049, "train/train_steps_per_second": 0.253, "train/total_flos": 3466572123340800.0, "train/train_loss": 4.456654052734375, "_wandb": {"runtime": 392}} \ No newline at end of file diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug-internal.log b/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug-internal.log new file mode 100644 index 0000000000000000000000000000000000000000..e594846611efbc0dbd1115bbedbd67523210b3f0 --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug-internal.log @@ -0,0 +1,393 @@ +2023-04-21 16:36:26,895 INFO MainThread:1908068 [internal.py:wandb_internal():90] W&B internal server running at pid: 1908068, started at: 2023-04-21 16:36:26.895229 +2023-04-21 16:36:26,897 INFO WriterThread:1908068 [datastore.py:open_for_write():75] open: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/run-3b3qqmpu.wandb +2023-04-21 16:36:26,899 DEBUG SenderThread:1908068 [sender.py:send():236] send: header +2023-04-21 16:36:26,900 DEBUG SenderThread:1908068 [sender.py:send():236] send: run +2023-04-21 16:36:27,362 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: check_version +2023-04-21 16:36:27,369 INFO SenderThread:1908068 [dir_watcher.py:__init__():216] watching files in: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files +2023-04-21 16:36:27,369 INFO SenderThread:1908068 [sender.py:_start_run_threads():815] run started: 3b3qqmpu with start time 1682066186 +2023-04-21 16:36:27,369 DEBUG SenderThread:1908068 
[sender.py:send():236] send: summary +2023-04-21 16:36:27,370 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:36:27,370 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: check_version +2023-04-21 16:36:28,372 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_created():269] file/dir created: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:36:30,584 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: run_start +2023-04-21 16:36:30,594 DEBUG HandlerThread:1908068 [meta.py:__init__():35] meta init +2023-04-21 16:36:30,595 DEBUG HandlerThread:1908068 [meta.py:__init__():49] meta init done +2023-04-21 16:36:30,595 DEBUG HandlerThread:1908068 [meta.py:probe():209] probe +2023-04-21 16:36:30,609 DEBUG HandlerThread:1908068 [meta.py:_setup_git():199] setup git +2023-04-21 16:36:30,627 DEBUG HandlerThread:1908068 [meta.py:_setup_git():206] setup git done +2023-04-21 16:36:30,627 DEBUG HandlerThread:1908068 [meta.py:_save_pip():53] save pip +2023-04-21 16:36:30,627 DEBUG HandlerThread:1908068 [meta.py:_save_pip():67] save pip done +2023-04-21 16:36:30,628 DEBUG HandlerThread:1908068 [meta.py:_save_conda():74] save conda +2023-04-21 16:36:31,370 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_created():269] file/dir created: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/requirements.txt +2023-04-21 16:36:31,371 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_created():269] file/dir created: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml +2023-04-21 16:36:33,569 DEBUG HandlerThread:1908068 [meta.py:_save_conda():84] save conda done +2023-04-21 16:36:33,569 DEBUG HandlerThread:1908068 [meta.py:probe():247] probe done +2023-04-21 16:36:33,574 DEBUG SenderThread:1908068 [sender.py:send():236] send: files 
+2023-04-21 16:36:33,574 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-metadata.json with policy now +2023-04-21 16:36:33,580 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:36:33,580 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:36:34,017 DEBUG SenderThread:1908068 [sender.py:send():236] send: telemetry +2023-04-21 16:36:34,017 DEBUG SenderThread:1908068 [sender.py:send():236] send: config +2023-04-21 16:36:34,018 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:36:34,018 DEBUG SenderThread:1908068 [sender.py:send():236] send: telemetry +2023-04-21 16:36:34,019 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:36:34,019 WARNING SenderThread:1908068 [sender.py:send_metric():908] Seen metric with glob (shouldn't happen) +2023-04-21 16:36:34,371 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml +2023-04-21 16:36:34,371 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_created():269] file/dir created: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-metadata.json +2023-04-21 16:36:34,372 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_created():269] file/dir created: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:34,621 INFO Thread-11 :1908068 [upload_job.py:push():138] Uploaded file /tmp/tmp5t_rzebkwandb/10c5l0lt-wandb-metadata.json +2023-04-21 16:36:36,372 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:42,373 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: 
/home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:44,375 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:48,376 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:49,018 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:36:49,019 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:36:52,377 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:56,378 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:36:59,379 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml +2023-04-21 16:37:00,379 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:01,201 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:37:04,380 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:04,439 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:37:04,440 DEBUG SenderThread:1908068 
[sender.py:send_request():250] send_request: stop_status +2023-04-21 16:37:08,381 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:12,383 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:13,483 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:37:13,485 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:37:13,485 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:37:13,485 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:37:13,486 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:37:13,486 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:37:13,486 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:37:14,383 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:14,384 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:37:16,384 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:19,864 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:37:19,864 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 
16:37:20,385 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:22,386 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:26,387 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:30,388 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml +2023-04-21 16:37:30,388 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:33,704 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:37:34,389 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:35,295 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:37:35,296 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:37:38,390 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:42,391 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:46,393 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] 
file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:50,394 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:50,744 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:37:50,744 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:37:51,950 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:37:51,951 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:37:51,952 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:37:51,952 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:37:52,395 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:37:54,395 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:37:58,396 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:00,397 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:04,398 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:06,203 
DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:38:06,203 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:38:06,651 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:38:08,399 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:12,400 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:16,401 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:20,402 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:21,653 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:38:21,653 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:38:24,403 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:28,405 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:30,508 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:38:30,509 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:38:30,510 DEBUG SenderThread:1908068 [sender.py:send():236] 
send: summary +2023-04-21 16:38:30,510 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:38:31,405 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:38:32,406 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:36,407 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:37,107 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:38:37,108 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:38:38,731 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:38:40,408 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:44,409 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:48,410 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:52,411 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:52,541 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 
16:38:52,541 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:38:54,411 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:38:58,412 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:02,413 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:06,414 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:07,974 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:39:07,974 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:39:09,027 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:39:09,027 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:39:09,028 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:39:09,030 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:39:09,415 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:39:10,416 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:11,195 DEBUG 
SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:39:14,417 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:18,418 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:22,419 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:23,405 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:39:23,405 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:39:26,420 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:30,421 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:34,422 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:38,423 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:38,855 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:39:38,855 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:39:40,423 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] 
file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:43,641 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:39:44,424 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:47,545 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:39:47,546 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:39:47,546 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:39:47,547 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:39:48,425 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:48,426 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:39:52,426 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:39:54,386 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:39:54,386 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:39:56,428 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:00,429 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: 
/home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:04,430 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:08,431 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:09,800 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:40:09,800 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:40:12,432 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:16,121 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:40:16,433 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:20,435 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:24,436 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:25,315 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:40:25,315 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:40:26,398 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:40:26,398 DEBUG 
SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:40:26,399 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:40:26,399 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:40:26,436 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:40:28,437 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:32,438 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:36,439 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:38,440 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:40,805 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:40:40,805 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:40:42,441 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:46,442 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:48,534 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats 
+2023-04-21 16:40:50,444 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:54,445 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:40:56,267 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:40:56,267 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:40:58,446 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:02,447 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:04,770 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:41:04,771 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:41:04,771 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:41:04,773 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:41:05,448 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:41:06,449 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:10,450 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: 
/home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:11,685 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:41:11,685 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:41:14,451 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:18,452 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:20,954 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:41:22,454 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:24,454 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:27,113 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:41:27,113 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:41:28,455 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:32,457 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:36,458 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: 
/home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:40,459 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:42,553 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:41:42,553 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:41:43,170 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:41:43,171 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:41:43,171 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:41:43,172 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:41:43,460 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:41:44,461 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:48,462 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:52,463 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:53,357 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:41:56,464 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: 
/home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:41:57,991 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:41:57,992 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:42:00,466 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:04,467 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:08,468 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:10,469 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:13,408 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:42:13,408 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:42:14,470 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:18,471 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:21,544 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:42:21,545 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:42:21,545 DEBUG 
SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:42:21,546 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:42:22,473 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:22,473 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:42:25,751 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:42:26,474 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:28,829 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:42:28,830 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:42:30,475 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:34,476 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:38,478 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:42,479 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:44,289 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] 
handle_request: stop_status +2023-04-21 16:42:44,289 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:42:46,480 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:50,481 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:54,483 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:56,484 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:42:58,187 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:42:59,750 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: stop_status +2023-04-21 16:42:59,751 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: stop_status +2023-04-21 16:42:59,942 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:43:00,200 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:43:00,201 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:43:00,201 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:43:00,407 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: partial_history +2023-04-21 16:43:00,409 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:43:00,409 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:43:00,409 
DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:43:00,410 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:43:00,410 DEBUG SenderThread:1908068 [sender.py:send():236] send: metric +2023-04-21 16:43:00,410 DEBUG SenderThread:1908068 [sender.py:send():236] send: history +2023-04-21 16:43:00,410 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:43:00,410 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:43:00,438 DEBUG SenderThread:1908068 [sender.py:send():236] send: telemetry +2023-04-21 16:43:00,485 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:43:00,485 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:43:02,486 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:43:02,900 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:03,512 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:43:03,513 DEBUG SenderThread:1908068 [sender.py:send():236] send: exit +2023-04-21 16:43:03,513 INFO SenderThread:1908068 [sender.py:send_exit():372] handling exit code: 0 +2023-04-21 16:43:03,513 INFO SenderThread:1908068 [sender.py:send_exit():374] handling runtime: 392 +2023-04-21 16:43:03,514 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:43:03,514 
INFO SenderThread:1908068 [sender.py:send_exit():380] send defer +2023-04-21 16:43:03,514 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:03,515 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:03,515 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 0 +2023-04-21 16:43:03,516 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,516 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 0 +2023-04-21 16:43:03,516 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 1 +2023-04-21 16:43:03,516 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:03,516 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 1 +2023-04-21 16:43:03,678 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:03,678 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,679 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 1 +2023-04-21 16:43:03,679 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 2 +2023-04-21 16:43:03,679 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:03,679 DEBUG SenderThread:1908068 [sender.py:send():236] send: stats +2023-04-21 16:43:03,680 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:03,680 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 2 +2023-04-21 16:43:03,680 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,681 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 2 +2023-04-21 16:43:03,681 
INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 3 +2023-04-21 16:43:03,681 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:03,681 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 3 +2023-04-21 16:43:03,681 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,681 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 3 +2023-04-21 16:43:03,681 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 4 +2023-04-21 16:43:03,682 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:03,682 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 4 +2023-04-21 16:43:03,682 DEBUG SenderThread:1908068 [sender.py:send():236] send: summary +2023-04-21 16:43:03,683 INFO SenderThread:1908068 [sender.py:_save_file():952] saving file wandb-summary.json with policy end +2023-04-21 16:43:03,683 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,683 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 4 +2023-04-21 16:43:03,683 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 5 +2023-04-21 16:43:03,683 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:03,683 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 5 +2023-04-21 16:43:03,684 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,684 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 5 +2023-04-21 16:43:03,684 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 6 +2023-04-21 16:43:03,684 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer 
+2023-04-21 16:43:03,684 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 6 +2023-04-21 16:43:03,684 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:03,684 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 6 +2023-04-21 16:43:03,684 INFO SenderThread:1908068 [dir_watcher.py:finish():353] shutting down directory watcher +2023-04-21 16:43:03,781 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:04,513 INFO Thread-7 :1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml +2023-04-21 16:43:04,513 INFO SenderThread:1908068 [dir_watcher.py:_on_file_modified():286] file/dir modified: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:43:04,513 INFO SenderThread:1908068 [dir_watcher.py:finish():383] scan: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files +2023-04-21 16:43:04,514 INFO SenderThread:1908068 [dir_watcher.py:finish():397] scan save: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log output.log +2023-04-21 16:43:04,514 INFO SenderThread:1908068 [dir_watcher.py:finish():397] scan save: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml conda-environment.yaml +2023-04-21 16:43:04,518 INFO SenderThread:1908068 [dir_watcher.py:finish():397] scan save: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json wandb-summary.json +2023-04-21 16:43:04,518 INFO SenderThread:1908068 [dir_watcher.py:finish():397] scan save: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml config.yaml +2023-04-21 16:43:04,523 INFO 
SenderThread:1908068 [dir_watcher.py:finish():397] scan save: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/requirements.txt requirements.txt +2023-04-21 16:43:04,527 INFO SenderThread:1908068 [dir_watcher.py:finish():397] scan save: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-metadata.json wandb-metadata.json +2023-04-21 16:43:04,529 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 7 +2023-04-21 16:43:04,530 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:04,533 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:04,533 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 7 +2023-04-21 16:43:04,534 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:04,534 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 7 +2023-04-21 16:43:04,534 INFO SenderThread:1908068 [file_pusher.py:finish():171] shutting down file pusher +2023-04-21 16:43:04,631 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:04,631 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:04,732 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:04,732 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:04,833 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:04,834 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:04,935 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:04,935 DEBUG SenderThread:1908068 [sender.py:send_request():250] 
send_request: poll_exit +2023-04-21 16:43:05,036 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,036 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,137 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,137 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,238 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,238 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,339 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,339 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,440 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,440 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,541 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,541 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,587 INFO Thread-13 :1908068 [upload_job.py:push():138] Uploaded file /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/conda-environment.yaml +2023-04-21 16:43:05,589 INFO Thread-12 :1908068 [upload_job.py:push():138] Uploaded file /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/output.log +2023-04-21 16:43:05,592 INFO Thread-15 :1908068 [upload_job.py:push():138] Uploaded file /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/config.yaml +2023-04-21 16:43:05,599 INFO Thread-16 :1908068 [upload_job.py:push():138] Uploaded file 
/home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/requirements.txt +2023-04-21 16:43:05,603 INFO Thread-14 :1908068 [upload_job.py:push():138] Uploaded file /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/files/wandb-summary.json +2023-04-21 16:43:05,642 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,643 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,743 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:05,744 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:05,804 INFO Thread-6 :1908068 [sender.py:transition_state():393] send defer: 8 +2023-04-21 16:43:05,804 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:05,804 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 8 +2023-04-21 16:43:05,804 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:05,805 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 8 +2023-04-21 16:43:05,845 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:07,281 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 9 +2023-04-21 16:43:07,281 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:07,282 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:07,282 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 9 +2023-04-21 16:43:07,282 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:07,282 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender 
defer: 9 +2023-04-21 16:43:07,282 INFO SenderThread:1908068 [sender.py:transition_state():393] send defer: 10 +2023-04-21 16:43:07,283 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: defer +2023-04-21 16:43:07,283 DEBUG SenderThread:1908068 [sender.py:send():236] send: final +2023-04-21 16:43:07,283 INFO HandlerThread:1908068 [handler.py:handle_request_defer():163] handle defer: 10 +2023-04-21 16:43:07,283 DEBUG SenderThread:1908068 [sender.py:send():236] send: footer +2023-04-21 16:43:07,284 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: defer +2023-04-21 16:43:07,284 INFO SenderThread:1908068 [sender.py:send_request_defer():389] handle sender defer: 10 +2023-04-21 16:43:07,382 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: poll_exit +2023-04-21 16:43:07,383 DEBUG SenderThread:1908068 [sender.py:send_request():250] send_request: poll_exit +2023-04-21 16:43:07,383 INFO SenderThread:1908068 [file_pusher.py:join():176] waiting for file pusher +2023-04-21 16:43:08,324 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: sampled_history +2023-04-21 16:43:08,325 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: get_summary +2023-04-21 16:43:08,326 DEBUG HandlerThread:1908068 [handler.py:handle_request():140] handle_request: shutdown +2023-04-21 16:43:08,326 INFO HandlerThread:1908068 [handler.py:finish():806] shutting down handler +2023-04-21 16:43:09,223 INFO SenderThread:1908068 [sender.py:finish():1114] shutting down sender +2023-04-21 16:43:09,223 INFO SenderThread:1908068 [file_pusher.py:finish():171] shutting down file pusher +2023-04-21 16:43:09,223 INFO SenderThread:1908068 [file_pusher.py:join():176] waiting for file pusher +2023-04-21 16:43:09,283 INFO WriterThread:1908068 [datastore.py:close():279] close: /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/run-3b3qqmpu.wandb +2023-04-21 16:43:09,313 
INFO MainThread:1908068 [internal.py:handle_exit():80] Internal process exited diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug.log b/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug.log new file mode 100644 index 0000000000000000000000000000000000000000..c20e24738d80dc341fe1fd02362046a2357e85be --- /dev/null +++ b/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug.log @@ -0,0 +1,173 @@ +2023-04-21 16:36:26,365 INFO MainThread:1907816 [wandb_setup.py:_flush():76] Configure stats pid to 1907816 +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_setup.py:_flush():76] Loading settings from /home/wangyan/.config/wandb/settings +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_setup.py:_flush():76] Loading settings from /home/wangyan/project/hft/uptest/ptuning/wandb/settings +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_setup.py:_flush():76] Loading settings from environment variables: {} +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': 'ptuning/main.py', 'program': '/home/wangyan/project/hft/uptest/ptuning/main.py'} +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_init.py:_log_setup():461] Logging user logs to /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug.log +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_init.py:_log_setup():462] Logging internal logs to /home/wangyan/project/hft/uptest/ptuning/wandb/run-20230421_163626-3b3qqmpu/logs/debug-internal.log +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_init.py:init():495] calling init triggers +2023-04-21 16:36:26,366 INFO MainThread:1907816 [wandb_init.py:init():498] wandb.init called with sweep_config: {} +config: {} +2023-04-21 16:36:26,367 INFO MainThread:1907816 [wandb_init.py:init():548] starting backend +2023-04-21 16:36:26,367 INFO MainThread:1907816 [backend.py:_multiprocessing_setup():97] multiprocessing 
start_methods=fork,spawn,forkserver, using: spawn +2023-04-21 16:36:26,401 INFO MainThread:1907816 [backend.py:ensure_launched():217] starting backend process... +2023-04-21 16:36:26,427 INFO MainThread:1907816 [backend.py:ensure_launched():222] started backend process with pid: 1908068 +2023-04-21 16:36:26,429 INFO MainThread:1907816 [wandb_init.py:init():558] backend started and connected +2023-04-21 16:36:26,437 INFO MainThread:1907816 [wandb_init.py:init():634] updated telemetry +2023-04-21 16:36:26,493 INFO MainThread:1907816 [wandb_init.py:init():665] communicating run to backend with 30 second timeout +2023-04-21 16:36:27,361 INFO MainThread:1907816 [wandb_run.py:_on_init():1919] communicating current version +2023-04-21 16:36:30,583 INFO MainThread:1907816 [wandb_run.py:_on_init():1923] got version response upgrade_message: "wandb version 0.15.0 is available! To upgrade, please run:\n $ pip install wandb --upgrade" + +2023-04-21 16:36:30,583 INFO MainThread:1907816 [wandb_init.py:init():700] starting run threads in backend +2023-04-21 16:36:33,580 INFO MainThread:1907816 [wandb_run.py:_console_start():1893] atexit reg +2023-04-21 16:36:33,581 INFO MainThread:1907816 [wandb_run.py:_redirect():1766] redirect: SettingsConsole.REDIRECT +2023-04-21 16:36:33,582 INFO MainThread:1907816 [wandb_run.py:_redirect():1771] Redirecting console. +2023-04-21 16:36:33,584 INFO MainThread:1907816 [wandb_run.py:_redirect():1827] Redirects installed. 
+2023-04-21 16:36:33,584 INFO MainThread:1907816 [wandb_init.py:init():732] run started, returning control to user process +2023-04-21 16:36:33,614 INFO MainThread:1907816 [wandb_run.py:_config_callback():1131] config_cb None None {'num_layers': 28, 'vocab_size': 130528, 'hidden_size': 4096, 'num_attention_heads': 32, 'max_sequence_length': 2048, 'layernorm_epsilon': 1e-05, 'inner_hidden_size': 16384, 'use_cache': True, 'bos_token_id': 130004, 'eos_token_id': 130005, 'pad_token_id': 3, 'mask_token_id': 130000, 'gmask_token_id': 130001, 'position_encoding_2d': True, 'quantization_bit': 4, 'pre_seq_len': 128, 'prefix_projection': False, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'float16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'chunk_size_feed_forward': 0, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['ChatGLMModel'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 
'/home/wangyan/project/hft/uptest', 'transformers_version': '4.27.1', 'auto_map': {'AutoConfig': 'configuration_chatglm.ChatGLMConfig', 'AutoModel': 'modeling_chatglm.ChatGLMForConditionalGeneration', 'AutoModelForSeq2SeqLM': 'modeling_chatglm.ChatGLMForConditionalGeneration'}, 'model_type': 'chatglm', 'output_dir': 'output/adgen-chatglm-6b-pt-128-2e-2', 'overwrite_output_dir': True, 'do_train': True, 'do_eval': False, 'do_predict': False, 'evaluation_strategy': 'no', 'prediction_loss_only': False, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 1, 'per_gpu_train_batch_size': 'None', 'per_gpu_eval_batch_size': 'None', 'gradient_accumulation_steps': 16, 'eval_accumulation_steps': 'None', 'eval_delay': 0, 'learning_rate': 0.02, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 3.0, 'max_steps': 100, 'lr_scheduler_type': 'linear', 'warmup_ratio': 0.0, 'warmup_steps': 0, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': 'output/adgen-chatglm-6b-pt-128-2e-2/runs/Apr21_16-33-10_10-254-135-19', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 10, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 50, 'save_total_limit': 'None', 'save_on_each_node': False, 'no_cuda': False, 'use_mps_device': False, 'seed': 42, 'data_seed': 'None', 'jit_mode_eval': False, 'use_ipex': False, 'bf16': False, 'fp16': False, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': 'None', 'local_rank': -1, 'xpu_backend': 'None', 'tpu_num_cores': 'None', 'tpu_metrics_debug': False, 'debug': '[]', 'dataloader_drop_last': False, 'eval_steps': 'None', 'dataloader_num_workers': 0, 'past_index': -1, 'run_name': 'output/adgen-chatglm-6b-pt-128-2e-2', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': 'None', 'load_best_model_at_end': False, 
'metric_for_best_model': 'None', 'greater_is_better': 'None', 'ignore_data_skip': False, 'sharded_ddp': '[]', 'fsdp': '[]', 'fsdp_min_num_params': 0, 'fsdp_config': "{'fsdp_min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}", 'fsdp_transformer_layer_cls_to_wrap': 'None', 'deepspeed': 'None', 'label_smoothing_factor': 0.0, 'optim': 'adamw_hf', 'optim_args': 'None', 'adafactor': False, 'group_by_length': False, 'length_column_name': 'length', 'report_to': "['tensorboard', 'wandb']", 'ddp_find_unused_parameters': 'None', 'ddp_bucket_cap_mb': 'None', 'dataloader_pin_memory': True, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': 'None', 'hub_model_id': 'None', 'hub_strategy': 'every_save', 'hub_token': '', 'hub_private_repo': False, 'gradient_checkpointing': False, 'include_inputs_for_metrics': False, 'fp16_backend': 'auto', 'push_to_hub_model_id': 'None', 'push_to_hub_organization': 'None', 'push_to_hub_token': '', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': 'None', 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': 'None', 'torch_compile_mode': 'None', 'sortish_sampler': False, 'predict_with_generate': True, 'generation_max_length': 64, 'generation_num_beams': 'None', 'train_batch_size': 1, 'eval_batch_size': 1} +2023-04-21 16:43:00,434 INFO MainThread:1907816 [wandb_run.py:_atexit_cleanup():1862] got exitcode: 0 +2023-04-21 16:43:00,437 INFO MainThread:1907816 [wandb_run.py:_restore():1834] restore +2023-04-21 16:43:03,516 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 1 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 1751 +} + +2023-04-21 16:43:03,680 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 1 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 1751 +} + +2023-04-21 
16:43:04,531 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:04,632 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:04,733 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:04,834 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:04,935 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:05,036 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:05,137 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 1751 + total_bytes: 24880 +} + +2023-04-21 16:43:05,239 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:05,340 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:05,441 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:05,542 INFO MainThread:1907816 
[wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:05,643 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:05,744 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:07,282 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} + +2023-04-21 16:43:08,223 INFO MainThread:1907816 [wandb_run.py:_on_finish():1991] got exit ret: done: true +exit_result { +} +file_counts { + wandb_count: 6 +} +pusher_stats { + uploaded_bytes: 24880 + total_bytes: 24880 +} +local_info { +} + +2023-04-21 16:43:09,439 INFO MainThread:1907816 [wandb_run.py:_footer_history_summary_info():3099] rendering history +2023-04-21 16:43:09,440 INFO MainThread:1907816 [wandb_run.py:_footer_history_summary_info():3131] rendering summary +2023-04-21 16:43:09,444 INFO MainThread:1907816 [wandb_run.py:_footer_sync_info():3055] logging synced files diff --git a/ptuning/wandb/run-20230421_163626-3b3qqmpu/run-3b3qqmpu.wandb b/ptuning/wandb/run-20230421_163626-3b3qqmpu/run-3b3qqmpu.wandb new file mode 100644 index 0000000000000000000000000000000000000000..42b02506aaebb740b8b7599a5bc86b4c961f5590 Binary files /dev/null and b/ptuning/wandb/run-20230421_163626-3b3qqmpu/run-3b3qqmpu.wandb differ diff --git a/ptuning/web_demo.py b/ptuning/web_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..43d0c826e7190b11b6fe50e1a6243e9fceead2d2 --- /dev/null +++ b/ptuning/web_demo.py @@ -0,0 +1,166 @@ +import os, sys + +import gradio as gr +import mdtex2html + +import torch +import transformers +from 
transformers import ( + AutoConfig, + AutoModel, + AutoTokenizer, + AutoTokenizer, + DataCollatorForSeq2Seq, + HfArgumentParser, + Seq2SeqTrainingArguments, + set_seed, +) + +from arguments import ModelArguments, DataTrainingArguments + + +model = None +tokenizer = None + +"""Override Chatbot.postprocess""" + + +def postprocess(self, y): + if y is None: + return [] + for i, (message, response) in enumerate(y): + y[i] = ( + None if message is None else mdtex2html.convert((message)), + None if response is None else mdtex2html.convert(response), + ) + return y + + +gr.Chatbot.postprocess = postprocess + + +def parse_text(text): + """copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/""" + lines = text.split("\n") + lines = [line for line in lines if line != ""] + count = 0 + for i, line in enumerate(lines): + if "```" in line: + count += 1 + items = line.split('`') + if count % 2 == 1: + lines[i] = f'
'
+            else:
+                lines[i] = f'
' + else: + if i > 0: + if count % 2 == 1: + line = line.replace("`", "\`") + line = line.replace("<", "<") + line = line.replace(">", ">") + line = line.replace(" ", " ") + line = line.replace("*", "*") + line = line.replace("_", "_") + line = line.replace("-", "-") + line = line.replace(".", ".") + line = line.replace("!", "!") + line = line.replace("(", "(") + line = line.replace(")", ")") + line = line.replace("$", "$") + lines[i] = "
"+line + text = "".join(lines) + return text + + +def predict(input, chatbot, max_length, top_p, temperature, history): + chatbot.append((parse_text(input), "")) + for response, history in model.stream_chat(tokenizer, input, history, max_length=max_length, top_p=top_p, + temperature=temperature): + chatbot[-1] = (parse_text(input), parse_text(response)) + + yield chatbot, history + + +def reset_user_input(): + return gr.update(value='') + + +def reset_state(): + return [], [] + + +with gr.Blocks() as demo: + gr.HTML("""

ChatGLM

""") + + chatbot = gr.Chatbot() + with gr.Row(): + with gr.Column(scale=4): + with gr.Column(scale=12): + user_input = gr.Textbox(show_label=False, placeholder="Input...", lines=10).style( + container=False) + with gr.Column(min_width=32, scale=1): + submitBtn = gr.Button("Submit", variant="primary") + with gr.Column(scale=1): + emptyBtn = gr.Button("Clear History") + max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True) + top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True) + temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True) + + history = gr.State([]) + + submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history], [chatbot, history], + show_progress=True) + submitBtn.click(reset_user_input, [], [user_input]) + + emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True) + + + +def main(): + global model, tokenizer + + parser = HfArgumentParser(( + ModelArguments)) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. 
+ model_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))[0] + else: + model_args = parser.parse_args_into_dataclasses()[0] + + tokenizer = AutoTokenizer.from_pretrained( + model_args.model_name_or_path, trust_remote_code=True) + config = AutoConfig.from_pretrained( + model_args.model_name_or_path, trust_remote_code=True) + + config.pre_seq_len = model_args.pre_seq_len + config.prefix_projection = model_args.prefix_projection + + if model_args.ptuning_checkpoint is not None: + print(f"Loading prefix_encoder weight from {model_args.ptuning_checkpoint}") + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) + prefix_state_dict = torch.load(os.path.join(model_args.ptuning_checkpoint, "pytorch_model.bin")) + new_prefix_state_dict = {} + for k, v in prefix_state_dict.items(): + if k.startswith("transformer.prefix_encoder."): + new_prefix_state_dict[k[len("transformer.prefix_encoder."):]] = v + model.transformer.prefix_encoder.load_state_dict(new_prefix_state_dict) + else: + model = AutoModel.from_pretrained(model_args.model_name_or_path, config=config, trust_remote_code=True) + + if model_args.quantization_bit is not None: + print(f"Quantized to {model_args.quantization_bit} bit") + model = model.quantize(model_args.quantization_bit) + + if model_args.pre_seq_len is not None: + # P-tuning v2 + model = model.half().cuda() + model.transformer.prefix_encoder.float().cuda() + + model = model.eval() + demo.queue().launch(share=False, inbrowser=True) + + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ptuning/web_demo.sh b/ptuning/web_demo.sh new file mode 100644 index 0000000000000000000000000000000000000000..87bf9e9fc24b716b5d48bcfc7904a1e05d466318 --- /dev/null +++ b/ptuning/web_demo.sh @@ -0,0 +1,7 @@ +PRE_SEQ_LEN=128 + +CUDA_VISIBLE_DEVICES=0 python3 web_demo.py \ + --model_name_or_path THUDM/chatglm-6b \ + --ptuning_checkpoint 
output/adgen-chatglm-6b-pt-128-2e-2/checkpoint-3000 \ + --pre_seq_len $PRE_SEQ_LEN + diff --git a/pytorch_model-00001-of-00008.bin b/pytorch_model-00001-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..1d00c9377caa0b1dee463a247b08b9dbd71600fb --- /dev/null +++ b/pytorch_model-00001-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be79e2b22d99b3d76184f83f266cc764275220b66da6c4d0217176c8f8f6af27 +size 1740651802 diff --git a/pytorch_model-00002-of-00008.bin b/pytorch_model-00002-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..893f8d30268105d8c9f9d005b472a90417a401bd --- /dev/null +++ b/pytorch_model-00002-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a80198fb714f7363d7e541125bb70b9cb6b1d1ef5988d32a7a25a852a374cbc3 +size 1879731432 diff --git a/pytorch_model-00003-of-00008.bin b/pytorch_model-00003-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..b1dce7313a26c9822a04624b54f6995b8867ac73 --- /dev/null +++ b/pytorch_model-00003-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aaba0ae53b3ea30559575c8528dab52ca291a26ac847c5601fcf874db401198f +size 1980385902 diff --git a/pytorch_model-00004-of-00008.bin b/pytorch_model-00004-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..2d45901bc3a85dbcd825af45da849d0389b94804 --- /dev/null +++ b/pytorch_model-00004-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:968d134dd9b11e393d160144f097d6bff8c559413e3f75e9e0b6d35618eba669 +size 1913294120 diff --git a/pytorch_model-00005-of-00008.bin b/pytorch_model-00005-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..6d95cb5c087b8e54ca1f92743c4bdeb51b2fbbd1 --- /dev/null +++ b/pytorch_model-00005-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:fc628ce0dcd5c38783e63fc81dd1b609fe01670ec3b855b358aa0d1d7ea48bf3 +size 1879722289 diff --git a/pytorch_model-00006-of-00008.bin b/pytorch_model-00006-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..87a8ebcbdcc7e69c363d54186d1e38896c172061 --- /dev/null +++ b/pytorch_model-00006-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:511ec23b7907b7a26461671775a2ac08c08fb3695285bbe7d91fc534d7cbfd7e +size 1879731496 diff --git a/pytorch_model-00007-of-00008.bin b/pytorch_model-00007-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..cb0ed609ecd925e4a40922f04750cd3c688df44f --- /dev/null +++ b/pytorch_model-00007-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:245d64e05cebeb214d696bccc87c1dbdf16c67c366e7f54af452ec5748c2186e +size 1074103621 diff --git a/pytorch_model-00008-of-00008.bin b/pytorch_model-00008-of-00008.bin new file mode 100644 index 0000000000000000000000000000000000000000..c232ffbe3918fd0586bc950e5761ca6b4d48da7e --- /dev/null +++ b/pytorch_model-00008-of-00008.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:607d08dd09074840c5f4603d4959a5c6789790955181c7253a2c14d38c1801d2 +size 1069286123 diff --git a/pytorch_model.bin.index.json b/pytorch_model.bin.index.json new file mode 100644 index 0000000000000000000000000000000000000000..b8ada2bdf39c8297dc2b3159270227c587bd13e9 --- /dev/null +++ b/pytorch_model.bin.index.json @@ -0,0 +1,375 @@ +{ + "metadata": { + "total_size": 13744473856 + }, + "weight_map": { + "lm_head.weight": "pytorch_model-00008-of-00008.bin", + "transformer.final_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.final_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.0.attention.dense.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.attention.dense.weight": "pytorch_model-00001-of-00008.bin", + 
"transformer.layers.0.attention.query_key_value.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.attention.query_key_value.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.input_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_4h_to_h.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_4h_to_h.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.post_attention_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.dense.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.dense.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.query_key_value.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.query_key_value.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.attention.rotary_emb.inv_freq": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.input_layernorm.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.1.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.1.mlp.dense_h_to_4h.bias": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.mlp.dense_h_to_4h.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.1.post_attention_layernorm.bias": 
"pytorch_model-00001-of-00008.bin", + "transformer.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00008.bin", + "transformer.layers.10.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.10.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.11.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + 
"transformer.layers.11.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.11.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.12.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + 
"transformer.layers.13.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.13.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.14.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + 
"transformer.layers.15.attention.dense.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.dense.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.query_key_value.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.query_key_value.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_4h_to_h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_4h_to_h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_h_to_4h.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.mlp.dense_h_to_4h.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.post_attention_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.15.post_attention_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.attention.rotary_emb.inv_freq": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.input_layernorm.bias": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.input_layernorm.weight": "pytorch_model-00004-of-00008.bin", + "transformer.layers.16.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + 
"transformer.layers.16.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.16.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.17.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + 
"transformer.layers.18.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.18.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_4h_to_h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_4h_to_h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.19.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.2.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.dense.weight": 
"pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.2.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.20.attention.dense.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.dense.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.query_key_value.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.query_key_value.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.attention.rotary_emb.inv_freq": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.input_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.input_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.20.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.20.mlp.dense_h_to_4h.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.mlp.dense_h_to_4h.weight": "pytorch_model-00005-of-00008.bin", + 
"transformer.layers.20.post_attention_layernorm.bias": "pytorch_model-00005-of-00008.bin", + "transformer.layers.20.post_attention_layernorm.weight": "pytorch_model-00005-of-00008.bin", + "transformer.layers.21.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.21.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + 
"transformer.layers.22.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.22.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.23.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + 
"transformer.layers.24.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_4h_to_h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_4h_to_h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_h_to_4h.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.mlp.dense_h_to_4h.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.24.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.dense.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.dense.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.query_key_value.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.query_key_value.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.attention.rotary_emb.inv_freq": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.input_layernorm.bias": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.input_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.25.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.25.post_attention_layernorm.bias": "pytorch_model-00006-of-00008.bin", + 
"transformer.layers.25.post_attention_layernorm.weight": "pytorch_model-00006-of-00008.bin", + "transformer.layers.26.attention.dense.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.dense.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.query_key_value.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.query_key_value.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.attention.rotary_emb.inv_freq": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.input_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.input_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.post_attention_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.26.post_attention_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.dense.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.dense.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.query_key_value.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.query_key_value.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.attention.rotary_emb.inv_freq": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.input_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.input_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_4h_to_h.bias": "pytorch_model-00007-of-00008.bin", + 
"transformer.layers.27.mlp.dense_4h_to_h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_h_to_4h.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.mlp.dense_h_to_4h.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.post_attention_layernorm.bias": "pytorch_model-00007-of-00008.bin", + "transformer.layers.27.post_attention_layernorm.weight": "pytorch_model-00007-of-00008.bin", + "transformer.layers.3.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.3.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + 
"transformer.layers.4.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.4.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.dense.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_4h_to_h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_4h_to_h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_h_to_4h.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.mlp.dense_h_to_4h.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.5.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.dense.bias": 
"pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.dense.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.query_key_value.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.query_key_value.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.attention.rotary_emb.inv_freq": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.input_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.input_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.6.post_attention_layernorm.bias": "pytorch_model-00002-of-00008.bin", + "transformer.layers.6.post_attention_layernorm.weight": "pytorch_model-00002-of-00008.bin", + "transformer.layers.7.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + 
"transformer.layers.7.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.7.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.input_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.8.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.dense.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.dense.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.query_key_value.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.query_key_value.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.attention.rotary_emb.inv_freq": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.input_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.input_layernorm.weight": 
"pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_4h_to_h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_4h_to_h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_h_to_4h.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.mlp.dense_h_to_4h.weight": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.post_attention_layernorm.bias": "pytorch_model-00003-of-00008.bin", + "transformer.layers.9.post_attention_layernorm.weight": "pytorch_model-00003-of-00008.bin", + "transformer.word_embeddings.weight": "pytorch_model-00001-of-00008.bin" + } +} diff --git a/quantization.py b/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..6f469f6a25a8233fe881608168daeba0bc809540 --- /dev/null +++ b/quantization.py @@ -0,0 +1,201 @@ +from torch.nn import Linear +from torch.nn.parameter import Parameter + +import bz2 +import torch +import base64 +import ctypes +from transformers.utils import logging + +from typing import List +from functools import partial + +logger = logging.get_logger(__name__) + +try: + from cpm_kernels.kernels.base import LazyKernelCModule, KernelFunction, round_up + + class Kernel: + def __init__(self, code: bytes, function_names: List[str]): + self.code = code + self._function_names = function_names + self._cmodule = LazyKernelCModule(self.code) + + for name in self._function_names: + setattr(self, name, KernelFunction(self._cmodule, name)) + + quantization_code = 
"$QlpoOTFBWSZTWU9yuJUAQHN//////////f/n/8/n///n//bt4dTidcVx8X3V9FV/92/v4B7/AD5FBQFAAAChSgKpFCFAFVSigUAAAEKhSgUUqgFBKigqVREQAABQBQIANDTTIGI00BkZBkNGE0A0BkBkGQGRkaNAaAGQNBoGgDIAAYIGTI0DQAQAaGmmQMRpoDIyDIaMJoBoDIDIMgMjI0aA0AMgaDQNAGQAAwQMmRoGgAgA0NNMgYjTQGRkGQ0YTQDQGQGQZAZGRo0BoAZA0GgaAMgABggZMjQNABABoaaZAxGmgMjIMhowmgGgMgMgyAyMjRoDQAyBoNA0AZAADBAyZGgaAAmqU1NEgJqnptU/Sn4jRR6J6epk2pqb1Q/SgAPUGgyNNGjQ2SBpoAZAAGg0NB6mgDIAAAAA2oaApSREBNAARhGiYEaEwU8pvImlP0k2aam1GaGqbFNM1MHpTwmkepmyU9R6nqPKekHqNNPUxNGhp6n6p6QaZ6o9TG1GMqcoV9ly6nRanHlq6zPNbnGZNi6HSug+2nPiZ13XcnFYZW+45W11CumhzYhchOJ2GLLV1OBjBjGf4TptOddTSOcVxhqYZMYwZXZZY00zI1paX5X9J+b+f4e+x43RXSxXPOdquiGpduatGyXneN696M9t4HU2eR5XX/kPhP261NTx3JO1Ow7LyuDmeo9a7d351T1ZxnvnrvYnrXv/hXxPCeuYx2XsNmO003eg9J3Z6U7b23meJ4ri01OdzTk9BNO96brz+qT5nuvvH3ds/G+m/JcG/F2XYuhXlvO+jP7U3XgrzPN/lr8Sf1n6j4j7jZs+s/T0tNaNNYzTs12rxjwztHlnire3Nzc3N1wuBwOBwXBvZfoHpD7rFmR99V5vj3aXza3xdBbXMalubTg/jIv5dfAi54Pdc75j4z412n3Npj3Ld/ENm7a3b/Cod6h/ret1/5vn/C+l+gdslMvgPSLJ8d8q+U66fevYn/tW1chleEtNTGlcHCbLRlq0tHzF5tsbbZZfHjjLgZu42XCuC3NrdjTasZGNzgxPIrGqp7r3p7L2p5XjnpPSmTd5XtzqnB6U87zzg1Ol0zd0zsLszxR6lkxp35u6/teL0L0W922cR7Lu1lpL9CsHirzuM2T+BgsyViT6LHcm0/Vr6U/7LGGyJeqTEjt0PHWhF5mCT7R9mtlDwriYv0Tyr/OxYt6qp5r0mPVT0608TqnqMZaarU2nFwrTzzlrs1ed7z1ux60wyr4ydCaTi3enW8x68x0zU7tXSlcmPSW1mGpWJMg4zmPC2lK96tp0OE80y4MfEvnZj8zGluR6b22ki1Ou9V2nCd9xovcPvcYMZYy0lvN60ScZ45vN6yeCeeXFb1lVjnnCar5fwXwE2bzJ4HI1XVPXfXZMm44GUsMpYsmLB65TuVdm0cl0b+i/wGNN66XjeV7zuPpHcnK/juhhjdfId5jMdE5nN0dGmmm2zZs2cexD5n9p/dY352XsvXHaZNWWsmmS1atjR452nYudzvqv2HMRyvNNnlMcDl3R2+yx2uVrBubTW9icHDVtbNXlZm7jma1rM4VurZZd2y6nUau7ZXZ7bVU+mnoOVxZGMrVmvX60605JwmzGZhhhjTWtaaaMaaGTGmNMZasY0iX8VMUl8eepaIrzGSpemWOQyZORk2bNpjUybMmxqYmknCGCFynutfksaZpjTNMaaatM0xsxcGR0sociNqxNSmhhR1ZJPbsn8qyF0t2qH6iYBclclalbtTTcHTDsPaX6rlnElph2Jyumumtynv2Kk8GI7rsvXbIcJgHJOSaSXnnGaI3m87RtVXJOZ/YtgdTE6Wpha6ZlE8ayXkef1fh602r2WwvfMXtMdLlkfnLFdYYwYso+bWqm7yJqHXZGw2nrS5ZanSYnWlxBxMF1V940K2wdrI7R6OYf7DGGamMmTSbRhlS45xmVOumF1EyPCmHrrN8wwZOOrdNtLeMtzFzDlWnfTBxMk2
NaXIZHBYxYLD4w8yju0ao65Vz1OIXoS9dLanwCe1PWrYuWMqf1if1z2k2yYfKJ741PDgno1ZQ8DRqvUny3mNoWTzGO6m1DkrJI8JiR5cSd+vZdGOO8nrMoc5+NDUFsMSXaZJeNlMmGLtJsovOsUp7I9S5VojKxF6bTVEelXqlfJobQr3LozSh2Jk7VcrVMfhXqszGWMzNqGhqZY0OadxkyyMssKugZR0KNFXBHlqwmJgTE/BNVMk6ItJXZMR0H47GpXv/DMOvNkmVuaV1PRfEdxuqc7Hcd+ZV/zTLaRxWk0nl9CdCeM6mn5rstHIBcpiuwmUZXeq81DacHI2rmrZ5SuE5mOZd6LQrZg9mx32TprA8BMo5jKN6yLTCi3WzQaZSuhzTtM1fUTGVpG8Tw+KXI0tjEpiWxtLYynOlktSbVlaI5kxP8TDH8kx50xoxi5KcA4pcja8KWLRlO/Ks6q06ergnvm1ca3Tq8Uw7LTUsmWyctXPWmpitl/uvGcWTGXGuAXDfhqazGmjkxcJW5hMMMMpYsXl2TZYtVOddG3XCarUt6Ptq9CZXSNzyuRzqRZOjsxdBbFVz6OA5HI43r1jityVlVpVkxmOsyaYWE1NTGq1sOVh36mHMcxtSvcy70edG0ZGR3I1Go1GRlV7mWWo1G0ZGRqlvH40l7o4m5xMWLLLYyNjnqc8556mdPqLJ31n/1nWOncxzG1tizrHs/Z+d2vP/B/l8wdJ6rHUn2nbbDq4p6htFtYzMMMTaZis1K5GKzGNmxhmUx2DDlZ/qNnIx41xnaMfCZWYaZWtNLTNW8ND4Fw1MyZOCdM428suKG1ehW8TesOydg7J+YYcD4cYR+8dFK6M4E3HM9ZfRNNL+Sn6rsl4DsrDl2HpPCnfxjGXtbZtYys1ttlyJ4T+BvexjGWRjMszK4Jpc77D3GyuVD7q0+G8m9G+2+rGm7cOR2y7FdtY2XUYx/oNlfRYxhMYyYZkyyg55enna9Kt/FFi6GMMwYwdwxWgxGMLKYmUyGExTKMZkMFhkymKuh0NOBNnBu+23LdwDoZYYzGGMxtORaTU1pjTGWTTGGtMrNWUsyyTTLLG1qy2ZjbK2DBllWqxMtBMaYZQmcE7zvvRcTkclUwdkxTaSdyySt/7fpL+T1v516Ji97fwr5JbLu305zMn5+GMTTZ9F+y7ExwmGVfG44yxn3dLv6l5i+Wth1jCrDq21nW9LqvvDzz3Vf3LLH/O/32TJ/erx3bXftO4eF+G956D952K/An4NfvOpjFjExjevP/UmE0fIoZXx6/w6lX/no3D0bLt+ixjieBM6ksRd0yB4Lt2SwYNE+gd1detlZWUnpiZfGfFaK+4PyCa/v18V8X75pe9fLXzp7l3VjF76vWZmHwGz1IZNWT7b8yddJ4q5kyrVdfru6atWc7bVYztL9Jf4GXvT+Y8m9/YsXP6H018a8D4XVOqvfzqeR+6yZOD8dPv0+U7/q5Pl+2dNb0MjzGVH5p6MNQ7cOWvw62U9aHE8DprDek+McLyvDz+te+9Zhq5+YTruufMcWMabqysTmZVWjKPfnK0wyVcrsuhjZRdLkHNvD72b9abriOSGIxiLixMOoalNPXzy+wT/tf+U6HHONfsz+xe8ufHBdQWWGWLA9if0rsnmrxK5LvRZQeWsTCsrmOYy8VteVfuRfcVTtDLItLIsMYxZLdU/DbtSemxF6Z6Zo5WBXE4tFdCyVMMXMTEMZXVlS6Xec2T4e0tHsRcEuWshcJ2YsNF5rUx1E8ifCq6Z+ZP7qdCeu/aTwFd53l16/o0NOw6O3dLavP4Hbi4RdmuDk6DoYaninC0+o4uZjbJ7Rxeu0/FbuFg+q7DVS6fQe0rZ6NDGUNNU6DEqOaLTicKnYZMnBWruljQxoaS3dZhocDge0bSTyOvdAbG5hxe2xji7E/L55xX13wWNDi6HCekcFxfCPGxY0MXC+s7afWaMdDyjyr+o8Rudm/NabOZvdl274zH4f5X
K9z6On1Pe/K5TdPAslg77BjuO6Y3eO7GqvOPG/stknp1leyvLL0Z7bl9I4noMvLkzytLhWYzrOZzLXCORe028rORzOg4N/L0HlMOQ3Pgmnbb6KczlabORpu980q37TBqRu0/p3PO6234Bl03Ynuz+9W7gnsEcmvYaYY3aMYY0wx3pYd+ujsXauWdaY5Xkbtl23fPzFHiDB/QMo0yFjBllYxTQYYyxkrwn7JufwJ/PfgJ+C83X69ni6zvXcnyXabv0ncbLwsceS+RNlyN2mnneJtX0ngYO0+e+0+UnA+Wch3ji8hj5an4h+i6XBySU4n+R0roVcbw5yvHrmr4Yw8Y7x6c+9POPYHI5HI5HI5HI5HGXGww4nE4nrVyOR8XeqPEO7PLOiukYa3Novk5hV4cdtYZLI93e+uxff2jRo0aNGjRo0aNG1bVtW1dy3m83m8+tQ5ZzHw3nObwOu8La9Rc1dtkdS8A3eTk823tnktXWlxN6Oixe06zrN70Isd9jiOgZFq9yfkPqP/SLhN2Myl8jDM43bl1nbcb4cO57jlh8Jow6pzXZdL4dyODTuuhu77FyO27DdwdRxmvO+O+3N2+BdqyTwLHVczDVY4UPE4O66/ZO2cx1LFzVdSXtF7G4HMbrauOHRw6c8FdZ5m9fHZHYZXfTlZquyynSyTTKke6vcffSD9pzPA/G7n7jxPmuhc1DHMynPMrGL6AdewYmwu5ko+UUyTwrMv27rPH1v1nGqd87+p6N6LU8k3NEng53xXyHS97+44OSg/sy/hn+Se6yfYNjW0/uTgP+PvWYzLMmjhcLB/gGpri6H83/84eUXWT6T9Hsv7785z/7z4icpW+zfXypuR7rx/gMdZb1/wC678pcs8/2a3mDitGHxl9mfPlll5MafWWqxk/eYuTDgcNMzDGWLWvsuglNxs53GtN6uWpktlW1tZZYcuinMMWmnNnJydze3b2Y1McBxrBkXw799izLMZZYyy0TkbsGM4p03S2uVu5s/XXUdSdec6smVxZYYGpVmT8A+8ajuEyV5FatkvVru2x6uxGXXbH4A+jvgP4GMYy3iPLXzq/6z65+E005ey+cwMZD3fZcqc6xpjTFjQ0P3U+e++cPYmTIwj0nrK5NPTfl3WvpfLtXDcb2HQMudYOxFXQBor4L4T6vrOauFctYXJQ++NUWmJe5bmx1jDiZS1dTqWxo4GR8jm3fttpmPHppk9PEyv4/y8/sO07XacOmcqc0x2Vi9BvNJvN5oW8x4mOsydpidRxMYJPx06m1bqPzq9KtK8sxXNXFodD/+MYYaJTLwOhc9brCsV18oOR1i4tXChyTkq4lf4y1Ke+9axjDHqs1mfBbMXuP4Hzi+X7t8vzv7bHerrUPgPCxhjre4fXdfLNtNM+Jd+Zdh8xd8wP87uNPoPgv4W7/5P2BuxfsMabNnMnza+54Pdi5U671GPZY8CehX8Voeoo7FHpkeEc6715FwHZrIrUrHaviPUbPZHND+IhczrP6FcYvhOZ0Di/ETt0OI+YwNWR9r7tpf6WDeZKZDB1+z2IthOl1mPyb5FluvEx9h9d0NnM0Y1XPFkWIsk1WotJ0PBMmkvjvQTd0e71tfeV+8r8lQ/tpzpsmxJ+InrI/dj2UajUajVTUajatRqNRtGo1Go1Go4wjeMpZFMVV9CHbofPraLsJ3JpWV2XOoanCuFky4y3PPNxucK2uKC1Lbdb1eo+m5XomN6HfeZsabHLHRX/K+offtNGGmHWctcVcG44MdSqsOLY9VzX+Zxfxn2HPdWTpzWvkrtJ8M5zorrKcquRytJ5N5DZmcaW02l76nWO+BqPXm1A2Ry/0q71dH/mqrqeFjkYxjEXtsX8qubTk67rGycyqsdm4tZx5D6D5hhi0waaWmiaMP81Yjii5qxPlPuU/GfTL1Y5E6Jyfiq63qTa39A4J0sOGDgO9WF9bOXl0XfPRbsY2bPNKPy1YrFYrFYmRhhlTIyMjJWJYZHXuCXI
8OoXsvfljGLFicNifpp2XunoPiG1wtx3p1Tah+/DD66OnVtVXP9rKbVxOnL0tR/rHtqB5UDErUVcl11D4qqvjpOcxX7armUNJB3LpW6bxVvD08e8h3odKKvyCFZBdSh2FVcST9xV3n3T8t1j7Kr9qgrqXg+13Pt5U7JCvFXVIV1YG5lRhkVYZJYYDDD4KOIMoHCp26WS8GB7uBh2zIdgq/PKyInjV2STShuoapUdCpX1yTwqq/z1VvET7Kh5nVPkO8YyxjLt2MaaMmWTLQvx3qnzltnXW0p2jxgbEtSny/Osv8Y9pLMXYoHVPAhkVdWVeODhR6q9/Sxe2liwwZWMVvFXfRkeIDxAePUPIrdJ4ey6yquzH+PD/bUOWAu05qVHtFd8rrKHSoeNIOUqrYr3FXyToqfYJgwmJdKpXXOwYYegNNGMzfZPp/t3t/DVs4zjNTN61rRqaWaa4NYbRjTa0tWwy2Y2tGN8ZO8ofNKq4j9SL7I+cSm4/6ovLV5HNXLI0jJidwrtk6ynCaP6Z++GjRlWS3tLeW129Mi9evxU9mtz6s5J3Z7M2ngTgnKvmpomxpaLCzPfmx0JWE+m3NLDDGOX47RctdYYNK5jakdqLkRlI39n590T5zctGSwwZZDJj6kW8XSi6ot2MmWWJ0DUT3nuvebBudScjZ79g8cWJ8av0k+/bE5WKd5MdbFpbDVMxu1DVMmtNZGJvq1mtRbn6M+g/kP0FwDwr7quZs7xosNGpbscyxhhd9TyJyFwbLcxlTasg75vW7TsV5K7ji44XPMMrdoj+Y3rT0Hie62nlYV/pwczzOmdLqLhYkzGMzCZWGMQzGMSsZYY6Di1t4nlJ+Em63mJxrVLxPbYxNEdgc1dU2iOKyoYYWjNrEeHTYybVk0atSa7ehuwsWMWTqn1TrnS6hYsi71d1+s+k+ic70e20fzE/VaTdxT9ZtU4GIXdeNx3X77guYYfpHeTQjaMX6brOu4OY4K7Y2d9mbHarI5ox3p4GpJ2Vd/Tst60f7j999pppjR+Q/Qf8J/VaORs3cji7FfFuN61+ui9s8hix1OCh5KGVV23BPXvZfz3CLyHpix+exi8z/KnCnosY2eunor+cxyPO/xJ0vKey9OvE9VjqaYu0x3Z3jd6o2b1T12D+F8l232lwaaacD5LE8LBxu7WTlbWraWpew8Xexjel3E+wWD4APITdNqR8F3R3T0lunCQ4GaE9R37DxeCYfcHi4xci5ovKfxVs55y2hf+65E/Xdp6jR5nrebTmi5incpkyOjs50JvrZwstbbW6kfuuQw+2mykf/EXNFzxfKTrxew929TR6bWnGL//F3JFOFCQT3K4lQ" + + kernels = Kernel( + bz2.decompress(base64.b64decode(quantization_code)), + [ + "int4WeightCompression", + "int4WeightExtractionFloat", + "int4WeightExtractionHalf", + "int8WeightExtractionFloat", + "int8WeightExtractionHalf", + ], + ) +except Exception as exception: + kernels = None + logger.warning("Failed to load cpm_kernels:" + str(exception)) + + +class W8A16Linear(torch.autograd.Function): + @staticmethod + def forward(ctx, inp: torch.Tensor, quant_w: torch.Tensor, scale_w: torch.Tensor, weight_bit_width): + ctx.inp_shape = inp.size() + ctx.weight_bit_width = weight_bit_width + out_features = quant_w.size(0) + inp = 
inp.contiguous().view(-1, inp.size(-1)) + weight = extract_weight_to_half(quant_w, scale_w, weight_bit_width) + ctx.weight_shape = weight.size() + output = inp.mm(weight.t()) + ctx.save_for_backward(inp, quant_w, scale_w) + return output.view(*(ctx.inp_shape[:-1] + (out_features,))) + + @staticmethod + def backward(ctx, grad_output: torch.Tensor): + inp, quant_w, scale_w = ctx.saved_tensors + weight = extract_weight_to_half(quant_w, scale_w, ctx.weight_bit_width) + grad_output = grad_output.contiguous().view(-1, weight.size(0)) + grad_input = grad_output.mm(weight) + grad_weight = grad_output.t().mm(inp) + return grad_input.view(ctx.inp_shape), grad_weight.view(ctx.weight_shape), None, None + + +def compress_int4_weight(weight: torch.Tensor): # (n, m) + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + assert m % 2 == 0 + m = m // 2 + out = torch.empty(n, m, dtype=torch.int8, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + kernels.int4WeightCompression( + gridDim, + blockDim, + 0, + stream, + [ctypes.c_void_p(weight.data_ptr()), ctypes.c_void_p(out.data_ptr()), ctypes.c_int32(n), ctypes.c_int32(m)], + ) + return out + + +def extract_weight_to_half(weight: torch.Tensor, scale_list: torch.Tensor, source_bit_width: int): + if source_bit_width == 8: + func = kernels.int8WeightExtractionHalf + elif source_bit_width == 4: + func = kernels.int4WeightExtractionHalf + else: + assert False, "Unsupported bit-width" + + with torch.cuda.device(weight.device): + n, m = weight.size(0), weight.size(1) + out = torch.empty(n, m * (8 // source_bit_width), dtype=torch.half, device="cuda") + stream = torch.cuda.current_stream() + + gridDim = (n, 1, 1) + blockDim = (min(round_up(m, 32), 1024), 1, 1) + + func( + gridDim, + blockDim, + 0, + stream, + [ + ctypes.c_void_p(weight.data_ptr()), + ctypes.c_void_p(scale_list.data_ptr()), + ctypes.c_void_p(out.data_ptr()), + 
class QuantizedLinear(Linear):
    """A Linear layer whose weight is stored as int8 (or packed int4) plus a
    per-row fp16 scale; the fp16 weight is reconstructed on the fly by
    W8A16Linear during forward."""

    def __init__(self, weight_bit_width: int, weight_tensor=None, bias_tensor=None, empty_init=False, *args, **kwargs):
        super(QuantizedLinear, self).__init__(*args, **kwargs)
        self.weight_bit_width = weight_bit_width

        # Drop the dense fp16 weight that Linear.__init__ created; only the
        # quantized representation is kept.
        orig_shape = self.weight.shape
        del self.weight

        target_device = kwargs["device"]
        if weight_tensor is None or empty_init:
            # Placeholder buffers — real values are expected to be loaded later.
            quantized = torch.empty(
                orig_shape[0], orig_shape[1] * weight_bit_width // 8, dtype=torch.int8, device=target_device
            )
            scale = torch.empty(orig_shape[0], dtype=kwargs["dtype"], device=target_device)
        else:
            # Symmetric per-row quantization: the row's absolute max maps to the
            # largest representable signed integer for this bit-width.
            scale = (weight_tensor.abs().max(dim=-1).values / ((2 ** (weight_bit_width - 1)) - 1)).half()
            quantized = torch.round(weight_tensor / scale[:, None]).to(torch.int8)
            if weight_bit_width == 4:
                quantized = compress_int4_weight(quantized)

        self.weight = Parameter(quantized.to(target_device), requires_grad=False)
        self.weight_scale = Parameter(scale.to(target_device), requires_grad=False)
        if bias_tensor is not None:
            self.bias = Parameter(bias_tensor.to(target_device), requires_grad=False)
        else:
            self.bias = None

    def forward(self, input):
        output = W8A16Linear.apply(input, self.weight, self.weight_scale, self.weight_bit_width)
        if self.bias is not None:
            output = output + self.bias
        return output


def _replace_with_quantized(linear, weight_bit_width, empty_init):
    """Build a QuantizedLinear mirroring an existing fp16 Linear layer."""
    return QuantizedLinear(
        weight_bit_width=weight_bit_width,
        weight_tensor=linear.weight.to(torch.cuda.current_device()),
        bias_tensor=linear.bias,
        in_features=linear.in_features,
        out_features=linear.out_features,
        bias=True,
        dtype=torch.half,
        device=linear.weight.device,
        empty_init=empty_init,
    )


def quantize(model, weight_bit_width, empty_init=False, **kwargs):
    """Replace fp16 linear with quantized linear"""
    for layer in model.layers:
        layer.attention.query_key_value = _replace_with_quantized(
            layer.attention.query_key_value, weight_bit_width, empty_init
        )
        layer.attention.dense = _replace_with_quantized(layer.attention.dense, weight_bit_width, empty_init)
        layer.mlp.dense_h_to_4h = _replace_with_quantized(layer.mlp.dense_h_to_4h, weight_bit_width, empty_init)
        layer.mlp.dense_4h_to_h = _replace_with_quantized(layer.mlp.dense_4h_to_h, weight_bit_width, empty_init)
    return model
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "THUDM/chatglm-6b": 2048,
}


class TextTokenizer:
    """Thin wrapper around a SentencePiece model file."""

    def __init__(self, model_path):
        self.sp = spm.SentencePieceProcessor()
        self.sp.Load(model_path)
        self.num_tokens = self.sp.vocab_size()

    def encode(self, text):
        return self.sp.EncodeAsIds(text)

    def decode(self, ids: List[int]):
        return self.sp.DecodeIds(ids)

    def tokenize(self, text):
        return self.sp.EncodeAsPieces(text)

    def convert_tokens_to_ids(self, tokens):
        return [self.sp.PieceToId(piece) for piece in tokens]

    def convert_token_to_id(self, token):
        return self.sp.PieceToId(token)

    def convert_id_to_token(self, idx):
        return self.sp.IdToPiece(idx)

    def __len__(self):
        return self.num_tokens


class SPTokenizer:
    """SentencePiece-backed tokenizer that reserves an image-token id range and
    round-trips tabs / long whitespace runs through sentinel tokens."""

    def __init__(
        self,
        vocab_file,
        num_image_tokens=20000,
        max_blank_length=80,
        byte_fallback=True,
    ):
        assert vocab_file is not None
        self.vocab_file = vocab_file
        self.num_image_tokens = num_image_tokens
        # NOTE(review): the empty strings below look like special-token literals
        # whose angle-bracketed text was stripped during extraction — confirm
        # against the original file.
        self.special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "", "", "", "", ""]
        self.max_blank_length = max_blank_length
        self.byte_fallback = byte_fallback
        self.text_tokenizer = TextTokenizer(vocab_file)

    def _get_text_tokenizer(self):
        return self.text_tokenizer

    @staticmethod
    def get_blank_token(length: int):
        assert length >= 2
        return f"<|blank_{length}|>"

    @staticmethod
    def get_tab_token():
        return f"<|tab|>"

    @property
    def num_text_tokens(self):
        return self.text_tokenizer.num_tokens

    @property
    def num_tokens(self):
        # Image ids occupy [0, num_image_tokens); text ids are shifted above them.
        return self.num_image_tokens + self.num_text_tokens

    @staticmethod
    def _encode_whitespaces(text: str, max_len: int = 80):
        # Longest runs first so shorter blank tokens never split a longer run.
        text = text.replace("\t", SPTokenizer.get_tab_token())
        for run in range(max_len, 1, -1):
            text = text.replace(" " * run, SPTokenizer.get_blank_token(run))
        return text

    def _preprocess(self, text: str, linebreak=True, whitespaces=True):
        if linebreak:
            # NOTE(review): replacement target looks like a mangled newline
            # sentinel token — confirm against the original file.
            text = text.replace("\n", "")
        if whitespaces:
            text = self._encode_whitespaces(text, max_len=self.max_blank_length)
        return text

    def encode(
        self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[int]:
        """Encode *text* to ids shifted above the image-token range.

        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            text = "" + text
        raw_ids = self._get_text_tokenizer().encode(text)
        shifted = [tid + self.num_image_tokens for tid in raw_ids]
        return shifted if add_dummy_prefix else shifted[2:]

    def decode(self, text_ids: List[int]) -> str:
        """Decode ids back to text, dropping image-range ids and undoing the
        tab / blank-run sentinel substitutions."""
        shifted = [int(tid) - self.num_image_tokens for tid in text_ids]
        kept = [tid for tid in shifted if tid >= 0]
        text = self._get_text_tokenizer().decode(kept)
        text = text.replace("", "\n")
        text = text.replace(SPTokenizer.get_tab_token(), "\t")
        for run in range(2, self.max_blank_length + 1):
            text = text.replace(self.get_blank_token(run), " " * run)
        return text

    def tokenize(
        self, text: str, linebreak=True, whitespaces=True, add_dummy_prefix=True
    ) -> List[str]:
        """Tokenize *text* into SentencePiece pieces.

        @param text: Text to encode.
        @param linebreak: Whether to encode newline (\n) in text.
        @param whitespaces: Whether to encode multiple whitespaces or tab in text, useful for source code encoding.
        @param add_dummy_prefix: Whether to add dummy blank space in the beginning.
        """
        text = self._preprocess(text, linebreak, whitespaces)
        if not add_dummy_prefix:
            text = "" + text
        pieces = self._get_text_tokenizer().tokenize(text)
        return pieces if add_dummy_prefix else pieces[2:]

    def __getitem__(self, x: Union[int, str]):
        # int -> token string; str -> token id. Image-range values are handled
        # by the sentinel format below (literal mangled — see NOTE in __init__).
        if isinstance(x, int):
            if x < self.num_image_tokens:
                return "".format(x)
            return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens)
        if isinstance(x, str):
            if x.startswith("") and x[7:-1].isdigit():
                return int(x[7:-1])
            return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens
        raise ValueError("The key should be str or int.")
+ """ + text = self._preprocess(text, linebreak, whitespaces) + if not add_dummy_prefix: + text = "" + text + tokens = self._get_text_tokenizer().tokenize(text) + return tokens if add_dummy_prefix else tokens[2:] + + def __getitem__(self, x: Union[int, str]): + if isinstance(x, int): + if x < self.num_image_tokens: + return "".format(x) + else: + return self.text_tokenizer.convert_id_to_token(x - self.num_image_tokens) + elif isinstance(x, str): + if x.startswith("") and x[7:-1].isdigit(): + return int(x[7:-1]) + else: + return self.text_tokenizer.convert_token_to_id(x) + self.num_image_tokens + else: + raise ValueError("The key should be str or int.") + + +class ChatGLMTokenizer(PreTrainedTokenizer): + """ + Construct a ChatGLM tokenizer. Based on byte-level Byte-Pair-Encoding. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + """ + + vocab_files_names = {"vocab_file": "ice_text.model"} + max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES + model_input_names = ["input_ids", "attention_mask", "position_ids"] + + def __init__( + self, + vocab_file, + do_lower_case=False, + remove_space=False, + bos_token='', + eos_token='', + end_token='', + mask_token='[MASK]', + gmask_token='[gMASK]', + padding_side="left", + pad_token="", + unk_token="", + num_image_tokens=20000, + **kwargs + ) -> None: + super().__init__( + do_lower_case=do_lower_case, + remove_space=remove_space, + padding_side=padding_side, + bos_token=bos_token, + eos_token=eos_token, + end_token=end_token, + mask_token=mask_token, + gmask_token=gmask_token, + pad_token=pad_token, + unk_token=unk_token, + num_image_tokens=num_image_tokens, + **kwargs + ) + + self.do_lower_case = do_lower_case + self.remove_space = remove_space + self.vocab_file = vocab_file + + self.bos_token = bos_token + self.eos_token = eos_token + self.end_token = end_token + self.mask_token = mask_token + self.gmask_token = gmask_token + + self.sp_tokenizer = SPTokenizer(vocab_file, 
num_image_tokens=num_image_tokens) + + """ Initialisation """ + + @property + def gmask_token_id(self) -> Optional[int]: + if self.gmask_token is None: + return None + return self.convert_tokens_to_ids(self.gmask_token) + + @property + def end_token_id(self) -> Optional[int]: + """ + `Optional[int]`: Id of the end of context token in the vocabulary. Returns `None` if the token has not been + set. + """ + if self.end_token is None: + return None + return self.convert_tokens_to_ids(self.end_token) + + @property + def vocab_size(self): + """ Returns vocab size """ + return self.sp_tokenizer.num_tokens + + def get_vocab(self): + """ Returns vocab as a dict """ + vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def preprocess_text(self, inputs): + if self.remove_space: + outputs = " ".join(inputs.strip().split()) + else: + outputs = inputs + + if self.do_lower_case: + outputs = outputs.lower() + + return outputs + + def _tokenize(self, text, **kwargs): + """ Returns a tokenized string. """ + text = self.preprocess_text(text) + + seq = self.sp_tokenizer.tokenize(text) + + return seq + + def _decode( + self, + token_ids: Union[int, List[int]], + skip_special_tokens: bool = False, + clean_up_tokenization_spaces: bool = True, + **kwargs + ) -> str: + if isinstance(token_ids, int): + token_ids = [token_ids] + if len(token_ids) == 0: + return "" + if self.pad_token_id in token_ids: # remove pad + token_ids = list(filter((self.pad_token_id).__ne__, token_ids)) + return self.sp_tokenizer.decode(token_ids) + + def _convert_token_to_id(self, token): + """ Converts a token (str) in an id using the vocab. 
""" + return self.sp_tokenizer[token] + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.sp_tokenizer[index] + + def save_vocabulary(self, save_directory, filename_prefix=None): + """ + Save the vocabulary and special tokens file to a directory. + + Args: + save_directory (`str`): + The directory in which to save the vocabulary. + filename_prefix (`str`, *optional*): + An optional prefix to add to the named of the saved files. + + Returns: + `Tuple(str)`: Paths to the files saved. + """ + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, self.vocab_files_names["vocab_file"] + ) + else: + vocab_file = save_directory + + with open(self.vocab_file, 'rb') as fin: + proto_str = fin.read() + + with open(vocab_file, "wb") as writer: + writer.write(proto_str) + + return (vocab_file,) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + gmask_id = self.sp_tokenizer[self.gmask_token] + eos_id = self.sp_tokenizer[self.eos_token] + token_ids_0 = token_ids_0 + [gmask_id, self.sp_tokenizer[self.bos_token]] + if token_ids_1 is not None: + token_ids_0 = token_ids_0 + token_ids_1 + [eos_id] + return token_ids_0 + + def _pad( + self, + encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], + max_length: Optional[int] = None, + padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, + pad_to_multiple_of: Optional[int] = None, + return_attention_mask: Optional[bool] = None, + ) -> dict: + """ + Pad encoded inputs (on left/right and up to predefined length or max length in the batch) + + Args: + encoded_inputs: + Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). + max_length: maximum length of the returned list and optionally padding length (see below). + Will truncate by taking into account the special tokens. + padding_strategy: PaddingStrategy to use for padding. + + - PaddingStrategy.LONGEST Pad to the longest sequence in the batch + - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) + - PaddingStrategy.DO_NOT_PAD: Do not pad + The tokenizer padding sides are defined in self.padding_side: + + - 'left': pads on the left of the sequences + - 'right': pads on the right of the sequences + pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. + This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability + `>= 7.5` (Volta). 
+ return_attention_mask: + (optional) Set to False to avoid returning attention mask (default: set to model specifics) + """ + # Load from model defaults + bos_token_id = self.sp_tokenizer[self.bos_token] + mask_token_id = self.sp_tokenizer[self.mask_token] + gmask_token_id = self.sp_tokenizer[self.gmask_token] + assert self.padding_side == "left" + + required_input = encoded_inputs[self.model_input_names[0]] + seq_length = len(required_input) + + if padding_strategy == PaddingStrategy.LONGEST: + max_length = len(required_input) + + if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): + max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of + + needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length + + # Initialize attention mask if not present. + if max_length is not None: + if "attention_mask" not in encoded_inputs: + if bos_token_id in required_input: + context_length = required_input.index(bos_token_id) + else: + context_length = seq_length + attention_mask = np.ones((1, seq_length, seq_length)) + attention_mask = np.tril(attention_mask) + attention_mask[:, :, :context_length] = 1 + attention_mask = np.bool_(attention_mask < 0.5) + encoded_inputs["attention_mask"] = attention_mask + + if "position_ids" not in encoded_inputs: + if bos_token_id in required_input: + context_length = required_input.index(bos_token_id) + else: + context_length = seq_length + position_ids = np.arange(seq_length, dtype=np.int64) + mask_token = mask_token_id if mask_token_id in required_input else gmask_token_id + if mask_token in required_input: + mask_position = required_input.index(mask_token) + position_ids[context_length:] = mask_position + block_position_ids = np.concatenate( + [np.zeros(context_length, dtype=np.int64), + np.arange(1, seq_length - context_length + 1, dtype=np.int64)]) + encoded_inputs["position_ids"] = np.stack([position_ids, 
block_position_ids], axis=0) + + if needs_to_be_padded: + difference = max_length - len(required_input) + + if "attention_mask" in encoded_inputs: + encoded_inputs["attention_mask"] = np.pad(encoded_inputs["attention_mask"], + pad_width=[(0, 0), (difference, 0), (difference, 0)], + mode='constant', constant_values=True) + if "token_type_ids" in encoded_inputs: + encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ + "token_type_ids" + ] + if "special_tokens_mask" in encoded_inputs: + encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] + if "position_ids" in encoded_inputs: + encoded_inputs["position_ids"] = np.pad(encoded_inputs["position_ids"], + pad_width=[(0, 0), (difference, 0)]) + encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input + + return encoded_inputs diff --git a/tokenizer_config.json b/tokenizer_config.json new file mode 100644 index 0000000000000000000000000000000000000000..f8221e09d53d36aac30bffac7e25888b2dad3743 --- /dev/null +++ b/tokenizer_config.json @@ -0,0 +1,20 @@ +{ + "name_or_path": "THUDM/chatglm-6b", + "bos_token": "", + "eos_token": "", + "end_token": "", + "gmask_token": "[gMASK]", + "mask_token": "[MASK]", + "pad_token": "", + "unk_token": "", + "remove_space": false, + "do_lower_case": false, + "tokenizer_class": "ChatGLMTokenizer", + "num_image_tokens": 0, + "auto_map": { + "AutoTokenizer": [ + "tokenization_chatglm.ChatGLMTokenizer", + null + ] + } +}