from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
import os
# Print GPU status at startup so the notebook log shows available hardware.
os.system('nvidia-smi')

# os.system('pip install -r requirements_qwen_1_8.txt -i https://mirrors.aliyun.com/pypi/simple')
# os.system('pip install deepspeed transformers==4.32.0 peft pydantic==1.10.13 transformers_stream_generator einops tiktoken modelscope')

## 2. Model download

# In the Alibaba ModelScope notebook [JupyterLab](https://www.modelscope.cn/my/mynotebook/authorization):
# downloaded models are cached under `~/.cache/modelscope/`

import time
from modelscope import snapshot_download
# NOTE(review): duplicate of the transformers import on the first line;
# kept as-is since only part of the file may be visible.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

def prepare_model():
    """Download Qwen-1.8B-Chat via ModelScope, run a smoke-test chat query,
    and clone the Qwen repo (needed later for ``finetune.py``).

    Side effects: downloads the model snapshot into ``pretrained_llms/``,
    prints a sample model response, and clones
    https://github.com/QwenLM/Qwen into the working directory.
    """
    cache_dir = "pretrained_llms"
    model_name = 'qwen/Qwen-1_8B-Chat'
    # snapshot_download returns the local snapshot directory; use it directly
    # instead of re-deriving the same path by hand (the original left
    # model_dir unused and rebuilt f"{cache_dir}/{model_name}" below).
    model_dir = snapshot_download(model_name, cache_dir=cache_dir)

    # Smoke-test prompt: extract address info from a Chinese sentence and
    # return it in the {address:[...]} format.
    query = "识别以下句子中的地址信息，并按照{address:['地址']}的格式返回。如果没有地址，返回{address:[]}。句子为：在一本关于人文的杂志中，我们发现了一篇介绍北京市海淀区科学院南路76号社区服务中心一层的文章，文章深入探讨了该地点的人文历史背景以及其对于当地居民的影响。"
    tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True, cache_dir=cache_dir)
    model = AutoModelForCausalLM.from_pretrained(model_dir, device_map="auto", trust_remote_code=True, cache_dir=cache_dir).eval()
    response, history = model.chat(tokenizer, query, history=None)
    print("回答如下:\n", response)

    # Clone the official Qwen repo so __main__ can run ./Qwen/finetune.py.
    os.system('git clone https://github.com/QwenLM/Qwen')


def merge_model():
    """Fold the LoRA adapter in ``output_qwen`` into the base weights and
    export tokenizer + merged model under ``qwen-1_8b-finetune``.
    """
    from peft import AutoPeftModelForCausalLM
    from transformers import AutoTokenizer

    adapter_dir = "output_qwen"
    export_dir = "qwen-1_8b-finetune"

    # Load the adapter-wrapped model, merge LoRA weights into the base model,
    # and write safetensors shards capped at 2 GB each.
    peft_model = AutoPeftModelForCausalLM.from_pretrained(adapter_dir, device_map="auto", trust_remote_code=True).eval()
    merged = peft_model.merge_and_unload()
    merged.save_pretrained(export_dir, max_shard_size="2048MB", safe_serialization=True)

    # Export the tokenizer alongside the merged weights.
    AutoTokenizer.from_pretrained(adapter_dir, trust_remote_code=True).save_pretrained(export_dir)

## 7. Deploy the finetuned model locally

def eval():
    """Load the merged finetuned model from ``qwen-1_8b-finetune`` and re-run
    the address-extraction smoke test, then snapshot the pip environment.

    NOTE(review): this function shadows the builtin ``eval``; renaming it
    (together with its call site in ``__main__``) would be safer.
    """
    local_model_path = "qwen-1_8b-finetune"

    # Same address-extraction prompt used for the pretrained smoke test.
    query = "识别以下句子中的地址信息，并按照{address:['地址']}的格式返回。如果没有地址，返回{address:[]}。句子为：在一本关于人文的杂志中，我们发现了一篇介绍北京市海淀区科学院南路76号社区服务中心一层的文章，文章深入探讨了该地点的人文历史背景以及其对于当地居民的影响。"

    model = AutoModelForCausalLM.from_pretrained(local_model_path, device_map="auto", trust_remote_code=True).eval()
    tokenizer = AutoTokenizer.from_pretrained(local_model_path, trust_remote_code=True)

    response, history = model.chat(tokenizer, query, history=None)
    print("回答如下:\n", response)

    ## 8. Save dependency info

    os.system('pip freeze > requirements_qwen_1_8.txt')


# Pipeline entry point: download + smoke-test the base model, LoRA-finetune
# it with the official Qwen finetune script, merge the adapter, then run the
# merged model once.
if __name__ == '__main__':
    prepare_model()
    # Launch LoRA finetuning via the script cloned by prepare_model().
    # NOTE(review): hyperparameters are hard-coded in this shell string; the
    # backslash continuations are part of one string literal — do not split.
    os.system('python ./Qwen/finetune.py \
    --model_name_or_path "pretrained_llms/qwen/Qwen-1_8B-Chat/" \
    --data_path qwen_tune_data/qwen.json \
    --fp16 True \
    --output_dir output_qwen \
    --num_train_epochs 10 \
    --per_device_train_batch_size 2 \
    --per_device_eval_batch_size 1 \
    --gradient_accumulation_steps 8 \
    --evaluation_strategy "no" \
    --save_strategy "steps" \
    --save_steps 1000 \
    --save_total_limit 10 \
    --learning_rate 3e-4 \
    --weight_decay 0.1 \
    --adam_beta2 0.95 \
    --warmup_ratio 0.01 \
    --lr_scheduler_type "cosine" \
    --logging_steps 1 \
    --report_to "none" \
    --model_max_length 512 \
    --lazy_preprocess True \
    --gradient_checkpointing True \
    --use_lora True')
    # Fold the LoRA adapter into the base weights, then evaluate the result.
    merge_model()
    eval()