import itertools
import json
import os
import shutil
import time

import gradio as gr

import finetune.cover_alpaca2jsonl as cover_alpaca2jsonl
import finetune.finetune as finetune
import finetune.tokenize_dataset_rows as tokenize_dataset_rows





def load_base_model():
    """Scan the ``models`` directory and refresh the model dropdown.

    Each sub-directory under ``models`` is treated as one base model.

    Returns:
        A ``gr.Dropdown.update`` whose choices are the model directory
        names (sorted for a deterministic order), with the first one
        pre-selected, or no selection when none are found.
    """
    model_dir = "models"
    model_names = []
    # Guard against a missing models directory instead of letting
    # os.listdir raise FileNotFoundError into the UI callback.
    if os.path.isdir(model_dir):
        model_names = sorted(
            name for name in os.listdir(model_dir)
            if os.path.isdir(os.path.join(model_dir, name))
        )
    selected = model_names[0] if model_names else None
    return gr.Dropdown.update(choices=model_names, value=selected)
def load_train_data(file):
    """Move an uploaded training file into ``data/`` and return a preview.

    Args:
        file: Gradio file object exposing a ``.name`` path, or ``None``.

    Returns:
        The first (up to) 10 lines of the file followed by ``" \\n..."``,
        or ``""`` when no file was uploaded.
    """
    if file is None:
        return ""
    file_name = os.path.basename(file.name)
    destination = "data/" + file_name
    print(file_name)
    # Replace any previously uploaded file with the same name.
    if os.path.exists(destination):
        os.remove(destination)
    shutil.move(file.name, destination)
    # BUG FIX: the original used [next(f) for x in range(10)], which
    # raises StopIteration when the file has fewer than 10 lines;
    # islice simply stops at end-of-file.
    with open(destination, encoding='utf-8') as f:
        head = list(itertools.islice(f, 10))
    return "".join(head) + " \n..."
def pre_train(file, select_model, max_seq_length, progress=gr.Progress()):
    """Pre-process the uploaded JSON sample into a tokenized dataset.

    Pipeline: convert the JSON file under ``data/`` to JSONL, then
    tokenize the rows with the selected base model into a dataset
    directory named after the file (extension stripped).

    Args:
        file: uploaded Gradio file object (or ``None``).
        select_model: directory name of the base model under ``models/``.
        max_seq_length: maximum token sequence length for tokenization.
        progress: Gradio progress tracker; the default instance is how
            Gradio wires up the UI progress bar — do not remove it.

    Returns:
        A listing of the resulting dataset directory, or a (Chinese)
        error message when validation fails.
    """
    # Guard clauses: validate all inputs before doing any work.
    if select_model is None or select_model == "":
        return "请选择基础模型"
    if file is None:
        return "样本数据不存在，请重新上传"
    fileName = os.path.basename(file.name)
    destination = "data/" + fileName
    # BUG FIX: the original built gr.Error(...) without raising it, so
    # the error was discarded and processing continued on a missing
    # file. Return the message like the other guards so the UI shows it.
    if not os.path.exists(destination):
        return "文件不存在" + destination

    # Convert the JSON file to JSONL format.
    cover_alpaca2jsonl.convert(
        input=destination,
        output=destination.replace(".json", ".jsonl"),
        progress=progress,
    )
    # Tokenize the JSONL rows into a dataset directory.
    model = os.path.abspath("models/" + select_model)
    progress(0, "tokenizing start..")
    datasetDir = destination.replace(".json", "")

    tokenize_dataset_rows.tokenize(
        model_name=model,
        input=destination.replace(".json", ".jsonl"),
        output=datasetDir,
        max_seq_length=int(max_seq_length),
        callback=None,
    )
    progress(1, "tokenizing end..")
    # Brief pause so the progress bar visibly reaches completion.
    time.sleep(2)

    # Report the dataset directory structure back to the UI.
    messages = [datasetDir]
    for f in os.listdir(datasetDir):
        messages.append("----" + f)
    return "\n".join(messages)

def do_train(file, select_model, lora_rank, per_device_train_batch_size,
             gradient_accumulation_steps, max_steps, save_steps,
             progress=gr.Progress()):
    """Fine-tune the selected base model with LoRA on the prepared dataset.

    Requires :func:`pre_train` to have produced the dataset directory
    first. Writes the LoRA weights to ``lora/<model>-lora-<sample>``.

    Args:
        file: uploaded Gradio file object (or ``None``).
        select_model: directory name of the base model under ``models/``.
        lora_rank: LoRA rank.
        per_device_train_batch_size: per-device batch size.
        gradient_accumulation_steps: gradient accumulation steps.
        max_steps: maximum number of training steps.
        save_steps: checkpoint save interval in steps.
        progress: Gradio progress tracker; the default instance is how
            Gradio wires up the UI progress bar — do not remove it.

    Returns:
        A listing of the LoRA output directory, or a (Chinese) error
        message when validation fails.
    """
    # Guard clauses first: validate every input before touching disk.
    if select_model is None or select_model == "":
        return "请选择基础模型"
    if file is None:
        return "样本数据不存在，请重新上传"
    fileName = os.path.basename(file.name)
    destination = "data/" + fileName
    datasetDir = destination.replace(".json", "")
    if not os.path.exists(datasetDir):
        return "dataset不存在，请先对数据进行处理"

    # BUG FIX: only clear previous TensorBoard runs after validation;
    # the original deleted the history even when the inputs were invalid.
    runs = "runs"
    if os.path.exists(runs):
        shutil.rmtree(runs)

    model = os.path.abspath("models/" + select_model)
    lora_name = fileName.replace(".json", "")
    lora_out_dir = "lora/" + select_model + "-lora-" + lora_name
    # Start each run from a clean output directory.
    if os.path.exists(lora_out_dir):
        shutil.rmtree(lora_out_dir)

    config = finetune.Config(
        model_name=model,
        dataset_path=datasetDir,
        lora_rank=int(lora_rank),
        per_device_train_batch_size=int(per_device_train_batch_size),
        gradient_accumulation_steps=int(gradient_accumulation_steps),
        max_steps=int(max_steps),
        save_steps=int(save_steps),
        save_total_limit=2,
        learning_rate=1e-5,
        fp16=True,
        remove_unused_columns=False,
        logging_steps=50,
        output_dir=os.path.abspath(lora_out_dir),
    )
    finetune.fine(config, progress=progress)

    # Report the LoRA output directory structure back to the UI.
    messages = ["LoRA模型目录:\n\n", lora_out_dir]
    for f in os.listdir(lora_out_dir):
        messages.append("----" + f)
    return "\n".join(messages)

# ---- Training UI (Gradio Blocks) ----
# Layout, top to bottom: base-model selection, training-data upload with
# preview, data pre-processing, and LoRA fine-tuning hyperparameters.
# Handlers are the functions defined above; widget construction order is
# the on-screen layout order, so do not reorder these statements.
with gr.Blocks(title="训练", css="#fixed_size_img {height: 240px;}") as trainingApp:
    gr.Markdown("一键训练ChatGLM,当前仅支持chatgml-6b的训练，其他基础模型的训练尚未适配")
    # Row 1: base-model dropdown, populated on demand by the button.
    with gr.Row():
        with gr.Column(scale=4):
            select_model = gr.Dropdown(label="请选择基础模型", choices=[],interactive=True)
        with gr.Column(scale=1):
            load_model_btn = gr.Button("读取模型")
        load_model_btn.click(load_base_model, outputs=select_model)
    # Row 2: training-sample upload; changing the file shows a preview
    # of its first lines and offers a downloadable format example.
    with gr.Row():
        with gr.Column():
            train_data_file = gr.File(label="训练样本")
        with gr.Column():
            gr.File(label="文件样例",value="examples/knowledge_example.json")
            train_data_display = gr.TextArea(lines=5,label="训练样本内容")
        train_data_file.change(load_train_data,inputs=train_data_file,outputs=train_data_display)
    # Row 3: tokenization settings + "pre-process" trigger (pre_train).
    with gr.Row():
        with gr.Column():
            max_seq_length = gr.Slider(minimum=1, maximum=2048, step=1, label="max_seq_length",value=200,interactive=True)
            pre_train_btn = gr.Button("数据预处理")
        with gr.Column():
            pre_train_output = gr.TextArea(label="预处理结果", lines=5)
    # Row 4: LoRA fine-tuning hyperparameters + "train" trigger (do_train).
    with gr.Row():
        with gr.Column():

            lora_rank = gr.Slider(minimum=4, maximum=128, step=4, label="lora_rank",value=8,interactive=True)
            per_device_train_batch_size = gr.Slider(minimum=1, maximum=64, step=1, label="批量处理个数(per_device_train_batch_size)",value=1,interactive=True)
            gradient_accumulation_steps = gr.Slider(minimum=1,maximum=32,step=1,label="梯度累积步数(gradient_accumulation_steps)",value=1,interactive=True)
            max_steps = gr.Slider(minimum=100, maximum=100000, step=100, label="最大训练步数(max_steps)",value=10000,interactive=True)
            save_steps = gr.Slider(minimum=100, maximum=1000, step=100, label="保存步数(save_steps)",value=500,interactive=True)

            train_btn = gr.Button("训练")
        with gr.Column():
            train_output = gr.TextArea(label="训练结果", lines=5)

    # Wire the buttons to their handlers; outputs land in the text areas.
    pre_train_btn.click(pre_train,inputs=[train_data_file,select_model,max_seq_length],outputs=pre_train_output)
    train_btn.click(do_train,inputs=[train_data_file,select_model,lora_rank,per_device_train_batch_size,gradient_accumulation_steps,max_steps,save_steps],outputs=train_output)