######################################################################################################################
# Data loading
import pandas as pd
import numpy as np
import datasets
import random
from PyCmpltrtok.common import sep
from nlp_dataset_emotion_x00500_use import get_data
from nlp_dataset_emotion_x00100_check_emojis import get_en2zh_zh2en
import sys

# Fixed seed so the few-shot history sampled below is reproducible.
random.seed(666)

# en<->zh lookup tables for the emotion labels; the sorted Chinese labels
# form the closed label vocabulary used throughout this script.
en2zh, zh2en = get_en2zh_zh2en('trans/tte_options_zh.trans.txt')
xemotions = sorted(zh2en.keys())
print('emotions:', xemotions)

# Pre-washed dataset files. NOTE(review): record format is defined by
# get_data (not visible here) — downstream code assumes (id, text, tag) rows.
washed_path = '_save/washed/emotionX7'
washed_path_train = washed_path + '_train.txt'
washed_path_val = washed_path + '_val.txt'

sep('train')
train_data = get_data(washed_path_train)
sep('val')
val_data = get_data(washed_path_val)
sep('All over')

# dftrain = pd.read_parquet('data/dftrain.parquet')
# dftest = pd.read_parquet('data/dftest.parquet')

sep('define hist func')
def get_history(shot_for_every_emotion=5, data=None, emotions=None):
    """Sample a few-shot chat "history" of (text, emotion) pairs.

    For each emotion label, up to ``shot_for_every_emotion`` examples are
    collected (the data is shuffled first, so the draw is random but
    reproducible under the module-level seed), then the collected shots are
    shuffled again so labels are interleaved in the prompt.

    Parameters
    ----------
    shot_for_every_emotion : int
        Max examples taken per emotion (previously a hard-coded 5).
    data : list | None
        Records of (id, text, emotion); defaults to module-level
        ``train_data``.  NOTE: shuffled in place, matching original behavior.
    emotions : iterable | None
        Label set to cover; defaults to module-level ``xemotions``.

    Returns
    -------
    list of (text, emotion) tuples, shuffled.
    """
    if data is None:
        data = train_data
    if emotions is None:
        emotions = xemotions
    random.shuffle(data)  # in-place, same side effect as the original
    his = []
    for xtag in emotions:
        cnt = 0
        for _xid, xtext, xem in data:
            if xem != xtag:
                continue
            his.append((xtext, xem))
            cnt += 1
            if cnt >= shot_for_every_emotion:
                break
    random.shuffle(his)
    return his
# Global few-shot history shared by prompt building and inference below.
his = get_history()

dftrain = pd.DataFrame(train_data, columns=('id', 'text', 'tag'))
dftest = pd.DataFrame(val_data, columns=('id', 'text', 'tag'))


# Show the class balance of the training labels.
print('tag value counts:', dftrain['tag'].value_counts())


def build_inputs(query, history):
    """Render a ChatGLM-style multi-round prompt.

    Each (question, answer) pair in *history* becomes one "[Round i]" block;
    *query* is appended as a final round with " -> " and an empty answer slot
    for the model to complete.
    """
    rounds = [
        "[Round {}]\n\n问：{}\n\n答：{}\n\n".format(i + 1, q, a)
        for i, (q, a) in enumerate(history)
    ]
    rounds.append("[Round {}]\n\n问：{} -> \n\n答：".format(len(history) + 1, query))
    return "".join(rounds)


# Sanity-check the prompt format with one example.
print(build_inputs('味道不太行',history=his))

# Wrap every text in the few-shot prompt; the emotion tag is the target.
dftrain['context'] = [build_inputs(x,history=his) for x in dftrain['text']]
dftrain['target'] = [x for x in dftrain['tag']]
dftrain = dftrain[['context','target']]
print(dftrain[:5])

dftest['context'] = [build_inputs(x,history=his) for x in dftest['text']]
dftest['target'] = [x for x in dftest['tag']]
dftest = dftest[['context','target']]
print(dftest[:5])

# Hugging Face datasets, so tokenization below can use .map().
ds_train = datasets.Dataset.from_pandas(dftrain)
ds_val = datasets.Dataset.from_pandas(dftest)

######################################################################################################################
# Token encoding
from tqdm import tqdm
import transformers

model_name = "chatglm2-6b"  # model id or local checkpoint path
# max_seq_length = 512
max_seq_length = 4096
skip_over_length = True  # drop examples whose encoded length reaches the cap

# trust_remote_code is required: ChatGLM2 ships custom tokenizer/model code.
tokenizer = transformers.AutoTokenizer.from_pretrained(
    model_name, trust_remote_code=True)

# NOTE(review): device_map is not a documented AutoConfig.from_pretrained
# argument — presumably ignored here; verify against the transformers version.
config = transformers.AutoConfig.from_pretrained(
    model_name, trust_remote_code=True, device_map='auto')


def preprocess(example):
    """Tokenize one example into a single causal-LM sequence.

    The prompt ("context") and the label text ("target") are encoded
    separately, concatenated, and terminated with the model's EOS token.
    The two lengths are returned so the collator can mask the context part
    out of the loss and the over-length filter can drop long examples.
    """
    prompt_ids = tokenizer.encode(
        example["context"],
        max_length=max_seq_length,
        truncation=True)

    # No special tokens on the target: it continues the prompt sequence.
    answer_ids = tokenizer.encode(
        example["target"],
        max_length=max_seq_length,
        truncation=True,
        add_special_tokens=False)

    return {
        "input_ids": prompt_ids + answer_ids + [config.eos_token_id],
        "context_len": len(prompt_ids),
        "target_len": len(answer_ids),
    }


# Tokenize both splits and keep only the fields the collator needs.
# The filter drops rows where either part hit the truncation cap.
ds_train_token = ds_train.map(preprocess).select_columns(['input_ids', 'context_len', 'target_len'])
if skip_over_length:
    ds_train_token = ds_train_token.filter(
        lambda example: example["context_len"] < max_seq_length and example["target_len"] < max_seq_length)

ds_val_token = ds_val.map(preprocess).select_columns(['input_ids', 'context_len', 'target_len'])
if skip_over_length:
    ds_val_token = ds_val_token.filter(
        lambda example: example["context_len"] < max_seq_length and example["target_len"] < max_seq_length)

######################################################################################################################
# Pipeline construction (batch collation and DataLoaders)

def data_collator(features: list):
    """Collate tokenized examples into a right-padded training batch.

    Sequences are ordered longest-first and padded to the longest sequence
    in the batch.  Label positions covering the context (all but its last
    token) and the padding are set to -100 so only the target tokens
    contribute to the loss.
    """
    lengths = [len(feat["input_ids"]) for feat in features]
    max_len = max(lengths)  # pad everything up to the batch maximum

    batch_ids = []
    batch_labels = []

    for cur_len, feat in sorted(zip(lengths, features), key=lambda pair: -pair[0]):
        ids = feat["input_ids"]
        ctx_len = feat["context_len"]
        pad = max_len - cur_len

        # Labels start at the last context token (standard causal shift);
        # everything before it and the padding tail are ignored (-100).
        labels = [-100] * (ctx_len - 1) + ids[ctx_len - 1:] + [-100] * pad
        padded_ids = ids + [tokenizer.pad_token_id] * pad

        batch_ids.append(torch.LongTensor(padded_ids))
        batch_labels.append(torch.LongTensor(labels))

    return {
        "input_ids": torch.stack(batch_ids),
        "labels": torch.stack(batch_labels),
    }


import torch

# num_workers>0: the collator (and the global tokenizer it reads) runs in
# worker subprocesses.
dl_train = torch.utils.data.DataLoader(ds_train_token, num_workers=2, batch_size=4,
                                       pin_memory=True, shuffle=True,
                                       collate_fn=data_collator)
dl_val = torch.utils.data.DataLoader(ds_val_token, num_workers=2, batch_size=4,
                                     pin_memory=True, shuffle=True,
                                     collate_fn=data_collator)

# Smoke-test: pull one batch through the whole pipeline.
for batch in dl_train:
    break

dl_train.size = 300  # treat every 300 steps as one epoch and validate once

######################################################################################################################
# Model definition
import warnings
warnings.filterwarnings("ignore")


from transformers import AutoTokenizer, AutoModel, TrainingArguments, AutoConfig
import torch
import torch.nn as nn
from peft import get_peft_model, LoraConfig, TaskType

model = AutoModel.from_pretrained("chatglm2-6b",
                                  load_in_8bit=False,
                                  trust_remote_code=True,
                                  device_map='auto')

model.supports_gradient_checkpointing = True  # save CUDA memory
model.gradient_checkpointing_enable()
model.enable_input_require_grads()
#model.lm_head = CastOutputToFloat(model.lm_head)

model.config.use_cache = False  # silence the warnings. Please re-enable for inference!


# LoRA configuration: only the low-rank adapter weights will be trained.
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM, inference_mode=False,
    r=8,
    lora_alpha=32, lora_dropout=0.1,
)

model = get_peft_model(model, peft_config)
model.is_parallelizable = True
model.model_parallel = True
model.print_trainable_parameters()

######################################################################################################################
# Model training

from torchkeras import KerasModel
from accelerate import Accelerator


class StepRunner:
    """One train/eval step for torchkeras' KerasModel, accelerate-aware.

    The loss comes straight from the model's own causal-LM head (the
    ``loss_fn`` argument is kept for interface compatibility but unused).
    """

    def __init__(self, net, loss_fn, accelerator=None, stage="train", metrics_dict=None,
                 optimizer=None, lr_scheduler=None
                 ):
        self.net = net
        self.loss_fn = loss_fn
        self.metrics_dict = metrics_dict
        self.stage = stage
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.accelerator = Accelerator() if accelerator is None else accelerator
        # Put the network in the right mode for this stage once, up front.
        if self.stage == 'train':
            self.net.train()
        else:
            self.net.eval()

    def __call__(self, batch):
        # Forward pass under mixed-precision autocast; the model computes
        # its own loss from the -100-masked labels.
        with self.accelerator.autocast():
            loss = self.net(input_ids=batch["input_ids"], labels=batch["labels"]).loss

        # Backward + optimizer step only during training.
        if self.optimizer is not None and self.stage == "train":
            self.accelerator.backward(loss)
            if self.accelerator.sync_gradients:
                self.accelerator.clip_grad_norm_(self.net.parameters(), 1.0)
            self.optimizer.step()
            if self.lr_scheduler is not None:
                self.lr_scheduler.step()
            self.optimizer.zero_grad()

        # Sum the loss across processes for logging.
        total_loss = self.accelerator.gather(loss).sum()

        # Plain, averageable losses.
        step_losses = {self.stage + "_loss": total_loss.item()}

        # Stateful metrics (none here beyond the learning rate).
        step_metrics = {}
        if self.stage == "train":
            lr = 0.0
            if self.optimizer is not None:
                lr = self.optimizer.state_dict()['param_groups'][0]['lr']
            step_metrics['lr'] = lr
        return step_losses, step_metrics


# Install the custom step logic into torchkeras' KerasModel.
KerasModel.StepRunner = StepRunner


# Save only the trainable LoRA parameters (not the full base model).
def save_ckpt(self, ckpt_path='checkpoint.pt', accelerator=None):
    unwrap_net = accelerator.unwrap_model(self.net)
    unwrap_net.save_pretrained(ckpt_path)


def load_ckpt(self, ckpt_path='checkpoint.pt'):
    # NOTE(review): from_pretrained is a classmethod, so this passes the
    # current (already PEFT-wrapped) self.net as the base model — confirm
    # this does not double-wrap; PeftModel.from_pretrained(base, path) on a
    # fresh base model is the usual pattern.
    self.net = self.net.from_pretrained(self.net, ckpt_path)
    self.from_scratch = False


# Replace KerasModel's default checkpointing with the LoRA-only versions.
KerasModel.save_ckpt = save_ckpt
KerasModel.load_ckpt = load_ckpt

# loss_fn=None: the StepRunner takes the loss from the model output itself.
keras_model = KerasModel(model, loss_fn=None,
                         optimizer=torch.optim.AdamW(model.parameters(), lr=2e-6))
ckpt_path = 'emotion_chatglm2'

# Early-stops after 5 "epochs" (300-step chunks, see dl_train.size) without
# a val_loss improvement; best adapter weights land in ckpt_path.
keras_model.fit(train_data=dl_train,
                val_data=dl_val,
                epochs=100, patience=5,
                monitor='val_loss', mode='min',
                ckpt_path=ckpt_path,
                mixed_precision='fp16'
                )

######################################################################################################################
# Model validation

from peft import PeftModel
# Reload a clean base model, attach the trained adapter, and merge it in
# so inference runs on a single plain model.
model = AutoModel.from_pretrained("chatglm2-6b",
                                  load_in_8bit=False,
                                  trust_remote_code=True,
                                  device_map='auto')
model = PeftModel.from_pretrained(model,ckpt_path)
model = model.merge_and_unload() # merge the LoRA weights into the base model


def predict(text):
    """Classify *text* by letting the model complete "<text> -> " given the
    few-shot history; temperature 0.01 makes decoding near-greedy."""
    query = f"{text} -> "
    response, _history = model.chat(tokenizer, query, history=his,
                                    temperature=0.01)
    return response


# Smoke test of the merged model (return value intentionally discarded).
predict('死鬼，咋弄得这么有滋味呢')


# Predict an emotion tag for every validation row.
preds = ['' for _ in dftest['text']]

from tqdm import tqdm
for i in tqdm(range(len(dftest))):
    # iloc: positional access, correct even if the index is not 0..n-1
    # (the original used .loc[i], which silently assumes a RangeIndex).
    text = dftest['text'].iloc[i]
    preds[i] = predict(text)

dftest['pred'] = preds

# Confusion matrix (rows: true tag, columns: predicted tag).  The original
# computed this pivot table but never displayed it, so it was discarded.
print(dftest.pivot_table(index='tag', columns='pred', values='text', aggfunc='count'))

# Exact-match accuracy over the validation set.
acc = len(dftest.query('tag==pred')) / len(dftest)
print('acc=', acc)

######################################################################################################################
# Model usage (sampled generation)

def predict(text, temperature=0.8):
    """Classify *text* as in validation, but with a configurable (default
    higher) temperature so repeated calls can yield varied answers."""
    query = f"{text} -> "
    response, _history = model.chat(tokenizer, query, history=his,
                                    temperature=temperature)
    return response


# Draw 10 samples for the same input to inspect output variability.
for _ in range(10):
    print(predict('死鬼，咋弄得这么有滋味呢'))

######################################################################################################################
# Save the model

model.save_pretrained("chatglm2-6b-emotion", max_shard_size='1GB')

tokenizer.save_pretrained("chatglm2-6b-emotion")

# Also copy the custom modeling .py files shipped with the checkpoint:
# !ls chatglm2-6b
# !cp  chatglm2-6b/*.py chatglm2-6b-emotion/
