#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File  : PromptPaddle.py
# @Author: Richard Chiming Xu
# @Date  : 2023/3/14
# @Desc  : 基于OpenAI的分类

from transformers import OpenAIGPTTokenizer, OpenAIGPTForSequenceClassification, AdamW

import pandas as pd
import numpy as np
from tqdm import tqdm
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from torch.optim.lr_scheduler import CosineAnnealingLR
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import re
import warnings

warnings.filterwarnings("ignore")


def set_random_seed(config):
    """Seed numpy and torch (CPU and all CUDA devices) RNGs from config.random_seed."""
    seed = config.random_seed
    np.random.seed(seed)
    torch.manual_seed(seed)
    # CUDA seeding is a no-op when no GPU is present, so this is always safe.
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


class Config():
    # Central configuration: file paths, hyper-parameters and runtime state.
    train_data = 'data/train.csv'  # tab-separated file: text \t label
    test_data = 'data/test.csv'  # tab-separated file: text only
    result_data_save = 'result/bert.csv'  # prediction output path
    max_seq_size = 24  # maximum sentence length (in tokens)
    batch_size = 64  # batch size
    learning_rate = 1e-5
    # label -> integer id vocabulary (12 intent classes)
    label_k_v_vocab = {'HomeAppliance-Control': 0, 'Calendar-Query': 1, 'Audio-Play': 2, 'Music-Play': 3,
                       'TVProgram-Play': 4, 'Video-Play': 5, 'Weather-Query': 6, 'Other': 7, 'Alarm-Update': 8,
                       'Travel-Query': 9, 'FilmTele-Play': 10, 'Radio-Listen': 11}
    # inverse mapping: integer id -> label string
    label_v_k_vocab = dict(zip(label_k_v_vocab.values(), label_k_v_vocab.keys()))

    mode = 'train'  # 'train' or 'predict'
    print_loss = 20  # NOTE(review): appears unused — the code below reads print_log instead
    epochs = 15
    test_size = 0.2  # fraction of training data held out for validation
    random_seed = 42
    device = 'cpu'  # overwritten at startup with 'cuda' when available
    model_save_path = 'model/paddle'

    tokenizer = None  # filled in at startup via from_pretrained
    model_path = 'GPT2'  # NOTE(review): OpenAIGPT* classes are loaded from a 'GPT2' path — confirm the checkpoint matches the architecture

    print_log = 20  # print training stats every N steps


class MyDataset(Dataset):
    """Dataset wrapping raw texts (and optional integer labels) for classification.

    Each item is a dict (input_ids / token_type_ids / attention_mask / labels)
    so the collator below can look fields up by name. The original returned a
    plain list, which the collator then indexed with string keys — a TypeError
    at the first batch.
    """

    def __init__(self, config: 'Config', data: list, label: list = None):
        # data: list of raw text strings; label: parallel list of int ids
        # (None in predict mode).
        self.data = data
        self.tokenizer = config.tokenizer
        self.max_seq_len = config.max_seq_size
        self.len = len(data)
        self.label = label

    def __getitem__(self, idx):
        text = self.data[idx]
        # Fix: tokenizer.encode() returns a bare id list and ignores the
        # return_* flags; calling the tokenizer directly returns the dict
        # (input_ids / token_type_ids / attention_mask) expected below.
        inputs = self.tokenizer(text, return_token_type_ids=True, return_attention_mask=True)
        return {
            'input_ids': inputs['input_ids'],
            'token_type_ids': inputs['token_type_ids'],
            'attention_mask': inputs['attention_mask'],
            # Scalar label (not wrapped in a list) so the collated labels
            # tensor is 1-D, as CrossEntropyLoss expects. None when predicting.
            'labels': self.label[idx] if self.label is not None else None,
        }

    def __len__(self):
        return self.len


class MyCollator:
    """Pads a batch of tokenized examples to a common length, capped at max_seq_len."""

    def __init__(self, max_seq_len):
        self.max_seq_len = max_seq_len

    def __call__(self, examples):
        # Gather per-field lists (fix: the original read example['inputs_ids'],
        # a key nothing produced).
        input_ids_list = [example['input_ids'] for example in examples]
        token_type_ids_list = [example['token_type_ids'] for example in examples]
        attention_mask_list = [example['attention_mask'] for example in examples]
        labels = [example.get('labels') for example in examples]

        # Pad to the longest sequence in the batch, but never beyond
        # max_seq_len (the original stored max_seq_len and never used it).
        max_length = min(max(len(input_ids) for input_ids in input_ids_list), self.max_seq_len)
        batch_size = len(examples)
        input_ids_tensor = torch.zeros((batch_size, max_length), dtype=torch.long)
        token_type_ids_tensor = torch.zeros_like(input_ids_tensor)
        attention_mask_tensor = torch.zeros_like(input_ids_tensor)

        for i, input_ids in enumerate(input_ids_list):
            # Truncate over-long sequences; shorter ones keep zero padding.
            seq_len = min(len(input_ids), max_length)
            input_ids_tensor[i, :seq_len] = torch.tensor(input_ids[:seq_len], dtype=torch.long)
            token_type_ids_tensor[i, :seq_len] = torch.tensor(token_type_ids_list[i][:seq_len], dtype=torch.long)
            attention_mask_tensor[i, :seq_len] = torch.tensor(attention_mask_list[i][:seq_len], dtype=torch.long)

        batch = {
            'input_ids': input_ids_tensor,
            'token_type_ids': token_type_ids_tensor,
            'attention_mask': attention_mask_tensor,
        }
        # Predict mode has no labels: omit the key entirely (fix: the original
        # fed None values to torch.tensor, which raises).
        if labels[0] is not None:
            batch['labels'] = torch.tensor(labels, dtype=torch.long)
        return batch


def create_dataloader(config: Config):
    """Build dataloaders according to config.mode.

    'train'  : read config.train_data, map labels to ids, split off a
               validation set, return (train_dataloader, val_dataloader).
    'predict': read config.test_data, return (data_df, predict_dataloader).
    Any other mode returns None.
    """
    collate_fn = MyCollator(config.max_seq_size)
    if config.mode == 'train':
        # Tab-separated file: column 0 = text, column 1 = label string.
        data_df = pd.read_csv(config.train_data, sep='\t', header=None)
        data_df = data_df.rename(columns={0: 'text', 1: 'label'})
        # Map label strings to integer ids.
        data_df['label'] = data_df['label'].map(config.label_k_v_vocab)
        texts = data_df['text'].tolist()
        targets = data_df['label'].tolist()
        # Hold out a validation split with a fixed seed.
        X_train, X_val, y_train, y_val = train_test_split(texts, targets,
                                                          test_size=config.test_size,
                                                          random_state=config.random_seed)
        # Both loaders shuffle (matches the original behavior).
        loaders = [DataLoader(MyDataset(config, xs, ys),
                              batch_size=config.batch_size,
                              shuffle=True,
                              collate_fn=collate_fn)
                   for xs, ys in ((X_train, y_train), (X_val, y_val))]
        return loaders[0], loaders[1]
    elif config.mode == 'predict':
        # Test file has a single text column; keep order (no shuffling).
        data_df = pd.read_csv(config.test_data, sep='\t', header=None)
        data_df = data_df.rename(columns={0: 'text'})
        predict_dataloader = DataLoader(MyDataset(config, data_df['text'].tolist()),
                                        batch_size=config.batch_size,
                                        shuffle=False, collate_fn=collate_fn)
        return data_df, predict_dataloader


# def predict(config: Config, model, data_df: pd.DataFrame, predict_dataloader: DataLoader):
#     model.eval()
#     predict_result = []
#     for iter_id, batch in tqdm(enumerate(predict_dataloader)):
#         input_ids = batch['input_ids']
#         token_type_ids = batch['token_type_ids']
#         attention_mask = batch['attention_mask']
#         # 模型计算
#         logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
#         logits = torch.argmax(logits, dim=1)  # fixed: paddle.argmax was a PaddlePaddle leftover
#         # 获取结果
#         y_pred = [[i] for i in logits.cpu().detach().numpy()]
#         # 统计结果
#         predict_result += y_pred
#
#     data_df['Target'] = [config.label_v_k_vocab[result[0]] for result in predict_result]
#     data_df['ID'] = [i + 1 for i in range(len(data_df))]
#     data_df[['ID', 'Target']].to_csv(config.result_data_save, index=False)


def evaluation(config: Config, model, val_dataloader):
    '''
    Run one evaluation pass over the validation set.
    :param config: configuration object (reads config.device)
    :param model: model whose output's first two entries are (loss, logits)
    :param val_dataloader: validation DataLoader yielding dict batches
    :return: (average validation loss, macro F1 score)
    '''
    # Switch to inference mode (disables dropout etc.).
    model.eval()
    preds = []
    labels = []
    val_loss = 0.
    val_iterator = tqdm(val_dataloader, desc='验证中', total=len(val_dataloader))
    # No gradients needed during validation.
    with torch.no_grad():
        for batch in val_iterator:
            # Keep a CPU copy of the gold labels before moving the batch.
            labels.append(batch['labels'])
            # Fix: Config is a plain object, not a mapping — config['device']
            # raised TypeError; use attribute access.
            batch = {item: value.to(config.device) for item, value in batch.items()}

            # Slicing a transformers ModelOutput yields its tuple form,
            # so this works for both tuple and ModelOutput returns.
            loss, logits = model(**batch)[:2]
            val_loss += loss.item()
            # Predicted class = argmax over logits; move to CPU for sklearn.
            pred = logits.argmax(dim=-1).detach().cpu()
            preds.append(pred)
    # Average loss over batches.
    avg_val_loss = val_loss / len(val_dataloader)
    # Concatenate per-batch tensors into flat arrays.
    labels = torch.cat(labels, dim=0).numpy()
    preds = torch.cat(preds, dim=0).numpy()
    # Macro F1 weighs all intent classes equally.
    f1 = f1_score(labels, preds, average='macro')
    return avg_val_loss, f1


def train(config: Config, train_dataloader: DataLoader, val_dataloader: DataLoader):
    '''
    Fine-tune the sequence classifier.
    :param config: configuration object (device, lr, epochs, label vocab, ...)
    :param train_dataloader: training batches (dicts of tensors)
    :param val_dataloader: validation batches, passed to evaluation()
    :return: (trained model, list of per-step training losses)
    '''
    # Initialize the pretrained classifier head with one output per label.
    model = OpenAIGPTForSequenceClassification.from_pretrained(config.model_path,
                                                               num_labels=len(config.label_k_v_vocab))
    # Fix: move the model to the configured device (the original computed
    # config.device but trained on CPU regardless).
    model.to(config.device)
    # NOTE(review): GPT-style classifiers need model.config.pad_token_id set
    # for batched inputs — confirm the checkpoint at config.model_path has one.
    num_training_steps = len(train_dataloader) * config.epochs

    optimizer = AdamW(lr=config.learning_rate, params=model.parameters())
    # Cosine annealing from config.learning_rate down to 5e-6 over the run.
    lr_scheduler = CosineAnnealingLR(optimizer, num_training_steps, eta_min=5e-6)

    loss_fn = nn.CrossEntropyLoss()
    # Per-step loss history, returned for later inspection/plotting.
    loss_list = []
    for epoch in range(config.epochs):
        # (The original called model.train() twice back to back.)
        model.train()
        total_acc, total_loss, train_num_batch = 0., 0., 0
        for iter_id, batch in enumerate(train_dataloader):
            # Fix: move every batch tensor to the training device.
            input_ids = batch['input_ids'].to(config.device)
            token_type_ids = batch['token_type_ids'].to(config.device)
            attention_mask = batch['attention_mask'].to(config.device)
            labels = batch['labels'].to(config.device)
            # Fix: the model returns a ModelOutput, not a logits tensor; the
            # original fed the whole output object to the loss function.
            logits = model(input_ids=input_ids, token_type_ids=token_type_ids,
                           attention_mask=attention_mask).logits
            loss = loss_fn(logits, labels)

            preds = torch.argmax(logits, dim=1)
            # sklearn requires CPU numpy arrays (raises on CUDA tensors).
            acc = accuracy_score(labels.cpu().numpy(), preds.detach().cpu().numpy())
            total_loss += loss.item()
            total_acc += acc

            # Backward pass and parameter/learning-rate updates.
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()

            # Periodic progress log.
            if iter_id % config.print_log == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(epoch, iter_id, loss.item(), acc))

            train_num_batch += 1
            loss_list.append(loss.item())

        # Fix: evaluation() returns macro-F1, not accuracy — label it honestly.
        val_loss, val_f1 = evaluation(config, model, val_dataloader)
        print('-' * 30)
        print('epoch:{}, avg_train_loss:{}. avg_train_acc:{}, val_loss:{}. val_f1:{}'.
              format(epoch, total_loss / train_num_batch, total_acc / train_num_batch, val_loss, val_f1))
        print('-' * 30)
    return model, loss_list


if __name__ == '__main__':
    config = Config()

    # Fix RNG seeds for reproducibility, then pick the compute device.
    set_random_seed(config)
    config.device = 'cuda' if torch.cuda.is_available() else 'cpu'

    config.mode = 'train'
    if config.mode == 'train':
        # Initialize the tokenizer from the pretrained checkpoint.
        # NOTE(review): OpenAIGPTTokenizer with a 'GPT2' path — confirm the
        # checkpoint actually matches the OpenAIGPT architecture.
        config.tokenizer = OpenAIGPTTokenizer.from_pretrained(config.model_path)
        # Build train/validation dataloaders.
        train_dataloader, val_dataloder = create_dataloader(config)
        # Fine-tune the classifier.
        model, loss_list = train(config, train_dataloader, val_dataloder)
        # Persist tokenizer and model side by side for later prediction.
        config.tokenizer.save_pretrained(config.model_save_path)
        model.save_pretrained(config.model_save_path)

    # elif config.mode == 'predict':
    #     # Initialize
    #     config.tokenizer = AutoTokenizer.from_pretrained(config.model_save_path)
    #     model = AutoModelForSequenceClassification.from_pretrained(config.model_save_path)
    #     # Load data
    #     data_df, pred_dataloader = create_dataloader(config)
    #     # Predict
    #     predict(config, model, data_df, pred_dataloader)

    print('done.')
