import pandas as pd
import numpy as np
from tqdm import tqdm
import paddle
from paddle.io import Dataset, DataLoader
from paddlenlp.transformers import AutoModelForSequenceClassification, AutoTokenizer
from paddle.optimizer import AdamW
from paddle.optimizer.lr import CosineAnnealingDecay
from paddle import nn
import torch

from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import re
import warnings

warnings.filterwarnings("ignore")


def set_random_seed(config):
    """Seed NumPy's and Paddle's global RNGs from ``config.random_seed`` for reproducibility."""
    seed = config.random_seed
    np.random.seed(seed)
    paddle.seed(seed)


class Config():
    """Central configuration: data paths, hyper-parameters and label vocabularies."""
    train_data = 'data/train.csv'
    test_data = 'data/test.csv'
    result_data_save = 'result/bert.csv'
    max_seq_size = 24  # maximum sequence length (in tokens)
    batch_size = 64  # batch size
    learning_rate = 1e-5
    # Label-name -> class-id vocabulary.
    label_k_v_vocab = {'HomeAppliance-Control': 0, 'Calendar-Query': 1, 'Audio-Play': 2, 'Music-Play': 3,
                       'TVProgram-Play': 4, 'Video-Play': 5, 'Weather-Query': 6, 'Other': 7, 'Alarm-Update': 8,
                       'Travel-Query': 9, 'FilmTele-Play': 10, 'Radio-Listen': 11}
    # Inverse mapping: class-id -> label-name (used to decode predictions).
    label_v_k_vocab = {idx: name for name, idx in label_k_v_vocab.items()}

    mode = 'train'
    print_loss = 20
    epochs = 15
    test_size = 0.2
    random_seed = 42
    device = 'cpu'
    model_save_path = 'model/paddle'
    best_model_save_path = 'model/paddle/best'

    tokenizer = None  # set at runtime via AutoTokenizer.from_pretrained
    model_path = 'ernie-3.0-xbase-zh'

    print_log = 20  # log training progress every N iterations


class MyDataset(Dataset):
    """Map-style dataset: holds raw texts (and optional labels), tokenizing lazily per item."""

    def __init__(self, config: Config, data: list, label: list = None):
        self.data = data
        self.tokenizer = config.tokenizer
        self.max_seq_len = config.max_seq_size
        self.len = len(data)
        self.label = label

    def __getitem__(self, idx):
        """Tokenize one sample.

        Returns a 4-element list: [input_ids, token_type_ids, attention_mask,
        label-or-None]. The 4th slot is always present so the collator can
        unzip batches uniformly; None signals "no label" (predict mode).
        """
        encoded = self.tokenizer.encode(self.data[idx],
                                        return_token_type_ids=True,
                                        return_attention_mask=True)
        sample = [encoded['input_ids'],
                  encoded['token_type_ids'],
                  encoded['attention_mask']]
        sample.append([self.label[idx]] if self.label is not None else None)
        return sample

    def __len__(self):
        return self.len


class MyCollator:
    """Batch collator: pads/truncates tokenized samples to a common length per batch."""

    def __init__(self, max_seq_len, sep_token_id):
        self.max_seq_len = max_seq_len
        self.sep_token_id = sep_token_id

    def pad(self, input_ids_list, token_type_ids_list, attention_mask_list, max_seq_len):
        """Pad (with zeros) or truncate each sample to ``max_seq_len``.

        Returns int64 tensors (input_ids, token_type_ids, attention_mask),
        each of shape (batch, max_seq_len).
        """
        padded_ids, padded_types, padded_mask = [], [], []
        for ids, types, mask in zip(input_ids_list, token_type_ids_list, attention_mask_list):
            gap = max_seq_len - len(ids)
            if gap > 0:
                # Shorter than the target length: right-pad everything with zeros.
                padded_ids.append(list(ids) + [0] * gap)
                padded_types.append(list(types) + [0] * gap)
                padded_mask.append(list(mask) + [0] * gap)
            else:
                # At or over the target length: truncate, forcing the final
                # position to the tokenizer's SEP id so the sequence stays well-formed.
                padded_ids.append(list(ids[:max_seq_len - 1]) + [self.sep_token_id])
                padded_types.append(list(types[:max_seq_len]))
                padded_mask.append(list(mask[:max_seq_len]))
        input_ids = paddle.to_tensor(padded_ids, dtype='int64')
        token_type_ids = paddle.to_tensor(padded_types, dtype='int64')
        attention_mask = paddle.to_tensor(padded_mask, dtype='int64')
        return input_ids, token_type_ids, attention_mask

    def __call__(self, examples):
        """Collate a list of dataset samples into a model-ready batch dict."""
        input_ids_list, token_type_ids_list, attention_mask_list, labels = zip(*examples)
        # Pad only to the batch's longest sequence, capped at the configured maximum.
        batch_longest = max(len(ids) for ids in input_ids_list)
        target_len = min(batch_longest, self.max_seq_len)
        input_ids, token_type_ids, attention_mask = self.pad(
            input_ids_list, token_type_ids_list, attention_mask_list, target_len)

        batch = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'attention_mask': attention_mask
        }
        # Labels are absent (None) in predict mode; attach them only when present.
        if labels[0] is not None:
            batch['labels'] = paddle.to_tensor(list(labels), dtype='int64')

        return batch


def read_data(config: 'Config'):
    """Load the raw dataset selected by ``config.mode``.

    In 'train' mode: reads ``config.train_data``, drops rows labelled
    '未知' (unknown), fills NaNs with '' and maps label names to integer
    class ids via ``config.label_k_v_vocab``.
    In 'predict' mode: reads ``config.test_data`` as-is.

    NOTE(review): this reads comma-separated CSV while ``create_dataloader``
    reads the same paths as tab-separated with no header — confirm which
    format the data files actually use.

    Raises:
        ValueError: if ``config.mode`` is neither 'train' nor 'predict'.
    """
    if config.mode == 'train':
        data = pd.read_csv(config.train_data)
        # Drop unusable rows (label literally '未知' / unknown).
        data = data[data['label'] != '未知']
        data = data.fillna('')
        # Map label names to integer class ids.
        data['label'] = data['label'].map(config.label_k_v_vocab)
        return data
    elif config.mode == 'predict':
        data = pd.read_csv(config.test_data)
        return data
    # Fail loudly instead of silently returning None on an unknown mode.
    raise ValueError("config.mode must be 'train' or 'predict', got {!r}".format(config.mode))


def create_dataloader(config: Config):
    """Build the DataLoader(s) appropriate for ``config.mode``.

    Returns:
        (train_dataloader, val_dataloader) in 'train' mode, or
        (data_df, predict_dataloader) in 'predict' mode.

    Raises:
        ValueError: if ``config.mode`` is neither 'train' nor 'predict'.
    """
    # Collator pads each batch to its longest sequence, capped at max_seq_size.
    collate_fn = MyCollator(config.max_seq_size, config.tokenizer.sep_token_id)
    if config.mode == 'train':
        # Tab-separated file, no header: column 0 = text, column 1 = label name.
        data_df = pd.read_csv(config.train_data, sep='\t', header=None)
        data_df = data_df.rename(columns={0: 'text', 1: 'label'})
        data_df['label'] = data_df['label'].map(config.label_k_v_vocab)
        # Hold out a validation split (deterministic via random_seed).
        X_train, X_val, y_train, y_val = train_test_split(data_df['text'].tolist(), data_df['label'].tolist(),
                                                          test_size=config.test_size,
                                                          random_state=config.random_seed)
        train_dataloader = DataLoader(MyDataset(config, X_train, y_train), batch_size=config.batch_size, shuffle=True,
                                      collate_fn=collate_fn)
        val_dataloder = DataLoader(MyDataset(config, X_val, y_val), batch_size=config.batch_size, shuffle=True,
                                   collate_fn=collate_fn)
        return train_dataloader, val_dataloder
    elif config.mode == 'predict':
        # Tab-separated file, no header: column 0 = text. Order must be preserved
        # (shuffle=False) so predictions line up with the returned DataFrame rows.
        data_df = pd.read_csv(config.test_data, sep='\t', header=None)
        data_df = data_df.rename(columns={0: 'text'})
        predict_dataloader = DataLoader(MyDataset(config, data_df['text'].tolist()), batch_size=config.batch_size,
                                        shuffle=False, collate_fn=collate_fn)
        return data_df, predict_dataloader
    # Fail loudly instead of silently returning None on an unknown mode.
    raise ValueError("config.mode must be 'train' or 'predict', got {!r}".format(config.mode))


def predict(config: Config, model, data_df: pd.DataFrame, predict_dataloader: DataLoader):
    """Run inference over ``predict_dataloader`` and write an (ID, Target) CSV.

    Predicted class indices are mapped back to label names via
    ``config.label_v_k_vocab``; results are saved to ``config.result_data_save``
    with 1-based row IDs.
    """
    model.eval()
    predict_result = []
    # Inference only: disable autograd to avoid building a useless graph.
    with paddle.no_grad():
        for iter_id, batch in tqdm(enumerate(predict_dataloader)):
            input_ids = batch['input_ids']
            token_type_ids = batch['token_type_ids']
            attention_mask = batch['attention_mask']
            # Forward pass
            logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
            logits = paddle.argmax(logits, axis=1)
            # Collect this batch's predicted class indices.
            y_pred = [[i] for i in logits.cpu().detach().numpy()]
            predict_result += y_pred

    # Decode indices to label names; IDs are 1-based row numbers.
    data_df['Target'] = [config.label_v_k_vocab[result[0]] for result in predict_result]
    data_df['ID'] = [i + 1 for i in range(len(data_df))]
    data_df[['ID', 'Target']].to_csv(config.result_data_save, index=False)


def val(model, val_dataloader: DataLoader):
    """Evaluate ``model`` on ``val_dataloader``.

    Returns:
        (avg_loss, avg_acc): mean cross-entropy loss and mean per-batch
        accuracy over all validation batches.
    """
    model.eval()
    loss_fn = nn.loss.CrossEntropyLoss()
    total_acc, total_loss, test_num_batch = 0., 0., 0
    # Evaluation only: disable autograd to avoid building a useless graph.
    with paddle.no_grad():
        for iter_id, batch in enumerate(val_dataloader):
            input_ids = batch['input_ids']
            token_type_ids = batch['token_type_ids']
            attention_mask = batch['attention_mask']
            labels = batch['labels']
            # Forward pass
            logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)

            # Cross-entropy on the raw logits.
            loss = loss_fn(logits, labels)

            # Batch accuracy from the argmax predictions.
            logits = paddle.argmax(logits, axis=1)
            acc = accuracy_score(labels, logits)
            total_loss += loss.item()
            total_acc += acc
            test_num_batch += 1

    return total_loss / test_num_batch, total_acc / test_num_batch


def train(config: Config, train_dataloader: DataLoader, val_dataloader: DataLoader):
    """Fine-tune a pretrained sequence-classification model.

    Trains for ``config.epochs`` epochs with AdamW + cosine LR decay,
    validates after each epoch, and saves model + tokenizer to
    ``config.best_model_save_path`` whenever validation accuracy improves.

    Returns:
        (model, loss_list): the last-epoch model and per-iteration training losses.
    """
    # Build the classifier head on top of the pretrained encoder.
    model = AutoModelForSequenceClassification.from_pretrained(config.model_path,
                                                               num_classes=len(config.label_k_v_vocab))
    # Cosine-annealed LR, stepped once per batch (T_max = total training steps).
    num_training_steps = len(train_dataloader) * config.epochs
    lr_scheduler = CosineAnnealingDecay(config.learning_rate, num_training_steps, eta_min=5e-6)
    # Exclude bias and normalization parameters from weight decay.
    decay_params = [
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ["bias", "norm"])
    ]
    opt = AdamW(learning_rate=lr_scheduler, parameters=model.parameters(),
                apply_decay_param_fun=lambda x: x in decay_params)

    # Loss function
    loss_fn = nn.loss.CrossEntropyLoss()
    best_acc = 0
    # Per-iteration loss history, returned to the caller.
    loss_list = []
    for epoch in range(config.epochs):
        total_acc, total_loss, train_num_batch = 0., 0., 0
        model.train()
        for iter_id, batch in enumerate(train_dataloader):
            input_ids = batch['input_ids']
            token_type_ids = batch['token_type_ids']
            attention_mask = batch['attention_mask']
            labels = batch['labels']
            # Forward pass
            logits = model(input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=attention_mask)
            # Cross-entropy on the raw logits.
            loss = loss_fn(logits, labels)

            logits = paddle.argmax(logits, axis=1)
            # Batch accuracy from the argmax predictions.
            acc = accuracy_score(labels, logits)
            total_loss += loss.item()
            total_acc += acc

            # Backward pass, parameter update, then LR schedule step.
            loss.backward()
            opt.step()
            opt.clear_grad()
            lr_scheduler.step()

            # Periodic progress log.
            if iter_id % config.print_log == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(epoch, iter_id, loss.item(), acc))

            train_num_batch += 1
            loss_list.append(loss.item())

        val_loss, val_acc = val(model, val_dataloader)
        print('-' * 30)
        print('epoch:{}, avg_train_loss:{}. avg_train_acc:{}, val_loss:{}. val_acc:{}'.
              format(epoch, total_loss / train_num_batch, total_acc / train_num_batch, val_loss, val_acc))

        # Checkpoint whenever validation accuracy improves.
        if val_acc > best_acc:
            print('保存最优模型')
            best_acc = val_acc
            config.tokenizer.save_pretrained(config.best_model_save_path)
            model.save_pretrained(config.best_model_save_path)
        print('-' * 30)
    return model, loss_list


if __name__ == '__main__':
    config = Config()

    set_random_seed(config)

    # Run on the first GPU. NOTE(review): this ignores Config.device ('cpu') — confirm which is intended.
    paddle.device.set_device('gpu:0')

    # NOTE(review): the run mode is hard-coded here, overriding Config.mode ('train');
    # edit this line to switch between training and prediction.
    config.mode = 'predict'
    if config.mode == 'train':
        # Initialize the tokenizer from the base pretrained model.
        config.tokenizer = AutoTokenizer.from_pretrained(config.model_path)
        # Build train/validation dataloaders.
        train_dataloader, val_dataloder = create_dataloader(config)
        # Train (best checkpoint is saved inside train()).
        model, loss_list = train(config, train_dataloader, val_dataloder)
        # Also save the final-epoch model and tokenizer.
        config.tokenizer.save_pretrained(config.model_save_path)
        model.save_pretrained(config.model_save_path)

    elif config.mode == 'predict':
        # Load the tokenizer and the best checkpoint saved during training.
        config.tokenizer = AutoTokenizer.from_pretrained(config.best_model_save_path)
        model = AutoModelForSequenceClassification.from_pretrained(config.best_model_save_path)
        # Build the prediction dataloader (order-preserving).
        data_df, pred_dataloader = create_dataloader(config)
        # Predict and write results to config.result_data_save.
        predict(config, model, data_df, pred_dataloader)

    print('done.')
