import os.path
from functools import partial
import argparse
import random
import time
import re
import json
import codecs

import numpy as np
import pandas as pd
import paddle.io
import paddle.nn.functional as F
import paddle.nn as nn
from paddle.metric import Metric
from tqdm import tqdm
import paddlenlp as ppnlp
from paddlenlp.data import Stack, Tuple, Pad
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import LinearDecayWithWarmup
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score, f1_score

# Load the raw train/test CSVs; missing cells become empty strings so that
# downstream string operations (split, tokenize) never hit NaN.
train_path = 'train.csv'
test_path = 'test.csv'
train_df = pd.read_csv(train_path).fillna(value='')
test_df = pd.read_csv(test_path).fillna(value='')

# Build the deduplicated label vocabulary.  A cell may hold either a single
# label or several labels joined by '|'; both cases must contribute entries.
# BUGFIX: the split-on-'|' branch was previously nested inside the
# "no '|'" branch, so multi-label rows never reached the vocabulary.
select_labels = train_df['label'].unique()
lables = []
for label in select_labels:
    if '|' not in label:
        if label not in lables:
            lables.append(label)
    else:
        for part in label.split('|'):
            if part not in lables:
                lables.append(part)

# Persist the id -> label mapping for later decoding at inference time.
with open('label.json', 'w', encoding='utf-8') as f:
    f.write(json.dumps(dict(zip(range(len(lables)), lables)), ensure_ascii=False, indent=2))

train_data = []
test_data = []

def _encode_one_hot(label, label_vocab):
    """Return a multi-hot 0/1 vector for a '|'-joined label string,
    ordered by *label_vocab*."""
    parts = set(label.split('|'))
    return [1 if name in parts else 0 for name in label_vocab]

# Convert training-set labels into one-hot id vectors.
# (The two original copy-pasted nested loops are replaced by one helper.)
for i in range(train_df.shape[0]):
    label, content = train_df.iloc[i, :]
    train_data.append((content, _encode_one_hot(label, lables)))

# Convert test-set labels into the same id form.
for i in range(test_df.shape[0]):
    label, content = test_df.iloc[i, :]
    test_data.append((content, _encode_one_hot(label, lables)))

# Inspect the first 5 training samples.
print(train_data[:5])

# 构造 load_dataset 的处理函数， 并把它解析成 {'text': text, 'label': label}

def read_custom_data(data, is_test=False):
    """Yield examples as ``{'text': ..., 'label': ...}`` dicts for load_dataset.

    :param data: iterable of (text, label) tuples; for test data only the
        first element of each tuple is used
    :param is_test: when True the label is unknown and emitted as ''
    :return: generator of example dicts
    """
    for sample in data:
        text = sample[0]
        label = '' if is_test else sample[1]
        yield {'text': text, 'label': label}

# Load the training set
train_ds = load_dataset(read_custom_data, data=train_data, is_test=False, lazy=False)
# Load the validation set (the test CSV still carries labels, hence is_test=False)
dev_ds = load_dataset(read_custom_data, data=test_data, is_test=False, lazy=False)

# NOTE(review): the message says "top 2 samples" but this prints only the
# single sample at index 2 -- confirm whether train_ds[:2] was intended.
print(f'train data top 2 samples: {train_ds[2]}')

# convert_example 模型的转换函数， 用于把文本转换成 id 的形式
def convert_example(example, tokenizer, max_seq_len=512, is_test=False):
    """Tokenize one example dict into model inputs.

    :param example: dict with 'text' and (for training) 'label' keys
    :param tokenizer: callable tokenizer returning input_ids/token_type_ids
    :param max_seq_len: truncation length passed to the tokenizer
    :param is_test: when True no label vector is produced
    :return: (input_ids, token_type_ids[, float32 label array])
    """
    encoded = tokenizer(text=example['text'], max_seq_len=max_seq_len)
    ids = encoded['input_ids']
    segments = encoded['token_type_ids']

    if is_test:
        return ids, segments
    return ids, segments, np.array(example['label'], dtype='float32')

def create_dataloader(
        dataset,
        mode='train',
        batch_size=1,
        batchify_fn=None,
        trans_fn=None
    ):
    """Wrap *dataset* in a paddle DataLoader.

    Training mode pairs a DistributedBatchSampler with shuffling; any other
    mode uses a plain, unshuffled BatchSampler.
    """
    if trans_fn:
        dataset = dataset.map(trans_fn)

    is_train = mode == 'train'
    sampler_cls = (
        paddle.io.DistributedBatchSampler if is_train else paddle.io.BatchSampler
    )
    sampler = sampler_cls(dataset=dataset, batch_size=batch_size, shuffle=is_train)

    return paddle.io.DataLoader(
        dataset=dataset,
        collate_fn=batchify_fn,
        batch_sampler=sampler,
        return_list=True
    )

# Build the tokenizer for the pretrained ERNIE model.
model_name_or_path = 'ernie-1.0'
tokenize = ppnlp.transformers.ErnieTokenizer.from_pretrained(model_name_or_path)

max_seq_len = 128
batch_size = 128

# BUGFIX: convert_example's parameter is named `tokenizer`; binding it as
# `tokenize=` raised a TypeError as soon as the first batch was mapped.
trans_fn = partial(
    convert_example,
    tokenizer=tokenize,
    max_seq_len=max_seq_len
)

# Collate a list of samples into a padded batch:
# pad input_ids / token_type_ids to the batch max length, stack the labels.
batchify_fn = lambda samples, fn = Tuple(
    Pad(axis=0, pad_val=tokenize.pad_token_id), # input_ids
    Pad(axis=0, pad_val=tokenize.pad_token_type_id), # segment
    Stack(dtype='float32') # label
) : [data for data in fn(samples)]

train_data_loader = create_dataloader(
    train_ds,
    mode='train',
    batch_size=batch_size,
    batchify_fn=batchify_fn,
    trans_fn=trans_fn
)

dev_data_loader = create_dataloader(
    dev_ds,
    mode='dev',
    batch_size=batch_size,
    batchify_fn=batchify_fn,
    trans_fn=trans_fn
)

# 模型搭建
class MultiLabelClassifier(nn.Layer):
    """ERNIE encoder plus a linear head for multi-label classification.

    Emits raw per-label logits; pair with BCEWithLogitsLoss during training
    and a sigmoid at inference time.
    """

    def __init__(self, pretrained_model, num_labels=2, dropout=None):
        super(MultiLabelClassifier, self).__init__()
        self.ptm = pretrained_model
        self.num_labels = num_labels
        # Fall back to the encoder's own hidden dropout when none is given.
        if dropout is None:
            dropout = self.ptm.config['hidden_dropout_prob']
        self.dropout = nn.Dropout(dropout)
        self.classifier = nn.Linear(self.ptm.config['hidden_size'], num_labels)

    def forward(
            self,
            input_ids,
            token_type_ids = None,
            position_ids = None,
            attention_mask = None,
    ):
        # The encoder returns (sequence_output, pooled_output); only the
        # pooled [CLS] representation feeds the classification head.
        _, pooled = self.ptm(
            input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            attention_mask=attention_mask
        )
        pooled = self.dropout(pooled)
        return self.classifier(pooled)

# MultiLabelReport 是多标签分离的 F1Score 的实现

class MultiLabelReport(Metric):
    """
    Best-threshold F1 score for multi-label text classification.

    Probabilities and ground-truth labels are accumulated across batches via
    ``update``; ``accumulate`` reports the best F1 (``average`` as configured)
    over a sweep of decision thresholds.
    """
    def __init__(self, name='MultiLabelReport', average='micro'):
        super(MultiLabelReport, self).__init__()
        self.average = average
        self._name = name
        self.reset()

    def f1_score(self, y_prob):
        '''
        Returns the f1 score by searching the best threshold in [0.00, 0.99].
        '''
        best_score = 0
        # BUGFIX: the sweep previously ran to 9.99; probabilities live in
        # [0, 1], so thresholds above 1.0 only wasted 90% of the work.
        for threshold in [i * 0.01 for i in range(100)]:
            self.y_pred = y_prob > threshold
            score = f1_score(y_pred=self.y_pred, y_true=self.y_true, average=self.average)
            if score > best_score:
                best_score = score
        return best_score

    def reset(self):
        """
        Resets all of the metric state.
        """
        # BUGFIX: y_prob must be cleared here too -- ``update`` reads it
        # before assigning, so leaving it unset raised AttributeError on the
        # first call after construction.
        self.y_prob = None
        self.y_pred = None
        self.y_true = None

    def update(self, probs, labels):
        """Append one batch of probabilities and labels (paddle Tensors)."""
        if self.y_prob is not None:
            self.y_prob = np.append(self.y_prob, probs.numpy(), axis=0)
        else:
            self.y_prob = probs.numpy()
        if self.y_true is not None:
            self.y_true = np.append(self.y_true, labels.numpy(), axis=0)
        else:
            self.y_true = labels.numpy()

    def accumulate(self):
        """Return the best-threshold F1 over everything seen since reset()."""
        return self.f1_score(y_prob=self.y_prob)

    def name(self):
        """
        Returns metric name
        """
        return self._name

# Training configuration
## Instantiate the ERNIE model
epochs = 5
weight_decay = 0.0
data_path = 'data'
warmup_proportion = 0.0
init_from_ckpt = None
learning_rate = 5e-5

from paddlenlp.transformers import ErnieTokenizer, ErnieModel
pretrained_model = ErnieModel.from_pretrained(model_name_or_path)
model = MultiLabelClassifier(pretrained_model, num_labels=len(train_ds.data[0]['label']))

# Warm-start from a checkpoint when one is configured.
# BUGFIX: the loaded state dict was previously never applied to the model.
if init_from_ckpt and os.path.isfile(init_from_ckpt):
    state_dict = paddle.load(init_from_ckpt)
    model.set_dict(state_dict)

# Total optimizer steps = batches per epoch * number of epochs.
num_trainning_steps = len(train_data_loader) * epochs

lr_scheduler = LinearDecayWithWarmup(learning_rate, num_trainning_steps, warmup_proportion)

# Generate parameter names needed to perform weight decay.
# All bias and LayerNorm parameters are excluded.
decay_params = [
    p.name for n, p in model.named_parameters()
    if not any(nd in n for nd in ['bias', 'norm'])
]

# AdamW optimizer.
# BUGFIX: pass the scheduler (not the constant float) as learning_rate so
# that lr_scheduler.step() in the training loop actually changes the LR.
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    apply_decay_param_fun=lambda x: x in decay_params
)

# Multi-label evaluation metric.
metric = MultiLabelReport()
# BCEWithLogitsLoss applies a per-label sigmoid + binary cross entropy,
# the standard loss for multi-label classification over raw logits.
criterion = paddle.nn.BCEWithLogitsLoss()

# Model evaluation
@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader):
    """Run one full pass over *data_loader* and print mean loss and best F1.

    Puts the model in eval mode for the pass, then restores train mode and
    clears the metric state before returning.
    """
    model.eval()
    metric.reset()
    losses = []
    f1 = 0.0
    for batch in tqdm(data_loader):
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        probs = F.sigmoid(logits)
        losses.append(loss.numpy())  # detach to numpy so np.mean works cleanly
        metric.update(probs, labels)
    # Hoisted out of the loop: only the final accumulated value is reported.
    f1 = metric.accumulate()

    # BUGFIX: np.mead -> np.mean (the original crashed on the first eval).
    print("eval loss: %.5f, f1 score: %.5f" % (np.mean(losses), f1))
    model.train()
    # BUGFIX: the original called model.reset(), which does not exist on a
    # paddle Layer; the intent was to clear the accumulated metric state.
    metric.reset()

def do_train(model, train_data_loader, dev_data_loader):
    """Fine-tune *model*: log every 50 steps, checkpoint every 100 steps,
    and evaluate on the dev set after each epoch.

    Relies on the module-level epochs, criterion, metric, optimizer,
    lr_scheduler, tokenize and model_name_or_path.
    """
    global_step = 0
    tic_train = time.time()
    for epoch in range(1, epochs + 1):
        for step, batch in enumerate(train_data_loader, start=1):
            input_ids, token_type_ids, labels = batch
            logits = model(input_ids, token_type_ids)
            loss = criterion(logits, labels)
            probs = F.sigmoid(logits)
            metric.update(probs, labels)
            f1_score = metric.accumulate()
            global_step += 1
            if global_step % 50 == 0:
                # BUGFIX: we log every 50 steps, so throughput is 50 steps
                # per elapsed interval (the original hard-coded 10).
                print("global step %d, epoch: %d, batch: %d, loss: %.5f , f1 score: %.5f, speed: %.2f step/s"
                      % (global_step, epoch, step, loss, f1_score, 50 / (time.time() - tic_train)))
                tic_train = time.time()
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            # clear_grad is the paddle 2.x API (clear_gradients is legacy).
            optimizer.clear_grad()

            if global_step % 100 == 0:
                save_dir = os.path.join('checkpoint', 'model{}'.format(model_name_or_path))
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                save_param_path = os.path.join(save_dir, 'model_state.pdparams')
                # BUGFIX: stat_dict -> state_dict (the original raised
                # AttributeError at the very first checkpoint).
                paddle.save(model.state_dict(), save_param_path)
                tokenize.save_pretrained(save_dir)
        evaluate(model, criterion, metric, dev_data_loader)

do_train(model, train_data_loader, dev_data_loader)