import pandas as pd

# Read and merge the raw datasets with pandas
train1 = pd.read_csv('virus_train.csv')  # epidemic-Weibo training set
train2 = pd.read_csv('usual_train.csv')  # general-Weibo training set
train = pd.concat([train1, train2])      # merged training set

# BUG FIX: renamed `eval` -> `eval_df`; the original shadowed the builtin eval().
eval_df1 = pd.read_csv('virus_eval_labeled.csv')  # epidemic-Weibo validation set
eval_df2 = pd.read_csv('usual_eval_labeled.csv')  # general-Weibo validation set
eval_df = pd.concat([eval_df1, eval_df2])         # merged validation set

test1 = pd.read_csv('virus_test_labeled.csv')  # epidemic-Weibo test set
test2 = pd.read_csv('usual_test_labeled.csv')  # general-Weibo test set
test = pd.concat([test1, test2])               # merged test set

# Combined frame used only for the statistics below
total = pd.concat([train, eval_df, test])

# Normalize every frame to the unified text_a / label column names
# (the source columns are '情绪标签' = emotion label, '文本' = text).
for df in (train, eval_df, test, total):
    df['label'] = df['情绪标签']
    df['text_a'] = df['文本']
# NOTE: the original additionally called train.rename(...) without
# assigning the result — a no-op, dropped here.

# Exploratory data analysis (EDA)
total.head()

total.info()
train.info()

# Drop rows with missing text (only train/total, matching the original;
# eval_df/test are left untouched)
train = train.dropna(subset=['text_a'])
total = total.dropna(subset=['text_a'])

# Text-length statistics
total['text_a'].map(len).describe()

# Class-label distribution
total['label'].value_counts()

#%matplotlib inline

total['label'].value_counts(normalize=True).plot(kind='bar')

import math
import numpy as np
import os
import collections
from functools import partial
import random
import time
import inspect
import importlib
from tqdm import tqdm

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import IterableDataset
from paddle.utils.download import get_path_from_url
from paddle.dataset.common import md5file

import paddlenlp as ppnlp
from paddlenlp.data import JiebaTokenizer,Pad,Tuple,Vocab,Stack
from paddlenlp.datasets import DatasetBuilder

# Fine-tuned model: Huawei's NeZha (large, whole-word-masking, Chinese).
# BUG FIX: the original name 'nezha-large-wwn-chinese' is not a registered
# PaddleNLP checkpoint — 'wwm' (whole word masking) is the correct spelling.
model_name = 'nezha-large-wwm-chinese'

# Six emotion classes in the SMP2020-EWECT data
model = ppnlp.transformers.NeZhaForSequenceClassification.from_pretrained(model_name, num_classes=6)

tokenizer = ppnlp.transformers.NeZhaTokenizer.from_pretrained(model_name)

# Label vocabulary taken from the (already merged) training DataFrame
label_list = list(train['label'].unique())
print(f'label_list : {label_list}')

# 定义数据集对应文件及其文件存储格式
class EmotitonData(DatasetBuilder):
    """Dataset builder that reads the local train/dev/test emotion files.

    NOTE(review): despite the .csv extension, `_read` expects
    tab-separated "text_a<TAB>label" rows preceded by a header line —
    confirm the files are exported in that format.
    """

    # split name -> local filename; the files must already exist on disk
    split = {
        'train': 'train.csv',  # training set
        'dev': 'eval.csv',     # validation set
        'test': 'test.csv',    # test set
    }

    def _get_data(self, mode, **kwargs):
        # Return the local filename registered for the requested split.
        filename = self.split[mode]
        return filename

    def _read(self, filename: str, *args):
        """Yield {'text_a': ..., 'label': ...} dicts, skipping the header row.

        :param filename: path to a tab-separated two-column file
        :param args: unused, kept for the DatasetBuilder interface
        """
        with open(filename, 'r', encoding='utf-8') as f:
            head = None
            for line in f:
                data = line.strip().split('\t')
                if head is None:
                    head = data  # first line is the column header
                else:
                    text_a, label = data
                    yield {'text_a': text_a, 'label': label}

    def get_labels(self):
        # Label vocabulary built from the training DataFrame at module level.
        return label_list

# 定义数据加载函数

def load_dataset(
        name=None,
        data_files=None,
        splits=None,
        lazy=None,
        **kwargs
):
    """Instantiate the EmotitonData builder and load the requested splits.

    :param name: optional sub-dataset name forwarded to the builder
    :param data_files: explicit file paths, forwarded to read_datasets
    :param splits: split names to load, e.g. ['train', 'dev', 'test']
    :param lazy: whether the builder returns an iterable (lazy) dataset
    :return: one dataset per requested split
    """
    reader_cls = EmotitonData
    # BUG FIX: typo `reader_intance` renamed; stray debug print removed.
    if not name:
        reader_instance = reader_cls(lazy=lazy, **kwargs)
    else:
        reader_instance = reader_cls(lazy=lazy, name=name, **kwargs)

    return reader_instance.read_datasets(data_files=data_files, splits=splits)

# Load the three splits through the custom EmotitonData builder.
# NOTE(review): this requires train.csv / eval.csv / test.csv (tab-separated)
# to already exist — the pandas preprocessing above never writes them;
# confirm an export step exists elsewhere.
train_ds, dev_ds, test_ds = load_dataset(splits=['train','dev','test'])

def convert_example(example, tokenizer, max_seq_len=512, is_test=False):
    """Tokenize one example into model features.

    Returns (input_ids, token_type_ids) when `is_test`, otherwise
    (input_ids, token_type_ids, label) with the label wrapped in a
    length-1 int64 numpy array as expected by the Stack() collator.
    """
    encoded = tokenizer.encode(text=example['text_a'], max_seq_len=max_seq_len)
    features = (encoded['input_ids'], encoded['token_type_ids'])

    if is_test:
        return features
    return features + (np.array([example['label']], dtype='int64'),)

def create_dataloader(
        dataset,
        mode='train',
        batch_size=1,
        batchify_fn=None,
        trans_fn=None
):
    """Wrap `dataset` in a paddle DataLoader.

    Training uses a shuffled DistributedBatchSampler; every other mode
    uses a plain, unshuffled BatchSampler. `trans_fn` (if given) is mapped
    over the dataset first and `batchify_fn` collates samples into batches.
    """
    if trans_fn:
        dataset = dataset.map(trans_fn)

    is_train = mode == 'train'
    sampler_cls = (paddle.io.DistributedBatchSampler if is_train
                   else paddle.io.BatchSampler)
    batch_sampler = sampler_cls(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=is_train,
    )

    return paddle.io.DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        collate_fn=batchify_fn,
        return_list=True
    )

batch_size = 32
max_seq_len = 128

# Bind the tokenizer and max length so dataset.map() only passes the example.
trans_fn = partial(
    convert_example,
    tokenizer=tokenizer,
    max_seq_len=max_seq_len
)

# Collate a list of samples: pad input_ids and token_type_ids to the batch
# maximum, stack the labels.
# BUG FIX: the original default was `fn=True(...)`, which calls the builtin
# True and raises TypeError the moment the lambda is defined; the intended
# default is a paddlenlp.data.Tuple of per-field collators.
batchify_fn = lambda samples, fn=Tuple(
    Pad(pad_val=tokenizer.pad_token_id, axis=0),       # input_ids
    Pad(pad_val=tokenizer.pad_token_type_id, axis=0),  # token_type_ids
    Stack(),                                           # label
): [data for data in fn(samples)]

# Build the three dataloaders; only the training loader shuffles
# (mode='train' inside create_dataloader).
_loader_specs = (
    (train_ds, 'train'),
    (dev_ds, 'dev'),
    (test_ds, 'test'),
)
train_dataloader, dev_dataloader, test_dataloader = (
    create_dataloader(
        dataset=ds,
        mode=split_mode,
        batch_size=batch_size,
        batchify_fn=batchify_fn,
        trans_fn=trans_fn,
    )
    for ds, split_mode in _loader_specs
)

# Optimizer, LR schedule, loss and metric
from paddlenlp.transformers import LinearDecayWithWarmup
learning_rate = 2e-5
epochs = 4
warmup_proportion = 0.1  # fraction of steps spent in linear warmup
weight_decay = 0.01

num_training_steps = len(train_dataloader) * epochs
lr_scheduler = LinearDecayWithWarmup(learning_rate, num_training_steps, warmup_proportion)

# Names of parameters that receive weight decay: everything except bias and
# normalization weights. PERF FIX: computed once up front — the original
# lambda rebuilt this list on every single parameter lookup.
decay_param_names = {
    p.name for n, p in model.named_parameters()
    if not any(nd in n for nd in ['bias', 'norm'])
}
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    apply_decay_param_fun=lambda x: x in decay_param_names,
)

criterion = paddle.nn.loss.CrossEntropyLoss()
metric = paddle.metric.Accuracy()

@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader):
    """Evaluate `model` on `data_loader`; print and return the accuracy.

    Resets the metric before and after evaluation, and restores the model
    to train mode on exit.

    :return: accumulated accuracy over the loader (0.0 if it is empty)
    """
    model.eval()
    metric.reset()
    losses = []
    accu = 0.0  # stays 0.0 when the loader yields no batches
    for batch in data_loader:
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
        accu = metric.accumulate()
    # BUG FIX: the original formatted two %-placeholders with a single
    # argument and passed `accu` outside the %-tuple -> TypeError at runtime.
    print('eval loss %.5f, accu: %.5f' % (np.mean(losses), accu))

    model.train()
    metric.reset()

    return accu

import paddle.nn.functional as F
import os

# BUG FIX: was 'checkpont' — the loading code below reads from 'checkpoint',
# so the best model would never have been found.
save_dir = 'checkpoint'
if not os.path.exists(save_dir):
    os.makedirs(save_dir)

pre_accu = 0   # best validation accuracy seen so far
accu = 0
global_step = 0
# BUG FIX: the original iterated range(1, global_step + 1) with
# global_step == 0, so the training loop never executed.
for epoch in range(1, epochs + 1):
    for step, batch in enumerate(train_dataloader, start=1):
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        probs = F.softmax(logits, axis=1)
        # BUG FIX: accuracy comes from metric.compute, not from the loss
        # criterion as in the original.
        correct = metric.compute(probs, labels)
        metric.update(correct)
        acc = metric.accumulate()

        global_step += 1
        if global_step % 10 == 0:
            print("global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f" % (global_step, epoch, step, loss, acc))
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.clear_grad()

    # BUG FIX: the original called `eval(...)` — the builtin (shadowed by a
    # DataFrame earlier in the file), not the evaluate() helper above.
    accu = evaluate(model, criterion, metric, dev_dataloader)
    print(accu)

    # BUG FIX: checkpoint on the validation accuracy (accu), not on the
    # last training-batch accuracy (acc) as the original did.
    if accu > pre_accu:
        save_param_path = os.path.join(save_dir, 'model_state.pdparams')  # save best model
        paddle.save(model.state_dict(), save_param_path)
        pre_accu = accu

tokenizer.save_pretrained(save_dir)

# Load the checkpoint that performed best on the validation set
import os

params_path = 'checkpoint/model_state.pdparams'
if params_path and os.path.isfile(params_path):
    state_dict = paddle.load(params_path)
    # BUG FIX: the original passed the nonexistent keyword `stat_dict`,
    # which raises TypeError; paddle.nn.Layer.set_dict takes state_dict.
    model.set_dict(state_dict)
    print("Loaded parameters from %s" % params_path)

# Final evaluation on the validation and test sets
evaluate(model, criterion, metric, dev_dataloader)

evaluate(model, criterion, metric, test_dataloader)