from paddlenlp.datasets import load_dataset
from functools import partial
from paddlenlp.data import Pad, Tuple, Stack
import time

# Load the ChnSentiCorp sentence-level sentiment dataset splits.
train_ds, dev_ds, test_ds = load_dataset('chnsenticorp', splits=['train','dev','test'])
# Fixed typo in the printed messages: "frist" -> "first".
print(f'train first sample: {train_ds[0]}')
print(f'dev first sample: {dev_ds[0]}')
print(f'test first sample: {test_ds[0]}')

# Load the SKEP sentiment pretrained model
from paddlenlp.transformers import SkepForSequenceClassification, SkepTokenizer

model_name = 'skep_ernie_1.0_large_ch'
# Load the classification model by name; the number of classes comes
# from the dataset's label list
model = SkepForSequenceClassification.from_pretrained(
    pretrained_model_name_or_path=model_name, num_classes=len(train_ds.label_list)
)

# Load the matching tokenizer by model name; it splits text into tokens
# and converts tokens to ids
tokenizer = SkepTokenizer.from_pretrained(pretrained_model_name_or_path=model_name)

# 转换数据集，使得预训练模型能够读取的格式
import os
from functools import partial
import numpy as np
import paddle
import paddle.nn.functional as F

def convert_example(
        example,
        tokenizer,
        max_seq_len=512,
        is_test=False
):
    """Convert a raw example into model-ready features.

    Args:
        example (dict): sample with a 'text' key and, unless ``is_test``,
            a 'label' key.
        tokenizer: callable returning a dict with 'input_ids' and
            'token_type_ids'.
        max_seq_len (int): maximum sequence length passed to the tokenizer.
        is_test (bool): when True, no label is returned.

    Returns:
        (input_ids, token_type_ids, label) for train/dev samples, or
        (input_ids, token_type_ids) when ``is_test`` is True.
    """
    encoded_inputs = tokenizer(text=example['text'], max_seq_len=max_seq_len)
    input_ids = encoded_inputs['input_ids']
    # Bug fix: token_type_ids must come from the encoded output, not from
    # indexing the tokenizer object itself.
    token_type_ids = encoded_inputs['token_type_ids']

    if not is_test:
        # Fixed typo: "lable" -> "label".
        label = np.array([example['label']], dtype='int64')
        return input_ids, token_type_ids, label
    else:
        return input_ids, token_type_ids

def create_dataloader(
        dataset,
        mode='train',
        trans_fn=None,
        batch_size=1,
        batchify_fn=None
):
    """Build a ``paddle.io.DataLoader`` over ``dataset``.

    Applies ``trans_fn`` to every sample when provided, shuffles only in
    'train' mode (using a distributed-aware batch sampler there), and
    collates batches with ``batchify_fn``.
    """
    if trans_fn is not None:
        dataset = dataset.map(trans_fn)

    is_training = mode == 'train'
    # Training uses a distributed-aware sampler; eval/test use a plain one.
    sampler_cls = (paddle.io.DistributedBatchSampler
                   if is_training else paddle.io.BatchSampler)
    batch_sampler = sampler_cls(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=is_training,
    )

    return paddle.io.DataLoader(
        dataset=dataset,
        batch_sampler=batch_sampler,
        collate_fn=batchify_fn,
    )

# Common hyper-parameters.
batch_size = 32
max_seq_len = 256
learning_rate = 2e-5
epochs = 1

# Bug fix: the original passed tokenizer/max_seq_len positionally, which
# bound them to the `example` and `tokenizer` parameters of
# convert_example. Bind them by keyword instead.
trans_fn = partial(convert_example, tokenizer=tokenizer, max_seq_len=max_seq_len)

# Collate a list of (input_ids, token_type_ids, label) samples into
# padded/stacked batch arrays.
batchify_fn = lambda sample, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # token_type_ids
    Stack(dtype='int64')  # labels; bug fix: the keyword is `dtype`, not `type`
): [data for data in fn(sample)]

# Build one loader per split; only the train loader shuffles.
train_dataloader = create_dataloader(
    train_ds,
    mode='train',
    trans_fn=trans_fn,
    batch_size=batch_size,
    batchify_fn=batchify_fn
)

dev_dataloader = create_dataloader(
    dev_ds,
    mode='dev',
    trans_fn=trans_fn,
    batch_size=batch_size,
    batchify_fn=batchify_fn
)

test_dataloader = create_dataloader(
    test_ds,
    mode='test',
    trans_fn=trans_fn,
    batch_size=batch_size,
    batchify_fn=batchify_fn
)

# Evaluation helper
@paddle.no_grad()
def evaluate(model, criterion, metric, data_loader):
    """Evaluate ``model`` on ``data_loader`` and print mean loss and accuracy.

    Switches the model to eval mode for the pass and restores train mode
    (and resets the metric) before returning.
    """
    model.eval()
    metric.reset()
    losses = []
    accu = 0.0  # ensure `accu` is defined even if the loader is empty
    for batch in data_loader:
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        # Bug fix: "nuumpy" -> "numpy".
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
        accu = metric.accumulate()

    print('eval loss %.5f, accu %.5f' %(np.mean(losses), accu))
    model.train()
    metric.reset()

# Configure the loss function and optimizer
## Directory where model parameters are saved during training
ckpt_dir = 'skep_ckpt'
# len(train_dataloader) is the number of steps per training epoch

num_training_steps = len(train_dataloader) * epochs
# AdamW optimizer
optimizer = paddle.optimizer.AdamW(
    learning_rate=learning_rate,
    parameters=model.parameters()
)
# Cross-entropy loss
criterion = paddle.nn.loss.CrossEntropyLoss()
# Accuracy metric
metric = paddle.metric.Accuracy()

# Training loop for the sentence-level model.
global_step = 0
tic_train = time.time()
for epoch in range(1, epochs + 1):
    for step, batch in enumerate(train_dataloader):
        input_ids, token_type_ids, labels = batch
        # Forward pass
        logits = model(input_ids, token_type_ids)
        # Compute loss
        loss = criterion(logits, labels)
        # Predicted probabilities
        probs = F.softmax(logits, axis=1)
        # Accumulate accuracy
        correct = metric.compute(probs, labels)
        metric.update(correct)
        acc = metric.accumulate()

        global_step += 1
        if global_step % 10 == 0:
            print(
                "global step %d, epoch: %d, batch: %d, loss: %.5f, accu: %.5f, speed: %.2f step/s"
                % (global_step, epoch, step, loss, acc,
                   10 / (time.time() - tic_train)))
            # Bug fix: reset the timer so the reported speed covers only
            # the last 10 steps (the original never reset it, so speed
            # decayed toward zero over the run).
            tic_train = time.time()
        # Backward pass and parameter update
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()

        if global_step % 100 == 0:
            save_dir = os.path.join(ckpt_dir, 'model_%d' % global_step)

            if not os.path.exists(save_dir):
                os.makedirs(save_dir)
            # Evaluate the current model on the dev set
            evaluate(model, criterion, metric, dev_dataloader)
            # Save model parameters
            model.save_pretrained(save_dir)

# Inference on the sentence-level test set.
trans_fn_test = partial(
    convert_example,
    tokenizer=tokenizer,
    max_seq_len=max_seq_len,
    is_test = True
)

# Bug fix: test samples carry no label, so the collate function must only
# pad the two id sequences. The original reused the 3-field batchify_fn,
# which fails on the 2-field test samples.
batchify_fn_test = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id)   # token_type_ids
): [data for data in fn(samples)]

test_dataloader = create_dataloader(
    test_ds,
    mode='test',
    batch_size=batch_size,
    batchify_fn=batchify_fn_test,
    trans_fn=trans_fn_test
)

# Bug fix: checkpoint directory typo "skep_ckp" -> "skep_ckpt"
# (must match the ckpt_dir used during training).
params_path = 'skep_ckpt/model_500/model_state.pdparams'
if params_path and os.path.isfile(params_path):
    # Load trained parameters
    state_dict = paddle.load(params_path)
    model.set_dict(state_dict)
    print('Loaded params from %s' % params_path)

label_map = {0:'0',1:'1'}
result = []
# Switch to eval mode to disable dropout and other training-only behavior
model.eval()
for batch in test_dataloader:
    # Bug fix: test batches contain exactly two fields; the original
    # unpacked a third "qids" value that was never produced.
    input_ids, token_type_ids = batch
    logits = model(input_ids, token_type_ids)
    probs = F.softmax(logits, axis=-1)
    idx = paddle.argmax(probs, axis=1).numpy().tolist()
    result.extend(label_map[i] for i in idx)


res_dir = 'results'
if not os.path.exists(res_dir):
    os.makedirs(res_dir)

with open(os.path.join(res_dir,'ChnSentiCorp.tsv'), 'w', encoding='utf-8') as f:
    f.write('index\tprediction\n')
    # Index predictions by their position in the test set.
    for qid, label in enumerate(result):
        f.write(str(qid) + '\t' + label + '\n')


# Aspect-level (target-level) sentiment classification on SE-ABSA16.
batch_size = 16
train_ds_se, test_ds_se = load_dataset('seabsa16', 'phns', splits=['train','test'])
# Fixed typo in the printed messages: "frist" -> "first".
print(f'train first sample: {train_ds_se[0]}')
print(f'test first sample: {test_ds_se[0]}')

# Load the SKEP model and matching tokenizer for this task
model_se = SkepForSequenceClassification.from_pretrained(
    pretrained_model_name_or_path=model_name,num_classes=len(train_ds_se.label_list)
)

tokenizer_se = SkepTokenizer.from_pretrained(pretrained_model_name_or_path=model_name)

def convert_example_se(
        example,
        tokenizer,
        max_seq_len = 512,
        is_test = False,
        datasets_name='chnsenticorp'
):
    """Encode an aspect-level example (text plus text_pair) for the model.

    Returns (input_ids, token_type_ids) when ``is_test`` is True, otherwise
    (input_ids, token_type_ids, label).  ``datasets_name`` is accepted for
    signature compatibility but is not used here.
    """
    encoded = tokenizer.encode(
        text=example['text'],
        text_pair=example['text_pair'],
        max_seq_len=max_seq_len
    )

    if is_test:
        return encoded['input_ids'], encoded['token_type_ids']

    label = np.array([example['label']], dtype='int64')
    return encoded['input_ids'], encoded['token_type_ids'], label

# Bind the tokenizer and max length into the conversion function.
trans_func = partial(
    convert_example_se,
    tokenizer=tokenizer_se,
    max_seq_len=max_seq_len
)

# Collate (input_ids, token_type_ids, label) samples into padded/stacked batches.
batchify_fn_se = lambda samples, fn=Tuple(
    Pad(axis=0,pad_val=tokenizer_se.pad_token_id), # input_ids
    Pad(axis=0,pad_val=tokenizer_se.pad_token_type_id), # token_type_ids
    Stack(dtype='int64')
) : [data for data in fn(samples)]

train_dataloader_se = create_dataloader(
    train_ds_se,
    mode='train',
    batch_size=batch_size,
    batchify_fn=batchify_fn_se,
    trans_fn=trans_func
)
# Training configuration for the aspect-level model
epochs = 3
num_training_steps_se = len(train_dataloader_se) * epochs
# AdamW optimizer
optimizer_se = paddle.optimizer.AdamW(
    learning_rate=5e-5,
    parameters=model_se.parameters()
)

# Cross-entropy loss
criterion_se = paddle.nn.CrossEntropyLoss()
# Accuracy metric
metric = paddle.metric.Accuracy()

# Training loop for the aspect-level model. (The original comment said
# "start prediction", but this section trains.)
ckpt_dir = 'skep_aspect'
global_step = 0
tic_train = time.time()
for epoch in range(1, epochs + 1):
    for step, batch in enumerate(train_dataloader_se, start=1):
        input_ids, token_type_ids, labels = batch
        # Bug fix: forward through the aspect-level model (model_se),
        # not the sentence-level `model`.
        logits = model_se(input_ids,token_type_ids)
        loss = criterion_se(logits,labels)
        probs = F.softmax(logits, axis=1)
        correct = metric.compute(probs, labels)
        metric.update(correct)
        acc = metric.accumulate()

        global_step += 1
        if global_step % 10 == 0:
            print(
                "global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f, speed: %.2f step/s"
                % (global_step, epoch, step, loss, acc,
                   10 / (time.time() - tic_train)))
            tic_train = time.time()

        loss.backward()
        # Bug fix: step the optimizer that owns model_se's parameters
        # (optimizer_se), not the sentence-level `optimizer`.
        optimizer_se.step()
        optimizer_se.clear_grad()

        if global_step % 100 == 0:
            save_dir = os.path.join(ckpt_dir, 'model_%d' %global_step)
            # Bug fix: save the model being trained here (model_se).
            model_se.save_pretrained(save_dir)

            tokenizer_se.save_pretrained(save_dir)

# Generate predictions for submission
@paddle.no_grad()
def predict(model, data_loader, label_map):
    """Run inference over ``data_loader`` and map argmax indices to labels.

    Returns a flat list of label strings, one per sample, in loader order.
    """
    model.eval()
    predictions = []
    for input_ids, token_type_ids in data_loader:
        logits = model(input_ids, token_type_ids)
        probs = F.softmax(logits, axis=1)
        best = paddle.argmax(probs, axis=1).numpy().tolist()
        predictions.extend(label_map[i] for i in best)
    return predictions

# Prepare the aspect-level test split for prediction.
label_map = {0: '0', 1: '1'}
trans_func = partial(
    convert_example_se,
    tokenizer=tokenizer_se,
    max_seq_len=max_seq_len,
    is_test=True
)

# Test samples carry no label, so only the two id sequences are padded.
batchify_fn_se_test = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer_se.pad_token_id),
    Pad(axis=0, pad_val=tokenizer_se.pad_token_type_id)
): [entry for entry in fn(samples)]

test_dataloader_se = create_dataloader(
    test_ds_se,
    mode='test',
    batch_size=batch_size,
    batchify_fn=batchify_fn_se_test,
    trans_fn=trans_func
)

# Bug fix: the aspect-level checkpoints are saved under ckpt_dir
# 'skep_aspect' (see the aspect-level training loop), not 'skep_ckpt' —
# the original path pointed at the sentence-level model's weights.
params_path = 'skep_aspect/model_200/model_state.pdparams'
if params_path and os.path.isfile(params_path):
    # Load the trained aspect-level parameters
    state_dict = paddle.load(params_path)
    model_se.set_dict(state_dict)
    print("Loaded parameters from %s" %params_path)

result = predict(model_se, test_dataloader_se, label_map)

# Write predictions to the submission file
with open(os.path.join("results", "SEABSA16_PHNS.tsv"), 'w',encoding='utf-8') as f:
    f.write("index\tprediction\n")
    for idx, label in enumerate(result):
        f.write(str(idx) + '\t' + label + '\n')
