import numpy as np
from paddlenlp.data import Pad, Tuple, Stack
import paddlenlp as ppnlp
import paddle
from paddlenlp.datasets import load_dataset
from functools import partial

# Load the ChnSentiCorp sentiment-classification dataset splits.
train_ds, dev_ds, test_ds = load_dataset('chnsenticorp', splits=['train','dev','test'])

# Class names for the classification task.
label_list = train_ds.label_list

# BUG FIX: "frist" typo in the printed messages corrected to "first".
print(f'train first sample: {train_ds[0]}')
print(f'dev first sample: {dev_ds[0]}')
print(f'test first sample: {test_ds[0]}')
print(f'label: {label_list}')

model_name = 'ernie-1.0'
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained(model_name)
ernie_model = ppnlp.transformers.ErnieModel.from_pretrained(model_name)
# ERNIE tokenizer usage example:
# split the raw input text into tokens
tokens = tokenizer._tokenize(train_ds[0]['text'])
print(f'Tokens : {tokens}')
# map each token to its vocabulary id
tokens_id = tokenizer.convert_tokens_to_ids(tokens)
print(f'Tokens id : {tokens_id}')

# Add the special tokens the pretrained model expects, e.g. [CLS], [SEP]
tokens_id = tokenizer.build_inputs_with_special_tokens(tokens_id)

# Convert to the Paddle tensor format (batch of one sequence)
token_pd = paddle.to_tensor([tokens_id])
print(f'Tokens : {token_pd}')

# Feed into the ERNIE model to obtain the corresponding outputs:
## sequece_output: per-token semantic features, shape (1, num_tokens, hidden_size);
##   generally used for sequence labeling, question answering, etc.
## pooled_output: whole-sentence semantic features, shape (1, hidden_size);
##   generally used for text classification, information retrieval, etc.
sequece_output, pooled_output = ernie_model(token_pd)

# One call performs tokenization, token-id mapping and special-token insertion
encode_text = tokenizer(text=train_ds[0]['text'])

for key, value in encode_text.items():
    print(f'key: {key}, value: {value}')

# Convert to Paddle tensors
input_ids = paddle.to_tensor([encode_text['input_ids']])
print(f"input_ids : {input_ids}")
segment_ids = paddle.to_tensor([encode_text['token_type_ids']])
print(f"segment ids: {segment_ids}")

# Feed into the ERNIE model to obtain the corresponding outputs
sequece_output, pooled_output = ernie_model(input_ids, segment_ids)
print(f"Token wise ouptput: {sequece_output}, Pooled otput : {pooled_output}")

def convert_example(example, tokenizer, max_seq_len=512, is_test=False):
    """Convert a raw example dict into model input arrays.

    Args:
        example: dict with a 'text' key and, unless ``is_test``, a 'label' key.
        tokenizer: tokenizer exposing ``encode(text, max_seq_len=...)``.
        max_seq_len: maximum sequence length forwarded to the tokenizer.
            (BUG FIX: this argument was previously accepted but ignored.)
        is_test: when True, no label is returned.

    Returns:
        (input_ids, token_type_ids, label) for train/dev examples, or
        (input_ids, token_type_ids) when ``is_test`` is True.
    """
    encoded_input = tokenizer.encode(example['text'], max_seq_len=max_seq_len)
    input_ids = encoded_input['input_ids']
    token_type_ids = encoded_input['token_type_ids']

    if is_test:
        return input_ids, token_type_ids
    # BUG FIX: 'intt64' is not a valid numpy dtype and raised TypeError.
    label = np.array([example['label']], dtype='int64')
    return input_ids, token_type_ids, label

def create_dataloader(
        dataset,
        mode='train',
        batch_size=1,
        trans_fn=None,
        batchify_fn=None
):
    """Build a paddle DataLoader over ``dataset``.

    Args:
        dataset: a paddlenlp dataset supporting ``.map`` — TODO confirm.
        mode: 'train' shuffles and uses a distributed batch sampler.
        batch_size: examples per batch.
        trans_fn: optional per-example transform applied via ``dataset.map``.
        batchify_fn: collate function that pads/stacks a list of examples.

    Returns:
        A ``paddle.io.DataLoader`` yielding lists of tensors.
    """
    if trans_fn:
        dataset = dataset.map(trans_fn)

    # Shuffle only during training.
    shuffle = mode == 'train'
    if mode == 'train':
        # Distributed sampler shards batches across devices for multi-card runs.
        sampler = paddle.io.DistributedBatchSampler(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=shuffle
        )
    else:
        sampler = paddle.io.BatchSampler(
            dataset=dataset,
            batch_size=batch_size,
            shuffle=shuffle
        )
    # BUG FIX: the sampler was built but never used — the DataLoader was given
    # batch_size directly, so shuffling and distributed sharding silently never
    # happened. Pass the sampler via batch_sampler (mutually exclusive with
    # batch_size/shuffle arguments).
    return paddle.io.DataLoader(
        dataset=dataset,
        batch_sampler=sampler,
        collate_fn=batchify_fn,
        return_list=True
    )

max_seq_len = 128
batch_size = 32

# BUG FIX: tokenizer was previously bound positionally, landing in the
# `example` slot of convert_example (dataset.map would then pass each example
# into the `tokenizer` slot). Bind it by keyword instead.
trans_fn = partial(
    convert_example, tokenizer=tokenizer, max_seq_len=max_seq_len, is_test=False
)

# Collate: pad input_ids and segment ids to the batch max length, stack labels.
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=tokenizer.pad_token_id),       # input_ids
    Pad(axis=0, pad_val=tokenizer.pad_token_type_id),  # segment ids
    Stack(dtype='int64')                               # labels
): [data for data in fn(samples)]

train_dataloader = create_dataloader(
    train_ds,
    mode='train',
    batch_size=batch_size,
    trans_fn=trans_fn,
    batchify_fn=batchify_fn
)

dev_dataloader = create_dataloader(
    dev_ds,
    mode='dev',
    batch_size=batch_size,
    trans_fn=trans_fn,
    batchify_fn=batchify_fn
)

# Load the classification model.
# BUG FIX: num_classes must be an integer; it was previously passed the label
# list itself.
model = ppnlp.transformers.ErnieForSequenceClassification.from_pretrained(
    model_name, num_classes=len(label_list))

from paddlenlp.transformers import LinearDecayWithWarmup
# Peak learning rate for the warmup-then-linear-decay schedule.
learning_rate = 5e-5

epochs = 1
# Fraction of training steps used for linear warmup.
warmup_proportion = 0.1
# Weight-decay coefficient (regularization to mitigate overfitting).
weight_decay = 0.01

num_trainning_steps = len(train_dataloader) * epochs
lr_scheduler = LinearDecayWithWarmup(learning_rate, num_trainning_steps, warmup_proportion)

# Exclude bias and LayerNorm parameters from weight decay.
# BUG FIX: model.parameters() yields parameters, not (name, parameter) pairs;
# unpacking it as `for n, p in ...` raised at runtime. Use named_parameters(),
# and build the list once instead of on every apply_decay_param_fun call.
decay_params = [
    p.name for n, p in model.named_parameters()
    if not any(nd in n for nd in ['bias', 'norm'])
]
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    apply_decay_param_fun=lambda x: x in decay_params
)

criterion = paddle.nn.loss.CrossEntropyLoss()
metric = paddle.metric.Accuracy()

# Evaluation. NOTE(review): the name `evalyate` is a typo for `evaluate`;
# kept unchanged because the training loop below calls it by this name.
@paddle.no_grad()
def evalyate(model, criterion, metric, data_loader):
    """Evaluate the model on ``data_loader`` and print mean loss and accuracy.

    Resets ``metric`` before and after evaluation and restores train mode.
    """
    model.eval()
    metric.reset()
    losses = []

    # Initialize so an empty loader does not leave `accu` unbound.
    accu = 0.0
    for batch in data_loader:
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        losses.append(loss.numpy())
        correct = metric.compute(logits, labels)
        metric.update(correct)
        # BUG FIX: metric.Accumulate() does not exist (AttributeError);
        # the paddle.metric API method is accumulate().
        accu = metric.accumulate()
    print('eval loss: %.5f, accu: %.5f' %(np.mean(losses), accu))

    model.train()
    metric.reset()



import os
from paddle.nn import functional as F

# Directory where the fine-tuned model and tokenizer will be saved.
model_path = 'checkpoint'
if not os.path.exists(model_path):
    os.mkdir(model_path)

# Standard fine-tuning loop: forward, loss, metric update, backward,
# optimizer step, LR-scheduler step, gradient reset.
global_step = 0
for epoch in range(1, epochs+1):
    for step, batch in enumerate(train_dataloader, start=1):
        input_ids, token_type_ids, labels = batch
        logits = model(input_ids, token_type_ids)
        loss = criterion(logits, labels)
        # Running accuracy is computed on softmax probabilities.
        prob = F.softmax(logits,axis=1)
        correct = metric.compute(prob, labels)
        metric.update(correct)
        acc = metric.accumulate()

        global_step += 1
        # Log running loss/accuracy every 10 steps.
        if global_step % 10 == 0:
            print("global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f" % (
                global_step, epoch, step, loss, acc))

        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.clear_grad()

    # Evaluate on the dev set after each epoch.
    evalyate(model, criterion, metric, dev_dataloader)

model.save_pretrained(model_path)
tokenizer.save_pretrained(model_path)


# Model inference.
def predict(model, data_loader, tokenizer, label_map, batch_size=1):
    """Predict a label string for each example.

    Args:
        model: trained sequence-classification model.
        data_loader: iterable of example dicts with a 'text' key.
            NOTE(review): despite the name, this is iterated one example at a
            time, not in batches — confirm against the caller.
        tokenizer: tokenizer used to encode each text.
        label_map: dict mapping class index -> label string.
        batch_size: number of examples per inference batch.

    Returns:
        List of predicted label strings, in input order.
    """
    examples = []
    for text in data_loader:
        input_ids, segment_ids = convert_example(
            text,
            tokenizer,
            max_seq_len=128,
            is_test=True
        )
        examples.append((input_ids, segment_ids))

    # Pad both fields to the batch's max length.
    # BUG FIX: segment ids are now padded with pad_token_type_id
    # (they were padded with pad_token_id).
    batchify_fn = lambda samples, fn=Tuple(
        Pad(axis=0, pad_val=tokenizer.pad_token_id),
        Pad(axis=0, pad_val=tokenizer.pad_token_type_id)
    ): [data for data in fn(samples)]

    # Group examples into batches of batch_size.
    # BUG FIX: the leftover-batch check was previously INSIDE the loop, so any
    # partially-filled batch was appended on every iteration, duplicating
    # examples in the output. It belongs after the loop.
    batches = []
    one_batch = []
    for example in examples:
        one_batch.append(example)
        if len(one_batch) == batch_size:
            batches.append(one_batch)
            one_batch = []
    if one_batch:
        batches.append(one_batch)

    results = []
    model.eval()
    for batch in batches:
        input_ids, segment_ids = batchify_fn(batch)
        input_ids = paddle.to_tensor(input_ids)
        segment_ids = paddle.to_tensor(segment_ids)
        logits = model(input_ids, segment_ids)
        probs = F.softmax(logits, axis=1)
        idx = paddle.argmax(probs, axis=1).numpy()
        idx = idx.tolist()
        # Map predicted class indices to their label strings.
        labels = [label_map[i] for i in idx]
        results.extend(labels)

    return results

