# Load the ChnSentiCorp sentiment-analysis dataset and preview a few samples.
import paddlenlp as ppnlp
import paddle
from paddlenlp.datasets import load_dataset

# splits: train / dev / test
train_ds, dev_ds, test_ds = load_dataset('chnsenticorp', splits=['train', 'dev', 'test'])
print(f'label: {train_ds.label_list}')

# Peek at the first five raw examples.
for data in train_ds.data[:5]:
    print(data)

# Data preprocessing: tokenization, mapping tokens to vocabulary ids, etc.

## Download the pretrained model and its tokenizer.
model_name = 'ernie-1.0'
tokenizer = ppnlp.transformers.ErnieTokenizer.from_pretrained(model_name)
# Load the pretrained ERNIE model.
ernie_model = ppnlp.transformers.ErnieModel.from_pretrained(model_name)

# Split the input text into tokens.
# Use the public tokenize() API, not the private _tokenize().
tokens = tokenizer.tokenize('请输入测试样例')
print(f"Tokens : {tokens}")

# Map each token to its vocabulary id
# (the method is convert_tokens_to_ids; convert_token_to_ids does not exist).
token_ids = tokenizer.convert_tokens_to_ids(tokens)
print(f"Tokens id: {token_ids}")

# Add the special tokens the pretrained model expects, e.g. [CLS], [SEP].
token_ids = tokenizer.build_inputs_with_special_tokens(token_ids)

# Convert to a paddle tensor (a batch containing one sequence).
tokens_pd = paddle.to_tensor([token_ids])
print(f"Tokens: {tokens_pd}")

# Feed into the ERNIE model to obtain its outputs.
sequence_output, pooled_output = ernie_model(tokens_pd)
print(f"Token wise output shape: {sequence_output.shape}, pooled_output shape: {pooled_output.shape}")

## sequence_output is the per-token semantic representation, shape (1, num_tokens, hidden_size);
## typically used for sequence labeling, question answering, etc.
## pooled_output is the whole-sentence semantic representation, shape (1, hidden_size);
## typically used for text classification, information retrieval, etc.

# One call performs tokenization, token-id mapping and special-token insertion.
## High-level API
encode_text = tokenizer(text='请输入测试样例')
for key, value in encode_text.items():  # .items(), not .item()
    print(f'key: {key}, value: {value}')

# Convert to paddle tensors.
inputs_ids = paddle.to_tensor([encode_text['input_ids']])
print(f"inputs: {inputs_ids}")
segment_ids = paddle.to_tensor(encode_text['token_type_ids'])  # key is 'token_type_ids' (single underscore)
print(f'token_type_ids : {segment_ids}')
# Feed into the ERNIE model to obtain its outputs.
sequence_output, pooled_output = ernie_model(inputs_ids, segment_ids)
print(f'Token wise output shape : {sequence_output.shape}, Pooled output shape: {pooled_output.shape}')

# Single-sentence input.
single_seg_input = tokenizer(text="请输入测试样例")
# Sentence-pair input.
multi_seg_input = tokenizer(text="请输入测试样例1", text_pair='请输入测试样例2')
# convert_ids_to_tokens maps the ids back to token strings for display;
# double quotes on the f-strings avoid nesting the same quote character
# (a SyntaxError before Python 3.12).
print(f"单句输入 token （str): {tokenizer.convert_ids_to_tokens(single_seg_input['input_ids'])}")
print(f"单句输入 token （int): {single_seg_input['input_ids']}")
print(f"单句输入 segment ids: {single_seg_input['token_type_ids']}")

print()
print(f"句对输入 token(str): {tokenizer.convert_ids_to_tokens(multi_seg_input['input_ids'])}")
print(f"句对输入 token(int): {multi_seg_input['input_ids']}")
print(f"句对输入 segment ids: {multi_seg_input['token_type_ids']}")

# Highlight: pad every sequence to a uniform length.
encode_text = tokenizer(text='请输入测试样例', max_seq_len=15)
for key, value in encode_text.items():  # iterate key/value pairs, not bare keys
    print(f'key: {key}, value: {value}')

## The code above covered tokenizer usage in detail; now use the tokenizer
## to preprocess the ChnSentiCorp dataset.

from functools import partial
from paddlenlp.data import Stack, Tuple, Pad
from utils import convert_example, create_dataloader

batch_size = 32
max_seq_length = 128

# Per-example transform: tokenize and truncate/pad to max_seq_length.
trans_fn = partial(
    convert_example,
    tokenizer=tokenizer,
    max_seq_length=max_seq_length
)


# Collate a list of samples into batch arrays (a plain def instead of a
# lambda assigned to a name, per PEP 8; the Tuple collator is bound once
# as a default argument, exactly as before).
def batchify_fn(samples, fn=Tuple(
        Pad(axis=0, pad_val=0),   # input_ids
        Pad(axis=0, pad_val=0),   # segment ids
        Stack(dtype='int64'))):   # labels
    return [data for data in fn(samples)]


train_data_loader = create_dataloader(
    train_ds,
    mode='train',
    batch_size=batch_size,
    batchify_fn=batchify_fn,
    trans_fn=trans_fn
)

dev_data_loader = create_dataloader(
    dev_ds,
    mode='dev',
    batch_size=batch_size,
    batchify_fn=batchify_fn,
    trans_fn=trans_fn
)

# Load pretrained ERNIE with a sequence-classification head for fine-tuning.
model = ppnlp.transformers.ErnieForSequenceClassification.from_pretrained(model_name)

# Set up the fine-tuning optimization strategy and the evaluation metric.
from paddlenlp.transformers import LinearDecayWithWarmup

# Peak learning rate during training.
learning_rate = 5e-5
# Number of training epochs.
epochs = 3
# Fraction of steps used for learning-rate warmup.
warmup_proportion = 0.1
# Weight-decay coefficient (L2-style regularization to reduce overfitting).
weight_decay = 0.01

num_training_steps = len(train_data_loader) * epochs
lr_scheduler = LinearDecayWithWarmup(learning_rate, num_training_steps, warmup_proportion)
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    # Exclude bias and LayerNorm parameters from weight decay.
    apply_decay_param_fun=lambda x: x in (
        p.name for n, p in model.named_parameters()
        if not any(nd in n for nd in ['bias', 'norm'])
    )
)

criterion = paddle.nn.loss.CrossEntropyLoss()
metric = paddle.metric.Accuracy()

# Training loop:
## take a batch from the dataloader;
## forward pass through the model;
## compute the loss and update the accuracy metric;
## back-propagate the loss, update parameters; repeat.

import paddle.nn.functional as F
from utils import evaluate

global_step = 0
for epoch in range(1, epochs + 1):   # `epoch`, so the `epochs` constant is not shadowed
    for step, batch in enumerate(train_data_loader, start=1):
        inputs_ids, segment_ids, labels = batch
        logits = model(inputs_ids, segment_ids)
        loss = criterion(logits, labels)
        probs = F.softmax(logits, axis=1)
        correct = metric.compute(probs, labels)
        metric.update(correct)
        acc = metric.accumulate()

        global_step += 1
        if global_step % 10 == 0:
            # Format string now has one specifier per argument (5 values).
            print("global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f"
                  % (global_step, epoch, step, loss, acc))

        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.clear_grad()
    # Evaluate on the dev set at the end of each epoch.
    evaluate(model, criterion, metric, dev_data_loader)

# Persist the fine-tuned model and tokenizer.
model.save_pretrained('./checkpoint_enier')
tokenizer.save_pretrained('./checkpoint_enier')

# Model inference on a few hand-written review snippets.

from utils_ernie import predict

data = [
    {"text":'这个宾馆比较陈旧了，特价的房间也很一般。总体来说一般'},
    {"text":'怀着十分激动的心情放映，可是看着看着发现，在放映完毕后，出现一集米老鼠的动画片'},
    {"text":'作为老的四星酒店，房间依然很整洁，相当不错。机场接机服务很好，可以在车上办理入住手续，节省时间。'},
]

# Map class index to a human-readable sentiment label.
label_map = {0 : 'negative', 1: 'positive'}
results = predict(
    model, data, tokenizer, label_map, batch_size=batch_size
)
# Print each input alongside its predicted label.
for sample, label in zip(data, results):
    print(f"Data : {sample}, label: {label}")