import os
import urllib.request
from functools import partial

import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddlenlp as ppnlp
from paddlenlp.data import JiebaTokenizer, Pad, Tuple, Vocab, Stack
from paddlenlp.datasets import load_dataset

from utils import convert_example

def read(data_path):
    """Yield ``{'text': ..., 'label': ...}`` examples from a TSV file.

    Each line is expected to be ``text<TAB>label``. Malformed lines are
    reported and skipped; the original printed them but then crashed on
    the subsequent tuple unpack.

    Args:
        data_path: path to a UTF-8 tab-separated file.

    Yields:
        dict with keys ``'text'`` and ``'label'`` (both strings).
    """
    with open(data_path, "r", encoding="utf-8") as f:
        for line in f:
            fields = line.strip('\n').split('\t')  # one example per line, tab-separated
            if len(fields) != 2:
                # Report malformed lines and skip them instead of crashing.
                print(len(fields), line)
                continue
            words, label = fields
            yield {'text': words, 'label': label}

# Load the three splits. NOTE: the train path must be a raw string — in the
# original, '...年夜饭\train.txt' turned '\t' into a literal TAB character,
# producing a path that does not exist on disk.
train_ds = load_dataset(read, data_path=r'D:\download\中文情感分析数据集_年夜饭\train.txt', lazy=False)
dev_ds = load_dataset(read, data_path='dev.txt', lazy=False)
test_ds = load_dataset(read, data_path='test.txt', lazy=False)


# Preview the first few training examples.
for i in range(10):
    print(train_ds[i])


# Download the vocabulary file (word -> id mapping) if it is not already
# present. The original used the IPython shell magic ``!wget ...``, which is
# a syntax error when this file is run as a plain Python script.
VOCAB_URL = 'https://paddlenlp.bj.bcebos.com/data/senta_word_dict.txt'
VOCAB_PATH = './senta_word_dict.txt'
if not os.path.exists(VOCAB_PATH):
    urllib.request.urlretrieve(VOCAB_URL, VOCAB_PATH)

# Build the vocabulary and a jieba-based tokenizer on top of it.
vocab = Vocab.load_vocabulary(
    VOCAB_PATH,
    unk_token='[UNK]',
    pad_token='[PAD]'
)

tokenizer = JiebaTokenizer(vocab=vocab)

# Build a dataloader.

def create_dataloader(
        dataset,
        trans_fn=None,
        mode='train',
        batch_size=1,
        pad_token_id=0,
        batchify_fn=None
):
    """Wrap ``dataset`` in a ``paddle.io.DataLoader``.

    Args:
        dataset: map-style dataset to read from.
        trans_fn: optional per-example transform applied via ``dataset.map``.
        mode: ``'train'`` enables shuffling; any other value preserves order.
        batch_size: number of examples per mini-batch.
        pad_token_id: unused here; kept for interface compatibility.
        batchify_fn: collate callable that combines a list of samples into
            one mini-batch (here: pads input ids, stacks lengths/labels).

    Returns:
        A ``paddle.io.DataLoader`` over ``dataset``.
    """
    if trans_fn:
        # BUG FIX: the original referenced the undefined name ``datasets``.
        dataset = dataset.map(trans_fn)

    # Only shuffle training data; the original left ``mode`` unused.
    shuffle = mode == 'train'

    # return_list: return each batch as a list.
    # collate_fn: callable that turns a list of samples into batch data;
    # ``batchify_fn`` pads input ids and stacks sequence lengths and labels.
    dataloader = paddle.io.DataLoader(
        dataset=dataset,
        return_list=True,
        shuffle=shuffle,
        batch_size=batch_size,
        collate_fn=batchify_fn
    )

    return dataloader

# functools.partial pins tokenizer/is_test on convert_example so the mapped
# transform only needs the raw example itself.
trans_fn = partial(
    convert_example,
    tokenizer=tokenizer,
    is_test=False
)

# Collate a list of samples into batch arrays:
# - input ids are padded with the vocab's [PAD] id up to the longest
#   sequence in the batch,
# - sequence lengths and labels are stacked as int64.
batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=vocab['[PAD]']),  # input_ids
    Stack(dtype='int64'),  # seq_len
    Stack(dtype='int64'),  # label
): list(fn(samples))

# Build one dataloader per split. BUG FIX: the dev split's mode was the
# typo 'vaildation'; corrected to 'validation'.
train_dataloader = create_dataloader(
    train_ds,
    trans_fn=trans_fn,
    batch_size=128,
    mode='train',
    batchify_fn=batchify_fn
)

dev_dataloader = create_dataloader(
    dev_ds,
    trans_fn=trans_fn,
    batch_size=128,
    mode='validation',
    batchify_fn=batchify_fn
)

test_dataloader = create_dataloader(
    test_ds,
    trans_fn=trans_fn,
    batch_size=128,
    mode='test',
    batchify_fn=batchify_fn
)

# Sanity-check: print one training batch.
for batch in train_dataloader:
    print(batch)
    break

class LSTMModel(nn.Layer):
    """Text classifier: embedding -> LSTM encoder -> FC -> softmax.

    Architecture: word ids are embedded, encoded to a single text
    representation by ``ppnlp.seq2vec.LSTMEncoder``, projected to
    ``fc_hidden_size`` with a tanh non-linearity, then classified into
    ``num_classes`` with a final linear layer.
    """

    def __init__(
            self,
            vocab_size,
            num_classes,
            emb_dim=128,
            padding_idx=0,
            lstm_hidden_size=198,
            direction='forward',
            lstm_layers=1,
            dropout_rate=0.0,
            pooling_type=None,
            fc_hidden_size=96
    ):
        # BUG FIX: the constructor was misspelled ``__int__`` so it never
        # ran (the model would have had no sublayers); it also lacked the
        # mandatory ``super().__init__()`` call for an ``nn.Layer``.
        super().__init__()

        # Map input word ids to word embeddings.
        self.embedder = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=emb_dim,
            padding_idx=padding_idx
        )

        # Encode the embedding sequence into a single text representation.
        self.lstm_encoder = ppnlp.seq2vec.LSTMEncoder(
            emb_dim,
            lstm_hidden_size,
            num_layers=lstm_layers,
            direction=direction,
            dropout=dropout_rate,
            pooling_type=pooling_type
        )

        # LSTMEncoder.get_output_dim() gives the encoder's output width.
        # BUG FIX: this layer originally projected straight to num_classes,
        # but output_layer below consumes fc_hidden_size features, so the
        # two layers could never be chained.
        self.fc = nn.Linear(self.lstm_encoder.get_output_dim(), fc_hidden_size)

        # Final classifier.
        self.output_layer = nn.Linear(fc_hidden_size, num_classes)

    def forward(self, text, seq_len):
        # text shape: (batch_size, num_tokens)
        print(f"input: {text.shape}")

        # embedded_text shape: (batch_size, num_tokens, embedding_dim)
        embedded_text = self.embedder(text)
        print(f"after word embedding: embedded_text shape: {embedded_text.shape}")

        # Shape: (batch_size, num_directions * lstm_hidden_size),
        # num_directions = 2 if direction is 'bidirectional' else 1.
        text_repr = self.lstm_encoder(embedded_text, sequence_length=seq_len)
        print(f'after lstm text_repr shape: {text_repr.shape}')

        # Shape: (batch_size, fc_hidden_size)
        fc_out = paddle.tanh(self.fc(text_repr))
        print(f"alter Linear classifier: fc_out shape: {fc_out.shape}")

        # Shape: (batch_size, num_classes)
        logits = self.output_layer(fc_out)
        print(f"output logits shape: {logits.shape}")

        # Class probabilities.
        probs = F.softmax(logits, axis=1)
        print(f'output pobability: {probs.shape}')

        return probs

# Instantiate the classifier: bidirectional LSTM over the senta vocabulary,
# two output classes, [PAD] id used as the embedding padding index.
model = LSTMModel(
    vocab_size=len(vocab),
    num_classes=2,
    direction='bidirectional',
    padding_idx=vocab['[PAD]']
)

# Wrap in paddle's high-level Model API (prepare/fit/predict).
model = paddle.Model(model)

# Model configuration: Adam optimizer, cross-entropy loss, accuracy metric.
optimizer = paddle.optimizer.Adam(
    parameters=model.parameters(),
    learning_rate=5e-5
)
loss = paddle.nn.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
model.prepare(optimizer, loss, metric)

# Train for 10 epochs, evaluating on the dev set, checkpointing every 5
# epochs and logging to VisualDL under ./visualdl.
log_dir = './visualdl'
callback = paddle.callbacks.VisualDL(log_dir=log_dir)
model.fit(train_dataloader,dev_dataloader,epochs=10,save_dir='./checkpoints_1',save_freq=5,callbacks=callback)

# Prediction over the test split.
label_map = {0: 'negative', 1: 'positive'}
results = model.predict(test_dataloader, batch_size=128)[0]
predictions = []

for batch_probs in results:
    # BUG FIX: the original called np.array(batch_probs, axis=1), which is
    # a TypeError (np.array has no axis argument); argmax selects the most
    # probable class per example.
    idx = np.argmax(batch_probs, axis=1).tolist()
    # Map class ids to human-readable labels.
    predictions.extend(label_map[i] for i in idx)

# Show the first few predictions. BUG FIX: this loop was indented inside
# the batch loop above, so it re-ran (and crashed on incomplete
# ``predictions``) after every batch.
for i, data in enumerate(test_ds):
    if i < 10:
        print(type(data))
        # read() yields dict examples, so the text lives under the 'text'
        # key — the original data[0] would raise KeyError on a dict.
        print(f"data: {data['text']}, label: {predictions[i]}")