import os
import time
from collections import Counter
from itertools import chain
import jieba

def sort_and_write_words(all_words, file_path, min_freq=5):
    """Count words across all tokenized sentences and write a vocabulary file.

    The file starts with the special tokens '[UNK]' and '[PAD]', followed by
    every word whose frequency is at least ``min_freq``, most frequent first.

    Args:
        all_words: iterable of token lists (one list per sentence).
        file_path: destination path of the vocabulary file.
        min_freq: minimum count a word needs to be kept (default 5, matching
            the previously hard-coded threshold).
    """
    words_vocab = Counter(chain.from_iterable(all_words)).most_common()
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write('[UNK]\n[PAD]\n')
        # Filter low-frequency words (count < min_freq). most_common() is
        # sorted by descending count, so once one word falls below the
        # threshold every later word does too -> break instead of continue.
        for word, num in words_vocab:
            if num < min_freq:
                break
            f.write(word + '\n')

# Tokenize every dataset file under ./word/data and collect the tokens that
# will feed the vocabulary. The trailing-comma unpack asserts the walk yields
# exactly one (root, dirs, files) triple, i.e. a flat data directory.
(root, directory, files), = list(os.walk('./word/data'))
all_words = []
labeled_files = ('train.txt', 'dev.txt')
for file_name in files:
    with open(os.path.join(root, file_name), 'r', encoding='utf-8') as f:
        for line in f:
            if file_name in labeled_files:
                # Labeled splits: "text<TAB>label" per line.
                text, label = line.strip().split('\t')
            elif file_name == 'test.txt':
                # Test split: bare text per line.
                text = line.strip()
            else:
                continue
            tokens = [tok for tok in jieba.lcut(text) if tok.strip() != '']
            all_words.append(tokens)

# Write the vocabulary file.
# NOTE(review): input is read from ./word/data but the vocab is written to
# ./work/data — confirm the 'word' vs 'work' split is intentional.
sort_and_write_words(all_words, './work/data/vocab.txt')

# Custom dataset: load the data by subclassing paddle.io.Dataset

import paddle
class NewsData(paddle.io.Dataset):
    """Map-style dataset over the news-classification text files.

    Each line of a train/dev file is "text<TAB>label"; test files hold raw
    text only. Labels are mapped to integer ids via ``label_list`` order.
    """

    def __init__(self, data_path, mode='train'):
        # BUG FIX: the base paddle.io.Dataset initializer was never called.
        super().__init__()
        is_test = mode == 'test'
        # Textual label -> integer id, in label_list order.
        self.label_map = {item: index for index, item in enumerate(self.label_list)}
        self.examples = self._read_file(data_path, is_test)

    def _read_file(self, data_path, is_test):
        """Read data_path into a list of text strings (test) or (text, label_id) tuples."""
        examples = []
        with open(data_path, 'r', encoding='utf-8') as f:
            for line in f:
                if is_test:
                    # Test split: store the raw text string only (no tuple).
                    examples.append(line.strip())
                else:
                    text, label = line.strip('\n').split('\t')
                    # Convert the textual label to its integer id: 0, 1, 2, ...
                    label = self.label_map[label]
                    examples.append((text, label))
        return examples

    def __getitem__(self, idx):
        return self.examples[idx]

    def __len__(self):
        return len(self.examples)

    @property
    def label_list(self):
        # Fixed label inventory; its order defines the integer label ids.
        return  ['财经', '彩票', '房产', '股票', '家居', '教育', '科技', '社会', '时尚', '时政', '体育', '星座', '游戏', '娱乐']

# Loads dataset:
train_ds = NewsData('./word/data/train.txt', mode='train')
dev_ds = NewsData('./word/data/dev.txt', mode='dev')
test_ds = NewsData('./word/data/test.txt', mode='test')

# Peek at the first five samples of each split.
print("Train data 5 samples:")
for sample_text, label_id in train_ds[:5]:
    print(f"Text:{sample_text}, label id: {label_id}")

print("Test data 5 samples")
for sample_text in test_ds[:5]:
    print(f"Text:{sample_text}")

# Load the dataset, tokenize with jieba, then map each word to its id in the vocabulary.
# Use the paddle.io.DataLoader API for multi-worker asynchronous data loading.

import numpy as np
def read_vocab(vocab_path):
    """Load a vocabulary file: one token per line, id = 0-based line number."""
    with open(vocab_path, 'r', encoding='utf-8') as f:
        # Strip only the trailing newline so tokens keep any inner whitespace.
        return {line.strip('\n'): idx for idx, line in enumerate(f)}

def convert_example(example, vocab, stop_words, is_test=False):
    """
       Builds model inputs from a sequence for sequence classification tasks.
       It uses `jieba.cut` to tokenize the text, drops stop words, and maps
       the remaining tokens to vocabulary ids ('[UNK]' for out-of-vocab).

       Args:
           example(obj:`tuple` or `str`): A (text, label) pair, or the raw
               text string when ``is_test`` is True.
           vocab(obj:`dict`): Token -> id mapping; must contain '[UNK]'.
           stop_words(obj:`dict` or `set`): Tokens to drop before id lookup.
           is_test(obj:`bool`, defaults to `False`): Whether the example contains label or not.

       Returns:
           input_ids(obj:`numpy.ndarray`, int64): The token ids.
           valid_length(obj:`numpy.ndarray`, int64): The input sequence valid length.
           label(obj:`numpy.ndarray`, data type of int64, optional): The input label if not is_test.
    """
    if is_test:
        text = example
    else:
        text, label = example

    unk_id = vocab['[UNK]']
    input_ids = []
    for word in jieba.cut(text):
        # BUG FIX: previously an out-of-vocabulary stop word fell through to
        # the '[UNK]' branch and was kept; stop words are now dropped
        # unconditionally before the vocabulary lookup.
        if word in stop_words:
            continue
        input_ids.append(vocab.get(word, unk_id))

    valid_length = np.array(len(input_ids), dtype='int64')
    input_ids = np.array(input_ids, dtype='int64')

    if not is_test:
        label = np.array(label, dtype='int64')
        return input_ids, valid_length, label
    else:
        return input_ids, valid_length

def preprocess_prediction_data(data, tokenizer):
    """
        Processes prediction data into the same format used for training.

        Args:
            data (obj:`List[str]`): The prediction data whose each element is a tokenized text.
            tokenizer(obj: paddlenlp.data.JiebaTokenizer): It use jieba to cut the chinese string.

        Returns:
            examples (obj:`List`): One [word_ids, seq_len] pair per input text.
    """
    encoded = (tokenizer.encode(text) for text in data)
    return [[word_ids, len(word_ids)] for word_ids in encoded]

def write_results(labels, file_path):
    """Write predicted labels to ``file_path``, one per line (no trailing newline).

    Fix: the original called ``f.writelines()`` on a single joined string,
    which only works because a str iterates character-by-character; ``write``
    is the intended call for a single string.
    """
    with open(file_path, 'w', encoding='utf-8') as f:
        f.write('\n'.join(labels))


from functools import partial
import paddlenlp
from paddlenlp.datasets import MapDataset
from paddlenlp.data import Pad, Stack, JiebaTokenizer, Vocab, Tuple

def create_dataloader(
        dataset,
        trans_fn=None,
        mode='train',
        batch_size=1,
        batchify_fn=None
    ):
    """Build a paddle DataLoader over ``dataset``.

    Args:
        dataset: the source dataset (wrapped in MapDataset if trans_fn is given).
        trans_fn: optional per-example transform (e.g. convert_example).
        mode: 'train' enables shuffling via DistributedBatchSampler.
        batch_size: examples per batch.
        batchify_fn: collate function that pads/stacks a list of examples.
    """
    if trans_fn:
        dataset = MapDataset(dataset).map(trans_fn)

    # Only the training split is shuffled.
    if mode == 'train':
        sampler = paddle.io.DistributedBatchSampler(
            dataset=dataset, shuffle=True, batch_size=batch_size
        )
    else:
        sampler = paddle.io.BatchSampler(
            dataset=dataset, batch_size=batch_size, shuffle=False
        )

    # BUG FIX: the sampler was previously built but never passed to the
    # DataLoader (batch_size was passed instead), so train-time shuffling
    # silently never happened. batch_sampler and batch_size are mutually
    # exclusive, so batch_size is dropped here.
    return paddle.io.DataLoader(
        dataset=dataset,
        batch_sampler=sampler,
        collate_fn=batchify_fn,
        return_list=True
    )

# Load the vocabulary and the stop-word list built earlier.
vocab = read_vocab('./work/data/vocab.txt')
stop_words = read_vocab('./work/data/stop_words.txt')
batch_size = 128
epochs = 3

# train/dev examples are (text, label) pairs; test examples are bare text,
# so the test split needs is_test=True and a batchify without the label field.
trans_fn = partial(convert_example, vocab=vocab, stop_words=stop_words, is_test=False)
test_trans_fn = partial(convert_example, vocab=vocab, stop_words=stop_words, is_test=True)

batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=vocab.get('[PAD]', 0)), # input_ids
    Stack(dtype='int64'), # seq len
    Stack(dtype='int64') # label
) : [data for data in fn(samples)]

test_batchify_fn = lambda samples, fn=Tuple(
    Pad(axis=0, pad_val=vocab.get('[PAD]', 0)), # input_ids
    Stack(dtype='int64') # seq len
) : [data for data in fn(samples)]

train_dataloader = create_dataloader(
    train_ds,
    trans_fn=trans_fn,
    mode='train',
    batch_size=batch_size,
    batchify_fn=batchify_fn
)

dev_dataloader = create_dataloader(
    dev_ds,
    trans_fn=trans_fn,
    mode='dev',
    batch_size=batch_size,
    batchify_fn=batchify_fn
)

# BUG FIX: the test loader previously reused the train trans_fn/batchify_fn;
# convert_example(..., is_test=False) would try to unpack a label that test
# examples do not have.
test_dataloader = create_dataloader(
    test_ds,
    trans_fn=test_trans_fn,
    mode='test',
    batch_size=batch_size,
    batchify_fn=test_batchify_fn
)

# Build the model with paddlenlp.seq2vec's LSTMEncoder; with direction='bidirectional' this is a BiLSTM.
# paddle.nn.Embedding builds the word-embedding layer
# paddlenlp.seq2vec.LSTMEncoder builds the sentence-modeling layer
# paddle.nn.Linear builds the classifier head (NOTE: 14 classes here, not binary as the original comment said)

import paddle.nn as nn
import paddle.nn.functional as F

class LSTMModel(nn.Layer):
    """Text classifier: word embedding -> (Bi)LSTM encoder -> tanh FC -> linear logits."""

    def __init__(
            self,
            vocab_size,
            num_classes,
            emb_dim = 128,
            padding_idx = 0,
            lstm_hidden_size = 198,
            direction = 'forward',
            lstm_layers = 1,
            dropout_rate = 0.0,
            pooling_type = None,
            fc_hidden_size = 96
    ):
        # BUG FIX: this method was misspelled `__int__`, so the constructor
        # never ran and `LSTMModel(vocab_size, ...)` raised
        # "object() takes no arguments".
        super().__init__()
        # Map input word ids to word embeddings; the padding_idx row stays zero.
        self.embedder = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=emb_dim,
            padding_idx=padding_idx
        )

        # Encode the word embeddings into a single text representation.
        self.lstm_encoder = paddlenlp.seq2vec.LSTMEncoder(
            emb_dim,
            hidden_size=lstm_hidden_size,
            num_layers=lstm_layers,
            direction=direction,
            dropout_rate=dropout_rate,
            pooling_type=pooling_type
        )
        # get_output_dim() returns the encoder's output width, accounting for
        # the doubled hidden size of a bidirectional encoder.
        self.fc = nn.Linear(self.lstm_encoder.get_output_dim(), fc_hidden_size)

        # Final classification layer.
        self.output_layer = nn.Linear(fc_hidden_size, num_classes)

    def forward(self, text, seq_len):
        # Shape: (batch_size, num_tokens, embedding_dim)
        embedded_text = self.embedder(text)
        # Shape: (batch_size, num_directions * lstm_hidden_size)
        # num_directions = 2 if direction is bidirectional else 1
        text_repr = self.lstm_encoder(embedded_text, sequence_length=seq_len)
        # BUG FIX: was `self.fc(text)` — applying the FC layer to the raw
        # token ids — instead of the encoder output `text_repr`.
        # Shape: (batch_size, fc_hidden_size)
        fc_out = paddle.tanh(self.fc(text_repr))
        # Shape: (batch_size, num_classes)
        logits = self.output_layer(fc_out)

        return logits


# Instantiate the BiLSTM classifier and wrap it in the high-level Model API.
model = LSTMModel(
    len(vocab),
    len(train_ds.label_list),
    direction='bidirectional',
    padding_idx=vocab['[PAD]']
)
model = paddle.Model(model)

# Configure training: cross-entropy loss, accuracy metric, Adam optimizer.
criterion = paddle.nn.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
optimizer = paddle.optimizer.Adam(
    learning_rate=5e-4,
    parameters=model.parameters()
)

model.prepare(optimizer, criterion, metric)

# Train, evaluating on the dev split each epoch; checkpoints go to ./log.
model.fit(train_dataloader, dev_dataloader, epochs=epochs, save_dir='./log')

# Prediction
# Batchify for prediction: only (input_ids, seq_len), no label field.
test_batchify_fn = lambda samples, fn = Tuple(
    Pad(axis=0, pad_val=vocab.get('[PAD]', 0)),
    Stack(dtype='int64')
) : [data for data in fn(samples)]

# BUG FIX: this previously called model.prepare(test_dataloader), which only
# (re)configures the model and returns None; model.predict runs inference
# and returns the per-batch outputs.
results = model.predict(test_dataloader)
# Invert label -> id into id -> label to decode predictions back to text labels.
inverse_label_map = {val: key for key, val in test_ds.label_map.items()}
all_labels = []
for batch_results in results[0]:
    # argmax over the class axis gives the predicted label id per sample.
    label_ids = np.argmax(batch_results, axis=1).tolist()
    labels = [inverse_label_map[label_id] for label_id in label_ids]
    all_labels.extend(labels)

write_results(all_labels, './result.txt')