import re
import jieba
import os
import random
import paddle
import paddlenlp as ppnlp
from paddlenlp.data import Stack, Tuple, Pad
import paddle.nn.functional as F
import paddle.nn as nn
import numpy as np
from functools import partial
from paddlenlp.datasets import MapDataset

# Preview the content of a single raw e-mail file.  The trec06c corpus is
# GB2312-encoded and contains byte sequences that do not decode cleanly,
# so decoding errors must be ignored (errors='ignore').
# Fixed: the file handle was opened without ever being closed; use `with`.
text = ''
with open('/home/aistudio/trec06c/data/054/054', 'r',
          encoding='gb2312', errors='ignore') as f:
    for line in f:
        line = line.strip().strip('\n')
        if len(line) > 1:
            print(line)
            text += line

# 去掉非中文字符
# Remove non-Chinese characters.
def clean_str(string):
    """Strip everything except CJK characters from *string*.

    Fixed: the original pattern ``……[\\u4e00-\\u9fff]`` matched a literal
    "……" followed by a Chinese character — the opposite of the stated
    intent ("remove non-Chinese characters").  Now every character
    outside the CJK Unified Ideographs range is replaced by a space,
    runs of whitespace are collapsed, and the result is stripped.

    :param string: raw text line from an e-mail.
    :return: cleaned text containing only Chinese characters and single spaces.
    """
    # Replace any character outside the CJK Unified Ideographs block.
    string = re.sub(r'[^\u4e00-\u9fff]', ' ', string)
    # Collapse the whitespace runs left behind by the substitution.
    string = re.sub(r'\s+', ' ', string)
    return string.strip()

def get_data_in_a_file(orginal_path, save_path='all_email.txt'):
    """Read one e-mail file and return the cleaned tail of its body.

    The file is decoded as GB2312 (decode errors ignored), each line is
    stripped of surrounding whitespace, passed through ``clean_str``,
    and the pieces are concatenated.

    :param orginal_path: path of the raw e-mail file.
    :param save_path: unused; kept for API compatibility.
    :return: at most the last 200 characters of the cleaned body.
    """
    with open(orginal_path, 'r', encoding='gb2312', errors='ignore') as fd:
        pieces = [clean_str(raw.strip().strip('\n')) for raw in fd]
    # Keep only the trailing 200 characters of the message.
    return ''.join(pieces)[-200:]

label = ''
data_list = []
data_file = '/home/aistudio/all_email.txt'
# index_file = 'text_index.txt'
index_file = '/home/aistudio/trec06c/full/index'
if os.path.exists(data_file):
    # A preprocessed corpus already exists: load "text<TAB>label" lines.
    print(f"{data_file} file exists")
    with open(data_file, 'r') as fd:
        for line in fd.readlines():
            data_list.append(line.strip().split('\t'))
else:
    # Build the corpus from the trec06c index.  Each index line looks like
    # "spam ../data/000/000"; spam -> label '0', ham -> label '1'.
    # Fixed: the output file was reopened once per index line and its
    # handle was bound to the same name `f` as the index file; it is now
    # opened once, under a distinct name.
    with open(index_file, 'r') as index_fd, open(data_file, 'a+') as out_fd:
        for line in index_fd:
            str_list = line.replace('../', '').split(' ')
            if str_list[0] == 'spam':
                label = '0'
            elif str_list[0] == 'ham':
                label = '1'
            else:
                # Unknown tag: skip the record instead of silently reusing
                # the previous line's label (stale-label bug).
                continue
            text = get_data_in_a_file(
                '/home/aistudio/trec06c/' + str(str_list[1].split('\n')[0]))
            data_list.append([text, label])
            out_fd.write(text + '\t' + label + '\n')

# Shuffle once, then deal every record into a split by its position:
# index % 10 == 0 -> dev, index % 10 in {1, 5} -> test, the rest -> train
# (roughly a 10% / 20% / 70% split).
random.shuffle(data_list)
train_list, dev_list, test_list = [], [], []
for index, line in enumerate(data_list):
    bucket = index % 10
    if bucket == 0:
        dev_list.append(line)
    elif bucket in (1, 5):
        test_list.append(line)
    else:
        train_list.append(line)

# Custom in-memory dataset.
class SelfDefineDataset(paddle.io.Dataset):
    """Minimal ``paddle.io.Dataset`` over a plain Python list of records."""

    def __init__(self, data):
        super(SelfDefineDataset, self).__init__()
        # data: list of [text, label] pairs.
        self.data = data

    def __getitem__(self, idx):
        """Return the raw [text, label] record at *idx*."""
        return self.data[idx]

    def __len__(self):
        return len(self.data)

    def get_labels(self):
        """Return the label vocabulary: '0' = spam, '1' = ham."""
        # Fixed: the parameter was misspelled ``selfs``.
        return ['0', '1']

train_ds = MapDataset(SelfDefineDataset(train_list))
dev_ds = MapDataset(SelfDefineDataset(dev_list))
test_ds = MapDataset(SelfDefineDataset(test_list))

# Build the word vocabulary from the whole corpus.
# Fixed: the original only built the vocabulary when 'webdict.txt' did NOT
# exist (the existing-file branch merely deleted the file), leaving vocab
# with just [UNK]/[PAD]; it also compared tokens with identity (`is " "`)
# and used an O(n) list-membership test per token.
vocab = {"[UNK]": 0, "[PAD]": 1}
dict_path = 'webdict.txt'
if os.path.exists(dict_path):
    # Drop a stale cache file; the vocabulary is rebuilt below either way.
    os.remove(dict_path)
seen = set()
dict_list = []
for data in data_list:
    for word in jieba.lcut(data[0]):
        # Skip space tokens; keep first-seen order so ids are stable.
        if word != " " and word not in seen:
            seen.add(word)
            dict_list.append(word)

for index, word in enumerate(dict_list):
    # Ids 0 and 1 are reserved for [UNK] and [PAD].
    vocab[word] = index + 2

# Build the id-level example transform for the LSTM dataloaders.
def convert_example(example, vocab, unk_token_id=0, is_test=False):
    """Segment the text with jieba and map each token to its vocab id.

    :param example: [text, label] record (label ignored when *is_test*).
    :param vocab: dict mapping token -> id.
    :param unk_token_id: id substituted for out-of-vocabulary tokens.
    :param is_test: when True, no label is returned.
    :return: (input_ids, valid_length[, label])
    """
    input_ids = [vocab.get(tok, unk_token_id) for tok in jieba.cut(example[0])]
    # Number of real (non-padding) tokens in this sample.
    valid_length = np.array(len(input_ids), dtype='int64')

    if is_test:
        return input_ids, valid_length
    label = np.array(example[-1], dtype='int64')
    return input_ids, valid_length, label

def convert_example_pre(example, tokenizer, label_list, max_seq_len=512, is_test=False):
    """Encode one example with a pretrained tokenizer for BERT.

    :param example: [text, label] pair, or bare text when *is_test*.
    :param tokenizer: a paddlenlp pretrained tokenizer.
    :param label_list: label vocabulary (kept for API compatibility; unused).
    :param max_seq_len: maximum encoded sequence length.
    :param is_test: when True, no label is returned.
    :return: (input_ids, token_type_ids[, label])
    """
    if not is_test:
        text, label = example
    else:
        text = example
    input_encode = tokenizer.encode(text, max_seq_len=max_seq_len)
    input_ids = input_encode['input_ids']
    # Fixed: the tokenizer output key is 'token_type_ids' (plural);
    # 'token_type_id' raised a KeyError on every call.
    token_type_ids = input_encode['token_type_ids']

    if not is_test:
        label = np.array([int(label)], dtype='int64')
        return input_ids, token_type_ids, label
    else:
        return input_ids, token_type_ids

def create_dataloader_pre(
        dataset,
        trans_fn=None,
        mode='train',
        batch_size=1,
        batchify_fn=None
):
    """Wrap *dataset* in a ``paddle.io.DataLoader`` for the BERT pipeline.

    Training data is drawn through a ``DistributedBatchSampler`` with
    shuffling enabled; dev/test data use a plain ``BatchSampler`` without.

    :param dataset: a ``MapDataset``.
    :param trans_fn: optional per-example transform, applied lazily.
    :param mode: 'train' enables shuffling and distributed sampling.
    :param batch_size: samples per batch.
    :param batchify_fn: collate function that pads/stacks a batch.
    """
    if trans_fn:
        dataset = dataset.map(trans_fn, lazy=True)

    is_train = mode == 'train'
    sampler_cls = (paddle.io.DistributedBatchSampler
                   if is_train else paddle.io.BatchSampler)
    sampler = sampler_cls(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=is_train)

    return paddle.io.DataLoader(
        dataset=dataset,
        return_list=True,
        batch_sampler=sampler,
        collate_fn=batchify_fn,
    )
# Pretrained tokenizer plus batching setup for the BERT pipeline.
tokenize = ppnlp.transformers.BertTokenizer.from_pretrained("bert-base-chinese")
label_list = SelfDefineDataset(train_list).get_labels()
trans_fn_pre = partial(convert_example_pre, tokenizer=tokenize,
                       label_list=label_list, max_seq_len=128, is_test=False)

# Pad input/segment ids to the longest sample in the batch; stack labels.
_batchify_pre = Tuple(
    Pad(pad_val=tokenize.pad_token_id, axis=0),       # input_ids
    Pad(pad_val=tokenize.pad_token_type_id, axis=0),  # segment_ids
    Stack(dtype='int64'),                             # labels
)
batchify_fn = lambda sample, fn=_batchify_pre: [data for data in fn(sample)]

train_dataloader_pre = create_dataloader_pre(
    train_ds,
    trans_fn=trans_fn_pre,
    mode='train',
    batch_size=64,
    batchify_fn=batchify_fn,
)

dev_dataloader_pre = create_dataloader_pre(
    dev_ds,
    trans_fn=trans_fn_pre,
    mode='dev',
    batch_size=64,
    batchify_fn=batchify_fn,
)

test_dataloader_pre = create_dataloader_pre(
    test_ds,
    trans_fn=trans_fn_pre,
    mode='test',
    batch_size=64,
    batchify_fn=batchify_fn,
)
# Build the model: BERT fine-tuned for binary (spam/ham) classification.
model = ppnlp.transformers.BertForSequenceClassification.from_pretrained('bert-base-chinese', num_classes=2)
learning_rate = 1e-5
epochs = 10
# NOTE(review): "warmup_proption" / "num_trainnings_steps" are misspelled
# ("proportion" / "training_steps") but are referenced by these names
# below; renaming must be done at every use site at once.
warmup_proption = 0.1
weight_decay = 0.1
# Total optimizer steps over all epochs; the first 10% are linear warmup.
num_trainnings_steps = len(train_dataloader_pre) * epochs
num_warmup_steps = int(warmup_proption * num_trainnings_steps)

def get_lr_factor(current_step, warmup_steps=None, total_steps=None):
    """Linear warmup followed by linear decay, as an LR multiplier.

    The factor rises linearly from 0 to 1 over the first *warmup_steps*
    steps, then decays linearly from 1 back to 0 at *total_steps*.

    :param current_step: current global optimizer step.
    :param warmup_steps: warmup length; defaults to the module-level
        ``num_warmup_steps``.
    :param total_steps: total schedule length; defaults to the
        module-level ``num_trainnings_steps``.
    :return: multiplicative factor in [0, 1] applied to the base LR.
    """
    if warmup_steps is None:
        warmup_steps = num_warmup_steps
    if total_steps is None:
        total_steps = num_trainnings_steps
    if current_step < warmup_steps:
        return float(current_step) / float(max(1, warmup_steps))
    # Fixed: the decay denominator was ``num_warmup_steps - num_warmup_steps``
    # (always 0, clamped to 1), which made the factor explode instead of
    # decaying linearly to 0.
    return max(0.0,
               float(total_steps - current_step) /
               float(max(1, total_steps - warmup_steps)))

# Learning-rate scheduler: base LR scaled by the warmup/decay factor.
lr_scheduler = paddle.optimizer.lr.LambdaDecay(learning_rate, lr_lambda=get_lr_factor)
# Apply weight decay to every parameter except biases and norm layers.
# Hoisted: the original rebuilt this name list on every single
# apply_decay_param_fun query; compute it once up front.
decay_param_names = {
    p.name for n, p in model.named_parameters()
    if not any(nd in n for nd in ['bias', 'norm'])
}
# Optimizer
optimizer = paddle.optimizer.AdamW(
    learning_rate=lr_scheduler,
    parameters=model.parameters(),
    weight_decay=weight_decay,
    apply_decay_param_fun=lambda x: x in decay_param_names,
)

# Loss function
criterion = paddle.nn.loss.CrossEntropyLoss()
# Evaluation metric
metric = paddle.metric.Accuracy()

def evaluate(model, criterion, metric, data_loader):
    """Run one evaluation pass and report mean loss and accuracy.

    Puts *model* in eval mode, averages the loss over *data_loader*,
    accumulates accuracy via *metric*, then restores train mode and
    resets the metric before returning.

    :param model: network taking (input_ids, segment_ids) -> logits.
    :param criterion: loss function over (logits, labels).
    :param metric: a ``paddle.metric.Accuracy`` instance.
    :param data_loader: yields (input_ids, segment_ids, labels) batches.
    :return: (mean_loss, accuracy)
    """
    model.eval()
    metric.reset()
    losses = []
    for batch in data_loader:
        input_ids, segment_ids, labels = batch
        logits = model(input_ids, segment_ids)
        loss = criterion(logits, labels)
        # Store plain floats so np.mean works on them directly.
        losses.append(float(loss))
        correct = metric.compute(logits, labels)
        metric.update(correct)
    # Hoisted out of the loop; also avoids NameError on an empty loader.
    accu = metric.accumulate()
    # Fixed: the original closed the % tuple early —
    # `"... %.5f" % (np.mean(losses)), accu` — raising "not enough
    # arguments for format string" on the first evaluation.
    print("eval loss: %.5f, accu : %.5f" % (np.mean(losses), accu))
    model.train()
    metric.reset()

    return np.mean(losses), accu


# Training loop: log metrics to VisualDL, evaluate on dev once per epoch.
# Fixed: LogWriter was used but never imported anywhere in this file.
from visualdl import LogWriter

global_step = 0
with LogWriter(logdir='./log') as write:
    for epoch in range(1, epochs + 1):
        for step, batch in enumerate(train_dataloader_pre, start=1):
            input_ids, segment_ids, labels = batch
            logits = model(input_ids, segment_ids)
            loss = criterion(logits, labels)
            probs = F.softmax(logits, axis=1)
            correct = metric.compute(probs, labels)
            metric.update(correct)
            acc = metric.accumulate()

            global_step += 1
            if global_step % 50 == 0:
                # float() unwraps the scalar loss tensor for %-formatting
                # and for the scalar logger.
                print("global step %d, epoch: %d, batch: %d, loss: %.5f, acc: %.5f"
                      % (global_step, epoch, step, float(loss), acc))
                write.add_scalar(tag='train/loss', step=global_step, value=float(loss))
                write.add_scalar(tag='train/acc', step=global_step, value=acc)
            loss.backward()
            optimizer.step()
            lr_scheduler.step()
            # Fixed: paddle 2.x optimizers expose clear_grad();
            # clear_gradients() is the removed 1.x API.
            optimizer.clear_grad()
        eval_loss, eval_acc = evaluate(model, criterion, metric, dev_dataloader_pre)

        # Log the per-epoch evaluation results.
        write.add_scalar(tag='eval/loss', step=epoch, value=eval_loss)
        write.add_scalar(tag='eval/acc', step=epoch, value=eval_acc)


def create_dataloader(
        datasets,
        trans_fn=None,
        mode='train',
        batch_size=1,
        batchify_fn=None
):
    """Build a plain ``paddle.io.DataLoader`` for the LSTM pipeline.

    Fixes over the original:
    - parameter renamed ``tran_fn`` -> ``trans_fn`` to match every call
      site below (they all pass ``trans_fn=...``, which raised TypeError);
    - ``dataset`` is defined even when no transform is given (it was a
      NameError otherwise);
    - uses ``MapDataset.map`` for the lazy transform, consistent with
      ``create_dataloader_pre`` (``apply`` is not the mapping API used
      elsewhere in this file).

    :param datasets: a ``MapDataset``.
    :param trans_fn: optional per-example transform, applied lazily.
    :param mode: informational split name; shuffling is not configured
        here (matching the original behavior).
    :param batch_size: samples per batch.
    :param batchify_fn: collate function that pads/stacks a batch.
    """
    dataset = datasets
    if trans_fn:
        dataset = dataset.map(trans_fn, lazy=True)
    return paddle.io.DataLoader(
        dataset,
        return_list=True,
        batch_size=batch_size,
        collate_fn=batchify_fn
    )

# Example transform and batching for the LSTM pipeline.
trans_fn = partial(
    convert_example,
    vocab=vocab,
    unk_token_id=vocab.get('[UNK]', 1),
    is_test=False,
)

# Pad token ids to the longest sample in the batch; stack lengths/labels.
_lstm_batchify = Tuple(
    Pad(axis=0, pad_val=vocab['[PAD]']),  # input_ids
    Stack(dtype='int64'),                 # seq_len
    Stack(dtype='int64'),                 # label
)
batchify_fn = lambda samples, fn=_lstm_batchify: [data for data in fn(samples)]

train_data_loader = create_dataloader(
    train_ds,
    trans_fn=trans_fn,
    batch_size=32,
    mode='train',
    batchify_fn=batchify_fn
)

# Fixed: the dev and test loaders were built with mode='train'; label each
# loader with its actual split (mode is informational in create_dataloader,
# but the mislabel would silently enable shuffling if mode is ever honored).
dev_data_loader = create_dataloader(
    dev_ds,
    trans_fn=trans_fn,
    batch_size=32,
    mode='dev',
    batchify_fn=batchify_fn
)

test_data_loader = create_dataloader(
    test_ds,
    trans_fn=trans_fn,
    batch_size=32,
    mode='test',
    batchify_fn=batchify_fn
)

def convert_tokens_to_ids(tokens, vocab):
    """Map a sequence of tokens to their vocabulary ids.

    Unknown tokens map to the id of '[UNK]' (0 when '[UNK]' itself is
    absent from *vocab*).

    :param tokens: iterable of token strings.
    :param vocab: dict mapping token -> id.
    :return: list of int ids, one per token.
    """
    unk_id = vocab.get('[UNK]', 0)
    # Fixes over the original: it appended the list to itself
    # (``ids.append(ids)``), silently dropped every id-0 token via
    # ``if id:`` (exactly the [UNK] id), and never returned the result.
    return [vocab.get(token, unk_id) for token in tokens]

# Network: Embedding -> seq2vec LSTMEncoder -> FC -> softmax classifier.
class LSTMModel(nn.Layer):
    """LSTM text classifier that outputs class probabilities."""

    def __init__(self,
                 vocab_size,
                 num_classes,
                 emb_dim=64,
                 padding_idx=0,
                 lstm_hidden_size=96,
                 direction='forward',
                 lstm_layers=2,
                 dropout_rate=0,
                 pooling_type=None,
                 fc_hidden_size=48):
        super().__init__()
        # Token ids -> dense word embeddings ([PAD] row frozen at zero).
        self.embedder = nn.Embedding(
            num_embeddings=vocab_size,
            embedding_dim=emb_dim,
            padding_idx=padding_idx,
        )
        # Word embeddings -> a single sentence representation vector.
        self.lstm_encoder = ppnlp.seq2vec.LSTMEncoder(
            emb_dim,
            lstm_hidden_size,
            num_layers=lstm_layers,
            direction=direction,
            dropout=dropout_rate,
            pooling_type=pooling_type,
        )
        # get_output_dim() reports the encoder's output width (doubled
        # for a bidirectional encoder).
        self.fc = nn.Linear(self.lstm_encoder.get_output_dim(), fc_hidden_size)
        # Final classification head.
        self.output_layer = nn.Linear(fc_hidden_size, num_classes)

    def forward(self, text, seq_len):
        """Return class probabilities for a batch of token-id sequences.

        :param text: int tensor of shape (batch_size, num_tokens).
        :param seq_len: valid (unpadded) length of each sample.
        :return: (batch_size, num_classes) softmax probabilities.
        """
        embedded = self.embedder(text)                 # (B, T, emb_dim)
        sentence_repr = self.lstm_encoder(embedded, sequence_length=seq_len)
        hidden = paddle.tanh(self.fc(sentence_repr))   # (B, fc_hidden_size)
        logits = self.output_layer(hidden)             # (B, num_classes)
        # NOTE(review): probabilities are returned and later fed to
        # CrossEntropyLoss, which applies softmax again — confirm the
        # double softmax is intended before changing it.
        return F.softmax(logits, axis=1)

# Instantiate a bidirectional LSTM classifier over the jieba vocabulary;
# padding_idx keeps the [PAD] embedding fixed at zero.
model = LSTMModel(
    len(vocab),
    2,
    direction='bidirectional',
    padding_idx=vocab['[PAD]']
)
# Wrap in the high-level paddle.Model API for prepare/fit/predict.
model = paddle.Model(model)

# Configure the model
optimizer = paddle.optimizer.Adam(
    parameters=model.parameters(),
    learning_rate=1e-4
)

# NOTE(review): LSTMModel.forward already applies softmax, and
# CrossEntropyLoss softmaxes its input again — confirm this is intended.
loss = paddle.nn.CrossEntropyLoss()
metric = paddle.metric.Accuracy()
model.prepare(optimizer, loss, metric)

# Set the VisualDL log directory
log_dir = './visualdl'
callback = paddle.callbacks.VisualDL(log_dir=log_dir)

# Train for 10 epochs, saving a checkpoint every 5 epochs.
model.fit(train_data_loader,dev_data_loader,epochs=10, save_dir='./checkpoints', save_freq=5, callbacks=callback)

# Sanity-check the trained model on the held-out test split.
label_map = {0:'垃圾邮件', 1:'正常邮件'}
results = model.predict(test_data=test_data_loader)[0]

predictions = []
for batch_probs in results:
    # argmax over the class axis -> predicted label name per sample.
    batch_ids = np.argmax(batch_probs, axis=1).tolist()
    predictions.extend(label_map[i] for i in batch_ids)

# Show the first five test samples with their predicted labels.
for idx, data in enumerate(test_list[:5]):
    print(f"data: {data}, \t label: {predictions[idx]}")