import os
import re
import jieba
import torch
from tqdm import tqdm
import numpy as np
import torch.utils.data as Data

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


# Strip punctuation: keep only ASCII letters, digits, and CJK characters.
def clean_str(string):
    """Replace every character outside letters/digits/Chinese with a space,
    then trim leading and trailing whitespace."""
    return re.sub(r"[^A-Za-z0-9\u4e00-\u9fbb]", " ", string).strip()


# Read samples and labels; the last token of each line is the integer label.
def read_data(path, file_name):
    """Read a dataset file and split each line into (token list, int label).

    Each line is cleaned with ``clean_str``, segmented with jieba, and the
    final token is interpreted as the class label.

    Args:
        path: directory containing the dataset file.
        file_name: name of the file (e.g. "train.txt").

    Returns:
        (data, label): list of token lists and the parallel list of int labels.
    """
    data = []
    label = []
    path_whole = os.path.join(path, file_name)
    with open(path_whole, 'r', encoding='UTF-8') as f:
        lines = f.readlines()
    for line in tqdm(lines):
        # segment, then drop the space tokens clean_str introduced
        line_cut = [word for word in jieba.cut(clean_str(line)) if word != ' ']
        if not line_cut:
            # blank/punctuation-only lines would raise IndexError below; skip them
            continue
        data.append(line_cut[:-1])
        label.append(int(line_cut[-1]))

    return data, label


# Build the vocabulary from tokenised data and persist it to dict.txt.
def save_dic(data):
    """Count word frequencies, keep words that occur MORE than 5 times, and
    save the resulting word -> id mapping to dict.txt.

    Args:
        data: iterable of token lists (one list per sentence).

    Returns:
        dict mapping word -> integer id; id 0 is reserved for 'unk',
        which doubles as the padding and unknown-word token.
    """
    freq = {}
    vocab = {'unk': 0}  # id 0: padding / out-of-vocabulary token
    for line in tqdm(data):  # count word frequencies
        for word in line:
            freq[word] = freq.get(word, 0) + 1
    num = 1
    for w, f in freq.items():
        if f > 5:  # rare words (frequency <= 5) are excluded from the vocabulary
            vocab[w] = num
            num += 1
    # context manager guarantees the handle is closed even if the write fails
    with open("C:/Users/Administrator/PycharmProjects/pytorch/chinese_text_classification/THUCNews/data/dict.txt", 'w') as fh:
        fh.write(str(vocab))
    print("save dict successfully.")
    return vocab


# Map tokens to integer ids; max_len is the fixed sentence length
# (longer sentences are truncated, shorter ones padded with 0).
def word_id(data, vocab, max_len):
    """Convert tokenised sentences into fixed-length id vectors.

    Words missing from ``vocab`` map to 0 ('unk'); sentences shorter than
    ``max_len`` are right-padded with 0, longer ones are truncated.

    Args:
        data: iterable of token lists.
        vocab: word -> id mapping.
        max_len: target vector length.

    Returns:
        list of int64 numpy arrays, each of shape (max_len,).
    """
    unk = 0
    ids = []
    for line in data:
        # ids are integers, so allocate an int64 buffer (was float64)
        temp = np.zeros(shape=[max_len], dtype=np.int64)
        # slice first: no need to walk tokens past max_len
        for i, word in enumerate(line[:max_len]):
            temp[i] = vocab.get(word, unk)
        ids.append(temp)

    return ids


# Wrap id vectors and labels into a DataLoader (shuffled when training).
def build_batch(data, label, batch_size, is_train=True):
    """Build a DataLoader over (data, label) tensors on the global device.

    Args:
        data: sequence of equal-length id vectors (lists or numpy arrays).
        label: sequence of int class labels, parallel to ``data``.
        batch_size: number of samples per batch.
        is_train: shuffle batches when True (training), keep order otherwise.

    Returns:
        torch.utils.data.DataLoader yielding (data, label) batches.
    """
    # stack into one ndarray first: building a tensor directly from a list of
    # ndarrays takes torch's slow per-element path and warns in recent versions
    data = torch.as_tensor(np.asarray(data), dtype=torch.long).to(device)
    label = torch.as_tensor(label, dtype=torch.long).to(device)
    data_set = Data.TensorDataset(data, label)
    data_loader = Data.DataLoader(data_set, batch_size=batch_size, shuffle=is_train)

    return data_loader


# Load the saved vocabulary back from dict.txt.
def read_vocab():
    """Read dict.txt and return the saved word -> id mapping.

    Returns:
        dict mapping word -> integer id, as written by ``save_dic``.
    """
    import ast  # local import: only needed when loading a saved vocab
    with open("C:/Users/Administrator/PycharmProjects/pytorch/chinese_text_classification/THUCNews/data/dict.txt", 'r') as f:
        # literal_eval parses the repr'd dict without eval's code-execution risk
        dict_ = ast.literal_eval(f.read())
    return dict_


# Load a pretrained word2vec file and build the embedding matrix for vocab.
# (The vectors need not be self-trained; pretrained ones may work better.)
def read_embeddings_matrix(vocab, embedding_dim):
    """Return a (len(vocab), embedding_dim) tensor of word vectors.

    Words found in the word2vec file get their pretrained vector; all other
    ids (including 'unk') get a small random vector in [-0.01, 0.01).

    Args:
        vocab: word -> id mapping; ids index rows of the matrix.
        embedding_dim: dimensionality of the vectors in the word2vec file.

    Returns:
        torch.Tensor of shape (len(vocab), embedding_dim) on the global device.
    """
    max_words = len(vocab)
    embeddings_index = {}
    embedding_file = "C:/Users/Administrator/PycharmProjects/pytorch/chinese_text_classification/THUCNews/saved_dict/word2vec.txt"
    # the original leaked this file handle; a context manager guarantees closure
    with open(embedding_file, 'r', encoding='utf-8') as f:
        for line in tqdm(f):
            values = line.split()  # format: word v1 v2 ... vD
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs

    embedding_matrix = np.zeros((max_words, embedding_dim))
    for word, i in vocab.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is None:
            # unseen word: small random init keeps the row non-zero and trainable
            embedding_vector = np.random.uniform(low=-0.01, high=0.01, size=(embedding_dim,))
        embedding_matrix[i] = embedding_vector
    return torch.Tensor(embedding_matrix).to(device)


# Convenience wrapper: read data, build vocab/ids/loaders and the embedding matrix.
def get_all_need(max_len, batch_size, embedding_dim,
                 path="C:/Users/Administrator/PycharmProjects/pytorch/chinese_text_classification/THUCNews/data"):
    """Prepare everything training needs in one call.

    Args:
        max_len: maximum sentence length (pad/truncate target).
        batch_size: DataLoader batch size.
        embedding_dim: dimensionality of the pretrained word vectors.
        path: dataset directory containing train.txt and dev.txt
              (defaults to the original hard-coded location).

    Returns:
        (train_loader, dev_loader, embeddings_matrix)
    """
    train_file = "train.txt"
    dev_file = "dev.txt"
    train_data, train_label = read_data(path, train_file)
    dev_data, dev_label = read_data(path, dev_file)
    vocab = save_dic(train_data + dev_data)  # vocabulary built on train + dev
    train_ids = word_id(train_data, vocab, max_len)
    dev_ids = word_id(dev_data, vocab, max_len)
    train_loader = build_batch(train_ids, train_label, batch_size, is_train=True)
    dev_loader = build_batch(dev_ids, dev_label, batch_size, is_train=False)
    embeddings_matrix = read_embeddings_matrix(vocab, embedding_dim)

    return train_loader, dev_loader, embeddings_matrix
