import logging

import jieba
import numpy as np
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm

jieba.setLogLevel(logging.INFO)


def read_dict(voc_dict_path):
    """Load a ``word,index`` vocabulary file into a dict.

    Args:
        voc_dict_path: path to a UTF-8 text file with one ``word,index``
            pair per line (e.g. ``"<PAD>,0"``).

    Returns:
        dict mapping each word (str) to its integer index.
    """
    voc_dict = {}
    # `with` guarantees the handle is closed (the original leaked it).
    with open(voc_dict_path, encoding="utf-8") as f:
        for line in f:
            parts = line.split(",")
            # parts[1] may carry the trailing newline — strip before int().
            voc_dict[parts[0]] = int(parts[1].strip())
    return voc_dict


def load_data(data_path, data_stop_path):
    """Read the labelled corpus, segment each text with jieba and drop stop words.

    Args:
        data_path: UTF-8 file; per line, character 0 is the label and the text
            starts at character 2. The first (header) line is skipped.
        data_stop_path: stop-word file, one word per line.

    Returns:
        (data, max_len_seq): ``data`` is a list of ``[label, tokens]`` pairs,
        ``max_len_seq`` is the length of the longest token list.
    """
    # `with` guarantees both handles are closed (the original leaked them).
    with open(data_path, encoding="utf-8") as f:
        data_list = f.readlines()[1:]  # skip the header row
    with open(data_stop_path, encoding="utf-8") as f:
        stops_word = [line.strip() for line in f]

    # BUG FIX: the original filters used regex-style escapes ('\(' etc.),
    # which are two-character strings, so punctuation tokens like '(' never
    # matched the membership test below and were kept. Plain characters are
    # required here; '\\' (one backslash), '\t' and '\n' are real escapes.
    filters = ['!', '"', '#', '$', '%', '&', '(', ')', '*', '+', ',', '-',
               '.', '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\', ']',
               '^', '_', '`', '{', '|', '}', '~', '\t', '\n', '\x97', '\x96',
               '”', '“']
    stops_word.extend(filters)
    # Set gives O(1) membership instead of an O(n) list scan per token.
    stops_word = set(stops_word)

    # Removed dead code: min_seq / top_n / UNK / PAD constants and the
    # word-frequency dict were computed but never used or returned.
    data = []
    max_len_seq = 0
    # NOTE(review): only the first 1000 lines are processed — presumably a
    # debugging cap; confirm before training on the full corpus.
    for item in tqdm(data_list[:1000]):
        label = item[0]
        content = item[2:].strip()
        seg_res = [w for w in jieba.cut(content, cut_all=False)
                   if w not in stops_word]
        max_len_seq = max(max_len_seq, len(seg_res))
        data.append([label, seg_res])
    return data, max_len_seq


class TextCls(Dataset):
    """Torch dataset mapping each text sample to a fixed-length sequence of
    vocabulary indices plus an integer class label.

    Args:
        data_path: corpus file read by ``load_data``.
        data_stop_path: stop-word list, one word per line.
        voc_dict_path: ``word,index`` vocabulary file read by ``read_dict``;
            must contain the special entries ``<UNK>`` and ``<PAD>``.
    """

    def __init__(self, data_path, data_stop_path, voc_dict_path):
        self.data_path = data_path
        self.data_stop_path = data_stop_path
        self.voc_dict = read_dict(voc_dict_path)
        self.data, self.max_len_seq = load_data(self.data_path, self.data_stop_path)
        # Shuffle once so consecutive samples are not in file order.
        np.random.shuffle(self.data)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item):
        """Return ``(label, indices)`` where ``indices`` is a numpy array of
        length ``max_len_seq``, right-padded with the ``<PAD>`` index."""
        label, word_list = self.data[item]
        label = int(label)
        unk_idx = self.voc_dict["<UNK>"]
        # Unknown words fall back to the <UNK> index.
        input_idx = [self.voc_dict.get(word, unk_idx) for word in word_list]
        if len(input_idx) < self.max_len_seq:
            # BUG FIX: the vocabulary key is "<PAD>", not "PAD" — the original
            # lookup was a guaranteed KeyError for any short sample.
            pad_idx = self.voc_dict["<PAD>"]
            input_idx += [pad_idx] * (self.max_len_seq - len(input_idx))
        return label, np.array(input_idx)


def data_loader(dataset, batch_size=10, shuffle=True):
    """Wrap *dataset* in a ``DataLoader`` with the given batching options."""
    loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
    return loader


if __name__ == '__main__':
    import sys

    # BUG FIX: data_loader() was called without its required `dataset`
    # argument, so the script raised TypeError immediately. Build the
    # dataset from command-line paths:
    #   python this_script.py <data_csv> <stop_word_file> <voc_dict_file>
    data_path, stop_path, dict_path = sys.argv[1:4]
    dataset = TextCls(data_path, stop_path, dict_path)
    dataloader = data_loader(dataset)
    for i, batch in enumerate(dataloader):
        print(batch)
