import json
import torch
from torch.utils.data import Dataset, DataLoader
from collections import Counter
from utils.DataProcesser import segment, cut_sentence, cut_paragraph
from torchtext.vocab import Vocab

import os
from tqdm import tqdm


class MyDataset(Dataset):
    """In-memory dataset of (source, target, label) triples read from a JSON-lines file."""

    def __init__(self):
        # Three parallel lists: index i holds the fields of one sample.
        self.source_text_li = []
        self.target_text_li = []
        self.label_li = []

    def __len__(self):
        return len(self.source_text_li)

    def __getitem__(self, index):
        return (self.source_text_li[index],
                self.target_text_li[index],
                self.label_li[index])

    def load_line(self, line):
        """Parse one JSON line and append its fields.

        :param line: a JSON string with "source", "target" and "labelA" keys
        :raises ValueError: if any required key is missing
        """
        record = json.loads(line)
        for key in ("source", "target", "labelA"):
            if key not in record:
                raise ValueError("data is incomplete")
        self.source_text_li.append(record["source"])
        self.target_text_li.append(record["target"])
        self.label_li.append(int(record["labelA"]))

    def load_all_line(self, path):
        """Load every JSON line of the file at *path* into this dataset."""
        with open(path, mode='r', encoding='utf-8') as f:
            for line in f:
                self.load_line(line)


def get_vovab(path, vocab_path=r'D:\data\sohu\LL_topic\vocab'):
    """Build a torchtext Vocab from the training data, or load a cached one.

    NOTE: when the cache file at *vocab_path* exists, *path* is ignored and
    the cached Vocab is returned as-is.

    :param path: path to the JSON-lines training data used to count tokens
    :param vocab_path: cache location for the serialized Vocab
        (default kept for backward compatibility with the original
        hard-coded path)
    :return: Vocab object
    """
    # Reuse the cached vocabulary when available to skip a full pass over the data.
    if os.path.exists(vocab_path):
        return torch.load(vocab_path)

    counter = Counter()
    dataset = MyDataset()
    dataset.load_all_line(path)
    for source, target, _label in tqdm(dataset):
        counter.update(segment(source))
        counter.update(segment(target))
    # '<unk>' and '<pad>' are reserved specials; tokens seen fewer than
    # 5 times are dropped from the vocabulary.
    vocab = Vocab(counter, min_freq=5, specials=('<unk>', '<pad>'))
    torch.save(vocab, vocab_path)
    return vocab


def str_to_index(vocab, string):
    """Look up a single token's index.

    :param vocab: Vocab (any mapping supporting ``vocab[token]`` works)
    :param string: one token
    :return: integer index of the token
    """
    index = vocab[string]
    return index


def sentence_to_index(vocab, sentence, words_len):
    """Tokenize *sentence* and convert it into a fixed-length list of vocab indices.

    :param vocab: Vocab
    :param sentence: raw sentence string
    :param words_len: target length; shorter token lists are padded,
        longer ones truncated
    :return: list of exactly ``words_len`` vocab indices
    """
    words = segment(sentence)
    if len(words) <= words_len:
        # Pad with the lowercase '<pad>' special registered in get_vovab.
        # BUG FIX: the original padded with '<PAD>', which is not in the
        # vocab and therefore silently mapped every padding slot to '<unk>'.
        words.extend(['<pad>'] * (words_len - len(words)))
    else:
        # Truncate to the target length.
        words = words[:words_len]
    return [str_to_index(vocab, word) for word in words]


# Module-level side effect: building/loading the vocabulary runs at import time.
# NOTE(review): the training-data path is hard-coded; consider moving this
# under an `if __name__ == "__main__":` guard or into configuration.
vocab = get_vovab(r'D:\data\sohu\LL_topic\train.txt')
vocab_size = len(vocab)


# print(str_to_index(vocab, '<unk>'))

def category_to_one_hot(label):
    """One-hot encode a binary match label.

    [1, 0] means "no match", [0, 1] means "match".
    :param label: 0 for no match; any other value counts as a match
    :return: a two-element one-hot list
    """
    return [1, 0] if label == 0 else [0, 1]


def get_dataloader(path, batch_size=32, shuffle=True, drop_last=True):
    """Build a DataLoader over the JSON-lines dataset at *path*.

    The loader hyper-parameters were previously hard-coded; they are now
    keyword parameters whose defaults preserve the original behavior.

    :param path: train / valid / test dataset file path
    :param batch_size: samples per batch (default 32, as before)
    :param shuffle: reshuffle the data every epoch (default True, as before)
    :param drop_last: drop the final incomplete batch (default True, as before)
    :return: DataLoader yielding batches produced by :func:`collate_fn`
    """
    dataset = MyDataset()
    dataset.load_all_line(path)
    return DataLoader(dataset,
                      batch_size=batch_size,
                      shuffle=shuffle,
                      collate_fn=collate_fn,
                      drop_last=drop_last)


def collate_fn(batch, dw_num=500, sw_num=20, pw_num=20):
    """Collate a batch into word-, sentence- and paragraph-level index tensors.

    Sentence level: sentences are delimited by "。" (the Chinese full stop).
    Paragraph level: paragraphs are delimited by two spaces (per the
    dataset's format).

    The length constants were previously hard-coded locals; they are now
    keyword parameters whose defaults preserve the original behavior
    (DataLoader still calls this with the batch argument only).

    :param batch: iterable of (source, target, label) triples
    :param dw_num: words kept per document at word level (default 500)
    :param sw_num: words kept per sentence at sentence level (default 20)
    :param pw_num: words kept per sentence at paragraph level (default 20)
    :return: (source_w, target_w, source_s, target_s, source_p, target_p,
              labels) — assuming cut_sentence/cut_paragraph yield fixed
              counts, shapes are [B, dw_num], [B, 10, sw_num],
              [B, 3, 5, pw_num] and [B, 2]; TODO confirm against the helpers.
    """
    source_w_li, target_w_li, label_li = [], [], []
    source_s_li, target_s_li = [], []
    source_p_li, target_p_li = [], []

    for source, target, label in batch:
        # Word level: one document -> dw_num word indices.
        source_w_li.append(sentence_to_index(vocab, source, dw_num))
        target_w_li.append(sentence_to_index(vocab, target, dw_num))
        # Sentence level: 10 sentences per document, sw_num words each.
        ss_tem, ts_tem = [], []
        for ss, ts in zip(cut_sentence(source, 10), cut_sentence(target, 10)):
            ss_tem.append(sentence_to_index(vocab, ss, sw_num))
            ts_tem.append(sentence_to_index(vocab, ts, sw_num))
        source_s_li.append(ss_tem)
        target_s_li.append(ts_tem)
        # Paragraph level: 3 paragraphs x 5 sentences x pw_num words.
        sp_tem, tp_tem = [], []
        for sp, tp in zip(cut_paragraph(source, 3, 5), cut_paragraph(target, 3, 5)):
            mid_tems, mid_temt = [], []
            for sps, tps in zip(sp, tp):
                mid_tems.append(sentence_to_index(vocab, sps, pw_num))
                mid_temt.append(sentence_to_index(vocab, tps, pw_num))
            sp_tem.append(mid_tems)
            tp_tem.append(mid_temt)
        source_p_li.append(sp_tem)
        target_p_li.append(tp_tem)
        label_li.append(category_to_one_hot(label))

    # Every element within a batch must share the same length — the
    # pad/truncate inside sentence_to_index guarantees that.
    source_w_li = torch.LongTensor(source_w_li)  # [B, dw_num]
    target_w_li = torch.LongTensor(target_w_li)
    source_s_li = torch.LongTensor(source_s_li)  # [B, 10, sw_num]
    target_s_li = torch.LongTensor(target_s_li)
    source_p_li = torch.LongTensor(source_p_li)  # [B, 3, 5, pw_num]
    target_p_li = torch.LongTensor(target_p_li)
    # Labels must be float (one-hot training targets), not Long;
    # torch.Tensor defaults to float32.
    label_li = torch.Tensor(label_li)
    return source_w_li, target_w_li, source_s_li, target_s_li, source_p_li, target_p_li, label_li
