import os.path
import time
import torch
from typing import overload
import pickle as pkl
from tqdm import tqdm
from torch.utils.data import Dataset, DataLoader

import jieba
from collections import Counter

UNK, PAD = '<UNK>', '<PAD>'


def build_vocab(file_path, tokenizer, max_size=10000, min_freq=5):
    """
    Build a word-to-index vocabulary from all ``.txt`` files in a directory.

    Each line is expected to be ``text<TAB>label``; only the text before the
    first tab is tokenized and counted.  ``class.txt`` is skipped.

    :param file_path: directory containing the ``.txt`` data files
    :param tokenizer: callable mapping a line of text to an iterable of tokens
                      (e.g. ``jieba.cut`` for word level, ``list`` for char level)
    :param max_size:  maximum number of regular words kept in the vocabulary
    :param min_freq:  minimum frequency (inclusive) for a word to be kept
    :return: dict mapping word -> index; '<PAD>' and '<UNK>' are appended
             as the last two entries
    """
    counter = Counter()

    for fname in os.listdir(file_path):
        # class.txt holds label names, not training text — skip it.
        if not fname.endswith(".txt") or fname == "class.txt":
            continue
        with open(os.path.join(file_path, fname), 'r', encoding="UTF-8") as f:
            for line in f:
                text = line.strip().split("\t")[0]
                if not text:
                    continue
                counter.update(tokenizer(text))

    # most_common() is sorted by frequency (descending, insertion-stable),
    # matching the original filter-then-sort behavior in one pass.
    # Fix: use >= so words occurring exactly min_freq times are kept
    # (the previous strict > silently dropped them).
    vocab = [word for word, freq in counter.most_common() if freq >= min_freq]
    vocab = vocab[:max_size] + ['<PAD>', '<UNK>']

    return {word: idx for idx, word in enumerate(vocab)}





def build_dataset(config, use_word):
    """
    Load the vocabulary and the train/dev/test splits.

    :param config:   object providing vocab_path, train_path, dev_path,
                     test_path and pad_size attributes
    :param use_word: True -> word-level tokenization via jieba;
                     False -> character-level (each char is a token)
    :return: (vocab, train, dev, test) where vocab is a word->index dict and
             each split is a list of (token_ids, label, seq_len) tuples
    """
    if use_word:
        tokenizer = jieba.cut
    else:
        tokenizer = lambda x: [y for y in x]  # split into single characters

    # Fix: the vocab file handle was opened without ever being closed
    # (and the identical load was duplicated in both branches).
    with open(config.vocab_path, "rb") as f:
        vocab = pkl.load(f)

    train = load_dataset(config.train_path, tokenizer, vocab, config.pad_size)
    dev = load_dataset(config.dev_path, tokenizer, vocab, config.pad_size)
    test = load_dataset(config.test_path, tokenizer, vocab, config.pad_size)

    return vocab, train, dev, test


def load_dataset(path, tokenizer, vocab, pad_size=32):
    """
    Read a tab-separated file (text, then label) and convert every line to a
    fixed-length example.

    :param path:      path to the data file, one ``text<TAB>label`` per line
    :param tokenizer: callable mapping text to an iterable of tokens
    :param vocab:     dict mapping token -> integer id; must contain '<UNK>'
                      (and '<PAD>' for padding lookups)
    :param pad_size:  fixed sequence length; longer inputs are truncated,
                      shorter ones padded with '<PAD>'
    :return: list of (token_ids, label, seq_len) where seq_len is the
             pre-padding length (capped at pad_size) and label is an int
    """
    contents = []
    # Hoist the loop-invariant '<UNK>' lookup out of the per-token loop.
    unk_id = vocab.get('<UNK>')

    with open(path, "r", encoding="UTF-8") as f:
        for line in tqdm(f):
            stripped = line.strip()
            if not stripped:
                continue
            content, label = stripped.split('\t')

            token = list(tokenizer(content))
            seq_len = len(token)
            if seq_len > pad_size:
                token = token[:pad_size]
                seq_len = pad_size
            else:
                token.extend(['<PAD>'] * (pad_size - seq_len))

            words = [vocab.get(word, unk_id) for word in token]
            contents.append((words, int(label), seq_len))

    return contents


def get_iterator(data, config, shuffle=False):
    """
    Wrap preprocessed examples in a BuildIterator dataset and return a
    DataLoader over it, batched with config.batch_size.
    """
    dataset = BuildIterator(data, config)
    loader = DataLoader(dataset, batch_size=config.batch_size, shuffle=shuffle)
    return loader


class BuildIterator(Dataset):
    """Map-style Dataset over a list of (token_ids, label, seq_len) tuples."""

    def __init__(self, data, config):
        super().__init__()
        self.data = data      # list of (token_ids, label, seq_len)
        self.config = config  # kept for callers that inspect the config

    def __getitem__(self, index):
        # Return the token ids as a tensor; label and length stay plain ints.
        token_ids, label, seq_len = self.data[index]
        return torch.tensor(token_ids), label, seq_len

    def __len__(self):
        return len(self.data)


def get_time_dif(start_time, end_time=None):
    """
    Return the elapsed time in seconds.

    With only ``start_time``, measures from ``start_time`` to now; with both
    arguments, returns ``end_time - start_time``.

    Note: the previous version used runtime-useless ``@overload`` stubs plus
    an untyped ``*args`` implementation; a single optional parameter expresses
    the same two call shapes, stays positional-compatible, and also accepts
    keyword calls.

    :param start_time: start timestamp (seconds, e.g. from ``time.time()``)
    :param end_time:   optional end timestamp; defaults to the current time
    :return: time difference in seconds as a float
    """
    if end_time is None:
        return time.time() - start_time
    return end_time - start_time

