import torch
import numpy as np
from sklearn.utils import resample
import time
from datetime import timedelta
import pandas as pd
from main import text_predeal, jiebafenci
from sklearn.model_selection import train_test_split
import re

MAX_VOCAB_SIZE = 10000
UNK, PAD = '<UNK>', '<PAD>'


def build_vocab(datas, tokenizer, max_size, min_freq):
    """Build a token -> index vocabulary from an iterable of texts.

    Tokens seen fewer than ``min_freq`` times are dropped, the remainder are
    ranked by descending frequency (stable sort, so ties keep first-seen
    order) and truncated to ``max_size`` entries; the special ``UNK`` and
    ``PAD`` tokens are appended with the two highest indices.
    """
    counts = {}
    for text in datas:
        for token in tokenizer(text):
            counts[token] = counts.get(token, 0) + 1
    ranked = sorted(
        (item for item in counts.items() if item[1] >= min_freq),
        key=lambda item: item[1],
        reverse=True,
    )[:max_size]
    vocab = {token: rank for rank, (token, _) in enumerate(ranked)}
    base = len(vocab)
    vocab.update({UNK: base, PAD: base + 1})
    return vocab


def build_dataset(config, use_word, is_fold = False):
    """
    Build the vocabulary and the train/dev/test datasets.

    Pipeline: read CSVs -> oversample the minority class -> clean text ->
    segment with jieba and drop stop words -> build the vocabulary over
    train+test text -> convert each comment to a fixed-length id sequence
    plus n-gram hash features and a keyword-weight score.

    Args:
        config: configuration object; the attributes read here are
            train_path, test_path, stop_words, pad_size, n_gram_vocab,
            batch_size and device.
        use_word (bool): True for space-separated word-level tokens,
            False for character-level tokens.
        is_fold (bool): True when the caller does its own k-fold split, so
            no dev split is made here.

    Returns:
        If is_fold: (vocab_size, train, test_iter, test_data).
        Otherwise: (vocab, train, dev, test_iter, test_data).
    """
    if use_word:
        tokenizer = lambda x: x.split(' ')  # word level: tokens separated by spaces
    else:
        tokenizer = lambda x: [y for y in x]  # character level

    # Load train/test data (note the different separators: tab vs comma).
    train_data = pd.read_csv(config.train_path, sep='\t')
    test_data = pd.read_csv(config.test_path, sep=',')
    
    # Oversample the minority class (label == 1) to reduce class imbalance.
    train_data_minority = train_data[train_data['label'] == 1]
    train_data_majority = train_data[train_data['label'] == 0]

    train_data_minority_upsampled = resample(
        train_data_minority, 
        replace=True,     # sample with replacement
        n_samples=(int)(len(train_data_majority)*0.5),  # grow minority to half the majority size
        random_state=123  # fixed seed for reproducibility
    )
    
    train_data_upsampled = pd.concat([train_data_majority, train_data_minority_upsampled])
    # Shuffle the combined frame so the minority duplicates are spread out.
    train_data = train_data_upsampled.sample(frac=1.0, random_state=123).reset_index(drop=True)
    

    # Strip non-text characters from the comments.
    train_data['comment'] = train_data['comment'].apply(lambda x: text_predeal(x))
    test_data['comment'] = test_data['comment'].apply(lambda x: text_predeal(x))

    # Load the stop-word list (one word per line, blanks skipped).
    stop_words = []
    with open(config.stop_words, 'r', encoding='utf-8') as f:
        stop_words = [line.strip() for line in f if line.strip()]

    # Segment the comments with jieba and remove stop words.
    train_temp = [jiebafenci(knob, stop_words) for knob in train_data['comment'].values]
    test_temp = [jiebafenci(knob, stop_words) for knob in test_data['comment'].values]    
    
    if is_fold == False:
        X_train, X_val, y_train, y_val = train_test_split(train_temp, train_data['label'], test_size=0.2, random_state=42)
    # NOTE(review): the vocabulary is built over train AND test text, which
    # leaks test-set tokens into the vocab — confirm this is intended.
    vocab = build_vocab(train_temp+test_temp, tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)

    def bi_gram_hash(sequence, t, buckets):
        """
        Hash-bucket index for the bigram feature at position t.

        Note: only the id at t-1 enters the hash (0 when t-1 is out of
        range); position t itself is carried by the word feature.

        Args:
            sequence: list of word ids
            t: current position
            buckets: number of hash buckets (config.n_gram_vocab)
        Returns:
            bucket index in [0, buckets)
        """
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        return (t1 * 14918087) % buckets

    def tri_gram_hash(sequence, t, buckets):
        """
        Hash-bucket index for the trigram feature at position t.

        Combines the ids at t-1 and t-2 (0 when out of range) using two
        large prime multipliers.

        Args:
            sequence: list of word ids
            t: current position
            buckets: number of hash buckets (config.n_gram_vocab)
        Returns:
            bucket index in [0, buckets)
        """
        t1 = sequence[t - 1] if t - 1 >= 0 else 0
        t2 = sequence[t - 2] if t - 2 >= 0 else 0
        return (t2 * 14918087 * 18408749 + t1 * 14918087) % buckets

    def load_dataset(datas, labels, pad_size=32, is_test=False, is_key=True):
        """
        Convert tokenized comments into model-ready sample tuples.

        Args:
            datas: list of tokenized comment strings
            labels: int labels (train/dev) or string ids (test)
            pad_size: fixed sequence length; shorter sequences are padded
                with PAD, longer ones truncated
            is_test: True when `labels` holds string ids rather than ints
            is_key: True to append a keyword-weight score to each tuple
        Returns:
            list of (word_ids, label, seq_len, bigram, trigram[, keywords_count])
        """
        contents = []
        leng = len(labels)
        i = 0

        if is_key:
            keyword_weights = {}
            # TODO(review): hard-coded absolute path — should come from config.
            with open('/data/sfq/oco/keywords.txt', 'r', encoding='utf-8') as f:
                for line in f:
                    keyword, weight = line.strip().split(',')  # each line: "keyword,weight"
                    keyword_weights[keyword] = float(weight)

        max_len = 0
        upper = 0
        while i < leng:
            content = datas[i]
            label = labels[i]
            words_line = []
            token = tokenizer(content)
            seq_len = len(token)

            if is_key:
                # Weighted count of keyword occurrences in the raw comment.
                # NOTE(review): `keyword` is interpolated unescaped into the
                # regex, so regex metacharacters in a keyword would change
                # the pattern; also `\b` boundaries may not behave as
                # intended for CJK text — confirm.
                keywords_count = 0
                for keyword in keyword_weights:
                    match = re.findall(r'\b' + keyword + r'\b', content, re.IGNORECASE)
                    if match:
                        keywords_count += len(match) * keyword_weights[keyword]

            
            if pad_size:
                if len(token) < pad_size:
                    token.extend([PAD] * (pad_size - len(token)))
                else:
                    # Track truncation statistics: longest sequence seen and
                    # how many sequences exceeded pad_size.
                    if len(token) > max_len:
                        max_len = len(token)
                    upper+=1
                    token = token[:pad_size]
                    seq_len = pad_size
            
            # Map tokens to vocabulary ids (UNK id for out-of-vocab tokens).
            for word in token:
                words_line.append(vocab.get(word, vocab.get(UNK)))

            buckets = config.n_gram_vocab
            bigram = []
            trigram = []
            # ------ n-gram hash features ------
            for j in range(pad_size):
                bigram.append(bi_gram_hash(words_line, j, buckets))
                trigram.append(tri_gram_hash(words_line, j, buckets))
            # ----------------------------------
            # Test rows keep their string id in the label slot.
            if is_key:
                if is_test == False:
                    contents.append((words_line, int(label), seq_len, bigram, trigram, keywords_count))
                else:
                    contents.append((words_line, str(label), seq_len, bigram, trigram, keywords_count))
            else:
                if is_test == False:
                    contents.append((words_line, int(label), seq_len, bigram, trigram))
                else:
                    contents.append((words_line, str(label), seq_len, bigram, trigram))

            i += 1
            
        print("max_len: ", max_len, "; upper: ", upper)
        return contents

    test = load_dataset(test_temp, test_data['id'], config.pad_size, True)
    test_iter = build_iterator(test, config)
    
    if is_fold:
        train = load_dataset(train_temp, train_data['label'], config.pad_size)
        return len(vocab), train, test_iter, test_data
    else:
        train = load_dataset(X_train, y_train.values, config.pad_size)
        dev = load_dataset(X_val, y_val.values, config.pad_size)
        return vocab, train, dev, test_iter, test_data


class DatasetIterater(object):
    """Batch iterator over preprocessed samples.

    Each sample is expected to be a 6-tuple
    ``(word_ids, label, seq_len, bigram, trigram, keyword_count)`` as built
    by ``build_dataset`` with ``is_key=True`` — ``_to_tensor`` reads index 5
    unconditionally, so 5-tuples (``is_key=False``) would raise IndexError.
    Yields ``((x, seq_len, bigram, trigram, keyword_count), y)`` batches on
    ``device``; the final short batch is served when the dataset size is not
    a multiple of ``batch_size``.
    """

    def __init__(self, batches, batch_size, device):
        self.batch_size = batch_size
        self.batches = batches
        self.n_batches = len(batches) // batch_size
        # Whether a short leftover batch exists.  Bug fix: the original
        # tested `len(batches) % self.n_batches`, which silently dropped the
        # tail whenever len happened to be a multiple of n_batches (e.g. 12
        # samples with batch_size 5) and raised ZeroDivisionError when
        # len(batches) < batch_size.  The leftover test must be against
        # batch_size.
        self.residue = len(batches) % batch_size != 0
        self.index = 0
        self.device = device

    def _to_tensor(self, datas):
        """Stack a list of sample tuples into batched tensors on self.device."""
        x = torch.LongTensor([_[0] for _ in datas]).to(self.device)
        # Test samples carry string ids instead of int labels; signal the
        # missing label with y = -1 (callers must not compute loss on it).
        if isinstance(datas[0][1], str):
            y = -1
        else:
            y = torch.LongTensor([_[1] for _ in datas]).to(self.device)
        bigram = torch.LongTensor([_[3] for _ in datas]).to(self.device)
        trigram = torch.LongTensor([_[4] for _ in datas]).to(self.device)
        # Pre-padding sequence length (already capped at pad_size upstream).
        seq_len = torch.LongTensor([_[2] for _ in datas]).to(self.device)
        # NOTE: LongTensor truncates the float keyword weights to integers.
        keyword_count = torch.LongTensor([_[5] for _ in datas]).to(self.device)
        return (x, seq_len, bigram, trigram, keyword_count), y

    def __next__(self):
        if self.residue and self.index == self.n_batches:
            # Final short batch with the leftover samples.
            batches = self.batches[self.index * self.batch_size: len(self.batches)]
            self.index += 1
            return self._to_tensor(batches)
        elif self.index >= self.n_batches:
            self.index = 0  # reset so the iterator can be re-used next epoch
            raise StopIteration
        else:
            batches = self.batches[self.index * self.batch_size: (self.index + 1) * self.batch_size]
            self.index += 1
            return self._to_tensor(batches)

    def __iter__(self):
        return self

    def __len__(self):
        # Count the leftover batch when present.
        return self.n_batches + 1 if self.residue else self.n_batches


def build_iterator(dataset, config):
    """Wrap *dataset* in a DatasetIterater using config.batch_size and
    config.device.

    Fix: the original bound the result to a local named ``iter``, shadowing
    the builtin; return the iterator directly instead.
    """
    return DatasetIterater(dataset, config.batch_size, config.device)


def get_time_dif(start_time):
    """Return the wall-clock time elapsed since *start_time* as a
    ``timedelta`` rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))