import os
import pickle
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import numpy as np
from sklearn.model_selection import train_test_split


def load_vocab(vocab_path='vocab.pkl'):
    """Load the word-to-index vocabulary mapping from a pickle file.

    Args:
        vocab_path: Path to the pickled vocabulary (dict: word -> index).

    Returns:
        dict: The word-to-index mapping.

    Raises:
        FileNotFoundError: If ``vocab_path`` does not exist.
    """
    if not os.path.exists(vocab_path):
        raise FileNotFoundError(f"词汇表文件 {vocab_path} 不存在！")
    with open(vocab_path, 'rb') as handle:
        vocabulary = pickle.load(handle)
    print(f"词汇表已加载，包含 {len(vocabulary)} 个词。")
    return vocabulary


def load_pretrained_embeddings(embedding_file, word_to_idx, unk_token="<UNK>"):
    """
    加载预训练的词向量，并将未找到的词汇映射到 <UNK>。
    """
    embedding_data = np.load(embedding_file)
    word_vectors = embedding_data['embeddings']

    vocab_size = len(word_to_idx)
    embed_size = word_vectors.shape[1]
    print(f"词嵌入矩阵已加载，包含 {word_vectors.shape[0]} 个词。")
    embedding_matrix = np.zeros((vocab_size, embed_size))

    unk_idx = word_to_idx.get(unk_token, 1)  # 默认UNK的索引为1
    for word, idx in word_to_idx.items():
        if word in word_vectors:
            embedding_matrix[idx] = word_vectors[word]
        else:
            embedding_matrix[idx] = embedding_matrix[unk_idx]
    return torch.tensor(embedding_matrix, dtype=torch.float)


def text_to_sequence(text, word_to_idx, fixed_length=None, padding_idx=0):
    """Convert a text into a list of vocabulary indices.

    Iterates over ``text`` item by item — for a plain string this is
    character-by-character (suitable for Chinese text); pass a token list
    for word-level models.

    Args:
        text: Iterable of tokens (a str is treated as characters).
        word_to_idx: Mapping token -> index. Unknown tokens map to the
            "<UNK>" entry, or to index 1 if "<UNK>" is absent.
        fixed_length: If not None, truncate/pad to exactly this length.
        padding_idx: Index used to pad short sequences (default 0).

    Returns:
        list[int]: The index sequence.
    """
    # Hoist the UNK lookup out of the loop — it is invariant.
    unk_idx = word_to_idx.get("<UNK>", 1)
    sequence = [word_to_idx.get(token, unk_idx) for token in text]
    # BUG FIX: `is not None` instead of truthiness, so fixed_length=0
    # correctly yields an empty sequence instead of being ignored.
    if fixed_length is not None:
        sequence = sequence[:fixed_length] + [padding_idx] * max(0, fixed_length - len(sequence))
    return sequence


class TextDataset(Dataset):
    """Torch Dataset wrapping pre-tokenized index sequences and labels."""

    def __init__(self, sequences, labels):
        # Convert each variable-length sequence to a LongTensor up front;
        # padding to a common length is deferred to the collate function.
        self.sequences = [torch.as_tensor(seq, dtype=torch.long) for seq in sequences]
        self.labels = torch.as_tensor(labels, dtype=torch.long)

    def __len__(self):
        return self.labels.size(0)

    def __getitem__(self, idx):
        return self.sequences[idx], self.labels[idx]


def collate_fn(batch):
    """Collate (sequence, label) pairs into a padded batch.

    Returns:
        A (B, T_max) LongTensor of sequences right-padded with 0, and a
        (B,) LongTensor of labels.
    """
    seqs = [item[0] for item in batch]
    labs = [item[1] for item in batch]
    padded = pad_sequence(seqs, batch_first=True, padding_value=0)
    return padded, torch.tensor(labs, dtype=torch.long)


def load_data(file_path, embedding_file, vocab_path='vocab.pkl', test_size=0.2, fixed_length=None):
    """Load labelled texts, split into train/test sets, and load pretrained
    word vectors.

    Each non-empty line of ``file_path`` is expected to be "<text> <label>",
    where the label is the integer after the last space. Malformed lines are
    skipped with a warning.

    Args:
        file_path: UTF-8 text file, one "<text> <label>" sample per line.
        embedding_file: ``.npz`` file passed to load_pretrained_embeddings.
        vocab_path: Path of the pickled vocabulary (see load_vocab).
        test_size: Fraction of samples held out for the test split.
        fixed_length: Optional fixed sequence length (see text_to_sequence).

    Returns:
        (train_dataset, test_dataset, word_to_idx, embedding_matrix)
    """
    texts, labels = [], []
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            # BUG FIX: parse and validate the label BEFORE appending the
            # text. The original appended the text first, so a line with a
            # non-integer label raised ValueError after the append, leaving
            # texts and labels misaligned for every subsequent sample.
            try:
                text, label = line.rsplit(' ', 1)
                label = int(label)
            except ValueError:
                print(f"跳过无效行: {line}")
                continue
            texts.append(text)
            labels.append(label)

    word_to_idx = load_vocab(vocab_path)  # load the existing vocabulary
    sequences = [text_to_sequence(text, word_to_idx, fixed_length=fixed_length) for text in texts]

    embedding_matrix = load_pretrained_embeddings(embedding_file, word_to_idx)

    # Fixed random_state keeps the train/test split reproducible across runs.
    X_train, X_test, y_train, y_test = train_test_split(sequences, labels, test_size=test_size, random_state=42)

    train_dataset = TextDataset(X_train, y_train)
    test_dataset = TextDataset(X_test, y_test)
    return train_dataset, test_dataset, word_to_idx, embedding_matrix
