# dataset.py
import ijson
import torch
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
import json


class Vocabulary:
    """Word-level vocabulary for one language (source or target).

    Reserved token ids: <pad>=0 (batch padding), <sos>=1 (sentence start),
    <eos>=2 (sentence end), <unk>=3 (out-of-vocabulary word).
    """

    # Special tokens, in id order; shared by every instance.
    SPECIALS = ("<pad>", "<sos>", "<eos>", "<unk>")

    def __init__(self, min_freq=1):
        # word -> id and id -> word tables, seeded with the specials.
        self.word2idx = {tok: i for i, tok in enumerate(self.SPECIALS)}
        self.idx2word = {i: tok for i, tok in enumerate(self.SPECIALS)}
        # Raw frequency counts accumulated by add_sentence().
        self.word_count = {}
        self.min_freq = min_freq  # frequency threshold applied by build()

    def add_sentence(self, sentence):
        """Lowercase, whitespace-tokenize *sentence* and register its words.

        NOTE: every new word is added immediately regardless of min_freq;
        only build() enforces the frequency threshold.
        """
        for word in sentence.lower().split():
            if word not in self.word2idx:
                idx = len(self.word2idx)
                self.word2idx[word] = idx
                self.idx2word[idx] = word
            self.word_count[word] = self.word_count.get(word, 0) + 1

    def build(self, sentences):
        """Rebuild the vocabulary from an iterable of sentences.

        Keeps only words occurring at least min_freq times.  Sentences are
        lowercased here as well — previously build() skipped lowercasing
        while add_sentence() applied it, so the two paths produced
        incompatible vocabularies on mixed-case text.
        """
        word_count = {}
        for sentence in sentences:
            for word in sentence.lower().split():
                word_count[word] = word_count.get(word, 0) + 1

        # Reset the tables to just the special tokens before refilling.
        self.word2idx = {tok: i for i, tok in enumerate(self.SPECIALS)}
        self.idx2word = {i: tok for i, tok in enumerate(self.SPECIALS)}

        for word, count in word_count.items():
            if count >= self.min_freq:
                idx = len(self.word2idx)
                self.word2idx[word] = idx
                self.idx2word[idx] = word


class StreamingTranslationDataset(Dataset):
    """Streams samples from a JSON array file to keep memory usage flat.

    Each sample is a dict with an "english" key (source language) and a
    "chinese" key (target language).  Sentences are truncated to max_len
    tokens, wrapped in <sos>/<eos>, and right-padded with <pad> so every
    returned tensor has fixed length max_len + 2.
    """

    def __init__(self, file_path, src_vocab, tgt_vocab, max_len=100):
        """
        Args:
            file_path: path to a JSON file whose top level is an array.
            src_vocab: Vocabulary-like object exposing word2idx (source).
            tgt_vocab: Vocabulary-like object exposing word2idx (target).
            max_len: maximum number of content tokens kept per sentence.
        """
        self.file_path = file_path
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.max_len = max_len
        self.data_size = self._count_samples()  # total sample count

    def _count_samples(self):
        """Count the items in the JSON array with one streaming pass."""
        count = 0
        with open(self.file_path, "rb") as f:
            for _ in ijson.items(f, 'item'):
                count += 1
        return count

    def __len__(self):
        return self.data_size

    # Every call re-opens the file and scans forward to the target item,
    # so random access costs O(n) — the price of constant memory usage.
    def __getitem__(self, idx):
        """Stream to sample *idx* and return it processed.

        Raises:
            IndexError: when idx is out of range.  The previous version
                returned None, which violates the Dataset contract and
                makes DataLoader collation fail with an opaque error.
        """
        if idx < 0 or idx >= self.data_size:
            raise IndexError(
                f"index {idx} out of range for dataset of size {self.data_size}")
        with open(self.file_path, "rb") as f:
            for i, item in enumerate(ijson.items(f, 'item')):
                if i == idx:
                    return self._process_item(item)
        # Only reachable if the file shrank after _count_samples() ran.
        raise IndexError(f"index {idx} not found in {self.file_path}")

    def _process_item(self, data_dict):
        """Convert one raw sample dict into fixed-length id tensors.

        Malformed records map to all-<pad> tensors so one bad entry does
        not abort a whole training epoch.
        """
        if not isinstance(data_dict, dict) or "english" not in data_dict or "chinese" not in data_dict:
            return {"src": torch.LongTensor([0] * (self.max_len + 2)),
                    "tgt": torch.LongTensor([0] * (self.max_len + 2))}

        # 'english' is the source (src), 'chinese' the target (tgt);
        # out-of-vocabulary tokens fall back to <unk> (id 3).
        src_sent = [self.src_vocab.word2idx.get(word, 3) for word in data_dict["english"].split()]
        if not src_sent:  # empty after tokenization
            src_sent = [3]  # represent it with a single <unk>
        tgt_sent = [self.tgt_vocab.word2idx.get(word, 3) for word in data_dict["chinese"].split()]

        # Truncate to max_len, then wrap: 1 = <sos>, 2 = <eos>.
        src_sent = [1] + src_sent[:self.max_len] + [2]
        tgt_sent = [1] + tgt_sent[:self.max_len] + [2]

        # Right-pad with <pad> (0) up to the fixed length max_len + 2.
        src_sent += [0] * (self.max_len + 2 - len(src_sent))
        tgt_sent += [0] * (self.max_len + 2 - len(tgt_sent))

        return {
            'src': torch.LongTensor(src_sent),
            'tgt': torch.LongTensor(tgt_sent)
        }

    @staticmethod
    def collate_fn(batch):
        """Stack per-sample tensors into (batch, seq_len) padded batches."""
        src_batch = [item['src'] for item in batch]
        tgt_batch = [item['tgt'] for item in batch]

        src_padded = pad_sequence(src_batch, padding_value=0, batch_first=True)
        tgt_padded = pad_sequence(tgt_batch, padding_value=0, batch_first=True)

        return {
            'src': src_padded,
            'tgt': tgt_padded
        }


# Dataset variant that loads the whole file into memory at once
class InMemoryTranslationDataset(Dataset):
    """Loads and preprocesses the entire JSON dataset up front.

    Trades memory for speed compared to StreamingTranslationDataset:
    __getitem__ is O(1) because every sample is tokenized, id-mapped,
    truncated to max_len, wrapped in <sos>/<eos> and padded to a fixed
    length of max_len + 2 during construction.
    """

    def __init__(self, file_path, src_vocab, tgt_vocab, max_len=100):
        """
        Args:
            file_path: path to a JSON file whose top level is an array.
            src_vocab: Vocabulary-like object exposing word2idx (source).
            tgt_vocab: Vocabulary-like object exposing word2idx (target).
            max_len: maximum number of content tokens kept per sentence.
        """
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.max_len = max_len

        # Read and process everything once at construction time.
        with open(file_path, 'r', encoding='utf-8') as f:
            self.data = json.load(f)

        # Skip malformed records; the isinstance check guards against
        # non-dict array elements, which would otherwise raise TypeError.
        self.samples = [
            self._process_item(item)
            for item in self.data
            if isinstance(item, dict) and "english" in item and "chinese" in item
        ]

    def __len__(self):
        return len(self.samples)

    # O(1): samples were fully processed in __init__.
    def __getitem__(self, idx):
        return self.samples[idx]

    def _process_item(self, data_dict):
        """Convert one raw sample dict into fixed-length id tensors."""
        # Out-of-vocabulary tokens fall back to <unk> (id 3).
        src_sent = [self.src_vocab.word2idx.get(word, 3) for word in data_dict["english"].split()]
        if not src_sent:  # empty source — parity with StreamingTranslationDataset
            src_sent = [3]  # represent it with a single <unk>
        tgt_sent = [self.tgt_vocab.word2idx.get(word, 3) for word in data_dict["chinese"].split()]

        # Truncate to max_len, then wrap: 1 = <sos>, 2 = <eos>.
        src_sent = [1] + src_sent[:self.max_len] + [2]
        tgt_sent = [1] + tgt_sent[:self.max_len] + [2]

        # Right-pad with <pad> (0) up to the fixed length max_len + 2.
        src_sent += [0] * (self.max_len + 2 - len(src_sent))
        tgt_sent += [0] * (self.max_len + 2 - len(tgt_sent))

        return {
            'src': torch.LongTensor(src_sent),
            'tgt': torch.LongTensor(tgt_sent)
        }

    @staticmethod
    def collate_fn(batch):
        """Stack per-sample tensors into (batch, seq_len) padded batches."""
        src_batch = [item['src'] for item in batch]
        tgt_batch = [item['tgt'] for item in batch]

        src_padded = pad_sequence(src_batch, padding_value=0, batch_first=True)
        tgt_padded = pad_sequence(tgt_batch, padding_value=0, batch_first=True)

        return {'src': src_padded, 'tgt': tgt_padded}
