# utils/data_loader.py
import torch
from torch.utils.data import Dataset, DataLoader
import os
from config import Config
from tqdm import tqdm
from utils.vocab import Vocab

class TranslationDataset(Dataset):
    """Sentence-pair dataset that yields padded, fixed-length ID tensors.

    Two loading modes:
      * parallel mode (``is_test=False``): ``src_file`` and ``tgt_file`` are
        line-aligned text files, one sentence per line;
      * reference mode (``is_test=True``): ``src_file`` is a single reference
        file organised in groups of three lines — source sentence, a middle
        line that is skipped, then the reference translation — and
        ``tgt_file`` is ignored.

    Each item is a dict with the <sos>/<eos>-wrapped, <pad>-padded
    ``src``/``tgt`` ID tensors of length ``max_seq_length``, boolean padding
    masks (True marks a padding position), and the unpadded lengths.
    """

    def __init__(self, src_file, tgt_file=None, src_vocab=None, tgt_vocab=None,
                 max_seq_length=None, is_test=False, is_chinese_char_split=True):
        self.src_vocab = src_vocab
        self.tgt_vocab = tgt_vocab
        self.max_seq_length = max_seq_length
        self.is_test = is_test
        self.is_chinese_char_split = is_chinese_char_split

        # In test mode tgt_file may be None: the reference file holds both
        # the source and the target sentences.
        if is_test:
            self.src_sentences, self.tgt_sentences = self.load_test_data(src_file)
        else:
            self.src_sentences, self.tgt_sentences = self.load_data(src_file, tgt_file)

    def load_data(self, src_file, tgt_file):
        """Read two line-aligned files; return (src_sentences, tgt_sentences).

        Raises:
            ValueError: if the two files have a different number of lines.
        """
        # Iterate the file handles directly; no need to materialise
        # readlines() first.
        with open(src_file, 'r', encoding='utf-8') as f:
            src_sentences = [line.strip() for line in f]
        with open(tgt_file, 'r', encoding='utf-8') as f:
            tgt_sentences = [line.strip() for line in f]

        # Explicit raise instead of assert: asserts are stripped under -O.
        if len(src_sentences) != len(tgt_sentences):
            raise ValueError(
                f"Source and target file have different number of sentences: "
                f"{len(src_sentences)} vs {len(tgt_sentences)}")

        print(f"Loaded {len(src_sentences)} sentence pairs")

        return src_sentences, tgt_sentences

    def load_test_data(self, reference_file):
        """Parse a reference file laid out as repeating 3-line groups.

        Group layout: line i is the source sentence, line i+1 is skipped
        (e.g. a blank or intermediate line), line i+2 is the reference
        translation. A trailing incomplete group is silently dropped.
        """
        src_sentences = []
        tgt_sentences = []

        with open(reference_file, 'r', encoding='utf-8') as f:
            lines = f.readlines()
            i = 0
            while i < len(lines):
                if lines[i].strip():  # a non-empty line starts a group
                    src_text = lines[i].strip()
                    if i + 2 < len(lines):  # the reference line must exist
                        ref_text = lines[i + 2].strip()
                        src_sentences.append(src_text)
                        tgt_sentences.append(ref_text)
                    i += 3  # skip source, middle line and reference
                else:
                    i += 1

        print(f"Loaded {len(src_sentences)} test sentence pairs from reference file")
        return src_sentences, tgt_sentences

    def __len__(self):
        return len(self.src_sentences)

    def __getitem__(self, idx):
        """Return one encoded sample as a dict of tensors, masks and lengths."""
        src_sentence = self.src_sentences[idx]
        tgt_sentence = self.tgt_sentences[idx]

        # Hoist the invariant <unk> lookups out of the comprehensions.
        unk_src = self.src_vocab.stoi['<unk>']
        # Source tokenisation: per-character for Chinese, whitespace otherwise.
        if self.is_chinese_char_split:
            src_tokens = [self.src_vocab.stoi.get(token, unk_src)
                          for token in src_sentence]
        else:
            src_tokens = [self.src_vocab.stoi.get(token, unk_src)
                          for token in src_sentence.split()]

        # The target language (English) is always whitespace-tokenised.
        unk_tgt = self.tgt_vocab.stoi['<unk>']
        tgt_tokens = [self.tgt_vocab.stoi.get(token, unk_tgt)
                      for token in tgt_sentence.split()]

        # Truncate, reserving two slots for <sos> and <eos>.
        src_tokens = src_tokens[:self.max_seq_length - 2]
        tgt_tokens = tgt_tokens[:self.max_seq_length - 2]

        # Wrap with <sos>/<eos> markers.
        src_tokens = [self.src_vocab.stoi['<sos>']] + src_tokens + [self.src_vocab.stoi['<eos>']]
        tgt_tokens = [self.tgt_vocab.stoi['<sos>']] + tgt_tokens + [self.tgt_vocab.stoi['<eos>']]

        # Unpadded lengths (including <sos> and <eos>).
        src_len = len(src_tokens)
        tgt_len = len(tgt_tokens)

        # Padding masks:
        # False: real token positions the model should attend to.
        # True: padding positions the model should ignore.
        src_padding_mask = [False] * src_len + [True] * (self.max_seq_length - src_len)
        tgt_padding_mask = [False] * tgt_len + [True] * (self.max_seq_length - tgt_len)

        # Right-pad the sequences to max_seq_length.
        src_tokens += [self.src_vocab.stoi['<pad>']] * (self.max_seq_length - src_len)
        tgt_tokens += [self.tgt_vocab.stoi['<pad>']] * (self.max_seq_length - tgt_len)

        return {
            'src': torch.tensor(src_tokens, dtype=torch.long),
            'tgt': torch.tensor(tgt_tokens, dtype=torch.long),
            'src_mask': torch.tensor(src_padding_mask, dtype=torch.bool),  # True = padding
            'tgt_mask': torch.tensor(tgt_padding_mask, dtype=torch.bool),  # True = padding
            'src_len': src_len,
            'tgt_len': tgt_len,
        }

def preview_dataset(dataset, name="Dataset", num_samples=3):
    """Print the first few samples of *dataset* as raw text and as tensors."""
    print(f"\n{'-'*20} {name} Preview {'-'*20}")
    total = min(num_samples, len(dataset))
    for idx in range(total):
        # Raw (untokenised) sentence pair.
        raw_src = dataset.src_sentences[idx]
        raw_tgt = dataset.tgt_sentences[idx]
        print(f"\nSample {idx+1}:")
        print(f"Source text     : {raw_src}")
        print(f"Target text     : {raw_tgt}")

        # Encoded form of the same sample.
        item = dataset[idx]
        src_ids = item['src']
        tgt_ids = item['tgt']
        n_src = item['src_len']
        n_tgt = item['tgt_len']

        # Map IDs back to tokens, dropping the padded tail.
        src_words = [dataset.src_vocab.itos[t.item()] for t in src_ids[:n_src]]
        tgt_words = [dataset.tgt_vocab.itos[t.item()] for t in tgt_ids[:n_tgt]]

        print(f"Source tokens   : {src_words}")
        print(f"Target tokens   : {tgt_words}")
        print(f"Source tensor   : Shape {src_ids.shape}, {src_ids[:n_src]}")
        print(f"Target tensor   : Shape {tgt_ids.shape}, {tgt_ids[:n_tgt]}")
        print(f"Source length   : {n_src}")
        print(f"Target length   : {n_tgt}")
        src_mask = item['src_mask']
        tgt_mask = item['tgt_mask']
        print(f"Source mask (full) : {src_mask}")  # full mask, padding included
        print(f"Target mask (full) : {tgt_mask}")  # full mask, padding included
        # Counts of real tokens vs padding positions.
        print(f"Source valid/pad : {(~src_mask).sum().item()}/{src_mask.sum().item()}")
        print(f"Target valid/pad : {(~tgt_mask).sum().item()}/{tgt_mask.sum().item()}")
def get_data_loaders(config, pin_memory=False):
    """Build the train/dev/test DataLoaders described by *config*.

    Builds and caches the vocabularies on first use (via preprocess.build_vocab)
    if the cached .pth files are missing, then wraps each data split in a
    TranslationDataset.

    Args:
        config: object exposing data_dir, train_src_file, train_tgt_file,
            dev_file, reference_file, max_seq_length and batch_size.
        pin_memory: forwarded to every DataLoader.

    Returns:
        (train_loader, dev_loader, test_loader)
    """
    # Make sure the cached vocabulary files exist; build them if not.
    src_vocab_path = os.path.join(config.data_dir, 'src_vocab.pth')
    tgt_vocab_path = os.path.join(config.data_dir, 'tgt_vocab.pth')

    if not os.path.exists(src_vocab_path) or not os.path.exists(tgt_vocab_path):
        print("Vocabulary files not found. Building vocabulary...")
        from preprocess import build_vocab
        build_vocab(
            os.path.join(config.data_dir, config.train_src_file),
            src_vocab_path,
            min_freq=1,
            is_chinese=False  # word-level split: the input is already tokenised
        )
        build_vocab(
            os.path.join(config.data_dir, config.train_tgt_file),
            tgt_vocab_path,
            min_freq=1,
            is_chinese=False
        )
        print("Vocabulary files created.")

    # Load the vocabularies. If the plain load fails (e.g. torch versions
    # whose default rejects pickled objects), retry with weights_only=True.
    try:
        src_vocab_dict = torch.load(src_vocab_path)
        tgt_vocab_dict = torch.load(tgt_vocab_path)
    except Exception:  # narrowed from bare `except:` so SystemExit/KeyboardInterrupt propagate
        src_vocab_dict = torch.load(src_vocab_path, weights_only=True)
        tgt_vocab_dict = torch.load(tgt_vocab_path, weights_only=True)

    # Wrap the raw dicts in Vocab objects.
    src_vocab = Vocab(src_vocab_dict['stoi'], src_vocab_dict['itos'])
    tgt_vocab = Vocab(tgt_vocab_dict['stoi'], tgt_vocab_dict['itos'])

    print(f"Source vocabulary size: {len(src_vocab)}")
    print(f"Target vocabulary size: {len(tgt_vocab)}")

    print("Loading training data...")
    train_dataset = TranslationDataset(
        os.path.join(config.data_dir, config.train_src_file),
        os.path.join(config.data_dir, config.train_tgt_file),
        src_vocab, tgt_vocab, config.max_seq_length,
        is_test=False,
        is_chinese_char_split=False
    )
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        pin_memory=pin_memory,
        num_workers=0
    )

    print("\nLoading validation data...")
    # The dev split uses the reference-file layout, hence is_test=True.
    dev_dataset = TranslationDataset(
        os.path.join(config.data_dir, config.dev_file),
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
        max_seq_length=config.max_seq_length,
        is_test=True,
        is_chinese_char_split=False
    )
    dev_loader = DataLoader(
        dev_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        pin_memory=pin_memory,
        num_workers=0
    )

    print("Loading test data...")
    test_dataset = TranslationDataset(
        os.path.join(config.data_dir, config.reference_file),
        src_vocab=src_vocab,
        tgt_vocab=tgt_vocab,
        max_seq_length=config.max_seq_length,
        is_test=True,
        is_chinese_char_split=False
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        pin_memory=pin_memory,
        num_workers=0
    )

    return train_loader, dev_loader, test_loader