# -*- coding: utf-8 -*-
"""
数据处理模块 - 处理文本数据加载、预处理和词汇表构建
包含数据集类、词汇表构建器、文本预处理等功能
"""

import os
import re
import json
import pickle
from typing import List, Dict, Tuple, Optional, Union
from collections import Counter, defaultdict

import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
import jieba
from tqdm import tqdm

from config import config


class Tokenizer:
    """Tokenizer - segments text with jieba and maps tokens to/from integer ids.

    The special tokens (pad/unk/bos/eos/mask) are pinned to ids 0-4 in the
    order they are declared below, so downstream code may rely on pad == 0.
    """

    def __init__(self, vocab_path: Optional[str] = None):
        """Create a tokenizer, optionally loading an existing vocabulary.

        Args:
            vocab_path: path to a JSON vocabulary written by ``save_vocab``;
                loaded automatically when the file exists.
        """
        self.vocab_path = vocab_path
        self.word2idx: Dict[str, int] = {}
        self.idx2word: Dict[int, str] = {}
        self.vocab_size = 0

        # Special tokens occupy the lowest ids, in this fixed order.
        self.special_tokens = {
            config.data.pad_token: 0,
            config.data.unk_token: 1,
            config.data.bos_token: 2,
            config.data.eos_token: 3,
            config.data.mask_token: 4
        }

        if vocab_path and os.path.exists(vocab_path):
            self.load_vocab(vocab_path)

    def tokenize(self, text: str) -> List[str]:
        """Clean *text*, segment it with jieba, and drop whitespace-only tokens."""
        text = self._clean_text(text)
        tokens = list(jieba.cut(text))
        # jieba can emit whitespace tokens; strip and discard them.
        return [token.strip() for token in tokens if token.strip()]

    def _clean_text(self, text: str) -> str:
        """Normalize raw text according to the config.data flags.

        Applies optional lowercasing, collapses whitespace runs to single
        spaces, and optionally strips punctuation.
        """
        if config.data.lowercase:
            text = text.lower()

        # Collapse any run of whitespace into a single space.
        text = re.sub(r'\s+', ' ', text)

        if config.data.remove_punctuation:
            text = re.sub(r'[^\w\s]', '', text)

        return text.strip()

    def build_vocab(self, texts: List[str], min_freq: int = 2) -> None:
        """Build the vocabulary from raw texts.

        Tokens with frequency below *min_freq* are dropped, and the table is
        capped at config.model.vocab_size entries (special tokens included).

        Args:
            texts: raw text lines to tokenize and count.
            min_freq: minimum occurrence count for a token to be kept.
        """
        print("构建词汇表...")

        # Count token frequencies over the whole corpus.
        word_counter = Counter()
        for text in tqdm(texts, desc="统计词频"):
            word_counter.update(self.tokenize(text))

        # Seed the tables with the special tokens at their fixed ids.
        self.word2idx = self.special_tokens.copy()
        self.idx2word = {idx: word for word, idx in self.special_tokens.items()}

        # Add remaining tokens in descending frequency order.
        idx = len(self.special_tokens)
        for word, freq in word_counter.most_common():
            if freq >= min_freq and word not in self.word2idx:
                self.word2idx[word] = idx
                self.idx2word[idx] = word
                idx += 1

                # Enforce the configured vocabulary-size cap.
                if idx >= config.model.vocab_size:
                    break

        self.vocab_size = len(self.word2idx)
        print(f"词汇表构建完成，词汇量: {self.vocab_size}")

    def encode(self, text: str, add_special_tokens: bool = True) -> List[int]:
        """Encode *text* into a list of token ids.

        Unknown tokens map to the <unk> id; when *add_special_tokens* is True
        the sequence is wrapped as <bos> ... <eos>.
        """
        tokens = self.tokenize(text)

        if add_special_tokens:
            tokens = [config.data.bos_token] + tokens + [config.data.eos_token]

        # dict.get with the <unk> fallback avoids the membership-test-then-index
        # double lookup of the previous implementation.
        unk_id = self.word2idx[config.data.unk_token]
        return [self.word2idx.get(token, unk_id) for token in tokens]

    def decode(self, indices: List[int], skip_special_tokens: bool = True) -> str:
        """Decode a list of ids back into text (tokens joined without separator).

        Ids absent from the vocabulary are silently skipped; special tokens are
        dropped unless *skip_special_tokens* is False.
        """
        tokens = []
        for idx in indices:
            token = self.idx2word.get(idx)
            if token is None:
                continue  # unknown id: skip silently (previous behavior)
            if skip_special_tokens and token in self.special_tokens:
                continue
            tokens.append(token)

        return ''.join(tokens)

    def save_vocab(self, path: str) -> None:
        """Serialize the vocabulary (both tables, size, specials) to JSON."""
        vocab_data = {
            'word2idx': self.word2idx,
            'idx2word': self.idx2word,
            'vocab_size': self.vocab_size,
            'special_tokens': self.special_tokens
        }

        with open(path, 'w', encoding='utf-8') as f:
            json.dump(vocab_data, f, ensure_ascii=False, indent=2)

        print(f"词汇表已保存到: {path}")

    def load_vocab(self, path: str) -> None:
        """Load a vocabulary previously written by ``save_vocab``.

        JSON serializes integer keys as strings, so idx2word keys are
        converted back to int here.
        """
        with open(path, 'r', encoding='utf-8') as f:
            vocab_data = json.load(f)

        self.word2idx = vocab_data['word2idx']
        self.idx2word = {int(k): v for k, v in vocab_data['idx2word'].items()}
        self.vocab_size = vocab_data['vocab_size']
        self.special_tokens = vocab_data['special_tokens']

        print(f"词汇表已加载，词汇量: {self.vocab_size}")


class TextDataset(Dataset):
    """Language-model dataset: one text line per example.

    Each item is a next-token-prediction pair: input_ids are the encoded
    tokens shifted off the last position, labels the same tokens shifted off
    the first, plus an all-ones attention mask.
    """

    def __init__(self, data_path: str, tokenizer: "Tokenizer", max_length: Optional[int] = None):
        """Load all lines from *data_path*.

        Args:
            data_path: path to a UTF-8 text file, one example per line.
            tokenizer: tokenizer used to encode each example in __getitem__.
            max_length: truncation length; defaults to config.data.max_length.
        """
        self.data_path = data_path
        self.tokenizer = tokenizer
        self.max_length = max_length or config.data.max_length

        self.texts = self._load_data()
        print(f"加载数据完成，共 {len(self.texts)} 条")

    def _load_data(self) -> List[str]:
        """Read non-empty lines of at least config.data.min_length characters.

        Returns an empty list (with a warning) when the file is missing.
        """
        texts = []

        if not os.path.exists(self.data_path):
            print(f"警告: 数据文件不存在 {self.data_path}")
            return texts

        with open(self.data_path, 'r', encoding='utf-8') as f:
            for line in tqdm(f, desc="加载数据"):
                line = line.strip()
                if line and len(line) >= config.data.min_length:
                    texts.append(line)

        return texts

    def __len__(self) -> int:
        return len(self.texts)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """Encode, truncate, and shift one example for LM training."""
        text = self.texts[idx]

        # Encode with <bos>/<eos> and truncate to max_length.
        input_ids = self.tokenizer.encode(text, add_special_tokens=True)[:self.max_length]

        # Standard next-token shift: predict token t+1 from tokens <= t.
        # (The previous code appended an extra <eos> to labels and then trimmed
        # it back off in a length-reconciliation step; this plain shift yields
        # the identical result without the dead append.)
        labels = input_ids[1:]
        input_ids = input_ids[:-1]

        return {
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'labels': torch.tensor(labels, dtype=torch.long),
            'attention_mask': torch.ones(len(input_ids), dtype=torch.long)
        }


def collate_fn(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    """Collate variable-length examples into one right-padded batch.

    Args:
        batch: list of dicts produced by TextDataset.__getitem__, each with
            1-D long tensors 'input_ids', 'labels', 'attention_mask'.

    Returns:
        Dict of 2-D tensors (batch, max_len), padded per field.
    """
    input_ids = [item['input_ids'] for item in batch]
    labels = [item['labels'] for item in batch]
    attention_masks = [item['attention_mask'] for item in batch]

    # <pad> is pinned to id 0 by Tokenizer.special_tokens, so inputs pad with 0.
    # (The previous isinstance(config.data.pad_token, int) check was dead code:
    # pad_token is used as a string token throughout this module, so the
    # fallback 0 was always taken.)
    input_ids = pad_sequence(input_ids, batch_first=True, padding_value=0)
    # -100 matches CrossEntropyLoss's default ignore_index, so padded label
    # positions contribute nothing to the loss.
    labels = pad_sequence(labels, batch_first=True, padding_value=-100)
    attention_masks = pad_sequence(attention_masks, batch_first=True, padding_value=0)

    return {
        'input_ids': input_ids,
        'labels': labels,
        'attention_mask': attention_masks
    }


class DataManager:
    """Orchestrates vocabulary building, dataset creation, and DataLoaders."""

    def __init__(self, tokenizer: Optional["Tokenizer"] = None):
        """Use the given tokenizer, or create a fresh one with an empty vocab."""
        self.tokenizer = tokenizer or Tokenizer()
        # One dataset per split; stays None until prepare_data finds the file.
        self.train_dataset = None
        self.val_dataset = None
        self.test_dataset = None

    def prepare_data(self, train_path: str, val_path: Optional[str] = None,
                     test_path: Optional[str] = None) -> None:
        """Build the vocabulary (if empty) and create datasets for each split.

        Missing files are skipped; the corresponding dataset stays None.

        Args:
            train_path: training text file (also used to build the vocab).
            val_path: optional validation text file.
            test_path: optional test text file.
        """
        print("开始准备数据...")

        # Build and persist the vocabulary only when none was loaded.
        if not self.tokenizer.word2idx:
            print("构建词汇表...")
            all_texts = []

            if os.path.exists(train_path):
                with open(train_path, 'r', encoding='utf-8') as f:
                    all_texts.extend(line.strip() for line in f if line.strip())

            self.tokenizer.build_vocab(all_texts)

            # Guard against a bare filename: os.makedirs('') raises, so only
            # create the directory when the path actually has one.
            vocab_save_path = config.data.vocab_path
            vocab_dir = os.path.dirname(vocab_save_path)
            if vocab_dir:
                os.makedirs(vocab_dir, exist_ok=True)
            self.tokenizer.save_vocab(vocab_save_path)

        if os.path.exists(train_path):
            self.train_dataset = TextDataset(train_path, self.tokenizer)

        if val_path and os.path.exists(val_path):
            self.val_dataset = TextDataset(val_path, self.tokenizer)

        if test_path and os.path.exists(test_path):
            self.test_dataset = TextDataset(test_path, self.tokenizer)

        print("数据准备完成!")

    def get_dataloader(self, dataset_type: str = 'train', shuffle: bool = True) -> DataLoader:
        """Return a DataLoader for the 'train', 'val', or 'test' split.

        Raises:
            ValueError: when the split name is unknown or was never loaded.
        """
        # All splits share config.training.batch_size, so a simple name ->
        # dataset lookup replaces the previous triplicated if/elif branches.
        splits = {
            'train': self.train_dataset,
            'val': self.val_dataset,
            'test': self.test_dataset,
        }
        dataset = splits.get(dataset_type)
        if dataset is None:
            raise ValueError(f"数据集类型 '{dataset_type}' 不存在或未加载")

        return DataLoader(
            dataset,
            batch_size=config.training.batch_size,
            shuffle=shuffle,
            collate_fn=collate_fn,
            num_workers=config.data.num_workers,
            pin_memory=config.data.pin_memory
        )

    def create_sample_data(self, output_dir: str = "data") -> None:
        """Write small sample train/val/test text files under *output_dir*.

        Produces 1000 lines total, split 80/10/10, one sentence per line.
        """
        os.makedirs(output_dir, exist_ok=True)

        # Sample Chinese sentences, repeated to get a non-trivial corpus size.
        sample_texts = [
            "人工智能是计算机科学的一个分支，它企图了解智能的实质。",
            "深度学习是机器学习的一个子领域，基于人工神经网络。",
            "自然语言处理是人工智能的一个重要应用领域。",
            "Transformer模型在自然语言处理任务中表现出色。",
            "注意力机制是Transformer模型的核心组件。",
            "预训练语言模型在各种NLP任务中都有很好的表现。",
            "BERT、GPT等模型推动了NLP领域的发展。",
            "大规模语言模型需要大量的计算资源进行训练。",
            "文本生成是自然语言处理的一个重要任务。",
            "机器翻译技术已经在实际应用中得到广泛使用。"
        ] * 100

        # 80% train / 10% val / remainder test.
        train_size = int(len(sample_texts) * 0.8)
        val_size = int(len(sample_texts) * 0.1)

        train_texts = sample_texts[:train_size]
        val_texts = sample_texts[train_size:train_size + val_size]
        test_texts = sample_texts[train_size + val_size:]

        datasets = {
            'train.txt': train_texts,
            'val.txt': val_texts,
            'test.txt': test_texts
        }

        for filename, texts in datasets.items():
            filepath = os.path.join(output_dir, filename)
            with open(filepath, 'w', encoding='utf-8') as f:
                for text in texts:
                    f.write(text + '\n')
            print(f"创建示例数据: {filepath} ({len(texts)} 条)")

if __name__ == "__main__":
    # Smoke-test the data-processing pipeline end to end.
    print("测试数据处理模块...")

    manager = DataManager()

    # Generate the toy corpus files under ./data.
    manager.create_sample_data()

    # Build the vocabulary and wrap each split in a dataset.
    manager.prepare_data(
        train_path="data/train.txt",
        val_path="data/val.txt",
        test_path="data/test.txt",
    )

    # Spin up the training loader and report its size.
    loader = manager.get_dataloader('train')
    print(f"训练数据加载器创建成功，批次数: {len(loader)}")

    # Inspect the shapes of a single batch, then stop.
    for sample_batch in loader:
        print(f"输入形状: {sample_batch['input_ids'].shape}")
        print(f"标签形状: {sample_batch['labels'].shape}")
        print(f"注意力掩码形状: {sample_batch['attention_mask'].shape}")
        break

    print("数据处理模块测试完成!")