import os
import torch
import hashlib
import numpy as np
import torch.nn as nn
import math
from tqdm import tqdm
from torchvision import transforms
from PIL import Image
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import BertTokenizer, BertModel
from torch.utils.data import Dataset, DataLoader


# 1. BERT embedding precomputation (with on-disk caching and length tracking)
class TextEmbedder:
    """Compute token-level BERT embeddings for a list of texts and cache the
    results (plus the per-text non-pad token counts) as .npy files.

    NOTE(review): the cache key hashes only max_length/batch_size, not the
    text content — reusing the same cache_dir with a different corpus will
    silently return stale embeddings. Confirm whether a corpus hash should
    be folded into the key.
    """

    def __init__(self, max_length=64, batch_size=32, cache_dir='text_embeddings'):
        self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        self.model = BertModel.from_pretrained('bert-base-uncased').eval()
        # Hard-coded GPU index 6 (falls back to CPU when CUDA is unavailable).
        self.device = torch.device('cuda:6' if torch.cuda.is_available() else 'cpu')
        self.model = self.model.to(self.device)
        self.max_length = max_length
        self.batch_size = batch_size
        # Hash the tokenization parameters so different settings use
        # separate cache files.
        params = f"maxlen{max_length}_batch{batch_size}"
        self.hash_str = hashlib.md5(params.encode()).hexdigest()
        self.cache_dir = cache_dir
        self.cache_path = os.path.join(self.cache_dir, f'embeddings_{self.hash_str}.npy')
        self.lengths_path = os.path.join(self.cache_dir, f'embeddings_{self.hash_str}_lengths.npy')
        os.makedirs(cache_dir, exist_ok=True)

    def precompute_embeddings(self, text_list):
        """Return (embeddings, lengths) for every text in text_list.

        Returns:
            embeddings: float16 ndarray of shape (N, max_length, hidden_dim).
            lengths:    int32 ndarray of shape (N,) — non-pad token count per text.

        Both arrays are loaded from cache when the cache files already exist.
        """
        if os.path.exists(self.cache_path) and os.path.exists(self.lengths_path):
            print('加载缓存...')
            embeddings = np.load(self.cache_path, allow_pickle=True)
            lengths = np.load(self.lengths_path, allow_pickle=True)
            return embeddings, lengths

        print('开始计算嵌入...')
        total_samples = len(text_list)
        hidden_dim = self.model.config.hidden_size  # BERT hidden-state width

        # Pre-allocate the output buffers (float16 keeps the cache file small).
        embeddings = np.zeros(
            (total_samples, self.max_length, hidden_dim),
            dtype=np.float16
        )
        lengths = np.zeros(total_samples, dtype=np.int32)

        with torch.no_grad():
            for batch_idx in tqdm(range(0, total_samples, self.batch_size)):
                end_idx = min(batch_idx + self.batch_size, total_samples)
                batch_texts = text_list[batch_idx:end_idx]

                inputs = self.tokenizer(
                    batch_texts,
                    return_tensors='pt',
                    padding='max_length',
                    truncation=True,
                    max_length=self.max_length
                ).to(self.device)

                outputs = self.model(**inputs)
                batch_embeddings = outputs.last_hidden_state

                # Real (unpadded) token count = sum over the attention mask.
                lengths[batch_idx:end_idx] = (
                    inputs['attention_mask'].sum(dim=1).cpu().numpy()
                )
                # Store directly into the pre-allocated slice; the slice
                # bounds already equal len(batch_texts), so no separate
                # actual_batch_size bookkeeping is needed.
                embeddings[batch_idx:end_idx] = (
                    batch_embeddings.to(torch.float16).cpu().numpy()
                )

                # Release GPU memory between batches.
                del inputs, outputs, batch_embeddings
                torch.cuda.empty_cache()

        print("最终嵌入大小:", embeddings.shape)
        np.save(self.cache_path, embeddings)
        np.save(self.lengths_path, lengths)
        return embeddings, lengths

    def load_lengths(self):
        """Load the cached per-text token lengths."""
        # Use the canonical path built in __init__ instead of re-deriving
        # it from cache_path with string replacement (same file either way,
        # but a single source of truth).
        return np.load(self.lengths_path, allow_pickle=True)
    
    
# 2. Dataset class (with explicit error handling for missing images)
class FlickrDataset(Dataset):
    """Caption-level dataset: each row of `df` is one (image, caption) pair.

    `df` must carry 'image', 'embedding' and 'cap_len' columns, as produced
    by DataPreprocessor.get_datasets.
    """

    def __init__(self, df, image_dir, transform=None):
        self.df = df.reset_index(drop=True)
        self.image_dir = image_dir
        self.transform = transform

        # Map each unique image file name to a stable integer id.
        self.unique_images = df['image'].unique()
        self.image_to_id = {img: idx for idx, img in enumerate(self.unique_images)}

        # Pre-build full paths so __getitem__ does no path joining.
        self.image_paths = {img: os.path.join(image_dir, img) for img in self.unique_images}

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        """Return (image_tensor, text_embedding, caption_length, image_name)."""
        row = self.df.iloc[idx]

        try:
            image = Image.open(self.image_paths[row['image']]).convert('RGB')
        except FileNotFoundError as err:
            # Chain the original exception so the root cause stays visible.
            raise ValueError(f"图像文件 {self.image_paths[row['image']]} 未找到！") from err

        if self.transform:
            image = self.transform(image)

        # Precomputed BERT embedding and true (unpadded) caption length.
        embedding = torch.tensor(row['embedding'], dtype=torch.float32)
        cap_len = torch.tensor(row['cap_len'], dtype=torch.long)

        return image, embedding, cap_len, row['image']

# 3. Dataset preparation pipeline (image validation + cached embeddings)
class DataPreprocessor:
    """Load a caption CSV, drop rows whose image file is missing, precompute
    BERT embeddings/token ids, and split into train/val/test DataFrames.
    """

    def __init__(self, image_dir, caption_file, sep=',', cache_dir='text_embeddings'):
        self.image_dir = image_dir
        self.caption_file = caption_file

        # Read the caption file; fall back to a regex separator when plain
        # parsing fails (captions that themselves contain commas).
        try:
            self.df = pd.read_csv(
                caption_file,
                sep=sep,
                header=None,
                names=['image', 'caption_id', 'caption'],
                dtype={'image': str, 'caption': str},
                engine='python',
                on_bad_lines='warn',
                encoding='utf-8'
            )
        except pd.errors.ParserError:
            # BUG FIX: read_csv has no `regex` keyword (passing it raised
            # TypeError, defeating the fallback). A multi-character sep is
            # already treated as a regex by the python engine.
            self.df = pd.read_csv(
                caption_file,
                sep=r',(?=\d+,)',  # split only on commas followed by "<id>,"
                header=None,
                names=['image', 'caption_id', 'caption'],
                engine='python'
            )

        # Normalize file names (strip whitespace, drop anything after a comma).
        self.df['image'] = self.df['image'].str.strip().str.split(',').str[0]

        # Normalize caption text.
        self.df['caption'] = self.df['caption'].fillna('').astype(str).str.lower().str.strip()

        # Drop rows whose image file cannot be found on disk.
        self._filter_invalid_images()

        # Precompute text embeddings and token ids (row-aligned with self.df).
        self.embedder = TextEmbedder(cache_dir=cache_dir)
        self._precompute_embeddings()
        self._generate_token_ids()

    def _filter_invalid_images(self):
        """Keep only rows whose image exists on disk (trying several extensions)."""
        valid_images = []
        print("\n正在验证图像路径...")

        for img in tqdm(self.df['image'].unique(), desc="检查图像"):
            base_name = os.path.splitext(img)[0]
            for ext in ['.jpg', '.jpeg', '.png', '.JPG', '.JPEG']:
                test_path = os.path.join(self.image_dir, base_name + ext)
                if os.path.isfile(test_path):
                    valid_images.append(img)
                    break
            else:
                # No extension matched; test_path holds the last attempt.
                print(f"未找到图像：{img}，尝试路径：{test_path}")

        self.df = self.df[self.df['image'].isin(valid_images)]

    def _precompute_embeddings(self):
        """Compute (or load cached) caption embeddings, positionally aligned
        with self.df, and persist them plus the processed DataFrame."""
        print("预计算文本嵌入...")
        text_list = self.df['caption'].tolist()
        embeddings, lengths = self.embedder.precompute_embeddings(text_list)
        self.df['cap_len'] = lengths

        # Save the embeddings as a standalone .npy file referenced by name.
        embedding_file = 'embeddings.npy'
        np.save(embedding_file, embeddings)
        self.df['embedding_file'] = embedding_file

        self.df.to_parquet('processed_data.parquet')
        print("嵌入已保存到", embedding_file)

    def _generate_token_ids(self):
        """Tokenize every caption with the BERT tokenizer and store the ids."""
        print("生成 token_ids...")
        token_ids = []
        for caption in tqdm(self.df['caption'], desc="生成 token_ids"):
            inputs = self.embedder.tokenizer(
                caption,
                return_tensors='pt',
                padding='max_length',
                truncation=True,
                max_length=self.embedder.max_length
            )
            token_ids.append(inputs['input_ids'].squeeze().numpy())
        self.df['token_ids'] = token_ids
        self.df.to_parquet('processed_data.parquet')
        print("token_ids 已保存。")

    def get_datasets(self, test_size=0.2, val_size=0.1):
        """Split by unique image into train/val/test DataFrames, attaching the
        row-aligned embedding and caption length to each caption row.

        Returns:
            (train_df, val_df, test_df) — each with fresh 0..n-1 indices and
            'embedding' / 'cap_len' columns.
        """
        embedding_file = 'embeddings.npy'
        embeddings = np.load(embedding_file, allow_pickle=True)
        lengths = np.load(self.embedder.lengths_path)

        # Split on unique images so all captions of one image share a split.
        unique_images = self.df['image'].unique()
        train_val, test_images = train_test_split(unique_images, test_size=test_size, random_state=42)
        train_images, val_images = train_test_split(train_val, test_size=val_size / (1 - test_size), random_state=42)

        def _subset(images):
            # BUG FIX: embeddings/lengths are aligned with the *positional*
            # order of self.df. The old code indexed them with the
            # post-reset_index labels (always 0..n-1), so every split was
            # handed the first n rows of the embedding matrix instead of
            # its own rows. Use the original row positions instead.
            mask = self.df['image'].isin(images).to_numpy()
            positions = np.flatnonzero(mask)
            sub = self.df.iloc[positions].reset_index(drop=True)
            sub['embedding'] = list(embeddings[positions])
            sub['cap_len'] = lengths[positions]
            return sub

        return _subset(train_images), _subset(val_images), _subset(test_images)


def collate_fn(batch):
    """Collate (image, embedding, cap_len, image_name) samples into a batch.

    Returns stacked image/embedding/cap_len tensors plus the list of
    image file names.
    """
    imgs, embs, lens, names = zip(*batch)
    return torch.stack(imgs), torch.stack(embs), torch.stack(lens), list(names)


# 4. Data-loading pipeline
def get_data_loaders(batch_size=32, num_workers=4,
                     image_dir="/home/rzzn/xh/flickr30k_images",
                     caption_file="/home/rzzn/xh/captions.txt",
                     sep=','):
    """Build train/val/test DataLoaders for the Flickr caption dataset.

    Args:
        batch_size:   samples per batch for every split.
        num_workers:  DataLoader worker processes per loader.
        image_dir:    directory containing the image files (generalized from
                      a hard-coded path; default preserves old behavior).
        caption_file: path to the caption CSV (same generalization).
        sep:          field separator of the caption file.

    Returns:
        (train_loader, val_loader, test_loader); only the training loader
        shuffles.
    """
    # Standard ImageNet preprocessing.
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]
        )
    ])

    # Run the full preprocessing pipeline (CSV parse, image validation,
    # embedding precompute, split).
    preprocessor = DataPreprocessor(
        image_dir=image_dir,
        caption_file=caption_file,
        sep=sep
    )

    train_df, val_df, test_df = preprocessor.get_datasets()

    train_dataset = FlickrDataset(train_df, preprocessor.image_dir, transform)
    val_dataset = FlickrDataset(val_df, preprocessor.image_dir, transform)
    test_dataset = FlickrDataset(test_df, preprocessor.image_dir, transform)

    # Only the training split is shuffled; persistent workers keep the
    # worker processes alive across epochs for the training loader.
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=True,
        persistent_workers=True
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=True
    )
    test_loader = DataLoader(
        test_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=collate_fn,
        pin_memory=True
    )
    print('loader save')
    return train_loader, val_loader, test_loader