import os
from pathlib import Path
from PIL import Image
import json
import requests
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import logging  # Import logging

def custom_collate_fn(batch):
    """Collate (front, back, annotation) triples, dropping incomplete samples.

    Any sample containing a None component (e.g. a failed image download) is
    discarded before stacking.

    Args:
        batch: list of (front_tensor, back_tensor, annotation_dict) triples.

    Returns:
        (front_batch, back_batch, annotations) where the image batches are
        stacked tensors and annotations is a list of dicts; returns
        (None, None, []) when every sample in the batch was incomplete.
    """
    valid = [sample for sample in batch
             if all(part is not None for part in sample)]
    if not valid:
        return None, None, []

    fronts, backs, metas = zip(*valid)
    return torch.stack(fronts), torch.stack(backs), list(metas)

class CoinDataset(Dataset):
    def __init__(self, api_url: str, title: str = "coin", page_size: int = 10, offset: int = 0, max_offset: int = 217073, cache_dir: str = "cached_images"):
        self.api_url = api_url
        self.title = title
        self.page_size = page_size
        self.max_offset = max_offset
        self.cache_dir = Path(os.path.join("..", cache_dir)) 
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        
        # 保存当前的offset
        self.current_offset = offset
        
        # 初始化数据
        self.data = []
        self.load_data()
        
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
        ])

    def load_offset(self) -> int:
        """从文件加载上次的offset"""
        if self.offset_file.exists():
            with open(self.offset_file, 'r') as f:
                data = json.load(f)
                return min(data.get('offset', 0), self.max_offset)
        return 0

    def save_offset(self):
        """保存当前offset到文件"""
        # with open(self.offset_file, 'w') as f:
        #     json.dump({'offset': self.current_offset}, f)

    def load_data(self):
        """从 HTTP 接口加载硬币图像和标注信息"""
        if self.current_offset >= self.max_offset:
            logging.info(f"已达到最大偏移量 {self.max_offset}，停止加载数据")  # Added logging
            return False  # 返回 False 表示没有更多数据

        params = {
            'title': self.title,
            'offset': self.current_offset,
            'limit': self.page_size
        }
        
        try:
            logging.info("加载数据: %s", params)  # Added logging
            response = requests.post(self.api_url, json=params)
            if response.status_code == 200:
                response_data = response.json()
                if response_data['code'] == 1:
                    new_data = response_data['data']
                    if new_data:
                        self.data.extend(new_data)
                        self.current_offset += len(new_data)
                        self.save_offset()
                        logging.info("成功加载数据，当前offset: %d/%d", self.current_offset, self.max_offset)  # Added logging
                        return True  # 返回 True 表示成功加载数据
                    else:
                        logging.info("没有更多数据了")  # Added logging
                        return False
                else:
                    logging.error("获取数据失败: %s", response_data['msg'])  # Added logging
                    return False
            else:
                logging.error("无法获取数据，状态码: %d", response.status_code)  # Added logging
                return False
        except Exception as e:
            logging.error("加载数据时出错: %s", e)  # Added logging
            return False

    def __len__(self):
        # 返回当前数据长度，而不是最大偏移量
        return len(self.data)

    def __getitem__(self, idx):
        logging.info("获取索引 %d，当前数据长度: %d，当前offset: %d", idx, len(self.data), self.current_offset)  # Added logging
        
        # 如果接近数据末尾且未达到最大偏移量，加载更多数据
        if idx >= len(self.data) - self.page_size // 2:
            if self.current_offset < self.max_offset:
                self.load_data()

        # 如果索引超出范围，尝试加载更多数据
        attempts = 0
        max_attempts = 3  # 最大尝试次数
        while idx >= len(self.data):
            if attempts >= max_attempts:
                logging.warning("达到最大尝试次数 %d，停止加载", max_attempts)  # Added logging
                return None, None, None
            
            if self.current_offset >= self.max_offset:
                logging.info("已达到最大偏移量，停止加载")  # Added logging
                return None, None, None
            
            if not self.load_data():  # 如果无法加载更多数据
                attempts += 1
                continue
            
            attempts = 0  # 成功加载数据后重置尝试次数

        # 获取数据
        try:
            item = self.data[idx]
            front_image_url = item['frontImage']
            back_image_url = item['backImage']
            item_id = item['id']  # 获取 item id
            annotations = {
                'id': item_id,
                'title': item['title'],
                'frontImage': front_image_url,
                'backImage': back_image_url,
                'frontDescription': item['frontDescription'],
                'backDescription': item['backDescription'],
                'material': item.get('material'),
                'diametre': item.get('diametre'),
                'poids': item.get('poids'),
                'epaisseur': item.get('epaisseur')
            }

            # 下载正面和背面图像，传入 item_id
            front_image = self.download_image(front_image_url, item_id)
            
            # 需要处理背面None没有图片的情况
            back_image = self.download_image(back_image_url, item_id)

            # 检查图像是否成功下载
            if front_image is None or back_image is None:
                logging.warning("图像下载失败，跳过索引: %d", idx)  # Added logging
                # return self.__getitem__((idx + 1) % len(self.data))  # 尝试下一个索引
                return None, None, None
            # 确保图像是有效的 PIL 图像
            if isinstance(front_image, Image.Image):
                front_image_tensor = self.transform(front_image)
            else:
                logging.warning("无效的正面图像类型，跳过索引: %d", idx)  # Added logging
                # return self.__getitem__((idx + 1) % len(self.data))
                return None, None, None

            if isinstance(back_image, Image.Image):
                back_image_tensor = self.transform(back_image)
            else:
                logging.warning("无效的背面图像类型，跳过索引: %d", idx)  # Added logging
                return self.__getitem__((idx + 1) % len(self.data))

            return front_image_tensor, back_image_tensor, annotations
            
        except Exception as e:
            logging.error("处理索引 %d 时出错: %s", idx, e)  # Added logging
            # return self.__getitem__((idx + 1) % len(self.data))
            return None, None, None

    def download_image(self, url: str, item_id: str) -> Image.Image:
        """下载图像并返回 PIL 图像对象，若已存在则直接加载"""
        # 检查 URL 是否包含特定字符串
        if "no-reverse-coin" in url or "no-obverse-coin" in url:
            logging.warning("URL 包含不允许的字符串，返回 None: %s", url)  # Added logging
            return None  # 直接返回 None
        
        # 创建以 item_id 命名的子目录
        item_dir = self.cache_dir / str(item_id)
        item_dir.mkdir(parents=True, exist_ok=True)

        # 获取文件名并构建完整路径
        image_filename = url.split("/")[-1]
        cached_image_path = item_dir / image_filename

        # 如果缓存文件存在，直接加载
        if cached_image_path.exists():
            # logging.info("已存在缓存文件: %s", cached_image_path)  # Added logging
            return Image.open(cached_image_path).convert('RGB')

        # 否则下载图像
        try:
            response = requests.get(url, stream=True, headers={
                'User-Agent': 'Mozilla/5.0',
                'Accept-Language': 'en-US,en;q=0.9'
            })
            response.raise_for_status()
            image = Image.open(response.raw).convert('RGB')

            # 保存到缓存
            image.save(cached_image_path)
            return image
        except Exception as e:
            logging.error("无法下载或打开图像: %s, 错误: %s", url, e)  # Added logging
            return None
