#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
样本集下载程序
功能：自动下载GWAC瞬变源数据并创建样本集
"""

import os
import json
import time
import requests
from datetime import datetime
from datetime import timezone
from pathlib import Path
import logging

# 配置日志
# Logging setup: INFO-level messages with timestamps, written to the
# console via a stream handler. (A FileHandler could be appended to the
# handlers list to also persist logs to disk.)
_console_handler = logging.StreamHandler()
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[_console_handler],
)

# Module-level logger, named after this module.
logger = logging.getLogger(__name__)

class OTDownloader:
    """GWAC transient-source (OT) data downloader.

    Queries a GWAC server for transient candidates, fetches per-object
    detail records and stamp images (JPEG + FITS), and maintains a JSON
    cache describing the downloaded sample set.
    """

    def __init__(self, create_dir=True, base_url="http://10.0.10.236"):
        """Initialize the downloader.

        Args:
            create_dir (bool): When True (default), create the dataset
                directory tree and derive the cache-file path. Pass
                False for callers that only need the HTTP helpers.
            base_url (str): Base URL of the GWAC server.
        """
        self.base_url = base_url
        self.session = requests.Session()
        # Per-request timeout in seconds. NOTE: requests.Session has no
        # global `timeout` attribute, so the previous
        # `self.session.timeout = 30` was a silent no-op and every
        # request ran without a timeout; the value is now passed
        # explicitly on each request below.
        self.timeout = 30

        if create_dir:
            # Root directory that holds all sample sets.
            self.data_dir = Path("data/datasets")
            self.data_dir.mkdir(parents=True, exist_ok=True)

            # 1. Create a fresh, timestamped sample-set directory.
            self.dataset_dir = self.create_dataset_dir()

            # Cache file recording every processed target.
            self.cache_file = self.dataset_dir / "transient_list.json"

    def get_utc_timestamp(self):
        """Return the current UTC time as a directory-friendly string.

        Returns:
            str: UTC timestamp formatted as YYYYMMDD_HHMMSS.
        """
        now = datetime.now(timezone.utc)
        return now.strftime("%Y%m%d_%H%M%S")

    def create_dataset_dir(self):
        """Create a new timestamped sample-set directory with an
        ``images`` subdirectory.

        Returns:
            Path: The created sample-set directory.
        """
        timestamp = self.get_utc_timestamp()
        dataset_dir = self.data_dir / timestamp
        dataset_dir.mkdir(parents=True, exist_ok=True)

        # Downloaded stamps go under <dataset>/images.
        images_dir = dataset_dir / "images"
        images_dir.mkdir(exist_ok=True)

        logger.info(f"创建样本集目录: {dataset_dir}")
        return dataset_dir

    def get_object_list(self):
        """Query the server for the full list of transient targets.

        Returns:
            list: Target records (possibly empty); an empty list is also
            returned on any request or JSON error.
        """
        url = f"{self.base_url}/crosstask/get-cross-object-list.action"
        # Server-side query parameters; an empty dateStr means "today"
        # (e.g. "251013" would pin a specific date).
        params = {
            "ot2qp.dateStr": "",
            "ot2qp.ctId": 0,
            "ot2qp.otType": 0,
            "ot2qp.isMatch": 1,
            "ot2qp.probability": "",
            "ot2qp.frameNumber": 5,
            "ot2qp.magDiff": 0
        }

        try:
            response = self.session.get(url, params=params, timeout=self.timeout)
            response.raise_for_status()
            data = response.json()

            # The target list lives in the "gridModel" field.
            objects = data.get("gridModel", [])
            logger.info(f"查询到 {len(objects)} 个目标")
            return objects

        except requests.RequestException as e:
            logger.error(f"查询目标列表失败: {e}")
            return []
        except json.JSONDecodeError as e:
            logger.error(f"解析目标列表JSON失败: {e}")
            return []

    def get_object_details(self, object_name):
        """Fetch the detail record for a single target.

        Args:
            object_name (str): Target name as returned by
                :meth:`get_object_list`.

        Returns:
            dict | None: Detail record with an added ``ffcList_parsed``
            key (parsed from the embedded JSON string ``ffcList``), or
            None on any request/JSON error.
        """
        url = f"{self.base_url}/crosstask/get-crossobj-detail-json.action"
        params = {"name": object_name}

        try:
            response = self.session.get(url, params=params, timeout=self.timeout)
            response.raise_for_status()
            data = response.json()

            # "ffcList" is itself a JSON-encoded string; parse it into a
            # list and store alongside the raw field.
            ffc_list_str = data.get("ffcList", "[]")
            try:
                data["ffcList_parsed"] = json.loads(ffc_list_str)
            except json.JSONDecodeError:
                data["ffcList_parsed"] = []
                logger.warning(f"目标 {object_name} 的ffcList解析失败")

            return data

        except requests.RequestException as e:
            logger.error(f"查询目标 {object_name} 详情失败: {e}")
            return None
        except json.JSONDecodeError as e:
            logger.error(f"解析目标 {object_name} 详情JSON失败: {e}")
            return None

    def download_image(self, image_url, save_path):
        """Download a single image file to disk.

        Args:
            image_url (str): Fully-qualified image URL.
            save_path (Path): Destination file path.

        Returns:
            bool: True on success, False on any request failure.
        """
        try:
            response = self.session.get(image_url, stream=True, timeout=self.timeout)
            response.raise_for_status()

            # Stream to disk in 8 KiB chunks to avoid holding the whole
            # image in memory.
            with open(save_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

            logger.info(f"图像下载成功: {save_path}")
            return True

        except requests.RequestException as e:
            logger.error(f"下载图像失败 {image_url}: {e}")
            return False

    def load_cache(self):
        """Load the cached target list from ``self.cache_file``.

        Returns:
            dict: Cached data, or the default structure
            ``{"objects": [], "obj_number": 0}`` when the file is
            missing or unreadable.
        """
        if self.cache_file.exists():
            try:
                with open(self.cache_file, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except (json.JSONDecodeError, IOError) as e:
                logger.error(f"加载缓存文件失败: {e}")

        # Default (empty) cache structure.
        return {
            "objects": [],
            "obj_number": 0
        }

    def save_cache(self, cache_data):
        """Write the cache dictionary to ``self.cache_file`` as JSON.

        Args:
            cache_data (dict): Cache data to persist.
        """
        try:
            with open(self.cache_file, 'w', encoding='utf-8') as f:
                json.dump(cache_data, f, ensure_ascii=False, indent=2)
            logger.info("缓存数据保存成功")
        except IOError as e:
            logger.error(f"保存缓存文件失败: {e}")

    def process_object(self, obj, dataset_dir, is_update=False):
        """Process one target: fetch its details and download its images.

        Downloads both the JPEG stamp (best effort) and the matching
        FITS file for the first entry of the target's ffcList; only the
        FITS download result sets ``image_downloaded``.

        Args:
            obj (dict): Basic target record from :meth:`get_object_list`.
            dataset_dir (Path): Sample-set directory containing ``images/``.
            is_update (bool): Force-update flag; currently unused here,
                kept for interface stability with :meth:`run_download`.

        Returns:
            dict | None: Combined record (basic info + details +
            processing timestamp), or None when the target has no name
            or its details could not be fetched.
        """
        object_name = obj.get("name", "")
        if not object_name:
            logger.warning("跳过无名目标")
            return None

        # Fetch the per-target detail record.
        details = self.get_object_details(object_name)
        if not details:
            return None

        images_dir = dataset_dir / "images"
        ffc_list = details.get("ffcList_parsed", [])

        if ffc_list:
            first_image = ffc_list[0]
            stamp_name = first_image.get("stamp_name", "")
            stamp_path = first_image.get("stamp_path", "")

            if stamp_name and stamp_path:
                # JPEG stamp — best effort; failure is not recorded.
                image_url = f"{self.base_url}/images/{stamp_path}/{stamp_name}"
                save_path = images_dir / f"{object_name}_001.jpg"
                self.download_image(image_url, save_path)

                # FITS file: same URL with the .jpg extension swapped
                # for .fit. Its result drives the image_downloaded flag.
                fit_name = stamp_name.replace('.jpg', '.fit')
                image_url = f"{self.base_url}/images/{stamp_path}/{fit_name}"
                save_path = images_dir / f"{object_name}_001.fit"
                if self.download_image(image_url, save_path):
                    details["image_downloaded"] = True
                    details["image_path"] = str(save_path.relative_to(dataset_dir))
                else:
                    details["image_downloaded"] = False

        # Merge the basic listing record with the fetched details.
        # Timezone-aware UTC replaces the deprecated datetime.utcnow().
        return {
            "name": object_name,
            "basic_info": obj,
            "details": details,
            "processed_time": datetime.now(timezone.utc).isoformat()
        }

    def run_download(self, is_update=False):
        """Run one full download pass.

        Loads the cache, queries the target list, processes targets not
        yet cached (or all of them when ``is_update`` is True), and
        saves the updated cache.

        Args:
            is_update (bool): When True, reprocess every target and
                replace the cached object list instead of appending.
        """
        logger.info("开始样本集下载流程")

        dataset_dir = self.dataset_dir
        # 2. Load the cache and index cached objects by name.
        cache_data = self.load_cache()
        cached_objects = {obj["name"]: obj for obj in cache_data["objects"]}

        # 3. Query the current target list.
        objects = self.get_object_list()
        if not objects:
            logger.error("未查询到任何目标，退出")
            return

        # 4. Process targets not yet in the cache.
        new_objects = []
        processed_count = 0

        for obj in objects:
            object_name = obj.get("name", "")
            if not object_name:
                continue

            # Skip targets already cached, unless force-updating.
            if not is_update and object_name in cached_objects:
                logger.info(f"目标 {object_name} 已存在缓存中，跳过")
                continue

            logger.info(f"处理目标: {object_name}")
            processed_obj = self.process_object(obj, dataset_dir, is_update)

            if processed_obj:
                new_objects.append(processed_obj)
                processed_count += 1

            # Brief delay to avoid hammering the server.
            time.sleep(0.5)

        # 5. Update the cache.
        if is_update:
            # Force-update mode: replace all cached objects.
            cache_data["objects"] = new_objects
        else:
            # Incremental mode: append newly processed objects.
            cache_data["objects"].extend(new_objects)

        cache_data["obj_number"] = len(cache_data["objects"])
        # Timezone-aware UTC replaces the deprecated datetime.utcnow().
        cache_data["last_update"] = datetime.now(timezone.utc).isoformat()

        self.save_cache(cache_data)

        logger.info(f"下载完成！处理了 {processed_count} 个新目标")
        logger.info(f"样本集目录: {dataset_dir}")
        logger.info(f"缓存文件: {self.cache_file}")

    def run_continuous(self, interval=15, is_update=False):
        """Run download passes forever until interrupted.

        Args:
            interval (int): Seconds to wait between passes.
            is_update (bool): Force-update flag forwarded to
                :meth:`run_download`.
        """
        logger.info(f"启动连续模式，每 {interval} 秒查询一次")

        try:
            while True:
                logger.info("开始新一轮查询")
                self.run_download(is_update)

                logger.info(f"等待 {interval} 秒后继续...")
                time.sleep(interval)

        except KeyboardInterrupt:
            # Ctrl-C cleanly exits the loop.
            logger.info("用户中断，退出连续模式")

def main():
    """Command-line entry point: parse arguments and run the downloader
    in either single-shot or continuous mode."""
    import argparse

    arg_parser = argparse.ArgumentParser(description='GWAC瞬变源样本集下载程序')
    arg_parser.add_argument('--mode', choices=['once', 'continuous'], default='once',
                            help='运行模式：once（单次）或 continuous（连续）')
    arg_parser.add_argument('--interval', type=int, default=15,
                            help='连续模式下的查询间隔（秒）')
    arg_parser.add_argument('--update', action='store_true',
                            help='强制更新所有目标信息')
    arg_parser.add_argument('--url', default='http://10.0.10.236',
                            help='服务器基础URL')

    opts = arg_parser.parse_args()

    # Build the downloader against the requested server.
    dl = OTDownloader(base_url=opts.url)

    # Dispatch on the selected run mode.
    if opts.mode == 'continuous':
        dl.run_continuous(interval=opts.interval, is_update=opts.update)
    else:
        dl.run_download(is_update=opts.update)

if __name__ == "__main__":
    main()