#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
瞬变源分类与结果上传服务
功能：定时获取瞬变源列表，下载图像，进行分类，并上传分类结果
"""

import os
import json
import time
import requests
from datetime import datetime, timezone, timedelta
from pathlib import Path
import logging
import argparse
import traceback
from typing import List, Dict

# Add the project root directory to the Python path
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from funs.ot_download import OTDownloader
from funs.transient_classifier import TransientClassify, classifyImage

# Logging configuration: WARNING and above, emitted to the console.
logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(message)s',
    level=logging.WARNING,
    handlers=[logging.StreamHandler()],
)

# Module-level logger shared by everything below.
logger = logging.getLogger(__name__)

class OTClassifyService:
    """
    Transient-source classification and result-upload service.
    """

    def __init__(self, base_url="http://10.0.10.236"):
        """
        Initialize the service.

        Args:
            base_url (str): Base URL of the survey server.
        """
        self.base_url = base_url
        self.session = requests.Session()
        # NOTE(review): requests.Session does not honor a plain `timeout`
        # attribute — this value is never applied automatically; individual
        # requests must pass timeout= themselves.
        self.session.timeout = 30

        # Build today's dated working-directory layout. This call was
        # commented out previously, which left self.work_dir undefined and
        # made the path assignments below raise AttributeError.
        self.work_dir = self._create_work_dir()
        self.images_dir = self.work_dir / "images"
        self.transient_list_file = self.work_dir / "transient_list.json"
        self.classify_result_file = self.work_dir / "transient_classify_rst.json"
        self.processed_objects_file = self.work_dir / "processed_objects.json"

        # Map classifier labels to the server-side type IDs used on upload;
        # unknown labels fall back to 1 ("other") at upload time.
        self.classification_type_map = {
            "is_hot_pixel": 7,
            "is_bright_star_residual": 27,
            "is_transient": 8,
            "is_dark_pixel": 5,
            "other": 1
        }

        logger.info(f"初始化服务完成，工作目录: {self.work_dir}")
    
    def _load_processed_objects(self) -> set:
        """
        加载已处理的瞬变源列表
        
        Returns:
            set: 已处理的瞬变源ID集合
        """
        if not self.processed_objects_file.exists():
            return set()
        
        try:
            with open(self.processed_objects_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                processed_ids = set(data.get("processed_ids", []))
                logger.info(f"加载已处理瞬变源列表，共 {len(processed_ids)} 个")
                return processed_ids
        except (IOError, json.JSONDecodeError) as e:
            logger.warning(f"加载已处理瞬变源列表失败: {e}")
            return set()
    
    def _save_processed_objects(self, processed_ids: set):
        """
        Persist the set of processed transient IDs to disk.

        Args:
            processed_ids (set): IDs of transients already handled.
        """
        payload = {
            "processed_ids": list(processed_ids),
            "last_update": datetime.now(timezone.utc).isoformat(),
        }

        try:
            with open(self.processed_objects_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, ensure_ascii=False, indent=2)
        except IOError as e:
            logger.error(f"保存已处理瞬变源列表失败: {e}")
        else:
            logger.info(f"已处理瞬变源列表保存成功: {self.processed_objects_file}")
    
    def _load_previous_classify_results(self) -> List[Dict]:
        """
        加载之前的分类结果
        
        Returns:
            List[Dict]: 之前的分类结果列表
        """
        if not self.classify_result_file.exists():
            return []
        
        try:
            with open(self.classify_result_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                previous_results = data.get("objects", [])
                logger.info(f"加载之前的分类结果，共 {len(previous_results)} 个")
                return previous_results
        except (IOError, json.JSONDecodeError) as e:
            logger.warning(f"加载之前的分类结果失败: {e}")
            return []
    
    def _create_work_dir(self) -> Path:
        """
        Create (if needed) the working directory for today's UTC date.

        Returns:
            Path: ot_classify_service/data/<YYYYMMDD>, with an images/
                subdirectory guaranteed to exist.
        """
        today = datetime.now(timezone.utc).strftime("%Y%m%d")
        work_dir = Path("ot_classify_service") / "data" / today

        # Creating the images/ subdirectory with parents=True materializes
        # the whole dated tree in one call.
        (work_dir / "images").mkdir(parents=True, exist_ok=True)

        logger.info(f"创建工作目录: {work_dir}")
        return work_dir
    
    def _update_work_dir_if_needed(self):
        """
        检查是否需要更新工作目录（当日期发生变化时）
        """
        # 获取当前UTC日期
        current_utc_date = datetime.now(timezone.utc).strftime("%Y%m%d")
        
        # 从当前工作目录路径中提取日期
        work_dir_name = self.work_dir.name
        
        # 如果日期不同，则更新工作目录
        if work_dir_name != current_utc_date:
            logger.info(f"日期已更改，从 {work_dir_name} 更新到 {current_utc_date}")
            self.work_dir = self._create_work_dir()
            self.images_dir = self.work_dir / "images"
            self.transient_list_file = self.work_dir / "transient_list.json"
            self.classify_result_file = self.work_dir / "transient_classify_rst.json"
            self.processed_objects_file = self.work_dir / "processed_objects.json"
            logger.info(f"工作目录已更新为: {self.work_dir}")
    
    def _is_within_running_time(self) -> bool:
        """
        检查当前时间是否在北京时间下午5点到第二天早上7点之间
        
        Returns:
            bool: 如果在运行时间内返回True，否则返回False
        """
        # 获取当前北京时间
        utc_now = datetime.now(timezone.utc)
        beijing_time = utc_now + timedelta(hours=8)
        current_hour = beijing_time.hour
        
        # 检查是否在下午5点(17点)到第二天早上7点之间
        # 由于是跨天的情况，我们需要特殊处理
        if current_hour >= 17 or current_hour < 7:
            return True
        else:
            return False
    
    def get_transient_list(self) -> List[Dict]:
        """
        Fetch the current transient-source list from the server.

        Returns:
            List[Dict]: Transient records as returned by OTDownloader.
        """
        fetched = OTDownloader(create_dir=False, base_url=self.base_url).get_object_list()
        logger.info(f"获取到 {len(fetched)} 个瞬变源")
        return fetched
    
    def download_transient_images(self, obj: Dict) -> bool:
        """
        Download the stamp images (JPEG preview and FITS) for one transient.

        Args:
            obj (Dict): Transient record; must carry a non-empty "name".

        Returns:
            bool: True only when the FITS image downloaded successfully.
        """
        object_name = obj.get("name", "")
        if not object_name:
            logger.warning("跳过无名瞬变源")
            return False

        downloader = OTDownloader(create_dir=False, base_url=self.base_url)

        details = downloader.get_object_details(object_name)
        if not details:
            return False

        ffc_list = details.get("ffcList_parsed", [])
        if not ffc_list:
            return False

        first_image = ffc_list[0]
        stamp_name = first_image.get("stamp_name", "")
        stamp_path = first_image.get("stamp_path", "")
        if not (stamp_name and stamp_path):
            return False

        # JPEG preview; its success is deliberately not checked — only the
        # FITS file matters for classification (presumably intentional;
        # confirm with the original author).
        jpg_url = f"{self.base_url}/images/{stamp_path}/{stamp_name}"
        downloader.download_image(jpg_url, self.images_dir / f"{object_name}_001.jpg")

        # FITS image consumed by the classifier.
        fit_url = f"{self.base_url}/images/{stamp_path}/{stamp_name.replace('.jpg', '.fit')}"
        fit_path = self.images_dir / f"{object_name}_001.fit"

        if downloader.download_image(fit_url, fit_path):
            logger.info(f"图像下载成功: {fit_path}")
            return True

        logger.error(f"图像下载失败: {fit_url}")
        return False
    
    def classify_transient(self, obj: Dict) -> Dict:
        """
        Run the image classifier on one transient's FITS stamp.

        Args:
            obj (Dict): Transient record; "name" selects the FITS file.

        Returns:
            Dict: Classification result carrying at least 'name' and
                'classification'; failures use 'other' plus an 'error' key.
        """
        object_name = obj.get("name", "")
        fits_path = self.images_dir / f"{object_name}_001.fit"

        if not fits_path.exists():
            logger.warning(f"图像文件不存在: {fits_path}")
            return {
                'name': object_name,
                'classification': 'other',
                'error': '图像文件不存在'
            }

        try:
            outcome = classifyImage(str(fits_path))

            if 'error' in outcome:
                result = {
                    'name': object_name,
                    'classification': 'other',
                    'error': outcome['error']
                }
            elif outcome['results']:
                # Keep only the first detected candidate's classification.
                result = outcome['results'][0]
                result['name'] = object_name
            else:
                result = {
                    'name': object_name,
                    'classification': 'other',
                    'error': '没有检测到候选体'
                }

            logger.info(f"分类完成: {object_name} -> {result['classification']}")
            return result

        except Exception as e:
            logger.error(f"分类过程中出错 {object_name}: {e}")
            return {
                'name': object_name,
                'classification': 'other',
                'error': str(e)
            }
    
    def upload_classification_result(self, obj: Dict, classification_result: Dict) -> bool:
        """
        Upload one classification result to the server.

        Args:
            obj (Dict): Transient record; "coId" identifies it server-side.
            classification_result (Dict): Output of classify_transient().

        Returns:
            bool: True when the POST succeeded, False otherwise.
        """
        ot_id = obj.get("coId", "")
        if not ot_id:
            logger.warning("瞬变源ID为空，跳过上传")
            return False

        # Unknown labels fall back to type ID 1 ("other").
        classification = classification_result.get("classification", "other")
        ot_type_id = self.classification_type_map.get(classification, 1)

        url = f"{self.base_url}/crosstask/cross-ot-classify.action"
        data = {
            "otId": ot_id,
            "otTypeId": ot_type_id
        }

        try:
            # requests ignores the `timeout` attribute set on the Session in
            # __init__; without an explicit per-request timeout this call
            # could hang forever on a stalled server.
            response = self.session.post(url, data=data, timeout=30)
            response.raise_for_status()

            logger.info(f"上传分类结果成功: {ot_id} -> {classification} (ID: {ot_type_id})")
            return True

        except requests.RequestException as e:
            logger.error(f"上传分类结果失败 {ot_id}: {e}")
            return False
    
    def _append_to_transient_list(self, new_objects: List[Dict]):
        """
        Merge new transients into transient_list.json, skipping duplicates.

        Duplicates are detected by the "<name>_<coId>" composite key, both
        against the file's existing entries and within new_objects itself.

        Args:
            new_objects (List[Dict]): Freshly fetched transient records.
        """
        if not self.transient_list_file.exists():
            # No file yet: a plain overwrite save is sufficient.
            self.save_transient_list(new_objects)
            return

        def make_key(record: Dict) -> str:
            # Composite identity used for de-duplication.
            return f"{record.get('name', '')}_{record.get('coId', '')}"

        try:
            with open(self.transient_list_file, 'r', encoding='utf-8') as f:
                existing_data = json.load(f)

            existing_objects = existing_data.get("objects", [])
            seen = {make_key(record) for record in existing_objects}

            appended = []
            for record in new_objects:
                key = make_key(record)
                if key in seen:
                    continue
                appended.append(record)
                seen.add(key)  # guards against duplicates within new_objects

            merged = existing_objects + appended
            existing_data["objects"] = merged
            existing_data["obj_number"] = len(merged)
            existing_data["last_update"] = datetime.now(timezone.utc).isoformat()

            with open(self.transient_list_file, 'w', encoding='utf-8') as f:
                json.dump(existing_data, f, ensure_ascii=False, indent=2)

            logger.info(f"追加 {len(appended)} 个新瞬变源到列表，总计 {len(merged)} 个")
        except (IOError, json.JSONDecodeError) as e:
            logger.error(f"追加瞬变源列表失败: {e}")
            # On failure, fall back to overwriting with the new list only.
            self.save_transient_list(new_objects)
    
    def save_transient_list(self, objects: List[Dict]):
        """
        Overwrite transient_list.json with the given records.

        Args:
            objects (List[Dict]): Transient records to persist.
        """
        payload = {
            "objects": objects,
            "obj_number": len(objects),
            "last_update": datetime.now(timezone.utc).isoformat(),
        }

        try:
            with open(self.transient_list_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, ensure_ascii=False, indent=2)
        except IOError as e:
            logger.error(f"保存瞬变源列表失败: {e}")
        else:
            logger.info(f"瞬变源列表保存成功: {self.transient_list_file}")
    
    def _append_to_classify_results(self, new_results: List[Dict]):
        """
        Merge new classification results into transient_classify_rst.json,
        skipping entries whose 'name' is already recorded (or empty).

        Args:
            new_results (List[Dict]): Fresh classification results.
        """
        if not self.classify_result_file.exists():
            # No file yet: a plain overwrite save is sufficient.
            self.save_classify_results(new_results)
            return

        try:
            with open(self.classify_result_file, 'r', encoding='utf-8') as f:
                existing_data = json.load(f)

            existing_results = existing_data.get("objects", [])
            # De-duplicate by the (non-empty) transient name.
            seen = {r.get('name', '') for r in existing_results if r.get('name', '')}

            appended = []
            for entry in new_results:
                name = entry.get('name', '')
                if not name or name in seen:
                    continue
                appended.append(entry)
                seen.add(name)  # guards against duplicates within new_results

            merged = existing_results + appended
            existing_data["objects"] = merged
            existing_data["obj_number"] = len(merged)
            existing_data["last_update"] = datetime.now(timezone.utc).isoformat()

            with open(self.classify_result_file, 'w', encoding='utf-8') as f:
                json.dump(existing_data, f, ensure_ascii=False, indent=2)

            logger.info(f"追加 {len(appended)} 个新分类结果，总计 {len(merged)} 个")
        except (IOError, json.JSONDecodeError) as e:
            logger.error(f"追加分类结果失败: {e}")
            # On failure, fall back to overwriting with the new results only.
            self.save_classify_results(new_results)
    
    def save_classify_results(self, results: List[Dict]):
        """
        Overwrite transient_classify_rst.json with the given results.

        Args:
            results (List[Dict]): Classification results to persist.
        """
        payload = {
            "objects": results,
            "obj_number": len(results),
            "last_update": datetime.now(timezone.utc).isoformat(),
        }

        try:
            with open(self.classify_result_file, 'w', encoding='utf-8') as f:
                json.dump(payload, f, ensure_ascii=False, indent=2)
        except IOError as e:
            logger.error(f"保存分类结果失败: {e}")
        else:
            logger.info(f"分类结果保存成功: {self.classify_result_file}")
    
    def run_once(self):
        """
        Execute one full processing pass.

        Steps: fetch the transient list, diff it against the already-processed
        IDs, then download + classify + upload each new transient, and finally
        persist the updated list, results, and processed-ID set.
        """
        logger.info("开始执行瞬变源分类流程")
        
        # 1. Fetch the current transient-source list.
        objects = self.get_transient_list()
        if not objects:
            logger.warning("未获取到任何瞬变源，退出本次处理")
            return
        
        # Roll the working directory over to a new UTC date only when there
        # are targets to process.
        self._update_work_dir_if_needed()
        
        # 2. Load the IDs processed on previous passes.
        processed_ids = self._load_processed_objects()
        
        # 3. Keep only transients not seen before.
        new_objects = []
        for obj in objects:
            # "<name>_<coId>" serves as the unique identity.
            obj_id = f"{obj.get('name', '')}_{obj.get('coId', '')}"
            if obj_id not in processed_ids:
                new_objects.append((obj, obj_id))
        
        logger.info(f"发现 {len(new_objects)} 个新增瞬变源")
        
        # 4. Append the full fetched list to transient_list.json.
        self._append_to_transient_list(objects)
        
        # 5. Download, classify, and upload each new transient.
        classify_results = []
        newly_processed_ids = set()
        
        for obj, obj_id in new_objects:
            object_name = obj.get("name", "")
            logger.info(f"处理新增瞬变源: {object_name}")
            
            # Download the stamp images; classification requires the FITS file.
            if self.download_transient_images(obj):
                # Classify the downloaded stamp.
                classification_result = self.classify_transient(obj)
                
                # Record the JPEG preview path relative to the data root.
                image_filename = f"{object_name}_001.jpg"
                relative_image_path = f"data/{self.work_dir.name}/images/{image_filename}"
                classification_result['image_path'] = relative_image_path
                
                classify_results.append(classification_result)
                
                # Push the result to the server.
                self.upload_classification_result(obj, classification_result)
                
                # Only successfully downloaded objects are marked processed,
                # so download failures get retried on a later pass.
                newly_processed_ids.add(obj_id)
            else:
                logger.warning(f"下载图像失败，跳过分类: {object_name}")
                classify_results.append({
                    'name': object_name,
                    'classification': 'other',
                    'error': '图像下载失败'
                })
            
            # Brief pause between objects to avoid hammering the server.
            time.sleep(0.5)
        
        # 6. Append this pass's results to transient_classify_rst.json.
        self._append_to_classify_results(classify_results)
        
        # 7. Persist the updated processed-ID set.
        if newly_processed_ids:
            processed_ids.update(newly_processed_ids)
            self._save_processed_objects(processed_ids)
        
        logger.info("瞬变源分类流程执行完成")
    
    def run_continuous(self, interval: int = 5):
        """
        Run the processing loop until interrupted, polling at a fixed interval.

        Processing only happens between 17:00 Beijing time and 07:00 the next
        morning; outside that window the loop just sleeps.

        Args:
            interval (int): Seconds to wait between polls.
        """
        logger.info(f"启动连续模式，每 {interval} 秒查询一次")
        logger.info("注意：只在北京时间下午5点到第二天早上7点之间运行")

        try:
            while True:
                if self._is_within_running_time():
                    logger.info("当前时间在运行时间内，开始处理")
                    logger.info("开始新一轮处理")
                    try:
                        self.run_once()
                    except Exception as e:
                        # A failed pass must not kill the service loop; log the
                        # error once with its traceback and carry on. (The
                        # original logged the same exception twice.)
                        logger.error(f"运行一次处理失败: {e}\n{traceback.format_exc()}")
                else:
                    logger.info("当前时间不在运行时间内，跳过本次处理")

                logger.info(f"等待 {interval} 秒后继续...")
                time.sleep(interval)

        except KeyboardInterrupt:
            logger.info("用户中断，退出连续模式")

def main():
    """
    Command-line entry point.

    Parses --mode/--interval/--url and runs the service once or in the
    continuous polling loop.
    """
    
    parser = argparse.ArgumentParser(description='瞬变源分类与结果上传服务')
    parser.add_argument('--mode', choices=['once', 'continuous'], default='once',
                       help='运行模式：once（单次）或 continuous（连续）')
    parser.add_argument('--interval', type=int, default=5,
                       help='连续模式下的查询间隔（秒）')
    parser.add_argument('--url', default='http://10.0.10.236',
                       help='服务器基础URL')
    
    args = parser.parse_args()
    
    # Instantiate the service (this also creates the dated work directory).
    service = OTClassifyService(base_url=args.url)
    
    # Dispatch on the selected run mode.
    if args.mode == 'continuous':
        service.run_continuous(interval=args.interval)
    else:
        service.run_once()

if __name__ == "__main__":
    main()