import os
import sys
# Add the parent directory to sys.path so sibling packages (utils, ...) resolve
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import os
import re
from loguru import logger
import numpy as np
from sqlalchemy import create_engine, text
from datetime import timedelta
from pathlib import Path
from utils.tools_date import build_fits_path, get_cur_processed_path
from collections import Counter
from pathlib import Path
from collections import Counter
from pathlib import Path
from datetime import timedelta


class SimultaneousObservationFinder:
    """Find groups of images that were observed simultaneously on a given date.

    Workflow: fetch the list of observed objects for the date, fetch each
    object's successful exposure records, then cluster records whose
    (tolerance-padded) exposure intervals overlap into "simultaneous groups".
    """

    def __init__(self, db_url, date_str, delta_sec=10, version=None):
        """
        Initialize the simultaneous-observation finder.

        :param db_url: SQLAlchemy database URL used to connect to the database.
        :param date_str: Date to query, e.g. '2025-01-01'.
        :param delta_sec: Allowed time tolerance in seconds when deciding
                          whether two exposures count as "simultaneous".
        :param version: Data version tag (required; raises if missing).
        :raises ValueError: if no version is supplied.
        """
        if not version:
            raise ValueError("必须提供数据版本号")

        self.version = version
        # Engine with an unbounded pool; pre-ping discards stale connections.
        self.engine = create_engine(
            db_url,
            pool_size=0, max_overflow=-1, echo=False, pool_pre_ping=True
        )
        self.date = date_str        # date being queried (YYYY-MM-DD)
        self.delta_sec = delta_sec  # time tolerance in seconds
        self.object_list = []       # valid objects observed on that date
        self.groups = []            # all resulting "simultaneous groups"

    def _day_bounds(self):
        """Return (start, end) timestamp strings spanning the whole query date."""
        return f"{self.date} 00:00:00", f"{self.date} 23:59:59"

    def fetch_object_list(self):
        """
        Query all valid observed objects on the configured date, count their
        exposure records, and store the objects sorted by record count
        (descending) in ``self.object_list``.
        """
        day_start, day_end = self._day_bounds()
        # Bound parameters instead of f-string interpolation: avoids SQL
        # injection and lets the driver handle quoting.
        sql_ = """
            SELECT object, COUNT(*) AS num_records
            FROM exposuresocs
            WHERE imagetyp = 'sc'
              AND object IS NOT NULL AND object != ''
              AND obs_date BETWEEN :day_start AND :day_end
            GROUP BY object
            ORDER BY num_records DESC
        """
        with self.engine.connect() as conn:
            logger.info(sql_)
            result = conn.execute(
                text(sql_), {"day_start": day_start, "day_end": day_end}
            )
            # SQLAlchemy 1.4+ Row does not support string indexing
            # (row['object'] raises); mapping access goes through ._mapping.
            self.object_list = [row._mapping['object'] for row in result]

    def fetch_records_for_object(self, obj):
        """
        Query all exposure records of one object, joined with Status_KM for
        the status and location fields, ordered by exposure DESC then
        obs_date ASC (longest exposures first, ties by start time).

        Only records with status '1' (success) and a matching version are
        returned.

        :param obj: object name to query.
        :return: List[Dict] with keys oid, object, serno, filename, filter,
                 obs_date, exposure, status, location.
        """
        day_start, day_end = self._day_bounds()
        with self.engine.connect() as conn:
            rows = conn.execute(
                text("""
                    SELECT e.oid, e.object,
                        e.serno, e.eid AS filename,
                        e.filter, e.obs_date,
                        e.exposure, s.status,
                        s.location AS location
                    FROM exposuresocs e
                    LEFT JOIN Status_KM s ON e.oid = s.oid
                    WHERE e.imagetyp = 'sc'
                    AND e.object = :obj
                    AND s.status = '1'
                    AND s.version = :version
                    AND e.obs_date BETWEEN :day_start AND :day_end
                    ORDER BY e.exposure DESC, e.obs_date ASC
                """),
                {
                    "obj": obj,
                    "version": self.version,
                    "day_start": day_start,
                    "day_end": day_end,
                },
            ).fetchall()
            # Convert Row objects to plain dicts for downstream processing.
            return [dict(row._mapping) for row in rows]

    def build_simultaneous_groups_with_overlap(self, records):
        """
        Legacy grouping: within one object's record list, find groups whose
        tolerance-padded exposure intervals overlap the seed record's window.
        (No window expansion; superseded by the *_improve variant.)

        :param records: List[Dict] of records for one object (pre-sorted).
        :return: List[List[Dict]] — each inner list is one simultaneous group.
        """
        used_filenames = set()  # filenames already assigned to a group
        groups = []

        for i, rec in enumerate(records):
            if rec['filename'] in used_filenames:
                continue  # already grouped

            # Seed window [start - δ, start + exposure + δ] for this record.
            start_time = rec['obs_date'] - timedelta(seconds=self.delta_sec)
            end_time = rec['obs_date'] + timedelta(seconds=rec['exposure'] + self.delta_sec)

            group = [rec]
            used_filenames.add(rec['filename'])

            # Scan every other unused record for window overlap with the seed.
            for j in range(len(records)):
                if i == j:
                    continue
                other = records[j]
                if other['filename'] in used_filenames:
                    continue

                other_start_time = other['obs_date'] - timedelta(seconds=self.delta_sec)
                other_end_time = other['obs_date'] + timedelta(seconds=other['exposure'] + self.delta_sec)

                # Closed-interval overlap test.
                if start_time <= other_end_time and other_start_time <= end_time:
                    group.append(other)
                    used_filenames.add(other['filename'])

            # Only groups of at least two records count as "simultaneous".
            if len(group) >= 2:
                groups.append(group)

        return groups

    def print_groups(self, return_filenames=True):
        """
        Log the details of every simultaneous group and optionally return
        the corresponding FITS file paths.

        :param return_filenames: whether to return path arrays.
        :return: List[np.ndarray] if return_filenames is True, else None.
        """
        logger.info(f"\n共找到 {len(self.groups)} 个“同时观测”组")
        all_filename_arrays = []

        for i, g in enumerate(self.groups):
            logger.info(f"\nGroup {i+1:3d}: object = {g['object']:10s}, 通道数 = {len(g['sernos'])}, 通道 = {sorted(g['sernos'])}")

            filenames = []

            for rec in g['group']:
                exp = rec.get('exposure', 'N/A')
                logger.info(f"  serno={int(rec['serno'])}  filename={rec['filename']}  obs_date={rec['obs_date']}  exposure={exp:>5}  filter={rec['filter']}")

                # Build the FITS file path for this record; skip unresolvable ones.
                path = build_fits_path(rec, self.version)
                if path:
                    filenames.append(path)

            if return_filenames and filenames:
                all_filename_arrays.append(np.array(filenames))

        if return_filenames:
            return all_filename_arrays

    def run(self):
        """
        Main entry point: process every object of the day and build its
        simultaneous-observation groups into ``self.groups``.
        """
        self.fetch_object_list()  # objects observed on the configured date
        for obj in self.object_list:
            records = self.fetch_records_for_object(obj)
            if not records:
                continue
            # Overlap-based grouping with iterative window expansion.
            sim_groups = self.build_simultaneous_groups_with_overlap_improve(records)

            for g in sim_groups:
                self.groups.append({
                    "object": obj,
                    "group": g,
                    "sernos": set(r['serno'] for r in g)  # channels in this group
                })

    def check_grouping_coverage(self):
        """
        Check whether every eligible record ended up in some group and log
        the totals (all records, grouped, ungrouped).
        """
        day_start, day_end = self._day_bounds()
        with self.engine.connect() as conn:
            result = conn.execute(
                text("""
                    SELECT COUNT(*) as total_records
                    FROM exposuresocs e
                        LEFT JOIN Status_KM s ON e.oid = s.oid
                    WHERE imagetyp = 'sc'
                      AND object IS NOT NULL AND object != ''
                        AND s.status = '1'
                        AND s.version = :version
                      AND obs_date BETWEEN :day_start AND :day_end
                """),
                {"version": self.version, "day_start": day_start, "day_end": day_end},
            )
            # scalar() reads the single COUNT(*) value; Row string indexing
            # is not supported in SQLAlchemy 1.4+.
            total = result.scalar()

        grouped_filenames = set()
        for g in self.groups:
            for rec in g['group']:
                grouped_filenames.add(rec['filename'])

        logger.info(f"\n📊 总记录数: {total}")
        logger.info(f"✅ 已分组的记录数: {len(grouped_filenames)}")
        logger.info(f"❌ 未分组的记录数: {total - len(grouped_filenames)}")

        if total == len(grouped_filenames):
            logger.info("🎉 所有记录都已成功归入某个‘同时观测组’")
        else:
            logger.info("⚠️ 存在未被分组的记录！请检查时间容差设置或数据分布。")

    def build_simultaneous_groups_with_overlap_improve(self, records):
        """
        Longest-exposure-first grouping with interval overlap and iterative
        group-window expansion:

        - Seed each group with the longest unassigned exposure (records are
          already sorted exposure DESC, obs_date ASC by the upstream SQL).
        - Initial window is [start - δ, start + exposure + δ].
        - Any unassigned record overlapping the window joins the group and
          may enlarge the window; iterate until no more members are added
          (captures chained overlaps).
        - Only groups with >= 2 records are kept.

        :param records: List[Dict] of one object's records.
        :return: List[List[Dict]] — groups sorted internally by obs_date.
        """

        def interval(rec):
            """Tolerance-padded time interval (s, e); None exposure counts as 0."""
            exp = rec.get('exposure') or 0
            s = rec['obs_date'] - timedelta(seconds=self.delta_sec)
            e = rec['obs_date'] + timedelta(seconds=exp + self.delta_sec)
            return s, e

        used = set()
        groups = []

        n = len(records)
        for i, rec in enumerate(records):
            if rec['filename'] in used:
                continue

            # Seed the group window with this (longest-exposure) record.
            gs, ge = interval(rec)
            group = [rec]
            used.add(rec['filename'])

            # Expand until convergence: absorb every unused record overlapping
            # the current window and grow the window accordingly.
            changed = True
            while changed:
                changed = False
                for j in range(n):
                    if j == i:
                        continue
                    other = records[j]
                    if other['filename'] in used:
                        continue

                    # NOTE: renamed from os/oe — 'os' shadowed the os module.
                    o_start, o_end = interval(other)
                    # Closed-interval overlap (touching endpoints count).
                    if not (o_end < gs or o_start > ge):
                        group.append(other)
                        used.add(other['filename'])
                        old_gs, old_ge = gs, ge
                        gs = min(gs, o_start)
                        ge = max(ge, o_end)
                        # Window grew: run another pass to catch chained overlaps.
                        if gs != old_gs or ge != old_ge:
                            changed = True

            if len(group) >= 2:
                # Sort members by start time for readable output.
                group.sort(key=lambda r: r['obs_date'])
                groups.append(group)

        return groups

    def save_all_to_txt_detailed_with_summary(self, output_file):
        """
        Write a single TXT report containing:
        1) a summary header (total records, total groups, unmatched count,
           group counts by size);
        2) a detailed timeline — one section per group (one line per member)
           and one line per unmatched record, all in chronological order.
        Group headers carry Start=/End=; each record line carries End= too.

        :param output_file: destination path; parent directories are created.
        """
        entries = []  # time-sortable output units: {"t": datetime, "lines": [str, ...]}

        # ===== 1) Collect grouped records =====
        group_size_counter = Counter()
        for idx, g in enumerate(self.groups, 1):
            records = sorted(g["group"], key=lambda r: r["obs_date"])
            group_start = records[0]["obs_date"]
            # Group end = max over members of (start + actual exposure).
            group_end = max(r["obs_date"] + timedelta(seconds=(r.get("exposure") or 0)) for r in records)

            object_name = g["object"]
            group_size_counter[len(records)] += 1

            header = f"[Group {idx:03d}] Start={group_start}  End={group_end}  Object={object_name}"
            lines = [header]

            # One line per member: start, end, exposure-filter, filename.
            for r in records:
                exp = r.get('exposure') or 0
                rec_end = r['obs_date'] + timedelta(seconds=exp)
                lines.append(
                    f" ->{r['obs_date']}  End={rec_end}  {exp}s-{r['filter']} (fname={r['filename']})"
                )

            entries.append({"t": group_start, "lines": lines})

        # ===== 2) Unmatched records (also collects the record total) =====
        grouped_filenames = {r["filename"] for g in self.groups for r in g["group"]}
        all_records = []
        unmatched_records = []
        for obj in self.object_list:
            recs = self.fetch_records_for_object(obj)
            all_records.extend(recs)
            for r in recs:
                if r["filename"] not in grouped_filenames:
                    unmatched_records.append(r)

        unmatched_records.sort(key=lambda r: r["obs_date"])
        for r in unmatched_records:
            exp = r.get('exposure') or 0
            rec_end = r['obs_date'] + timedelta(seconds=exp)
            line = (f"[Unmatched] Start={r['obs_date']}  End={rec_end}  Object={r['object']}  "
                    f"{exp}s-{r['filter']}  fname={r['filename']}")
            entries.append({"t": r["obs_date"], "lines": [line]})

        # ===== 3) Summary numbers =====
        total_records = len(all_records)
        total_groups = len(self.groups)
        total_unmatched = len(unmatched_records)
        c2 = group_size_counter.get(2, 0)
        c3 = group_size_counter.get(3, 0)
        c4 = group_size_counter.get(4, 0)
        c5 = group_size_counter.get(5, 0)
        other_sizes = {k: v for k, v in group_size_counter.items() if k not in (2, 3, 4, 5)}
        other_str = ""
        if other_sizes:
            other_str = "，其它规模：" + "，".join([f"{k}条: {v}组" for k, v in sorted(other_sizes.items())])

        # ===== 4) Sort everything by time and write the file =====
        entries.sort(key=lambda x: x["t"])
        output_file = Path(output_file)
        output_file.parent.mkdir(parents=True, exist_ok=True)

        with open(output_file, "w", encoding="utf-8") as f:
            f.write("==== Summary ====\n")
            f.write(f"总记录数: {total_records}\n")
            f.write(f"总组数: {total_groups}\n")
            f.write(f"未匹配记录数: {total_unmatched}\n")
            f.write(f"2条的组数: {c2}，3条的组数: {c3}，4条的组数: {c4}，5条的组数: {c5}{other_str}\n")
            f.write("=================\n\n")
            for e in entries:
                for l in e["lines"]:
                    f.write(l + "\n")

        logger.info(f"统计+详细输出已写入：{output_file}")

# Example entry point
if __name__ == "__main__":
    # SECURITY: credentials are hard-coded below as a fallback only; set the
    # MEPHISTO_DB_URL environment variable to keep secrets out of source control.
    db_url = os.environ.get(
        "MEPHISTO_DB_URL",
        'mysql+pymysql://ocs_1m6:1m6ocs!AutoObserver@192.168.16.70/mephisto_observation'
    )
    finder = SimultaneousObservationFinder(
        db_url=db_url,
        date_str='2025-01-01',
        delta_sec=1,
        version="V20250303"
    )
    finder.run()

    finder.save_all_to_txt_detailed_with_summary("./log/output_data/simultaneous_observations.txt")
