﻿"""
====================================================================
设备运行分析模块 / Equipment Runtime Analysis Module
====================================================================
功能说明：
1. 提供设备运行数据分析的WebSocket接口
2. 计算设备多维度评分（可用性、出勤、利用率、一致性、合规、可靠性）
3. 调用AI生成分析报告
4. 支持缓存机制提升性能

作者: Data Analysis Team
日期: 2025-01-06
====================================================================
"""

import json
import os
import tempfile
from datetime import datetime
from typing import Optional

import numpy as np
import pandas as pd
import pdfkit
from dateutil.parser import isoparse
from fastapi import APIRouter
from starlette.websockets import WebSocket, WebSocketDisconnect, WebSocketState

# 导入AI客户端
from client.openai_client import invoke_laozhang_api

# 导入数据访问层
from controller.ai.report import generate_report_tables, render_final_report
from dao.es_dao.i_iot_run_dao import get_equipment_list, get_iot_run_data

# 导入工具函数
from utils.date_utils import get_data_range_from_utc_date_str, DateTimeCombiner
from utils.mongo_util import db

# 导入认证中间件
# from middleware.auth_middleware import authenticate_websocket_token, UserContext
from utils.oss_util import upload_file_to_oss_nosdk

# FastAPI router hosting every AI data-analysis endpoint in this module.
ai_router_equipment = APIRouter(prefix="/dataAnalysis/ai", tags=["人工智能接口"])
# Path to the local wkhtmltopdf binary used by pdfkit for PDF rendering.
# NOTE(review): raw string with a doubled backslash yields "E:\\wkhtmltopdf\bin\...";
# Windows tolerates doubled separators, but these hard-coded paths should
# probably move to configuration — confirm deployment targets.
pdfkit_path = r'E:\\wkhtmltopdf\bin\wkhtmltopdf.exe'
# Local directory where generated report files are kept.
base_dir = r"E:\\report"


@ai_router_equipment.websocket("/equipmentAnalysisWs")
async def equipment_runtime_analysis_ws(ws: WebSocket) -> None:
    """
    WebSocket endpoint: equipment runtime analysis.

    Flow:
    1. Validate the user token and permissions (currently disabled, see Step 1).
    2. Look up a cached analysis result (company term + time range).
    3. On cache miss, fetch the equipment list and per-day runtime data.
    4. Compute the multi-dimension scores (availability, attendance coverage,
       utilization, consistency, compliance, reliability).
    5. Ask the AI to generate an analysis report (streamed to the client).
    6. Persist the combined result as the cache entry.

    Request payload (JSON):
    {
        "term": {"member.id": <company id>},
        "start_time": "2024-01-01T00:00:00Z",
        "end_time": "2024-01-31T23:59:59Z"
    }
    NOTE(review): Step 9 below also reads data["memberId"], which the example
    payload above does not document — confirm clients always send it; a
    missing key raises KeyError and ends the session via the generic error path.

    Response message types:
    - type: "analysis_data"        - computed analysis payload
    - type: "analysis_stream"      - streamed AI report content
    - type: "analysis_stream_done" - AI streaming finished
    - type: "error"                - error information
    """
    # ====================================================================
    # Step 1: user authentication (currently commented out)
    # ====================================================================
    # Note: the token should be validated BEFORE accept() (taken from the
    # query string or headers).
    # user_context = await authenticate_websocket_token(ws)
    #
    # if not user_context:
    #     # Authentication failed: report the error and close the socket.
    #     try:
    #         await ws.send_json({
    #             "type": "error",
    #             "code": "AUTH_FAILED",
    #             "message": "认证失败：token无效或已过期，请重新登录"
    #         })
    #     except:
    #         pass
    #
    #     await ws.close(code=1008)  # 1008 = Policy Violation (auth failure)
    #     return

    # Authentication succeeded (or skipped): accept the WebSocket connection.
    await ws.accept()

    try:
        while True:
            # ====================================================================
            # Step 2: receive and parse the request parameters
            # ====================================================================
            raw_data = await ws.receive_text()  # raw JSON text from the client
            data: dict = json.loads(raw_data)  # parse the JSON string into a dict

            # ====================================================================
            # Step 3: check the analysis-result cache
            # ====================================================================
            # A previous run is identified by the exact company term plus the
            # exact start/end strings (string equality, not range overlap).
            param = {
                "term": data["term"],
                "time_range": {
                    "start": data["start_time"],
                    "end": data["end_time"]
                }
            }
            analysis_history = db.equipment_analysis.find_one(param)
            if analysis_history:
                # Cache hit: return the stored payload directly.
                del analysis_history['_id']  # ObjectId is not JSON-serializable
                await ws.send_json(analysis_history)
            else:
                # ====================================================================
                # Step 4: fetch the equipment list and runtime data
                # ====================================================================
                df_equipment, df_run_per_day, imei_list = await init_equipment_and_runtime_data(data)

                # ====================================================================
                # Step 5: data preparation
                # ====================================================================
                # Full (imei x day) cross join covering the analysis window.
                df_all, end_time, start_time = await get_df_all(data, df_equipment)
                df_detail = df_all.merge(df_run_per_day, on=['imei', 'date'], how='left')  # attach runtime rows
                df_detail['runtime_hours'] = df_detail['runtime_hours'].fillna(0)  # days with no data count as 0 hours

                # Attach the per-device work-standard rules to the daily rows.
                df_run_per_day = await get_run_rule_dataframes(df_detail, imei_list)

                # After-sales (repair downtime) tickets for these devices.
                df_ticket = await get_after_sales_df(data, imei_list)

                # Per-device daily status table (normal / in repair / not on site ...).
                df_result = build_device_daily_status(
                    df_ticket,
                    df_run_per_day,
                    df_equipment,
                    start_time,
                    end_time,
                    0.5  # run_ok_threshold (hours): minimum runtime for a day to count as worked
                )

                # ====================================================================
                # Step 6: aggregation and scoring
                # ====================================================================
                summary = await run_data_analysis(df_result)  # per-device summary metrics
                scores = monthly_scores(summary)  # per-device dimension scores

                # Attach equipment metadata (region, project, type).
                df_cleaned = summary.merge(
                    df_equipment[['imei', 'region_name', 'project_name', 'type']],
                    on='imei',
                    how='left'
                )
                df_equipment_scores = scores.merge(
                    df_equipment[['imei', 'region_name', 'project_name', 'type']],
                    on='imei',
                    how='left'
                )

                # ====================================================================
                # Step 7: derive the serializable output structures
                # ====================================================================
                weekly_records, weekly_records_csv = _build_weekly_summary(df_result)
                region_scores_list, project_scores_list, region_scores_csv, project_scores_csv, score_sort_json = \
                    _aggregate_scores(df_equipment_scores)

                # Assemble the analysis payload for the client.
                analysis_payload = _build_analysis_payload(
                    data,
                    df_cleaned,
                    df_equipment_scores,
                    weekly_records,
                    region_scores_list,
                    project_scores_list,
                    score_sort_json
                )

                # Push the computed analysis before the (slow) AI generation.
                await ws.send_json(analysis_payload)

                # ====================================================================
                # Step 8: AI report generation
                # ====================================================================
                # Build the LLM prompt from the aggregated tables.
                query = gen_query({
                    "summary": summary.to_csv(sep='|', index=False),  # per-device summary metrics
                    "region_scores_csv": region_scores_csv,  # region scores
                    "project_scores_csv": project_scores_csv,  # project scores
                    "weekly_records": weekly_records_csv,  # weekly metrics
                    "after_sale_records": df_ticket.to_csv(sep='|', index=False),  # repair tickets
                    "project_count": len(project_scores_list),
                    "region_count": len(region_scores_list),
                    "equipment_count": len(df_equipment_scores)
                })
                #
                model = "o3"
                ai_result = await invoke_laozhang_api(
                    query,
                    ws,
                    system_prompt="你是物业保洁公司的运营分析顾问。目标：基于提供的数据，识别关键问题、"
                                  "定位可能原因，并给出可执行的管理行动与下周跟踪清单，帮助提升设备利用与服务质量。",
                    model=model
                )

                # ====================================================================
                # Step 9: persist the result as the cache entry
                # ====================================================================
                # The stored document carries "term" and "time_range" (the latter
                # set by _build_analysis_payload), matching the Step 3 lookup.
                # insert_one() also adds "_id" to analysis_payload in place, but
                # the payload was already sent, so the client never sees it.
                analysis_payload["ai_result"] = ai_result
                analysis_payload["memberId"] = data["memberId"]
                analysis_payload["term"] = data["term"]
                db.equipment_analysis.insert_one(analysis_payload)

    except WebSocketDisconnect as e:
        # Client disconnected — the normal way this receive loop ends.
        # logger.info(f"WS disconnected: code={e.code}")
        print(f"WebSocket断开连接: {e}")
        pass

    except Exception as e:
        # Business error: report it over the socket if it is still open.
        print(f"处理WebSocket请求时发生错误: {e}", flush=True)

        if ws.application_state != WebSocketState.DISCONNECTED:
            try:
                await ws.send_json({
                    "type": "error",
                    "code": "INTERNAL_ERROR",
                    "message": f"服务器内部错误: {str(e)}"
                })
                await ws.close(code=1011)  # 1011 = Internal Server Error
            except RuntimeError:
                pass

    finally:
        # Final cleanup: close only if still connected, avoiding the
        # RuntimeError a second close() would raise.
        if ws.application_state != WebSocketState.DISCONNECTED:
            try:
                await ws.close()
            except RuntimeError:
                pass


@ai_router_equipment.post("/equipmentExport")
async def analysis(data: dict):
    """
    Export the cached equipment analysis as a PDF report and upload it to OSS.

    Looks up the cached analysis with the same "term"/"start_time"/"end_time"
    keys used by the WebSocket endpoint (plus "memberName"), renders it to
    HTML, converts it to PDF with wkhtmltopdf, uploads the file, and records
    the report in MongoDB.

    NOTE(review): `complete_stats` and `generation_duration` are read below
    but never assigned in this function and are not among this module's
    visible imports — reaching the report-record section raises NameError
    unless they are defined elsewhere in the module; confirm.
    NOTE(review): `backup_and_upload_with_retry`, `create_report_record` and
    `save_report_to_mongodb` are likewise not visible in the imports —
    presumably defined elsewhere in this file; verify.
    NOTE(review): when no cached analysis exists, `analysis_history` is None
    and `analysis_history['ai_result']` raises TypeError — consider an
    explicit "not found" response.
    """
    # Cache key: exact company term plus exact start/end strings.
    param = {
        "term": data["term"],
        "time_range": {
            "start": data["start_time"],
            "end": data["end_time"]
        }
    }
    member_name = data["memberName"]
    start_time = data["start_time"]
    end_time = data["end_time"]
    analysis_history = db.equipment_analysis.find_one(param)
    tables_for_report = generate_report_tables(analysis_history, data['memberName'])
    html_content = render_final_report(
        tables_for_report, analysis_history['ai_result'], member_name, start_time, end_time
    )
    print(html_content)
    with tempfile.NamedTemporaryFile(suffix='.pdf', delete=False) as temp_pdf:
        temp_pdf_path = temp_pdf.name
        print(f"📂 临时PDF路径: {temp_pdf_path}")

        # wkhtmltopdf rendering options: A4 portrait, UTF-8, local assets allowed.
        options = {
            'encoding': 'UTF-8',
            'custom-header': [('Accept-Charset', 'UTF-8')],
            'enable-local-file-access': None,
            'page-size': 'A4',
            'margin-top': '0.75in',
            'margin-right': '0.75in',
            'margin-bottom': '0.75in',
            'margin-left': '0.75in',
            'orientation': 'Portrait',
            'print-media-type': None
        }

        print("🔧 正在生成PDF...")
        config = pdfkit.configuration(wkhtmltopdf=pdfkit_path)
        pdfkit.from_string(html_content, temp_pdf_path, configuration=config, options=options)
        print("✅ PDF生成成功")

        # Backup locally and upload to OSS (with retries).
        print("☁️ 开始备份和上传...")
        filename = f"bangni/report/{data['memberName']}-设备工作分析报告V3-{datetime.now().strftime('%Y%m%d%H%M%S%f')[:-3]}.pdf"

        upload_success, local_path, file_size, upload_error = backup_and_upload_with_retry(
            temp_pdf_path, filename, max_retries=3
        )

        if upload_success:
            print(f"✅ 上传成功，文件大小: {file_size} 字节")
        else:
            print(f"❌ 上传失败: {upload_error}")

        # Remove the temporary PDF (best effort).
        try:
            os.unlink(temp_pdf_path)
            print("🗑️ 临时文件清理完成")
        except Exception as cleanup_error:
            print(f"⚠️ 临时文件清理失败: {cleanup_error}")

    # 8. Public OSS URL of the uploaded report.
    # NOTE(review): the URL ends with the literal text "(unknown)" instead of
    # interpolating `filename` — almost certainly a bug; confirm the intended
    # object key.
    oss_url = f'https://landiwulian.oss-cn-shenzhen.aliyuncs.com/(unknown)' if upload_success else None

    print("📝 创建报告记录...")
    report_record = create_report_record(
        member_id=data.get('memberId'),
        member_name=member_name,
        report_type=complete_stats['report_info']['type'],
        start_time=start_time,
        end_time=end_time,
        local_path=local_path,
        oss_url=oss_url,
        file_size=file_size,
        data_structure=complete_stats['report_info'],
        request_data=data
    )

    report_record['generation_duration_seconds'] = generation_duration
    report_record['upload_status'] = 'success' if upload_success else 'failed'
    report_record['upload_error'] = upload_error if upload_error else None

    # 9. Persist the report record.
    print("💾 保存报告记录到MongoDB...")
    save_report_to_mongodb(report_record)
    print("✅ 报告记录保存成功")

    # 10. Response payload.
    result = {
        'status': 1,
        'message': '成功',
        # 'url': oss_url or '',
        # # 'report_id': report_record['report_id'],
        # 'local_path': local_path,
        # # 'file_size_mb': report_record['file_size_mb'],
        # 'upload_success': upload_success,
        # 'report_info': {
        #     'type': complete_stats['report_info']['type'],
        #     'focus': complete_stats['report_info']['focus'],
        #     'devices': complete_stats['basic_info']['total_devices'],
        #     'regions': complete_stats['basic_info']['total_regions'],
        #     'projects': complete_stats['basic_info']['total_projects'],
        #     # 'period_days': report_record['stats_period_days'],
        #     'generation_time': generation_duration
        # }
    }

    return result


def gen_query(param: Optional[dict] = None):
    """
    Build the Chinese-language LLM prompt from pre-computed statistics.

    Parameters:
        param: dict with the keys region_count, project_count,
            equipment_count, summary, region_scores_csv, project_scores_csv,
            weekly_records and after_sale_records. Missing keys render as
            "None" inside the prompt.

    Returns:
        str: the full prompt text (content is part of the AI contract and
        is kept byte-for-byte).

    Fix: the original dereferenced ``param.get`` without guarding the
    declared ``None`` default, so ``gen_query()`` raised AttributeError.
    """
    if param is None:
        param = {}
    query = (
        f"以下是分析所依据的计算后的数据"
        f"基本数据量：大区总数{param.get('region_count')}，项目总数{param.get('project_count')}，设备总数{param.get('equipment_count')}。要特别说明设备数量为统计时间区间内已经创建的设备。项目数量以存在设备为基础。"
        f"一些专业术语比如P10,P50,P90是指统计区间内综合得分在10%，50%，90%分位数的设备得分。要做一下解释,"
        f"说明是百分之10的设备低于这个分数，百分之10的设备高于这个分数等。"
        f"我们面向的是普通管理者可能不知道一些概念。一些字幕简写代称，还有他们意味着什么都在报告开始做好说明。通俗易懂"
        f"输出格式不要用表格或者csv格式。都用文字和标题即可。如果要有明细信息就进行换行。"
        f"A.设备衍生统计数据"
        f"{param.get('summary')}"
        f"B.区域设备统计区间内得分"
        f"{param.get('region_scores_csv')}"
        f"C.项目设备统计区间内得分"
        f"{param.get('project_scores_csv')}"
        f"D.周度统计指标（注：日达标可能不够的情况下周指标可能达标，因为他是设定了周达标。而规则标准类型为auto的则是没有设定标准的是默认的表达的时候请说默认俩小时不要说auto。统一设定为2小时。也可能没有周度指标，则直接为[]）"
        f"[{param.get('weekly_records')}]"
        f"E.售后维修记录"
        f"{param.get('after_sale_records')}"
        "请尽量详尽说明所有问题，不要超过5000字"
        "③ 业务背景与关键定义"

        "服务目标：兼顾利用率、出勤覆盖、合规达标与一致性，减少停机与未进场，保障项目满意度。"
        "重要字段含义提示（按你的口径理解）"
        "“达标时长”：每日合规的最低工作时长（示例：≥3 小时/天）。"
        "“达标天数”：当期内满足“达标时长”的天数。"
        "“一致性”：工作时长的稳定程度（数值越高越稳定）。"
        "“总时长达标率”：总工作时长与标准时长之比。"
        "综合评分子项：A/C/U/S/P/R 分别代表 可用性/出勤覆盖/强度利用/一致性/合规达标/可靠性。"
        "以下是几项指标的含义，你需要在最开始的总和评分里对以下这6个指标进行解释。排版的时候任何情况下正文不要用标题加粗，（不要用那个 '•' ）因为会影响我插件排版，导致我系统无法识别。。换行的时候要正确换行避免让我出现排版问题"
        "A.可用性：设备可工作天数占统计周期内可工作天数的比例，是由于设备本身没有在场（大多是进场时间晚于统计周期开始时间）或者故障导致停机"
        "C.出勤覆盖：设备实际工作天数占实际可用天数的比例。主要是反应人为原因或者其他原因造成的未使用。比如下雨，检查等。"
        "U.强度利用：设备实际工作时长与要求工作标准总时间的比例。反映了设备的使用时间是否在整个统计期间内达标的重要指标。"
        "S.一致性：设备工作时长的稳定性，一致性低则代表工作时间波动性较大，可能存在突击作业和开机混出勤的可能性"
        "P.合规达标：达到工作标准的天数占总可工作天数的比例"
        "R.可靠性：设备可工作的可靠性，主要反应是否经常损坏维修"
        "在讲这些得分的时候都要备注好他的含义。别只写一个A一致性，因为客户看不懂。不明确"
        "若数据口径不全，请在不臆测的前提下，以“数据不足提示”的方式标注。"
        "④ 分析任务（请严格按下列步骤进行）"
        "总体盘点：给出设备覆盖的区域/项目/类型分布概览，以及综合分的分位分布（P10/P50/P90）。（这里在分析时需要说明，这几个数的含义。有时候不容易理解。请友好的说明）"
        "统计：平均综合分、A/C/U/S/P/R 六项均值、可工作率、达标率、总时长达标率。"
        "问题清单（Top N）："
        "维度：设备、项目、区域。"
        "规则（从高到低优先）：综合分低 & S_一致性低；P_合规达标低；C_出勤覆盖低；A_可用性或R_可靠性异常；周达标连续不达标。"
        "根因线索（以证据为导向，不臆测）："
        "用字段关联举证，例如："
        "一致性低但总时长达标率高 ⇒ 疲劳/突击作业可能；"
        "可工作率高但出勤覆盖低 ⇒ 排班/进场问题；"
        "维修停机天数或维修天数>0 且 R 低 ⇒ 设备健康/保养问题；"
        "未进场天数>0 ⇒ 合同或现场协调问题。"
        "引用具体 IMEI、项目名、周区间（week_period）作为证据锚点。"
        "行动建议（可执行）："
        "将每条问题映射到可操作动作（见“行动库”），包含：负责人角色、建议时限、量化目标。"
        "下周跟踪清单：列出需重点跟踪的 IMEI/项目与观察指标（例如 S、P、C 及周达标 is_week_met）。"
        "auto代表默认规则也就是default，每天工作两个小时为达标。因为没有设定工作标准。你表达的时候不要说随机或者auto 而要说默认俩小时"
        "数据质量与边界：指出缺失/异常值、样本不足、极端值对结论的影响。"
        "管理摘要（给非技术经理）：≤8 条 bullet，总结影响最大且最易落地的事项与ROI。"
        "⑤ 输出格式（请严格遵循）"
        "请以markdown格式来输出。就是带markdown标记 # ## ### 这样的。"
        "而不是带markdown```。我我想要美观一些"
        "用中文输出；数字保留两位小数；引用具体 IMEI / 项目 / 周期 作为证据；不要输出你的推理过程。"
        "结构如下（一级标题固定）："
        "一、管理摘要"
        "主要说清楚统计范畴有多少个大区，多少个项目。多少台设备，请好好的数一下数量别瞎说"
        "二、总体盘点"
        "三、问题清单（Top N，含证据）"
        "四、可能原因（证据导向）"
        "五、行动建议（责任人/时限/目标）"
        "六、下周跟踪清单"
        "七、数据质量与边界"

        # …
        # 
        # 同时给出机器可读的 JSON 摘要（字段示例）：
        # 
        # {
        #   "high_risk_devices": [{"imei":"...", "project":"...", "issues":["low_consistency","low_compliance"], "evidence":["S_一致性=47.00","P_合规达标=60.00","week_period=2025-07-28/2025-08-03 is_week_met=0"]}],
        #   "project_kpi": [{"project":"...", "score":92.22, "A":100.00, "C":95.08, "U":100.00, "S":47.00, "P":87.00, "R":100.00}],
        #   "actions": [{"target":"IMEI/项目","owner_role":"现场主管","deadline_days":7,"action":"调整排班/进场","goal":"C_出勤覆盖 ≥90%","kpi":["C","week_met"]}]
        # }

        "⑥ 行动库（将问题→行动快速映射）"
        "S_一致性低（波动大、突击作业）："
        "行动：优化班次配比（早/中/晚）、设定日下限提醒（≥达标时长）、引入周均衡考核；"
        "目标：S ≥ 70，周 is_week_met 连续 2 周为 1。"
        "P_合规达标低 / 达标天数不足："
        "行动：明确“达标时长”考核、上墙看板、异常日复盘制度；"
        "目标：P ≥ 85，达标天数≥ frequency×80%。"
        "C_出勤覆盖低（明明能工作但没进场/没排班）："
        "行动：核对合同工时与假日排班、进出场打卡联动设备开机；"
        "目标：C ≥ 90。"
        "A_可用性 / R_可靠性异常（维修/故障）："
        "行动：保养计划前置、备件到位、质保索赔复盘；"
        "目标：停机率下降 30%，R ≥ 90。"
        "未进场天数>0："
        "行动：项目沟通/合同补充条款，进场与验收挂钩；"
        "目标：未进场=0。"
        "⑦ 命名与展示规范"
        "所有比率与分数统一显示为百分制（保留两位小数）。"
        "表格请优先按“项目 → 设备数（降序） → 综合分（升序）”排序，便于管理者聚焦问题项目。"
        "每个结论都要能在数据中找到对应字段与值作为证据（例：IMEI 868... S_一致性=47.00，周 2025-07-28/2025-08-03 is_week_met=0）"

    )
    return query


def _build_weekly_summary(df_result: pd.DataFrame):
    """
    汇总周度规则执行情况，便于后续在周粒度展示达标统计。
    """
    df_week = df_result[df_result['rule_type'] == 'week']
    if df_week.empty:
        return [], ""

    weekly = df_week.copy()
    weekly["week_period"] = pd.to_datetime(weekly["date"]).dt.to_period("W-SUN")
    weekly = weekly.groupby(['imei', 'week_period'], as_index=False).agg(
        met_days=('is_met', 'sum'),
        counted_days=('is_met', 'count'),
        frequency=('frequency', 'max'),
    )
    weekly['is_week_met'] = (weekly['met_days'] >= weekly['frequency']).astype(int)
    weekly['week_period'] = weekly['week_period'].astype(str)
    return json.loads(weekly.to_json(orient='records', force_ascii=False)), weekly.to_csv(sep='|', index=False)


def _aggregate_scores(df_equipment_scores: pd.DataFrame):
    """
    生成区域/项目维度的平均得分，输出为易序列化的列表结构。
    """
    df_scores_region = df_equipment_scores.groupby('region_name', as_index=False).mean(numeric_only=True)
    df_scores_project = df_equipment_scores.groupby(['region_name', 'project_name'], as_index=False).mean(
        numeric_only=True
    )
    df_region_sort = df_scores_region.sort_values(by='综合分', ascending=False)
    df_project_top3 = df_scores_project.sort_values(by='综合分', ascending=False).head(3)
    df_project_bottom3 = df_scores_project.sort_values(by='综合分', ascending=True).head(3)
    df_equipment_top3 = df_equipment_scores.sort_values(by='综合分', ascending=False).head(3)
    df_equipment_bottom3 = df_equipment_scores.sort_values(by='综合分', ascending=True).head(3)
    score_sort_json = {
        "region": json.loads(df_region_sort.to_json(orient='records', force_ascii=False)),
        "project_top3": json.loads(df_project_top3.to_json(orient='records', force_ascii=False)),
        "project_bottom3": json.loads(df_project_bottom3.to_json(orient='records', force_ascii=False)),
        "equipment_top3": json.loads(df_equipment_top3.to_json(orient='records', force_ascii=False)),
        "equipment_bottom3": json.loads(df_equipment_bottom3.to_json(orient='records', force_ascii=False))
    }
    region_scores_json = json.loads(df_scores_region.to_json(orient='records', force_ascii=False))
    project_scores_json = json.loads(df_scores_project.to_json(orient='records', force_ascii=False))
    scores_region_csv = df_scores_region.to_csv(sep='|', index=False)
    scores_project_csv = df_scores_project.to_csv(sep='|', index=False)
    return region_scores_json, project_scores_json, scores_region_csv, scores_project_csv, score_sort_json


def _build_analysis_payload(
        request_payload: dict,
        df_summary: pd.DataFrame,
        df_equipment_scores: pd.DataFrame,
        weekly_records: list,
        region_scores: list,
        project_scores: list,
        score_sort_json: dict
) -> dict:
    """
    将各项数据整理成统一的 WebSocket 输出格式，方便前端和后续 AI 分析。
    """

    return {
        "type": "analysis_data",
        "time_range": {
            "start": request_payload.get('start_time'),
            "end": request_payload.get('end_time')
        },
        "summary": json.loads(df_summary.to_json(orient='records', force_ascii=False)),
        "monthly_scores": json.loads(df_equipment_scores.to_json(orient='records', force_ascii=False)),
        "weekly_summary": weekly_records,
        "region_scores": region_scores,
        "project_scores": project_scores,
        "score_sort": score_sort_json,
        "overall_score": json.loads(
            df_equipment_scores.mean(numeric_only=True).round(2).to_json(force_ascii=False)
        )
    }


async def run_data_analysis(df: pd.DataFrame):
    """
    Aggregate the per-device daily status table into one summary row per imei.

    Parameters:
        df: DataFrame with one row per (imei, day); must contain the columns
            rule_type, is_met, hours, day_status, runtime_hours,
            downtime_flag and in_repair.

    Returns:
        pd.DataFrame: per-device summary metrics. The Chinese column names
        are the contract consumed downstream and must not change.

    Fix: the ratio columns previously relied on fillna(0) alone, which does
    not catch the +inf produced by a non-zero numerator over a zero
    denominator (e.g. met days > 0 while normal days == 0); inf is now
    normalized to 0 as well, keeping the frame JSON-serializable.
    """
    x = 0.1  # hours; days at or below this are excluded from mean/std stats

    def _ratio(series: pd.Series) -> pd.Series:
        # Normalize a ratio column: inf (n/0) and NaN (0/0) both become 0.
        return series.replace([np.inf, -np.inf], np.nan).fillna(0)

    # Per-device aggregation of the daily rows.
    summary = df.groupby(['imei'], as_index=False).agg(
        rule_type=('rule_type', 'first'),
        met_days=('is_met', 'sum'),  # days meeting the standard
        met_hours=('hours', 'max'),  # required daily hours (max over rules)
        counted_days=('imei', 'size'),  # days actually counted
        normal_days=('day_status', lambda s: (s == '正常').sum()),  # normal days
        all_worktime_hours=('runtime_hours', lambda s: round(s.sum(), 2)),  # total runtime
        un_create_days=('day_status', lambda s: (s == '未进场').sum()),  # not-on-site days
        repair_down_days=('downtime_flag', lambda d: d.sum()),  # repair downtime days
        repair_days=('in_repair', lambda d: d.sum()),  # days under repair
        runtime_work_mean=('runtime_hours', lambda s: np.round(s[s > x].mean(), 2)),  # mean on worked days
        runtime_mean=('runtime_hours', lambda s: np.round(s.mean(), 2)),  # mean over all days
        runtime_std=('runtime_hours', lambda s: np.round(s[s > x].std(ddof=0), 2)),
        consistency=('runtime_hours', lambda s: np.round((
            1 - (s[s > x].std(ddof=0) / s[s > x].mean()) if s[s > x].mean() else float('nan')
        ), 2)),
        open_days=('runtime_hours', lambda s: np.round(s[s > 0].count(), 2)),
    )
    summary['达标率'] = _ratio(summary['met_days'] / summary['normal_days']).round(2)
    # Positional rename to the Chinese headers consumed by the report/AI layer.
    summary.columns = ['imei', '规则标准类型', '达标天数', '达标时长', '实际统计天数', '正常天数', '总工作时间', '未进场天数', '维修停机天数', '维修天数',
                       '出勤日运行时间均值', '所有运行时间均值', '运行时间标准差', '一致性', '出勤天数', '达标率']
    summary['可工作天数'] = summary['正常天数'] + summary['维修天数'] - summary['维修停机天数']
    summary['可工作率'] = _ratio(summary['可工作天数'] / summary['实际统计天数']).round(2)
    summary['总时长达标率'] = _ratio(summary['总工作时间'] / (summary['可工作天数'] * summary['达标时长'])).round(2)
    return summary


def clamp01(x):
    """Clip a scalar or array-like into [0, 1]; NaN propagates unchanged."""
    return np.clip(x, 0, 1)


def monthly_scores(summary: pd.DataFrame,
                   weights=None,
                   min_attend_days=5):
    """
    Score each device on six dimensions (0-100) plus a weighted composite.

    Input: the per-imei summary table produced by run_data_analysis.
    Output: one row per device with A/C/U/S/P/R scores and '综合分',
    sorted best-first by the composite.
    """
    if weights is None:
        # Default weighting: compliance and coverage/utilization dominate.
        weights = dict(A=0.1, C=0.25, U=0.25, S=0.05, P=0.3, R=0.05)

    data = summary.copy()

    # A: availability — share of days the device could have worked.
    avail_score = 100 * clamp01(data['可工作率'].fillna(0).to_numpy())

    # C: attendance coverage among workable days (0 workable -> 0).
    workable = data['可工作天数'].replace(0, np.nan)
    coverage_score = 100 * clamp01((data['出勤天数'] / workable).fillna(0).to_numpy())

    # U: intensity/utilization — total-hours compliance ratio, capped at 1.
    utilization_score = 100 * clamp01(data['总时长达标率'].fillna(0).to_numpy())

    # S: consistency, linearly discounted for small samples
    # (1 attended day -> factor 0, min_attend_days -> factor 1).
    consistency = clamp01(data['一致性'].fillna(0).to_numpy())
    attended = data['出勤天数'].fillna(0).to_numpy()
    sample_factor = clamp01((attended - 1) / (min_attend_days - 1))
    stability_score = 100 * (consistency * sample_factor)

    # P: compliance — share of days meeting the work standard.
    compliance_score = 100 * clamp01(data['达标率'].fillna(0).to_numpy())

    # R: reliability — reverse-scored from repair/downtime ratios.
    total_days = data['实际统计天数'].replace(0, np.nan)
    downtime_ratio = (data['维修停机天数'] / total_days).fillna(0).to_numpy()
    repair_ratio = (data['维修天数'] / total_days).fillna(0).to_numpy()
    reliability_score = 100 * clamp01(1 - (0.7 * downtime_ratio + 0.3 * repair_ratio))

    radar = pd.DataFrame({
        'imei': data['imei'],
        'A_可用性': avail_score,
        'C_出勤覆盖': coverage_score,
        'U_强度利用': utilization_score,
        'S_一致性': stability_score,
        'P_合规达标': compliance_score,
        'R_可靠性': reliability_score,
    })

    # Composite: weighted sum of the six dimension scores.
    score_cols = ['A_可用性', 'C_出勤覆盖', 'U_强度利用', 'S_一致性', 'P_合规达标', 'R_可靠性']
    weight_vec = np.array([weights[k] for k in ('A', 'C', 'U', 'S', 'P', 'R')])
    radar['综合分'] = (
            radar[score_cols].to_numpy() @ weight_vec.reshape(-1, 1)
    ).ravel().round(2)

    return radar.sort_values('综合分', ascending=False)


async def get_after_sales_df(data, imei_list):
    """
    Fetch after-sales (repair) tickets for the window and flatten them into
    a DataFrame with columns [imei, start_time, end_time]; empty frame when
    no device has tickets. Timestamps are made tz-naive for later merges.
    """
    documents = await get_after_sale_records(data['start_time'], data['end_time'], imei_list)
    if not documents:
        return pd.DataFrame()

    rows = [
        {
            "imei": doc["imei"],
            "start_time": order.get("createDate"),   # ticket opened
            "end_time": order.get("completeDate"),   # missing -> None (still open)
        }
        for doc in documents
        for order in doc["after_sale_records"]
    ]
    tickets = pd.DataFrame(rows)
    for column in ("start_time", "end_time"):
        tickets[column] = pd.to_datetime(tickets[column], errors='coerce').dt.tz_localize(None)
    return tickets


async def get_df_all(data, df_equipment):
    """
    Build the full (imei x calendar-day) cross join for the analysis window.

    Returns (df_all, end_time, start_time); dates are tz-naive for merging.
    """
    start_time = isoparse(data['start_time'])
    end_time = isoparse(data['end_time'])
    calendar = pd.date_range(start=start_time, end=end_time, freq='D')
    df_all = (
        pd.MultiIndex
        .from_product([df_equipment['imei'], calendar], names=["imei", "date"])
        .to_frame(index=False)
    )
    df_all["date"] = pd.to_datetime(df_all["date"]).dt.tz_localize(None)
    return df_all, end_time, start_time


async def init_equipment_and_runtime_data(data):
    """
    Load the equipment roster plus the per-day runtime rows for the window.

    Returns (df_equipment, df_run_per_day, imei_list).
    """
    # Equipment created before the window end. The query body is tightly
    # coupled to the ES schema — acknowledged design debt, to be revisited.
    cutoff = DateTimeCombiner.parse_iso_to_date(data['end_time']).strftime("%Y-%m-%d %H:%M:%S")
    equipment_list = await get_equipment_list({
        "must": [
            {"term": data['term']},
            {"range": {"createdTime": {"lte": cutoff}}},
        ]
    })
    # The IMEI list drives every follow-up query, which keeps the rest of
    # the pipeline flexible.
    imei_list = [item['imei'] for item in equipment_list]
    df_equipment = pd.DataFrame(equipment_list)

    # Per-day runtime rows, with dates normalized to tz-naive timestamps so
    # they merge cleanly with the calendar cross join.
    df_run_per_day = await get_run_time_per_day(data.get('start_time'), data.get('end_time'), imei_list)
    df_run_per_day["date"] = pd.to_datetime(df_run_per_day["date"]).dt.tz_localize(None)
    return df_equipment, df_run_per_day, imei_list


async def get_run_rule_dataframes(df_run_per_day, imei_list):
    """
    Attach each device's configured work-standard rule to its daily rows.

    When no rules are configured at all, fall back to the default rule:
    2 hours/day, frequency 1, rule_type 'auto'.
    """
    rules = await get_work_roles(imei_list)
    if not rules:
        return df_run_per_day.assign(hours=2.0, frequency=1.0, rule_type='auto')
    return df_run_per_day.merge(pd.DataFrame(rules), on=['imei'], how='left')


async def consistency_compute(df_run_per_day):
    """
    Per-device runtime stability: 1 minus the coefficient of variation
    (population std / mean) of daily runtime hours.
    """
    per_device = df_run_per_day.groupby('imei')['runtime_hours']
    stats = per_device.agg(mean='mean', std=lambda hours: hours.std(ddof=0))
    stats['consistency'] = 1 - stats['std'] / stats['mean']
    return stats


async def get_work_roles(imei_list):
    """
    Flatten the configured work-standard rules into one record per
    (imei, rule): {"imei", "hours", "frequency", "rule_type": "day"|"week"}.

    Fix: the original reused the loop variable ``rule`` for the inner
    day-rule loop, clobbering the outer rule document. After iterating
    ``ruleOfDay``, the subsequent ``rule.get('ruleOfWeek')`` was evaluated
    on the last inner day-rule dict instead of the document, so week rules
    were silently skipped whenever a document carried both kinds.
    """
    rules = await get_work_rules(imei_list)
    rules_result = []

    for doc in rules:
        # Daily standards: hours per day, frequency = required days.
        for day_rule in doc.get('ruleOfDay') or []:
            for device in day_rule['devices']:
                rules_result.append({
                    "imei": device['imei'],
                    "hours": day_rule['hours'],
                    "frequency": day_rule['frequency'],
                    "rule_type": 'day'
                })
        # Weekly standards: evaluated over the whole week.
        for week_rule in doc.get('ruleOfWeek') or []:
            for device in week_rule['devices']:
                rules_result.append({
                    "imei": device['imei'],
                    "hours": week_rule['hours'],
                    "frequency": week_rule['frequency'],
                    "rule_type": 'week'
                })
    return rules_result


async def get_after_sale_records(start_time, end_time, imei_list):
    """
    Fetch after-sales (repair) orders overlapping [start_time, end_time].

    Joins b_equipment_entity to o_after_sale_maintain_order on the entity id
    and keeps only orders whose [createDate, completeDate] interval
    intersects the window; devices without any matching order are dropped.

    Returns the raw aggregation result: one document per device containing
    an "after_sale_records" array of {createDate, completeDate}.
    """
    pipeline = [
        {
            '$match': {
                'imei': {'$in': imei_list}
            }
        },
        {
            '$lookup': {
                'from': 'o_after_sale_maintain_order',
                'let': {'eid': '$id'},  # entity id of the device (joined to equipmentId below)
                'pipeline': [
                    {
                        '$match': {
                            '$expr': {
                                '$and': [
                                    # join condition: order.equipmentId == device.id
                                    {'$eq': ['$equipmentId', '$$eid']},
                                    # interval overlap, part 1:
                                    # order start before the window end
                                    {'$lt': ['$createDate', {'$toDate': end_time}]},

                                    # interval overlap, part 2: order end after
                                    # the window start; a missing completeDate
                                    # falls back to expirationTime (i.e. the
                                    # order is treated as still open until then)
                                    {
                                        '$gt': [
                                            {
                                                '$ifNull': [
                                                    '$completeDate', '$expirationTime'
                                                    # {'$dateFromString':
                                                    #      {'dateString': '9999-12-31T23:59:59Z'}
                                                    #  }
                                                ]
                                            },
                                            {'$toDate': start_time}
                                        ]
                                    }
                                ]
                            }
                        }
                    },
                    {'$project': {
                        'createDate': 1,
                        'completeDate': {'$ifNull': ["$completeDate", "$expirationTime"]}
                    }}
                ],
                'as': 'after_sale_records'
            },
        },
        {
            # keep only devices with at least one overlapping order
            "$match": {
                "$expr": {"$gt": [{"$size": "$after_sale_records"}, 0]}
            }
        }
    ]
    maintenance_data = list(db['b_equipment_entity'].aggregate(pipeline))
    return maintenance_data


async def get_run_time_per_day(start_time, end_time, imei_list):
    """
    Fetch the per-day runtime records from ES and normalize the date column.

    Returns a DataFrame with at least [imei, date, runtime_hours]; 'date'
    holds datetime.date values.

    Fix: with no runtime records, pd.DataFrame([]) has no 'date' column and
    the normalization below raised KeyError; an empty frame carrying the
    columns the downstream merges rely on is returned instead.
    """
    run_log = await get_iot_run_data(start_time, end_time, imei_list)
    # NOTE(review): assumes get_iot_run_data returns a list of records —
    # confirm against the DAO.
    if not run_log:
        return pd.DataFrame(columns=['imei', 'date', 'runtime_hours'])
    df_run = pd.DataFrame(run_log)
    # Truncate startTime-derived values to calendar dates for daily grouping.
    df_run['date'] = pd.to_datetime(df_run['date']).dt.date
    return df_run


@ai_router_equipment.post("/equipment_v2")
async def equipment_statistics_v2(data: dict) -> dict:
    """
    Equipment data analysis, V2 (refactor in progress).

    Target architecture:
    1. data fetch -> statistics -> table generation -> AI analysis ->
       template rendering -> PDF generation
    2. clear data flow with separated responsibilities
    3. standardized statistics; the AI analyzes the table content
    """
    # Basic request info.
    title = data.get('title')
    start_time, end_time, total_days = await get_data_range_from_utc_date_str(data)

    # Raw equipment list; bail out early when the query matches nothing.
    raw_equipment = await get_equipment_list(data.get('query_bool'))
    if not raw_equipment:
        print("❌ 未找到设备数据")
        return {'status': 0, 'message': '未找到设备数据', 'url': ''}

    equipment_df = pd.DataFrame(raw_equipment)
    device_imeis = equipment_df['imei'].tolist()

    if not equipment_df.empty:
        print(f"📋 设备数据样例: {equipment_df.iloc[0].to_dict()}")

    return await get_iot_run_data(data.get('startDate'), data.get('endDate'), imei_list=device_imeis)


def build_device_daily_status(
        tickets: pd.DataFrame,  # cols: [imei, start_time, end_time]
        runtime: pd.DataFrame,  # cols: [imei, date, runtime_hours] (+ optional frequency/rule_type/hours)
        base_info: pd.DataFrame,  # cols: [imei, createDate]
        period_start,
        period_end,
        run_ok_threshold: float = 0.5,
) -> pd.DataFrame:
    """
    Build a per-device, per-day status table for the reporting period.

    For every device in ``base_info`` and every local calendar day between
    ``period_start`` and ``period_end`` (inclusive), the result records
    whether the device had entered the site, whether it was under an
    after-sale ticket, whether that day counts as downtime, and whether its
    daily runtime target was met.

    All timestamps are normalised identically: parsed as UTC, converted to
    Asia/Shanghai, stripped of tz info, floored to the day — so ticket,
    runtime and entry dates are always compared on the same local-day axis.

    Args:
        tickets: after-sale tickets [imei, start_time, end_time]; open
            tickets (missing end_time) are treated as lasting to period end.
        runtime: daily runtime [imei, date, runtime_hours]; may also carry
            rule columns [frequency, rule_type, hours]. May be None/empty.
        base_info: device master data [imei, createDate] (site-entry date).
        period_start: period start (any pandas-parseable timestamp).
        period_end: period end (any pandas-parseable timestamp).
        run_ok_threshold: max runtime_hours for a repair day to count as
            downtime (default 0.5).

    Returns:
        DataFrame sorted by [imei, date] with columns
        [imei, date, day_status, in_repair, downtime_flag, runtime_hours,
         entry_date, frequency, rule_type, hours, is_met].
    """
    LOCAL_TZ = "Asia/Shanghai"  # single timezone for every date comparison

    # --- 0) Period boundaries: parse as UTC -> local tz -> drop tz -> floor to day ---
    smin = (pd.to_datetime(period_start, errors="coerce", utc=True).floor("D")
            .tz_convert(LOCAL_TZ).tz_localize(None)).floor("D")
    smax = (pd.to_datetime(period_end, errors="coerce", utc=True).floor("D")
            .tz_convert(LOCAL_TZ).tz_localize(None)).floor("D")

    # Device universe is driven by base_info
    devs = pd.Index(pd.unique(base_info["imei"]), name="imei")

    # --- 1) Entry date per device, normalised the same way ---
    bi = base_info.copy()
    bi = bi[["imei", "createDate"]].drop_duplicates("imei")
    bi["entry_date"] = pd.to_datetime(bi["createDate"], errors="coerce", utc=True) \
        .dt.tz_convert(LOCAL_TZ).dt.tz_localize(None).dt.floor("D")
    bi = bi[["imei", "entry_date"]]

    # --- 2) Tickets: normalise -> clip to period -> turn into +1/-1 events ---
    t = tickets.copy()
    if not t.empty:
        t["start_time"] = pd.to_datetime(t["start_time"], errors="coerce", utc=True) \
            .dt.tz_convert(LOCAL_TZ).dt.tz_localize(None)
        t["end_time"] = pd.to_datetime(t["end_time"], errors="coerce", utc=True) \
            .dt.tz_convert(LOCAL_TZ).dt.tz_localize(None)

        # Open tickets run to period end (smax is already tz-naive)
        t["end_time"] = t["end_time"].fillna(smax)

        # Day granularity
        t["start_date"] = t["start_time"].dt.floor("D")
        t["end_date"] = t["end_time"].dt.floor("D")

        # Clip both bounds to the period so every interval shares the axis
        t["start_date"] = t["start_date"].clip(lower=smin, upper=smax)
        t["end_date"] = t["end_date"].clip(lower=smin, upper=smax)

        # Keep only intervals that intersect the period
        t = t[t["end_date"] >= t["start_date"]]

        # Events for a closed interval: +1 at S', -1 on the day after E'
        start_ev = t[["imei", "start_date"]].rename(columns={"start_date": "date"}).assign(delta=1)
        end_ev = t[["imei", "end_date"]].rename(columns={"end_date": "date"})
        end_ev["date"] = (end_ev["date"] + pd.Timedelta(days=1)) \
            .clip(upper=smax + pd.Timedelta(days=1))
        end_ev["delta"] = -1

        events = (pd.concat([start_ev, end_ev], ignore_index=True)
                  .groupby(["imei", "date"], as_index=False)["delta"].sum())
    else:
        events = pd.DataFrame(columns=["imei", "date", "delta"])

    # --- 3) Devices x full calendar; prefix-sum of events -> in_repair ---
    dates = pd.date_range(smin, smax, freq="D", name="date")
    full_idx = pd.MultiIndex.from_product([devs, dates], names=["imei", "date"])

    delta = events.set_index(["imei", "date"])["delta"] if not events.empty else pd.Series(dtype="int64")
    active_cnt = delta.reindex(full_idx, fill_value=0).groupby(level=0).cumsum()
    daily = (active_cnt > 0).to_frame("in_repair").reset_index()

    # --- 4) Merge the runtime table (dates normalised to the local day) ---
    if runtime is None or runtime.empty:
        # BUGFIX: the rest of the function reads 'runtime_hours'; the
        # original set 'run_minutes' here and crashed at the downtime step.
        daily["runtime_hours"] = 0.0
    else:
        r = runtime.copy()
        r["date"] = pd.to_datetime(r["date"], errors="coerce", utc=True) \
            .dt.tz_convert(LOCAL_TZ).dt.tz_localize(None).dt.floor("D")

        daily = daily.merge(r, on=["imei", "date"], how="left")
        daily["runtime_hours"] = daily["runtime_hours"].fillna(0)

    # BUGFIX: rule columns may be absent (runtime missing or without rule
    # fields); create them so the final projection never raises KeyError.
    for col in ("frequency", "rule_type", "hours"):
        if col not in daily.columns:
            daily[col] = np.nan

    # "Under repair AND available runtime <= threshold" -> count as downtime
    daily["downtime_flag"] = daily["in_repair"] & (daily["runtime_hours"] <= run_ok_threshold)

    # --- 5) Merge entry dates and derive the final day label ---
    daily = daily.merge(bi, on="imei", how="left")
    daily["entered"] = daily["entry_date"].notna() & (daily["date"] >= daily["entry_date"])

    daily["day_status"] = np.select(
        [
            ~daily["entered"],
            daily["downtime_flag"],
            daily["in_repair"] & ~daily["downtime_flag"]
        ],
        [
            "未进场",
            "售后中（计停机）",
            "售后中（不计停机）"
        ],
        default="正常"
    )
    out_cols = ["imei", "date", "day_status", "in_repair", "downtime_flag", "runtime_hours",
                "entry_date", "frequency", "rule_type", "hours"]
    df_result = daily.loc[:, out_cols].sort_values(["imei", "date"])
    # Fill rule defaults and flag whether the daily target was reached
    df_result['rule_type'] = df_result['rule_type'].fillna('auto')
    df_result['hours'] = df_result['hours'].fillna(2.0)
    df_result['frequency'] = df_result['frequency'].fillna(1.0)
    df_result['is_met'] = (df_result['hours'] <= df_result['runtime_hours']).astype(int)
    return df_result


def update_imei_list(coll_name="s_project_rules"):
    """
    Scan a rules collection, collect the distinct device imeis referenced by
    each document's ruleOfDay/ruleOfWeek entries, and write them back as a
    deduplicated ``imeiList`` field on the same document.

    Args:
        coll_name: MongoDB collection name to process.

    Returns:
        int: number of documents updated. (BUGFIX: the original tracked this
        count but never returned it.)
    """
    coll = db[coll_name]
    cursor = coll.find({}, {"ruleOfDay.devices.imei": 1, "ruleOfWeek.devices.imei": 1})

    updated_count = 0
    for doc in cursor:
        imeis = set()
        # Both rule arrays share the same nested layout: rule -> devices -> imei
        for rule_key in ("ruleOfDay", "ruleOfWeek"):
            for rule in doc.get(rule_key, []):
                for dev in rule.get("devices", []):
                    if dev.get("imei"):
                        imeis.add(dev["imei"])

        # Persist the deduplicated list back onto the document
        coll.update_one({"_id": doc["_id"]}, {"$set": {"imeiList": list(imeis)}})
        updated_count += 1

    return updated_count


async def get_work_rules(imei_list: list):
    """
    Load effective project work rules that reference any of the given imeis.

    Args:
        imei_list: device imeis matched against each rule document's
            ``imeiList`` field.

    Returns:
        Up to 1000 matching rule documents, projected to
        project/member/ruleOfDay/ruleOfWeek/imeiList (``_id`` excluded).
    """
    rule_filter = {
        "imeiList": {"$in": imei_list},
        "effective": True,
    }
    wanted_fields = {"project": 1, "member": 1, "ruleOfDay": 1, "ruleOfWeek": 1, "_id": 0, "imeiList": 1}
    return db.s_project_rules.find(rule_filter, wanted_fields).to_list(1000)


def backup_and_upload_with_retry(pdf_path: str, oss_file_name: str, max_retries: int = 3):
    """
    Back the file up locally, then upload it to OSS with bounded retries.

    The local backup happens first so the report survives even when OSS is
    unreachable; upload failures are retried with increasing delays.

    Args:
        pdf_path: path of the PDF file to upload.
        oss_file_name: object name to use in OSS.
        max_retries: maximum number of OSS upload attempts.

    Returns:
        tuple: (success: bool, local_path: str, file_size: int,
                error_msg: str | None)
    """
    import os
    import shutil
    import time

    # 1. Local backup first
    local_dir = ensure_local_directory()
    local_filename = os.path.basename(oss_file_name)
    local_path = os.path.join(local_dir, local_filename)

    # Size of the source file (0 if missing/unreadable)
    file_size = get_file_size(pdf_path)

    try:
        shutil.copy2(pdf_path, local_path)
        log_upload_operation("LOCAL_BACKUP", local_path, True)
        print(f"文件已备份到本地: {local_path} (大小: {file_size} 字节)")
    except Exception as e:
        error_msg = f"本地备份失败: {str(e)}"
        log_upload_operation("LOCAL_BACKUP", local_path, False, error_msg)
        return False, local_path, file_size, error_msg

    # SECURITY: OSS credentials were hard-coded in source. Prefer environment
    # variables; the inline values remain only as a fallback and should be
    # rotated and removed from version control.
    access_key_id = os.environ.get('OSS_ACCESS_KEY_ID', 'LTAI5tJy1axoohNuyDZDVdi3')
    access_key_secret = os.environ.get('OSS_ACCESS_KEY_SECRET', 'KMC6Ebvn0SyUc3WFGuJbnPLaiveezh')

    # 2. Upload to OSS with retries
    last_error = None
    retry_delays = [1, 3, 5]  # seconds to wait between attempts

    for attempt in range(max_retries):
        try:
            print(f"尝试上传到OSS (第{attempt + 1}次)...")

            upload_file_to_oss_nosdk(
                access_key_id=access_key_id,
                access_key_secret=access_key_secret,
                bucket_name='landiwulian',
                endpoint='oss-cn-shenzhen.aliyuncs.com',
                object_name=oss_file_name,
                file_path=pdf_path
            )

            log_upload_operation("OSS_UPLOAD", oss_file_name, True)
            print(f"文件已成功上传至OSS: {oss_file_name}")
            return True, local_path, file_size, None

        except Exception as e:
            last_error = str(e)
            error_msg = f"OSS上传失败 (第{attempt + 1}次): {last_error}"
            print(error_msg)
            log_upload_operation("OSS_UPLOAD", oss_file_name, False, error_msg)

            # Wait before retrying unless this was the final attempt
            if attempt < max_retries - 1:
                delay = retry_delays[min(attempt, len(retry_delays) - 1)]
                print(f"等待{delay}秒后重试...")
                time.sleep(delay)

    # Every attempt failed; the caller still has the local backup
    final_error = f"OSS上传失败，已重试{max_retries}次。最后错误: {last_error}"
    print(final_error)
    print(f"文件已保存到本地: {local_path}")

    return False, local_path, file_size, final_error


def ensure_local_directory():
    """
    Ensure the local report directory exists, organised as base/YYYY/MM/DD.

    Also creates the shared ``upload_logs`` directory alongside it.

    Returns:
        str: directory path for today's reports; on failure, a best-effort
        fallback directory that is guaranteed to exist.
    """
    import os
    import tempfile
    from datetime import datetime

    # Today's date components drive the directory layout
    now = datetime.now()
    year = now.strftime("%Y")
    month = now.strftime("%m")
    day = now.strftime("%d")

    # Full paths under the configured report base directory
    today_dir = os.path.join(base_dir, year, month, day)
    logs_dir = os.path.join(base_dir, "upload_logs")

    try:
        os.makedirs(today_dir, exist_ok=True)
        os.makedirs(logs_dir, exist_ok=True)
        print(f"本地目录已创建: {today_dir}")
        return today_dir
    except Exception as e:
        print(f"创建本地目录失败: {str(e)}")
        # BUGFIX: the original returned base_dir/temp without creating it,
        # so the subsequent shutil.copy2 in the caller failed. Create the
        # fallback; if even that fails, use the system temp directory,
        # which always exists.
        fallback = os.path.join(base_dir, "temp")
        try:
            os.makedirs(fallback, exist_ok=True)
            return fallback
        except Exception:
            return tempfile.gettempdir()


def log_upload_operation(operation, file_path, success, error_msg=None):
    """
    Append an upload-operation record to the daily log file.

    Args:
        operation: operation type (e.g. "LOCAL_BACKUP", "OSS_UPLOAD").
        file_path: the file the operation acted on.
        success: whether the operation succeeded.
        error_msg: optional error details to append to the entry.
    """
    import os
    from datetime import datetime

    # CONSISTENCY FIX: log under base_dir like ensure_local_directory does.
    # The original hard-coded "/report/upload_logs", pointing at the
    # filesystem root instead of the configured report directory.
    log_dir = os.path.join(base_dir, "upload_logs")
    os.makedirs(log_dir, exist_ok=True)

    # One log file per calendar day
    log_file = os.path.join(log_dir, f"{datetime.now().strftime('%Y-%m-%d')}.log")

    # Build the log line
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    status = "SUCCESS" if success else "FAILED"
    log_entry = f"[{timestamp}] {operation} - {status} - {file_path}"

    if error_msg:
        log_entry += f" - ERROR: {error_msg}"

    log_entry += "\n"

    # Best-effort write: a logging failure must never break the upload flow
    try:
        with open(log_file, 'a', encoding='utf-8') as f:
            f.write(log_entry)
    except Exception as e:
        print(f"写入日志失败: {str(e)}")


def get_file_size(file_path):
    """
    Return the size of a file in bytes.

    Args:
        file_path: path of the file to inspect.

    Returns:
        int: file size in bytes; 0 when the file does not exist or the
        lookup fails for any reason.
    """
    try:
        import os
        return os.path.getsize(file_path) if os.path.exists(file_path) else 0
    except Exception as e:
        print(f"获取文件大小失败: {str(e)}")
        return 0
