from datetime import datetime
from concurrent.futures import ThreadPoolExecutor

# Project-local imports
from core.config import config, BASE_DIR
from core.dependencies import get_processor
from data_processor.processor import DataProcessor

from db.mongo_connection import save_upload_record, save_prediction_results, save_reviews
from db.client import get_db  # 假设你有获取异步数据库的依赖函数
from motor.motor_asyncio import AsyncIOMotorDatabase
from db.repositories.rbs import RBSRepository
from db.repositories.reviews import ReviewRepository
from db.repositories.upload import UploadRecordRepository
from db.repositories.mbrs import MBRSRepository

from models.rbs import RB  # 导入 Pydantic 模型
from models.reviews import Review
from models.uploadRecords import UploadRecord
from models.mbrs import MBR

# Third-party imports
import pandas as pd
import numpy as np
from fastapi import APIRouter, File, UploadFile, HTTPException, Depends, Path, status
from fastapi.responses import FileResponse
from pathlib import Path
from pymongo import UpdateOne
import logging

# Module-level singletons: API router, logger, worker pool, upload storage root.
router = APIRouter(prefix="/model-api", tags=["model-api"])
logger = logging.getLogger(__name__)
# NOTE(review): executor is created but never used in this file — presumably
# intended for offloading blocking model work via run_in_executor; confirm.
executor = ThreadPoolExecutor(max_workers=4)
# All per-upload result folders are created under this directory.
uploads_dir = BASE_DIR / "uploads_data"

def save_csv_with_gbk(
    data: pd.DataFrame,
    path: Path,
    *,
    encoding: str = "gbk",
    fallback_encoding: str = "gb18030",
) -> None:
    """Save *data* as a CSV file at *path*, with an encoding fallback.

    Tries *encoding* first; if any character cannot be encoded, retries with
    *fallback_encoding*. The defaults preserve the original behavior: GBK
    first, then GB18030 — a superset of GBK that covers all of Unicode — so
    downstream consumers expecting a Chinese codepage still get one.

    Args:
        data: DataFrame to serialize (written without the index).
        path: Destination file path.
        encoding: Preferred output encoding (keyword-only).
        fallback_encoding: Encoding used when *encoding* fails (keyword-only).
    """
    try:
        data.to_csv(path, index=False, encoding=encoding)
    except UnicodeEncodeError:
        # A partial file may have been written, but to_csv reopens with
        # mode="w" and truncates, so the retry fully overwrites it.
        data.to_csv(path, index=False, encoding=fallback_encoding)

@router.post("/process_xlsx/")
async def process_xlsx(
    file: UploadFile = File(...),
    db: AsyncIOMotorDatabase = Depends(get_db),  # injected async database handle
    processor: DataProcessor = Depends(get_processor)  # injected model operations
):
    """Process an uploaded XLSX file: predict per-row fraud and persist results.

    Pipeline: read the workbook, split rows with missing values, run the
    model, write two CSV files (results + rows with missing data) under a
    per-upload folder, then upsert into the upload-record, RBS, MBR and
    review collections.

    Returns a JSON envelope: code 200 on success; codes 666666-666668 on
    failure (the module's existing convention of returning rather than
    raising HTTP errors is preserved).
    """
    try:
        # Timestamp of this upload and a timestamp-derived id used as the
        # folder name and foreign key in every collection written below.
        # NOTE(review): timestamp ids can collide for concurrent uploads in
        # the same microsecond — consider uuid4 if that matters.
        upload_time = datetime.now()
        upload_id = str(upload_time.timestamp()).replace(".", "")
        folder_path = uploads_dir / upload_id
        folder_path.mkdir(parents=True, exist_ok=True)

        # Read the whole upload into memory and parse it as an Excel workbook.
        # Use the public io.BytesIO instead of the private pd.io.common alias.
        from io import BytesIO
        contents = await file.read()
        input_data = pd.read_excel(BytesIO(contents))

        # Pre-processing: split off rows with missing values, build features.
        mis_data, unmis_data = processor.mis_value(input_data)
        x_test, label, _ = processor.pre_manage(input_data)

        # Model prediction. Flatten once so a possible (n, 1)-shaped model
        # output cannot break the column assignments below.
        probability = processor.predict(x_test)
        prob_flat = probability.flatten()
        result_data = pd.concat([x_test, label], axis=1)  # features + label
        result_data["probability"] = prob_flat
        # Decision threshold 0.3: at or below -> 0 (not fraud), above -> 1.
        result_data["predictions"] = np.where(prob_flat <= 0.3, 0, 1)
        result_data["pre_level"] = processor.result_classifier(probability)

        # Carry the original identifying columns over from the rows that had
        # no missing values (same row order as x_test by construction).
        for col in ["CLLI_OID", "APP_AMT", "MBR_NO", "MBR_TYPE", "ID_CARD_NO", "POHO_NO"]:
            result_data[col] = unmis_data[col].values

        # Persist both result files (GBK with GB18030 fallback, see helper).
        save_csv_with_gbk(result_data, folder_path / "result.csv")
        save_csv_with_gbk(mis_data, folder_path / "mis_data.csv")

        # ---- Database writes ------------------------------------------------

        # Upload record (one document per upload).
        upload_record = UploadRecord(
            upload_id=upload_id,
            file_name=file.filename,
            upload_time=upload_time,
            has_return_data=not mis_data.empty,
            folder_path=str(folder_path)
        )
        await UploadRecordRepository(db).create_or_update(upload_record)

        # Bulk-upsert one RBS document per predicted row, keyed by rbId.
        rbs_repo = RBSRepository(db)
        bulk_ops = []
        for _, row in result_data.iterrows():
            rb = RB(
                rbId=row["CLLI_OID"],
                state=int(row["predictions"]),
                fraud=int(row["predictions"]),
                score=float(row["probability"]),
                level=int(row["pre_level"]),
                upload_id=upload_id,
                APP_AMT=float(row["APP_AMT"]),
                MBR_NO=row["MBR_NO"],
                POHO_NO=row["POHO_NO"],
                updateTime=upload_time,
                createTime=upload_time,
            )
            bulk_ops.append(
                UpdateOne(
                    filter={"rbId": rb.rbId},
                    # createTime is only written on insert ($setOnInsert), so
                    # re-uploads keep the original creation timestamp.
                    update={
                        "$set": rb.model_dump(exclude={"createTime"}),
                        "$setOnInsert": {"createTime": rb.createTime},
                    },
                    upsert=True,
                )
            )
        if bulk_ops:
            await rbs_repo.collection.bulk_write(bulk_ops)

        # Aggregate per-member (MBR) statistics, one upsert per member.
        mbr_repo = MBRSRepository(db)
        for mbr_no, group in result_data.groupby("MBR_NO"):
            first_row = group.iloc[0]

            # ID_CARD_NO may be NaN for some members; store the "no info"
            # placeholder the UI expects in that case.
            id_card_no = str(first_row["ID_CARD_NO"]) if pd.notna(first_row["ID_CARD_NO"]) else "无信息"

            mbr = MBR(
                MBR_TYPE=first_row["MBR_TYPE"],
                MBR_NO=mbr_no,
                ID_CARD_NO=id_card_no
            )

            # Cast to a native int: BSON cannot encode numpy integer scalars,
            # and sum() over a boolean Series yields one.
            fraud_count = int((group["predictions"] == 1).sum())
            rb_ids = group["CLLI_OID"].tolist()

            await mbr_repo.collection.update_one(
                {"MBR_NO": mbr.MBR_NO},
                {
                    "$set": {
                        "MBR_TYPE": mbr.MBR_TYPE,
                        "ID_CARD_NO": mbr.ID_CARD_NO,
                        "updateTime": upload_time,
                    },
                    "$inc": {
                        "all_history": len(rb_ids),    # total claims seen
                        "fraud_history": fraud_count,  # predicted-fraud claims
                    },
                    # $addToSet keeps rb_history duplicate-free across uploads.
                    "$addToSet": {"rb_history": {"$each": rb_ids}},
                },
                upsert=True,
            )

        # Create/refresh a review item for every predicted-fraud row only.
        review_repo = ReviewRepository(db)
        review_rows = result_data[result_data["predictions"] == 1].to_dict(orient="records")
        for row in review_rows:
            review = Review(
                rbId=row["CLLI_OID"],
                fraud=int(row["predictions"]),
                score=float(row["probability"]),
                level=int(row["pre_level"]),
                upload_id=upload_id,
                onlineReview=0,
                offlineReview=0,
                remark="",
                reviewFraud=2,  # 2 = not yet reviewed; presumably an enum — TODO confirm
                createTime=upload_time,
                updateTime=upload_time
            )
            await review_repo.create_or_update(review)

        return {
            "code": 200,
            "data": {
                "upload_id": upload_id,
                "has_missing_data": not mis_data.empty,
                "result_file": str(folder_path / "result.csv"),
                "missing_file": str(folder_path / "mis_data.csv") if not mis_data.empty else None
            },
            "message": "文件处理成功"
        }

    except UnicodeEncodeError as e:
        logger.error(f"编码错误: {e}")
        return {"code": 666667, "data": {}, "message": f"编码错误: {str(e)}"}
    except FileNotFoundError as e:
        logger.error(f"文件不存在: {e}")
        return {"code": 666668, "data": {}, "message": f"文件不存在: {str(e)}"}
    except Exception:
        # Catch-all boundary: log the traceback, return a generic envelope.
        logger.exception("文件处理异常")
        return {"code": 666666, "data": {}, "message": "服务器内部错误"}

@router.get("/download/{upload_id}/{filename}")
async def download_file(
    upload_id: str,
    filename: str
):
    """Safely download a generated CSV file for a given upload.

    The requested path is resolved and verified to stay inside the uploads
    directory, so a crafted upload_id/filename containing ".." segments (or
    a sibling directory sharing the uploads_dir prefix) cannot escape it —
    the previous str.startswith check was bypassable both ways.

    Raises:
        HTTPException: 404 when the file does not exist or lies outside
            the uploads directory (same response for both, to avoid
            leaking directory-layout information).
    """
    base_dir = uploads_dir.resolve()
    file_path = (uploads_dir / upload_id / filename).resolve()

    # Containment check: relative_to raises ValueError for paths outside base.
    try:
        file_path.relative_to(base_dir)
    except ValueError:
        raise HTTPException(status_code=404, detail="文件不存在")

    if not file_path.is_file():
        raise HTTPException(status_code=404, detail="文件不存在")

    return FileResponse(
        file_path,
        media_type="text/csv",
        filename=filename,
        headers={
            "Content-Disposition": f"attachment; filename={filename}",
            "Content-Type": "text/csv; charset=utf-8-sig"
        }
    )