import os
import uuid
import datetime
import shutil
import zipfile
import logging
from typing import Dict, List, Optional, Any

import aiohttp
import requests
from fastapi import FastAPI, HTTPException, Body
from pydantic import BaseModel
from starlette.middleware.cors import CORSMiddleware

from app.config import settings

# --------------- Configuration ----------------
# API_KEY = "dify-xxx"
# BASE_URL = "http://rag-admin.dcits.sg/v1"
# File extensions accepted for direct upload or inside an uploaded ZIP archive.
SUPPORTED_EXTS = {".pdf", ".docx", ".txt", ".md", ".html", ".csv", ".epub", ".json"}

# Shared bearer-token auth header used by every Dify API call below.
headers = {"Authorization": f"Bearer {settings.dify_api_key}"}

logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
logger = logging.getLogger(__name__)

# Title is user-facing (Chinese: "Upload files to Dify knowledge base").
app = FastAPI(title="上传文件到Dify知识库")

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers per the CORS spec — consider an explicit origin list.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# ----------------- Pydantic Model -----------------
class UploadRequest(BaseModel):
    """Request payload for uploading a remote file into a Dify dataset.

    Combines the file location (``file_url`` / ``file_name``) with the
    chunking, retrieval, rerank and embedding settings that are forwarded
    to the Dify API by ``DifyUploadService``.
    """

    file_url: str  # URL the file is downloaded from
    file_name: str  # local filename used when saving the download
    dataset_name: str  # name of the Dify dataset (knowledge base) to upload into
    separator: str = "\n"  # chunk separator used for segmentation
    max_tokens: int = 1024  # maximum tokens per chunk
    doc_language: Optional[str] = "English"
    search_method: Optional[str] = "hybrid_search"
    remove_extra_spaces: bool = True
    remove_urls_emails: bool = False
    overlap_tokens: Optional[int] = 50  # token overlap between adjacent chunks
    topk: int  # retrieval top_k forwarded to update_dataset
    score: float  # score threshold forwarded to update_dataset
    # BUG FIX: the trailing commas previously on the three fields below made
    # each default a one-element tuple (e.g. (False,)) instead of the intended
    # scalar value, which broke the values passed on to the Dify API.
    score_enabled: bool = False
    rerank_model: str
    rerank_provider: str
    weight: Optional[float]  # vector weight for hybrid search; may be None
    reranking_mode: Optional[str]
    reranking_enable: Optional[bool] = True
    embedding_model_name: Optional[str] = settings.dify_embedding_model
    embedding_model_provider_name: Optional[str] = settings.dify_embedding_model_provider


class UploadResponse(BaseModel):
    """Response envelope for the upload endpoint."""
    status: str = "success"  # "success" or "error" (see commented-out endpoint below)
    message: Optional[str] = None  # error description when status is "error"
    data: Optional[Dict] = None  # upload results on success


class PreProcessingRule(BaseModel):
    """A single Dify pre-processing rule toggle (e.g. ``remove_extra_spaces``)."""
    id: str  # rule identifier understood by the Dify API
    enabled: bool  # whether the rule is applied


class Segmentation(BaseModel):
    """Chunking parameters for document segmentation."""
    separator: str = "##"  # delimiter used to split the document into chunks
    max_tokens: int = 1024  # maximum tokens per chunk
    chunk_overlap: int = 50  # tokens shared between adjacent chunks


# class SubChunkSegmentation(BaseModel):
#     separator: str = "***"
#     max_tokens: int = 500
#     overlap_tokens: int = 50

class Rules(BaseModel):
    """Dify processing rules: text-cleanup toggles plus segmentation settings."""
    # Pydantic deep-copies field defaults per instance, so this mutable
    # default list is safe here.
    pre_processing_rules: List[PreProcessingRule] = [
        PreProcessingRule(id="remove_extra_spaces", enabled=True),
        PreProcessingRule(id="remove_urls_emails", enabled=False),
    ]
    segmentation: Segmentation = Segmentation()
    # subchunk_segmentation: SubChunkSegmentation = SubChunkSegmentation()


class ProcessRule(BaseModel):
    """Top-level Dify ``process_rule`` object: the mode plus detailed rules."""
    # NOTE(review): the settings attribute is spelled "dify_process_rule_mod"
    # (no trailing "e") — confirm this matches app.config and is not a typo.
    mode: str = settings.dify_process_rule_mod  # automatic / custom / hierarchical
    rules: Rules = Rules()
    # pre_processing_rules: List[PreProcessingRule] = [
    #     PreProcessingRule(id="remove_extra_spaces", enabled=True),
    #     PreProcessingRule(id="remove_urls_emails", enabled=False),
    # ]
    # segmentation: Segmentation = Segmentation()

class RetrievalModel(BaseModel):
    """Retrieval configuration attached to an uploaded document."""
    search_method: str = "hybrid_search"
    reranking_enable: bool = True
    top_k: int = 3  # number of chunks returned per query
    score_threshold_enabled: bool = False
    score_threshold: float = 0.0  # minimum relevance score when thresholding is enabled


class UploadData(BaseModel):
    """Full ``data`` payload serialized and sent to Dify's create-by-file endpoint."""
    indexing_technique: str = settings.dify_indexing_technique
    doc_form: str = settings.dify_doc_form
    doc_language: Optional[str] = "English"
    process_rule: ProcessRule = ProcessRule()
    retrieval_model: RetrievalModel = RetrievalModel()
    embedding_model: str = settings.dify_embedding_model
    embedding_model_provider: str = settings.dify_embedding_model_provider

# ----------------- Service -----------------
class DifyUploadService:
    """Downloads a file (optionally a ZIP), validates it, and uploads its
    contents to a Dify dataset, after updating the dataset's retrieval config."""

    def __init__(self, temp_dir_root="./tmp"):
        # Root under which a unique per-request temp directory is created.
        os.makedirs(temp_dir_root, exist_ok=True)
        self.temp_dir_root = temp_dir_root

    async def download_file(self, url: str, file_name: str) -> str:
        """Download ``url`` into a fresh temp directory and return the local path.

        Raises:
            HTTPException: 400 on a non-200 response, 500 on any other failure.
        """
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = uuid.uuid4().hex[:8]
        temp_dir = os.path.join(self.temp_dir_root, f"tmp_{timestamp}_{unique_id}")
        os.makedirs(temp_dir, exist_ok=True)

        file_path = os.path.join(temp_dir, file_name)

        try:
            async with aiohttp.ClientSession() as session:
                async with session.get(url) as resp:
                    if resp.status != 200:
                        raise HTTPException(status_code=400, detail=f"下载失败, 状态码 {resp.status}")
                    with open(file_path, "wb") as f:
                        # Stream in 1 MiB chunks to bound memory for large files.
                        while True:
                            chunk = await resp.content.read(1024 * 1024)
                            if not chunk:
                                break
                            f.write(chunk)
            return file_path
        except HTTPException:
            # BUG FIX: previously the 400 raised above was caught by the broad
            # handler below and re-reported as a 500; re-raise it unchanged.
            raise
        except Exception as e:
            raise HTTPException(status_code=500, detail=f"文件下载失败: {str(e)}")

    def extract_zip(self, zip_path: str) -> List[str]:
        """Extract a ZIP archive and return the paths of supported files inside.

        Raises:
            HTTPException: 400 if the ZIP is corrupt, contains an unsupported
                or unsafe member path, or contains no supported files.
        """
        extract_dir = os.path.join(os.path.dirname(zip_path), "extracted")
        os.makedirs(extract_dir, exist_ok=True)
        extract_root = os.path.realpath(extract_dir)

        extracted_files = []
        try:
            with zipfile.ZipFile(zip_path, "r") as zip_ref:
                for member in zip_ref.namelist():
                    # SECURITY: the archive comes from an arbitrary URL — reject
                    # members that would resolve outside the extraction dir
                    # (zip-slip, e.g. "../../etc/passwd").
                    target = os.path.realpath(os.path.join(extract_dir, member))
                    if target != extract_root and not target.startswith(extract_root + os.sep):
                        raise HTTPException(status_code=400, detail=f"ZIP 包含非法路径: {member}")
                    if any(member.lower().endswith(ext) for ext in SUPPORTED_EXTS):
                        zip_ref.extract(member, extract_dir)
                        extracted_files.append(os.path.join(extract_dir, member))
                    else:
                        # Any unsupported member aborts the whole upload.
                        raise HTTPException(status_code=400, detail=f"ZIP 包含不支持的文件类型: {member}")
        except zipfile.BadZipFile:
            raise HTTPException(status_code=400, detail="ZIP 文件损坏")

        if not extracted_files:
            raise HTTPException(status_code=400, detail="ZIP 文件中未找到支持的文件")

        return extracted_files

    def get_dataset_id(self, dataset_name: str) -> str:
        """Resolve a dataset name to its Dify dataset id.

        NOTE(review): only the first page of /datasets is scanned — confirm
        whether pagination matters for this deployment.

        Raises:
            HTTPException: 404 if no dataset with that name is found.
        """
        url = f"{settings.dify_base_url}/datasets"
        # Timeout added so a hung Dify server cannot block the worker forever.
        resp = requests.get(url, headers=headers, timeout=30)
        resp.raise_for_status()
        for ds in resp.json().get("data", []):
            if ds.get("name") == dataset_name:
                return ds.get("id")
        raise HTTPException(status_code=404, detail=f"未找到知识库 {dataset_name}")

    def upload_file(self, dataset_id: str, file_path: str, payload: UploadRequest) -> Dict:
        """Upload one local file to the dataset via Dify's create-by-file API."""
        url = f"{settings.dify_base_url}/datasets/{dataset_id}/document/create-by-file"

        # Build the Dify "data" payload from the request settings
        # (everything except file_url / file_name / dataset_name).
        rules = Rules(
            segmentation=Segmentation(
                separator=payload.separator,
                max_tokens=payload.max_tokens,
                chunk_overlap=payload.overlap_tokens,
            ),
            pre_processing_rules=[
                PreProcessingRule(id="remove_extra_spaces", enabled=payload.remove_extra_spaces),
                PreProcessingRule(id="remove_urls_emails", enabled=payload.remove_urls_emails),
            ],
        )
        process_rule = ProcessRule(rules=rules)
        retrieval_model = RetrievalModel(search_method=payload.search_method)

        payload_model = UploadData(
            indexing_technique=settings.dify_indexing_technique,
            doc_form=settings.dify_doc_form,
            doc_language=payload.doc_language,
            process_rule=process_rule,
            retrieval_model=retrieval_model,
            embedding_model=settings.dify_embedding_model,
            embedding_model_provider=settings.dify_embedding_model_provider,
        )

        data = {"data": payload_model.model_dump_json()}
        logger.info("Upload payload data: %s", data["data"])
        # BUG FIX: open the file in a context manager so the handle is closed
        # even when the request raises (previously it leaked on every call).
        with open(file_path, "rb") as fh:
            # Generous timeout: uploads of large documents can take a while.
            resp = requests.post(url, headers=headers, files={"file": fh}, data=data, timeout=300)
        resp.raise_for_status()
        return resp.json()

    def update_dataset(
            self,
            dataset_name: str,
            search_method: str,
            reranking_enable: Optional[bool] = True,
            reranking_provider_name: Optional[str] = settings.dify_rerank_model_provider,
            reranking_model_name: Optional[str] = settings.dify_rerank_model,
            vector_weight: Optional[float] = 0.7,
            top_k: Optional[int] = 3,
            score_threshold_enabled: Optional[bool] = False,
            score_threshold: Optional[float] = 0.0,
            name: Optional[str] = None,
            retrieval_model: Optional[Dict[str, Any]] = None,
            partial_member_list: Optional[List] = None,
            reranking_mode: Optional[str] = "reranking_model",
            embedding_model_name: Optional[str] = settings.dify_embedding_model,
            embedding_model_provider_name: Optional[str] = settings.dify_embedding_model_provider,
    ) -> Dict:
        """Update the dataset's retrieval / embedding configuration via PATCH.

        When ``retrieval_model`` is given it is sent as-is; otherwise one is
        assembled from the individual parameters.

        Raises:
            HTTPException: 404 if the dataset name cannot be resolved.
            requests.HTTPError: if the PATCH request fails.
        """
        dataset_id = self.get_dataset_id(dataset_name)
        url = f"{settings.dify_base_url}/datasets/{dataset_id}"
        # BUG FIX: callers may pass vector_weight=None (req.weight is Optional);
        # fall back to the default instead of crashing on `1 - None`.
        if vector_weight is None:
            vector_weight = 0.7
        keyword_weight = 1 - vector_weight
        weights = {
            "weight_type": None,
            "keyword_setting": {
                "keyword_weight": keyword_weight
            },
            "vector_setting": {
                "vector_weight": vector_weight,
                "embedding_model_name": embedding_model_name,
                "embedding_provider_name": embedding_model_provider_name,
            }
        }
        # Default retrieval_model assembled from the individual parameters.
        if retrieval_model is None:
            retrieval_model = {
                "search_method": search_method,
                "reranking_enable": reranking_enable,
                "reranking_mode": reranking_mode,
                "reranking_model": {
                    "reranking_provider_name": reranking_provider_name,
                    "reranking_model_name": reranking_model_name
                },
                "weights": weights,
                "top_k": top_k,
                "score_threshold_enabled": score_threshold_enabled,
                "score_threshold": score_threshold
            }

        payload = {
            "name": name or dataset_name,
            "indexing_technique": settings.dify_indexing_technique,
            "permission": "all_team_members",
            "embedding_model_provider": embedding_model_provider_name,
            "embedding_model": embedding_model_name,
            "retrieval_model": retrieval_model,
            "partial_member_list": partial_member_list or []
        }

        logger.info(f"Updating dataset {dataset_id} with payload: {payload}")
        resp = requests.patch(url, headers={**headers, "Content-Type": "application/json"},
                              json=payload, timeout=30)
        resp.raise_for_status()
        return resp.json()

    async def process(self, req: UploadRequest) -> Dict:
        """End-to-end pipeline: update dataset config, download the file,
        validate/extract it, upload everything, and clean up.

        Returns:
            Dict with the per-file upload results and their count.
        """
        self.update_dataset(req.dataset_name, req.search_method, vector_weight=req.weight, top_k=req.topk,
                            score_threshold_enabled=req.score_enabled, score_threshold=req.score,
                            reranking_mode=req.reranking_mode, reranking_enable=req.reranking_enable,
                            embedding_model_name=req.embedding_model_name,
                            embedding_model_provider_name=req.embedding_model_provider_name,
                            reranking_model_name=req.rerank_model,
                            reranking_provider_name=req.rerank_provider
                            )

        file_path = await self.download_file(req.file_url, req.file_name)
        try:
            if file_path.lower().endswith(".zip"):
                files_to_upload = self.extract_zip(file_path)
            else:
                ext = os.path.splitext(file_path)[1].lower()
                if ext not in SUPPORTED_EXTS:
                    raise HTTPException(status_code=400, detail=f"不支持的文件类型: {ext}")
                files_to_upload = [file_path]

            dataset_id = self.get_dataset_id(req.dataset_name)

            results = [self.upload_file(dataset_id, f, req) for f in files_to_upload]
            return {"uploaded_files": results, "count": len(results)}
        finally:
            # BUG FIX: previously the temp directory leaked whenever extraction
            # or upload raised; always remove it, on success and failure alike.
            shutil.rmtree(os.path.dirname(file_path), ignore_errors=True)
# @app.post("/rag/upload_to_dify", response_model=UploadResponse)
# async def upload_to_dify(request_data: UploadRequest = Body(...)):
#     service = DifyUploadService()
#     try:
#         result = await service.process(request_data)
#         return UploadResponse(data=result)
#     except Exception as e:
#         logger.error(f"上传失败: {str(e)}", exc_info=True)
#         return UploadResponse(status="error", message=str(e))
#
# if __name__ == "__main__":
#     import uvicorn
#
#     uvicorn.run(app, host="localhost", port=8080)