import json

from langchain_community.chat_models import ChatOpenAI

import config
from retriever.services import baseline_search_chunks
from tables.knowledge import KG
from tables.preset_problem import PreProblems
from tables.vector import Vectors
from utils.main import str_to_hash, get_keywords, get_embedding, get_rerank_scores
from fastapi.responses import JSONResponse
from datetime import datetime
from typing import List
from fastapi import UploadFile, File
from tables.user import UserModel
from tables.file import Files
import os
import config
from config import project_root
import pandas as pd


async def rags_query(
        collection_name: str,
        userid: int,
        query: str,
        rag_ids: list[str],
        llm: str,
        is_preset_query: bool,
        # Query expansion mode, 1: none
        query_expansion: int,
        # Whether to generate recommended follow-up questions
        is_recommend_query: bool,
        temperature: float,
        stream: bool = False,
):
    """Run a multi-knowledge-base RAG query and yield newline-delimited JSON events.

    Async generator: each yielded item is a JSON string of shape
    ``{"code": ..., "message": ..., "data": ..., "type": ...}`` — first the
    summary answer (streamed or whole), then optionally recommended
    follow-up questions, then the source chunks.

    Args:
        collection_name: Vector collection holding the preset Q/A pairs.
        userid: Owner of the knowledge bases being queried.
        query: The user's question.
        rag_ids: Ids of the knowledge bases to search.
        llm: Chat model name used for retrieval helpers and summarization.
        is_preset_query: Also search the preset-problem collection when True.
        query_expansion: Expansion strategy selector (1 = none; not implemented yet).
        is_recommend_query: Generate follow-up question suggestions when True.
        temperature: Sampling temperature for the LLM.
        stream: Stream the answer incrementally when True.

    Raises:
        Exception: If any referenced knowledge base has not been configured.
    """
    # Query expansion is not implemented yet; both branches are placeholders.
    if query_expansion == 1:
        pass
    else:
        pass

    # Search each knowledge base and concatenate the retrieved chunks
    # into one background-context string with [[citation:N]] markers.
    context = ""
    quote_num = 0  # running citation offset across knowledge bases
    chunk_list = []
    for rag_id in rag_ids:
        kg = KG.get_knowledge(userid, rag_id)
        if not kg.is_set:
            raise Exception("知识库未设置")
        if kg.rag_type == 1:
            chunks = await baseline_search_chunks(
                query=query,
                llm=llm,
                embed=kg.embed,
                collection_name=str_to_hash(rag_id),
                retrieval_type=kg.retrieval_type,
                reranker=kg.reranker,
                content_augmentation=kg.content_augmentation,
                top_k=kg.top_k,
                alpha=kg.mix_threshold,
            )
        else:
            chunks = []
        # Attach rank and origin to each chunk, and add a citation header.
        for i, chunk in enumerate(chunks):
            chunk.properties["sort"] = i
            chunk.properties["source_rag"] = rag_id
            # Flatten the chunk object into a JSON-serializable dict.
            tmp = {"collection": chunk.collection, "properties": chunk.properties, "uuid": str(chunk.uuid)}
            chunk_list.append(tmp)
            source = chunk.properties['source']
            text = f"{chunk.properties['text']}\n{chunk.properties['description']}"
            context += f"[[citation:{quote_num + i}]] 出自{source} {text}\n\n\n"
        quote_num += len(chunks)

    # Optionally search the preset-problem (FAQ) collection.
    if is_preset_query:
        # Extract query keywords for hybrid (keyword + vector) search.
        keywords = await get_keywords(query, llm)
        pre_chunks = await Vectors.search_chunks_by_hybrid_generic(
            collection_name=collection_name,
            query_vector=(await get_embedding(model=config.default_embed, text=query))[0],
            query_keywords=keywords,
            query_properties=["query", "answer"],
            target_vector=["query", "answer"],
            alpha=0.75,
            limit=20
        )
        # Rerank the candidates against the original query.
        contexts = [f"{x.properties['query']}\n{x.properties['answer']}" for x in pre_chunks]
        scores = await get_rerank_scores(query=query, contexts=contexts, reranker=config.default_reranker)
        sorted_data = sorted(scores, key=lambda x: x['score'], reverse=True)
        # Keep only the top-5 reranked results.
        top_results = [pre_chunks[item['index']] for item in sorted_data[:5]]
        # Prepend the preset Q/A content to the context.
        pre_context = ""
        for i, pre_chunk in enumerate(top_results):
            pre_chunk.properties["sort"] = i
            pre_chunk.properties["source_rag"] = "preset problem"
            # Flatten the chunk object into a JSON-serializable dict.
            tmp = {"collection": pre_chunk.collection, "properties": pre_chunk.properties, "uuid": str(pre_chunk.uuid)}
            chunk_list.append(tmp)
            pre_context += f"{pre_chunk.properties['query']}\n{pre_chunk.properties['answer']}\n\n\n"
        context = pre_context + context

    # LLM used for both summarization and question recommendation.
    llm_chain = ChatOpenAI(
        model=llm,
        api_key=config.model["api_key"],
        base_url=config.model["api_base"],
        temperature=temperature,
    )

    # Generate the answer.
    if stream:
        mid = ""
        for cr in llm_chain.stream(config.query_prompt.format(context=context, query=query)):
            # BUGFIX: stream() yields AIMessageChunk objects, not plain strings;
            # concatenating the chunk itself ("" + chunk) raises TypeError.
            mid += cr.content
            yield json.dumps({"code": 200, "message": "回答", "data": {"content": mid, "done": False}, "type": "summary"}) + "\n"
        yield json.dumps({"code": 200, "message": "回答", "data": {"content": "", "done": True}, "type": "summary"}) + "\n"
    else:
        response = llm_chain.invoke(
            config.query_prompt.format(
                context=context,
                query=query
            )
        )
        yield json.dumps({"code": 200, "message": "回答", "data": response.content, "type": "summary"}) + "\n"

    # Recommended follow-up questions.
    if is_recommend_query:
        response = llm_chain.invoke(
            config.more_questions_prompt.format(
                context=context,
                query=query
            )
        )
        # Keep non-trivial lines (length > 1 after stripping) as questions.
        more_problems = [s for s in response.content.split("\n") if len(s.strip()) > 1]
        yield json.dumps({"code": 200, "message": "推荐问题", "data": more_problems, "type": "recommend"}) + "\n"
        # NOTE(review): the "source" event is skipped when fewer than two
        # recommendations come back — looks unintentional; confirm with frontend.
        if len(more_problems) > 1:
            yield json.dumps({"code": 200, "message": "来源", "data": chunk_list, "type": "source"}) + "\n"
    else:
        yield json.dumps({"code": 200, "message": "来源", "data": chunk_list, "type": "source"}) + "\n"

async def upload(files: List[UploadFile] = File(...), faq_id: str="", current_user: UserModel = None):
    """Upload one or more Excel FAQ files and import their Q/A rows.

    For each file: saves it under the user's upload directory, records it in
    the files table, then reads the first sheet's column 0 as questions and
    column 1 as answers and stores each pair as a preset problem.

    Args:
        files: Uploaded files; each is expected to be an Excel sheet with
            questions in the first column and answers in the second.
        faq_id: FAQ group id the problems belong to (must be numeric).
        current_user: Authenticated user; owner of the files and problems.

    Returns:
        A list of all created preset-problem records across the files.

    Raises:
        Exception: If inserting a file record into the database fails.
    """
    all_preset_problems = []  # accumulated preset problems across all files
    for file in files:
        user_upload_dir = os.path.join(project_root, "data", "user_files", current_user.username, str(faq_id))
        # exist_ok avoids the check-then-create race when requests run concurrently.
        os.makedirs(user_upload_dir, exist_ok=True)

        # SECURITY: keep only the base name of the client-supplied filename so a
        # crafted name like "../../x.xlsx" cannot escape the upload directory.
        safe_name = os.path.basename(file.filename)
        file_path = os.path.join(user_upload_dir, safe_name)

        # Persist the uploaded bytes to disk.
        content = await file.read()
        with open(file_path, "wb") as f:
            f.write(content)

        # Record the file in the database (timestamp formatted yyyy-MM-dd HH:mm:ss).
        formatted_date_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        is_chunk: bool = False
        file_res = Files.insert_query_file(current_user.id, safe_name, faq_id, file_path, formatted_date_time, is_chunk, 2)
        if not file_res:
            raise Exception("文件插入失败")
        file_id = file_res.id

        # Read the Excel sheet: first column = questions, second = answers.
        df = pd.read_excel(file_path)
        questions = df.iloc[:, 0].astype(str).tolist()
        answers = df.iloc[:, 1].astype(str).tolist()
        for question, answer in zip(questions, answers):
            problem = PreProblems.create_preset_problem(
                userid=current_user.id,
                query=question,
                answer=answer,
                file_id=file_id,
                # NOTE(review): faq_id defaults to "" which would make int() raise
                # ValueError — callers appear to always pass a numeric id; confirm.
                faq_id=int(faq_id)
            )
            all_preset_problems.append(problem)
    return all_preset_problems

