import json
from venv import logger
from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field
from typing import List, Optional, Union, Dict
from motor.motor_asyncio import AsyncIOMotorClient
from bson.objectid import ObjectId
from bson.errors import InvalidId
from fuzzywuzzy import fuzz, process
import logging
import re
import httpx

# MongoDB connection string.
# SECURITY NOTE(review): credentials are hard-coded in source; move them to
# environment variables or a secrets store.
# MONGODB_URI = "mongodb://username:password@81.68.124.174:31809/fastgpt?authSource=admin"
# MONGODB_URI = "mongodb://username:password@1.15.125.13:8089/fastgpt?authSource=admin"
MONGODB_URI = "mongodb://username:password@baospark-mongo:27017/fastgpt?authSource=admin"


# Connect to MongoDB (motor is the async driver).  The collection names
# mirror FastGPT's dataset schema.
client = AsyncIOMotorClient(MONGODB_URI)
db = client['fastgpt']
dataset_collection = db['datasets']
dataset_collection_collection = db['dataset.collections']
dataset_data_collection = db['dataset.datas']

app = FastAPI()

# CORS middleware.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# rejected by browsers under the CORS spec and is overly permissive —
# restrict origins for production use.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
    expose_headers=["*"],
)


class AnswerFormat(BaseModel):
    """Structured answer for basic drug information.

    Field names are Chinese to match the source dataset's terminology
    (generic name, English name, pinyin, indications, chemical name,
    molecular formula).
    """
    通用名称: str = Field("", description="药品的通用名称")
    英文名称: str = Field("", description="药品的英文名称")
    汉语拼音: str = Field("", description="药品名称的汉语拼音")
    适应症: str = Field("", description="药品的适应症")
    化学名称: str = Field("", description="药品的化学名称")
    分子式: str = Field("", description="药品的分子式")


# NOTE(review): class name looks like a misspelling of "Holder_Producer";
# renaming would break any external callers, so it is left as-is.
class Hodler_Productor(BaseModel):
    """Marketing-authorization holder and producer details for a drug."""
    上市许可持有人名称: str = Field("", description="上市许可持有人的名称")
    注册地址: str = Field("", description="上市许可持有人的注册地址")
    生产地址: str = Field("", description="药品的生产地址")
    生产企业名称: str = Field("", description="生产企业的名称")
    网址: str = Field("", description="企业的网址")
    批准文号: str = Field("", description="药品的批准文号")
    贮藏: str = Field("", description="药品的贮藏方法")


class Productor(BaseModel):
    """Manufacturer contact details (name, address, postcode, phone, URL)."""
    企业名称: str = Field("", description="生产企业的名称")
    生产地址: str = Field("", description="生产企业的地址")
    邮政编码: str = Field("", description="生产企业的邮政编码")
    电话: str = Field("", description="生产企业的联系电话")
    网址: str = Field("", description="生产企业的网址")

# Request models


class Message(BaseModel):
    """A single chat message; only the text content is carried."""
    content: str


class SimpleChatCompletionRequest(BaseModel):
    """Request body for the /ai/chat endpoint.

    Only messages[0].content is used as the query (see ai_chat_completion).
    """
    messages: List[Message]
    stream: bool = False
    datasetId: str


class DatasetIdRequest(BaseModel):
    """Request carrying only a dataset identifier."""
    datasetId: str


class QueryRequest(BaseModel):
    """Full-text search request: dataset id plus a query term."""
    datasetId: str
    query: str


class ProcessedQueryRequest(BaseModel):
    """Hybrid-search request; query may contain multiple terms joined by '/'."""
    datasetId: str
    query: str

# Response models


class ContentResponse(BaseModel):
    """One dataset segment: its collection id, record id, and text content."""
    collection_id: str
    data_id: str
    content: str


class ProcessedQueryResponse(BaseModel):
    """Processed query result: the field that matched and its content."""
    matched_field: str
    content: Union[str, List[str]]


# Query-term to canonical-field mapping.  Maps each canonical field name to
# the synonym/variant terms users may type; used by get_mapped_query for
# fuzzy normalization and by full_text_search for query-variant expansion.
query_field_mapping = {
    '通用名称': ['通用名称', '通用名'],
    '英文名称': ['英文名称', '英文名', '英文'],
    '汉语拼音': ['汉语拼音', '拼音'],
    '商品名称': ['商品名称', '商品名'],
    '适应症': ['适应症', '适用症'],
    '化学名称': ['化学名称', '化学名'],
    # Fixed: '化学结构' was listed twice in this synonym list.
    '化学结构式': ['化学结构式', '化学结构'],
    '分子式': ['分子式'],
    '上市许可持有人': ['上市许可持有人', '上市许可持有人名称', '上市持有人', '上市许可持有人地址',
                 '药品上市许可持有人', '药品上市许可', '上市许可'],
    '注册地址': ['注册地址'],
    '生产企业': ['生产地址', '生产企业地址', '生产地点', '生产企业名称', '生产企业名'],
    '企业名称': ['企业名称', '企业名'],
    '网址': ['网址', '网站'],
    '文号': ['批准文号', '文号'],
    '贮藏': ['贮藏', '储存'],
    '邮政编码': ['邮政编码', '邮编'],
    '电话': ['电话', '联系电话'],
    '用法用量': ['规格', '用法用量'],
}


async def extract_information(query: str, datasetId: str):
    """Run a full-text search and return the first matched segment's text.

    Returns None when nothing matches or when the search itself fails.
    """
    try:
        search_response = await full_text_search(
            QueryRequest(datasetId=datasetId, query=query)
        )
        logging.debug(f"Extract Information Query Result: {search_response}")
        matches = search_response["matched_results"]
        return matches[0].content if matches else None
    except Exception as exc:
        logging.error(f"Error extracting information: {str(exc)}")
        return None


def get_mapped_query(query: str) -> str:
    """Normalize a free-form query to a canonical field name.

    Fuzzy-matches the query against every synonym in query_field_mapping
    using a partial ratio, and returns the canonical key of the best
    scoring synonym.  Falls back to the original query when no synonym
    scores above the threshold (75).
    """
    normalized = query.lower()
    best_score = 0
    best_field = query  # default: return the query unchanged
    for field, synonyms in query_field_mapping.items():
        for synonym in synonyms:
            # partial_ratio tolerates substring-style partial matches
            score = fuzz.partial_ratio(normalized, synonym.lower())
            if score > best_score:
                best_score, best_field = score, field
    # Only accept the mapping when the best match clears the threshold.
    if best_score > 75:
        return best_field
    return query


@app.post("/dataset/segment-content", tags=["数据集操作"])
async def get_dataset_segment_content(request: DatasetIdRequest):
    """
    Return the segmented content of a dataset.

    Finds every collection that holds data for the dataset, then returns
    each stored text segment (the "q" field) along with its collection id
    and record id.

    Parameters:
    - datasetId: unique identifier of the dataset

    Raises:
    - HTTPException 400 when datasetId is not a valid ObjectId
    - HTTPException 404 when the dataset has no collections
    """
    try:
        dataset_id = ObjectId(request.datasetId)
    except InvalidId:
        raise HTTPException(status_code=400, detail="无效的 datasetId")

    # All collection ids that contain records for this dataset.
    collection_ids = await dataset_data_collection.distinct("collectionId", {"datasetId": dataset_id})
    if not collection_ids:
        raise HTTPException(status_code=404, detail="未找到与给定 datasetId 相关的集合")

    results = []
    for collection_id in collection_ids:
        # Projection keeps only the record id and the text segment ("q").
        data_items = await dataset_data_collection.find(
            {"collectionId": collection_id},
            {"_id": 1, "q": 1}
        ).to_list(length=None)

        for data in data_items:
            results.append(ContentResponse(
                collection_id=str(collection_id),
                data_id=str(data["_id"]),
                content=data.get("q", "")
            ))

    return {"results": results}


@app.post("/dataset/full-text-search", tags=["数据集操作"])
async def full_text_search(request: QueryRequest):
    """
    Full-text search over a dataset, ranked by match priority.

    Priority order: 【term】 > "# term" > "## term" > "### term" >
    (term) / （term） > plain occurrence or "term:" label.

    The query is expanded into all synonym variants from
    query_field_mapping before matching.

    Returns {"matched_results": [...]} deduplicated by data_id, keeping
    priority order.  Raises HTTPException 400 for an invalid datasetId.
    """
    try:
        # Conversion is only used to validate the id format early.
        dataset_id = ObjectId(request.datasetId)
    except InvalidId:
        raise HTTPException(status_code=400, detail="无效的 datasetId")

    # Fetch every segment of the dataset.
    q_content = await get_dataset_segment_content(DatasetIdRequest(datasetId=request.datasetId))

    # Expand the query: mapped canonical key, its synonyms, and the raw
    # query, deduplicated.
    mapped_query = get_mapped_query(request.query)
    query_variants = []
    if mapped_query in query_field_mapping:
        query_variants.extend(query_field_mapping[mapped_query])
    query_variants.append(mapped_query)
    query_variants.append(request.query)
    query_variants = list(set(query_variants))

    all_matched_results = []
    for query in query_variants:
        # Six priority buckets, highest priority first.
        priority_1_matches = []  # 【term】 heading
        priority_2_matches = []  # "# term" heading
        priority_3_matches = []  # "## term" heading
        priority_4_matches = []  # "### term" heading
        priority_5_matches = []  # (term) / （term）
        priority_6_matches = []  # plain text occurrence

        for item in q_content["results"]:
            content = item.content

            # Priority 1: 【term】; content that *starts* with it ranks first
            # within the bucket.
            if f"【{query}】" in content:
                if content.strip().startswith(f"【{query}】"):
                    priority_1_matches.insert(0, item)
                else:
                    priority_1_matches.append(item)
                continue

            # Priorities 2-4: markdown headings.  The lookbehind/lookahead
            # pin the exact heading level — previously the bare '#' pattern
            # also matched inside '##'/'###', so buckets 3 and 4 could
            # never be reached.
            if re.search(rf'(?<!#)#(?!#)\s*{re.escape(query)}(?:\s|$)', content):
                priority_2_matches.append(item)
                continue

            if re.search(rf'(?<!#)##(?!#)\s*{re.escape(query)}(?:\s|$)', content):
                priority_3_matches.append(item)
                continue

            if re.search(rf'(?<!#)###(?!#)\s*{re.escape(query)}(?:\s|$)', content):
                priority_4_matches.append(item)
                continue

            # Priority 5: parenthesized term (full- or half-width parens).
            if f"（{query}）" in content or f"({query})" in content:
                priority_5_matches.append(item)
                continue

            # Priority 6: plain substring or a "term:" label.
            if query.lower() in content.lower() or re.search(rf'{re.escape(query)}[:：]', content):
                priority_6_matches.append(item)

        # Concatenate buckets in priority order.
        all_matched_results.extend(priority_1_matches)
        all_matched_results.extend(priority_2_matches)
        all_matched_results.extend(priority_3_matches)
        all_matched_results.extend(priority_4_matches)
        all_matched_results.extend(priority_5_matches)
        all_matched_results.extend(priority_6_matches)

    # Deduplicate by data_id while preserving priority order.
    seen_ids = set()
    unique_results = []
    for result in all_matched_results:
        if result.data_id not in seen_ids:
            seen_ids.add(result.data_id)
            unique_results.append(result)

    logging.info(f"Query: {request.query}")
    logging.info(f"Mapped Query: {mapped_query}")
    logging.info(f"Query Variants: {query_variants}")
    logging.info(f"Dataset ID: {request.datasetId}")
    logging.info(f"All matched results: {unique_results}")

    return {"matched_results": unique_results}


def process_content(query: str, content: str) -> Optional[Union[str, Dict[str, str]]]:
    """Extract the answer for *query* from one content segment.

    First tries the "# 【question】 answer" heading form; when the query
    appears inside the heading text, the remainder is returned.  Otherwise
    falls back to the "field: value" form.  Returns None when neither
    form matches.
    """
    heading = re.match(r'#\s*【?(.+?)】?\s*(.*)', content,
                       re.IGNORECASE | re.DOTALL)
    if heading is not None:
        title = heading.group(1).strip()
        body = heading.group(2).strip()
        if query.lower() in title.lower():
            return body

    # "field: value" — value runs to the end of the line.
    field_re = rf'{re.escape(query)}[:：]\s*(.*?)(?=\s*\n|\s*$)'
    hit = re.search(field_re, content, re.IGNORECASE | re.DOTALL)
    return hit.group(1).strip() if hit else None


def merge_html_tables(text):
    """Merge consecutive HTML tables separated only by whitespace.

    Segmentation sometimes splits one logical table into several complete
    '<html><body><table>...</table></body></html>' fragments.  Adjacent
    fragments are joined by dropping the closing tags of the first and the
    opening tags of the second.  Text with zero or one table, or tables
    separated by real content, is returned unchanged.

    Fixes two defects in the original implementation: it sliced a fixed 19
    characters for the 22-character closing sequence (leaving a stray
    "</t"), and after the first merge it kept reading between-table content
    from the stale pre-merge string.
    """
    pattern = r'<html><body><table>.*?</table></body></html>'
    opening = '<html><body><table>'
    closing = '</table></body></html>'

    result = text
    while True:
        tables = list(re.finditer(pattern, result, re.DOTALL))
        merged = False
        for current_table, next_table in zip(tables, tables[1:]):
            # Merge only when nothing but whitespace separates the tables.
            between_content = result[current_table.end():next_table.start()]
            if not between_content.strip():
                # First table without its closing tags.
                first_part = result[current_table.start():current_table.end() - len(closing)]
                # Second table without its opening tags.
                second_part = result[next_table.start() + len(opening):next_table.end()]
                result = (result[:current_table.start()] +
                          first_part + second_part +
                          result[next_table.end():])
                merged = True
                break  # re-scan from the updated string
        if not merged:
            return result


@app.post("/dataset/hybrid-search", tags=["数据集操作"])
async def hybrid_search(request: ProcessedQueryRequest):
    """
    Hybrid retrieval endpoint.

    Splits the query on '/', and for each part tries, in order:
    a 【term】 heading match, then '# term' / 'term:' patterns, and finally
    falls back to the AI completion endpoint when no pattern matches.

    Returns {"processed_result": [answer, ...]} in query order.
    Raises HTTPException 500 on any unexpected failure.
    """
    try:
        # One request may carry several queries separated by '/'.
        queries = request.query.split('/')
        results = []
        for query in queries:
            original_query = query.strip()
            logging.debug(f"Processing query: {original_query}")
            search_result = await full_text_search(QueryRequest(datasetId=request.datasetId, query=original_query))
            if search_result["matched_results"]:
                content_found = False
                
                for result in search_result["matched_results"]:
                    content = result.content
                    
                    # Merge split HTML tables before matching.
                    content = merge_html_tables(content)
                    
                    # Preferred form: 【term】 heading up to the next 【 heading.
                    title_match = re.match(
                        rf'【{re.escape(original_query)}】\s*(.*?)(?=\n【|\Z)',
                        content,
                        re.DOTALL | re.IGNORECASE
                    )
                    if title_match:
                        matched_content = title_match.group(1).strip()
                        results.append(matched_content)
                        content_found = True
                        break

                if not content_found:
                    for result in search_result["matched_results"]:
                        content = merge_html_tables(result.content)  # table merging applies here too
                        # Fallback forms: '# term ...' heading or 'term: ...' label.
                        patterns = [
                            rf'#\s*{re.escape(original_query)}(.*?)(?=\n#|\Z)',
                            rf'{re.escape(original_query)}[:：](.*?)(?=\n\n|\Z)'
                        ]
                        for pattern in patterns:
                            match = re.search(pattern, content, re.DOTALL | re.IGNORECASE)
                            if match:
                                matched_content = match.group(1).strip()
                                results.append(matched_content)
                                content_found = True
                                break
                        if content_found:
                            break

                if not content_found:
                    # No pattern matched: ask the AI endpoint instead.
                    ai_result = await get_ai_completion(original_query, request.datasetId)
                    results.append(ai_result)
            else:
                ai_result = await get_ai_completion(original_query, request.datasetId)
                results.append(ai_result)
        
        # Clean up each answer for presentation.
        cleaned_results = []
        for result in results:
            # Merge tables once more in case the extracted span split them.
            cleaned_result = merge_html_tables(result)
            # Collapse whitespace around HTML tags but keep spaces inside text.
            cleaned_result = re.sub(r'(?<=>)\s+|\s+(?=<)', ' ', cleaned_result).strip()
            cleaned_results.append(cleaned_result)
            
        return {"processed_result": cleaned_results}
        
    except Exception as e:
        logging.error(f"An unexpected error occurred in hybrid_search: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}")


def process_matched_content(content: str, query: str) -> str:
    """Clean up a matched content segment for presentation.

    Strips markdown '#' markers, merges HTML tables that were split across
    segments, applies special extraction for molecular formula/weight
    queries, keeps only the value after a "field:" label, and truncates
    trailing excipient (辅料) information.
    """
    # Remove all '#' heading markers and surrounding whitespace.
    content = re.sub(r'#', '', content).strip()

    def merge_tables(text):
        # Join back-to-back HTML tables by removing the closing tags of one
        # table together with the opening tags of the next.  The original
        # split only on '</html>...<html>', which left stray
        # '</table></body>' / '<body><table>' fragments in the output.
        boundary = r'(</table></body></html>\s*<html><body><table>)'
        parts = re.split(boundary, text)
        # re.split with a capturing group interleaves separators at odd
        # indices; keep only the even-indexed content parts.
        return ''.join(part for i, part in enumerate(parts) if i % 2 == 0)

    content = merge_tables(content)

    # Special handling for molecular formula / molecular weight queries.
    if query == "分子式":
        # Return the first LaTeX fragment ($...$) verbatim.
        latex_match = re.search(r'\$.*?\$', content)
        if latex_match:
            return latex_match.group(0)
    elif query == "分子量":
        # Return the first numeric value.
        number_match = re.search(r'\b(\d+(\.\d+)?)\b', content)
        if number_match:
            return number_match.group(1)

    # "field: value" form — keep only the part after the first colon.
    if ':' in content or '：' in content:
        content = re.split(r'[:：]', content, 1)[1].strip()

    # Drop excipient details and everything after them.
    content = re.split(r'\s+辅料[:：]', content)[0].strip()

    return content


async def get_ai_completion(query: str, dataset_id: str) -> str:
    """Ask the AI chat endpoint to answer *query* against a dataset.

    Returns the model's answer with leading dashes/whitespace stripped
    from every line, or a fallback message when the call fails.
    """
    mapped = get_mapped_query(query)
    chat_request = SimpleChatCompletionRequest(
        messages=[Message(content=f"原始查询: {query}, 映射查询: {mapped}")],
        stream=False,
        datasetId=dataset_id,
    )
    try:
        response = await ai_chat_completion(chat_request)
        answer = response.get("answer", "未找到相关信息")
        # Strip leading dash/whitespace runs from every line of the answer.
        return re.sub(r'^[-\s]+', '', answer, flags=re.MULTILINE)
    except Exception as exc:
        logging.error(f"Error in AI chat completion: {str(exc)}")
        return "未找到相关信息"


# Default system prompt prepended to user messages (wording kept verbatim).
DEFAULT_SYSTEM_PROMPT = """
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
"""


def get_prompt(message: str, system_prompt: str = DEFAULT_SYSTEM_PROMPT) -> str:
    """Prepend *system_prompt* to *message*, separated by a blank line."""
    return '\n\n'.join((system_prompt, message))


class FullTextSearchToSchemaRequest(BaseModel):
    """Request for the search-to-schema endpoint: dataset id plus query."""
    datasetId: str
    query: str


@app.post("/dataset/full-text-search-to-schema", tags=["数据集操作"])
async def full_text_search_to_schema(request: FullTextSearchToSchemaRequest):
    """
    Convert full-text search results into structured JSON.

    Runs a full-text search with the normalized query, then parses the
    matched content into key/value dicts, splitting multiple manufacturer
    blocks on 【生产企业N】 headings.

    Parameters:
    - datasetId: unique identifier of the dataset
    - query: search term

    Returns:
    - "json内容": list of parsed field dicts, one per manufacturer block
    - "原始内容": the concatenated raw matched content
    """
    # Run the full-text search with the normalized query.
    mapped_query = get_mapped_query(request.query)
    search_result = await full_text_search(QueryRequest(datasetId=request.datasetId, query=mapped_query))
    if not search_result["matched_results"]:
        return {"json内容": [], "原始内容": ""}
    # Concatenate every matched segment.
    extracted_contents = [result.content for result in search_result["matched_results"]]
    combined_content = "\n".join(extracted_contents)
    # Split on numbered manufacturer headings (【生产企业1】, 【生产企业2】, ...).
    enterprises = re.split(r'【生产企业\d+】', combined_content)
    enterprises = [e.strip() for e in enterprises if e.strip()]
    # Build the structured JSON.
    structured_contents = []
    for enterprise in enterprises:
        # "field: value" pairs; a value runs until the next field label or
        # the end of the block.
        field_pattern = r'([\u4e00-\u9fa5a-zA-Z/]+)[:：]\s*(.+?)(?=\s+(?=[\u4e00-\u9fa5a-zA-Z/]+[:：])|$)'
        matches = re.findall(field_pattern, enterprise, re.DOTALL)
        structured_content = {}
        for key, value in matches:
            key = key.strip()
            value = value.strip()
            # Comma-separated values (e.g. phone numbers) become lists.
            if ',' in value and not (value.startswith('"') and value.endswith('"')):
                value = [v.strip() for v in value.split(',') if v.strip()]
            structured_content[key] = value
        structured_contents.append(structured_content)
    return {
        "json内容": structured_contents,
        "原始内容": combined_content
    }

@app.post("/ai/chat", tags=["AI对话"])
async def ai_chat_completion(request: SimpleChatCompletionRequest):
    """
    Answer a query by retrieving dataset segments and asking an LLM to
    extract the precise answer.

    Steps: map messages[0].content to a canonical field, run full-text
    retrieval, build an extraction prompt from all matched segments, and
    call the external chat-completions service.

    Returns {"answer": ..., "raw_content": ...}; "raw_content" is the
    retrieved context fed to the model.  Raises HTTPException on AI
    service errors.
    """
    ai_service_url = "http://10.199.1.109:5020/v1/chat/completions"
    original_query = request.messages[0].content
    mapped_query = get_mapped_query(original_query)
    # Retrieve every segment matching the mapped query.
    search_result = await full_text_search(QueryRequest(datasetId=request.datasetId, query=mapped_query))
    extracted_infos = [result.content for result in search_result["matched_results"]]

    if not extracted_infos:
        return {"answer": "请手动输入", "raw_content": ""}

    # Combine all retrieved segments into a single context block.
    combined_info = "\n\n".join(extracted_infos)

    prompt = f"""
    你是一个精准信息提取助手。请从以下文本中提取与"{original_query}"相关的信息，只给出准确答案，不要有任何额外描述：
    {combined_info}
    请记住：
    1. 只提供准的答案，不要有任何额外描述。
    2. 如果找不到相关信息，请回复"请手动输入"。
    3. 对于地址类查询，只需提供具体的地址，不要包含其他信息。
    4. 原始查询为"{original_query}"，映射后的查询为"{mapped_query}"，请确保回答与原始查询直接相关。
    5. 如果有多个相关信息，请以逗号分隔列出所有相关答案。
    """

    payload = {
        "model": "Qwen2.5-72B-Instruct",
        "messages": [
            {"role": "system", "content": "你是一个精准信息提取助手，只提供准确的答案，不需要额外解释。"},
            {"role": "user", "content": prompt}
        ],
        "stream": request.stream
    }
    # SECURITY NOTE(review): hard-coded bearer token — should come from
    # configuration (environment variable / secrets store), not source.
    headers = {
        "Authorization": "Bearer sk-3033&5004",
        "Content-Type": "application/json"
    }

    try:
        # Named http_client to avoid shadowing the module-level Mongo `client`.
        async with httpx.AsyncClient() as http_client:
            response = await http_client.post(ai_service_url, json=payload, headers=headers)
            response.raise_for_status()
            ai_response = response.json()
            answer = ai_response['choices'][0]['message']['content'].strip()

            # Post-process address queries: drop a leading "label:" prefix
            # and keep only the first sentence.
            if "地址" in original_query.lower():
                answer = re.sub(r'^.*?[:：]\s*', '', answer)
                answer = answer.split('。')[0].strip()

            return {
                "answer": answer,
                "raw_content": combined_info
            }
    except httpx.HTTPStatusError as e:
        # Fixed: previously logged through `logger` imported from the `venv`
        # package by mistake; use the stdlib logging module instead.
        logging.error(f"HTTP error occurred: {e.response.text}")
        raise HTTPException(status_code=e.response.status_code, detail=f"AI service error: {e.response.text}")
    except Exception as e:
        logging.error(f"An unexpected error occurred: {str(e)}")
        raise HTTPException(status_code=500, detail=f"An unexpected error occurred: {str(e)}")
    
if __name__ == "__main__":
    import uvicorn
    # Serve the API on all interfaces, port 8080.
    uvicorn.run(app, host="0.0.0.0", port=8080)
