import json
import os

from app.utils import get_milvus_client
from app.utils.chatgpt import chat_gpt_azure
from app.utils.embedding_api import get_embedding
from app.utils.milvus_do import MilvusDo

# Module-level Milvus helper; in the visible code it is only referenced by
# the commented-out create_index() calls below.
milvus_do = MilvusDo()


def upload_files_to_milvus_one():
    """Walk a local directory and, for each file, ask Azure ChatGPT for a
    summary, a metadata JSON object and two yes/no classifications, then
    write the four answers into ``output_one/<file>`` separated by a
    marker line.

    The output files are consumed by ``get_files_to_milvus_one()``, which
    splits on the same separator, so the marker must stay byte-identical.
    Side effects: creates ``output_one/`` and one text file per input file;
    calls the Azure ChatGPT endpoint four times per file.
    """
    # milvus_do.create_index(collection_name="sfat123456")

    directory = r"C:\Users\Administrator\Desktop\poc\sfat2"

    # Section separator; get_files_to_milvus_one() splits on this exact string.
    separator = "\n!!!!!!!!!!!!$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$!!!!!!!!!!!!!!!!\n"

    # Hoisted out of the loop: the output directory is loop-invariant, and
    # exist_ok avoids the exists()/makedirs() race of the original.
    os.makedirs("output_one", exist_ok=True)

    for root, dirs, files in os.walk(directory):
        for file in files:
            file_path = os.path.join(root, file)
            print(f"文件名: {file}, 文件路径: {file_path}")
            with open(file_path, 'rb') as f:
                file_content = f.read()
            print(f"文件内容: {file_content}...")
            output_path = os.path.join("output_one/", file)

            # Decode once. The original decoded the whole content twice and
            # built the preview from the first 2000 *bytes* (not characters),
            # which could truncate a multi-byte character mid-sequence.
            decoded_content = file_content.decode('utf-8', errors='ignore')
            file_content_preview = decoded_content[:2000]

            # 1) Plain-text summary of the whole file.
            prompt_summary = (f"You are a file summarization expert. Please analyze the following file content and provide a summary of the file content. The language of the summary should match the language of the file content. Please do not output any other content, directly output the summary. The output format should be plain text format without line breaks."
                              f" File content:{decoded_content}")
            summary = chat_gpt_azure(prompt_summary)

            # 2) Metadata (cate/year/month) as JSON, extracted from the preview.
            prompt_meta_data = (f"你是一个文件元数据提取专家，请分析下面文件内容，给出文件的元数据。"
                                f"1. 判断文档中的数据是否为Date of Ruling 或 Date of Determination。如果"
                                f"为Date of Ruling  输出 cate为 Ruling。Date of Determination  输出 cate为 Determination。"
                                f"2. 查看文档中的Date of Ruling 或 Date of Determination，后面字段的日期。"
                                f"输出year 为年份，month 为月份，用int类型返回"
                                f"3. 输出cate、year、month 3个字段。"
                                f"要求输出的元数据格式为json格式"
                                f"4. 不要输出任何其他内容，直接输出json格式的元数据。只输出json格式数据其他多余内容不要输出！！！！"
                                f"5.只输出输出实例内容"
                                f"输出示例为:"
                                f"{{'cate': 'Ruling', 'year': 2023, 'month': 10}}"
                                f"文件内容：{file_content_preview}")
            meta_data = chat_gpt_azure(prompt_meta_data)
            # NOTE(review): naive quote swap assumes the model never returns
            # apostrophes inside values — fragile, but kept as-is.
            valid_json = meta_data.replace("'", '"')

            # 3) Interlocutory yes/no classification.
            prompt_interlocutory_data = (f"请根据下面判断依据判断文件内容是否为Interlocutory"
                                         f"判断依据Interlocutory applications are temporary or interim applications filed during the pendency of a main appeal or case before the tribunal. These applications are typically filed to seek specific relief or directions that are necessary for the effective adjudication of the main matter.  In the context of SFAT, interlocutory applications may include requests for:  Stay of SEBI Orders: A party may file an interlocutory application seeking a stay on the implementation of an order passed by SEBI, such as penalties, suspension, or other regulatory actions, until the final decision on the appeal is made.  Interim Relief: Parties may seek temporary relief, such as permission to continue trading, accessing funds, or carrying out business activities, pending the resolution of the main appeal.  Extension of Time: Applications may be filed to request an extension of time for compliance with certain procedural requirements or deadlines.  Amendment of Pleadings: A party may file an interlocutory application to amend the appeal or other documents submitted to the tribunal.  Production of Additional Evidence: If a party wishes to submit additional evidence or documents that were not included in the original appeal, they may file an interlocutory application seeking permission to do so.  Adjournments: Applications may be filed to request adjournments or postponements of hearings for valid reasons."
                                         f"直接返回是或则不是，不要输出其他内容。"
                                         f"文件内容：{file_content_preview}")
            interlocutory_data = chat_gpt_azure(prompt_interlocutory_data)

            # 4) Sanction-reduce yes/no classification.
            prompt_sanction_reduce_data = (f"请判断文件内容是否为sanction reduce"
                                           f"直接返回是或则不是，不要输出其他内容。"
                                           f"文件内容：{file_content_preview}")
            sanction_reduce_data = chat_gpt_azure(prompt_sanction_reduce_data)

            # Single open instead of the original's five; sections are written
            # in the same order and with the same separators as before.
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(summary)
                f.write(separator)
                f.write(valid_json)
                f.write(separator)
                f.write(interlocutory_data)
                f.write(separator)
                f.write(sanction_reduce_data)


# upload_files_to_milvus_one()

def get_files_to_milvus_one():
    memory_name ="sfat_metadata2"
    # milvus_do.create_index(collection_name=memory_name)
    documents = []
    for root, dirs, files in os.walk("output_one/"):
        for file in files:
            print(f"文件名: {file}")
            #读取文件内容
            with open(os.path.join(root, file), 'r', encoding='utf-8') as f:
                file_content = f.read()
            #根据  "\n!!!!!!!!!!!!$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$!!!!!!!!!!!!!!!!\n" 切分文件内容
            parts = file_content.split(
                "\n!!!!!!!!!!!!$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$!!!!!!!!!!!!!!!!\n" )
            # 遍历每个部分，提取cate、year、month
            cate= ""
            fileName =file
            year = 0
            month= 0
            tags= ""
            summary =""
            if parts[0].strip():
                summary = parts[0].strip()
            if parts[1].strip():
                str_json  = parts[1].strip()
                # 去掉首位的 ```json 和 ``` 并提取中间的 JSON 部分
                start_index = str_json.find('```json') + len('```json')  # 找到第一个 ```json 的位置并跳过它
                end_index = str_json.rfind('```')  # 找到最后一个 ``` 的位置

                # 提取 JSON 部分
                json_part = str_json[start_index:end_index].strip()  # 使用 strip() 去掉多余的换行符

                # 解析 JSON 并获取值
                data = json.loads(json_part)

                # Extract values
                cate = data.get("cate")
                year = data.get("year")
                month = data.get("month")

            if parts[2].strip():
                if "是" in parts[2].strip():
                    tags = tags  +"Interlocutory"
            if parts[3].strip():
                if "是" in parts[2].strip():
                    tags = tags  +",sanction reduce"
            # 构建向量
            output_data = {
                "cate": cate,
                "fileName": fileName,
                "year": year,
                "month": month,
                "tags": tags,
                "summary": summary
            }

            # Convert dictionary to JSON string
            json_output = json.dumps(output_data, ensure_ascii=False, indent=4)
            documents.append({

                "content": "全文搜索",
                "metadata":output_data
            })
            # Print JSON output
            print(json_output)
    print(documents)
    # Prepare entities for insertion
    entities = []
    texts = [doc["content"] for doc in documents]
    em = [get_embedding("全文搜索")][0]
    print(em)

    for i, doc in enumerate(documents):
        entities.append(
            {
                "content": doc["content"],
                "dense_vector":em,
                "metadata": doc.get("metadata", {}),
                "cate": doc["metadata"]["cate"],
                "fileName": doc["metadata"]["fileName"],
                "year": str(doc["metadata"]["year"]),
                "month": str(doc["metadata"]["month"]),
                "tags": doc["metadata"]["tags"],
                "summary": doc["metadata"]["summary"]
            }
        )

    # Insert data
    Collection = get_milvus_client()
    Collection.insert(memory_name, entities)
    print(f"Inserted {len(entities)} documents")

def search_metedata(database, filter=''):
    """Run a sparse-vector (BM25 full-text) search against *database* and
    print each hit's score, content, metadata and id.

    Parameters
    ----------
    database : str
        Name of the Milvus collection to search.
    filter : str
        Optional Milvus boolean filter expression (e.g. ``year == '2022'``).
        NOTE(review): shadows the builtin, but the name is part of the
        public keyword interface (see commented-out callers), so it stays.

    Returns the first (and only) result list from the client.
    """
    client = get_milvus_client()
    hits = client.search(
        collection_name=database,
        data=["全文搜索"],
        anns_field="sparse_vector",
        limit=1000,
        filter=filter,
        output_fields=["content", "metadata", "id"],
    )[0]

    print("\nSparse Search (Full-text search):")
    for rank, hit in enumerate(hits, start=1):
        entity = hit['entity']
        print(
            f"{rank}. Score: {hit['distance']:.4f}, Content: {entity['content']}，metadata: {entity['metadata']}"
            f"id: {entity['id']}"
        )
    return hits

# get_files_to_milvus_one()
# search_metedata("sfat_metadata2",filter="year == '2022' and cate =='Ruling' ")
# search_metedata("sfat_metadata")