import os
import re
import json
import chromadb
import numpy as np
from tqdm import tqdm
from base import Agent
from dotenv import load_dotenv
from FlagEmbedding import BGEM3FlagModel
from langchain_core.documents import Document
from langchain_community.document_loaders import TextLoader
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter


load_dotenv()  # read local .env file

def clean_generated_text(gen_text):
    """Strip every ``<think>...</think>`` section from generated text.

    :param gen_text: raw text produced by the model
    :return: the text with all think-blocks removed and surrounding
             whitespace trimmed
    """
    without_think = re.sub(r'<think>.*?</think>', '', gen_text, flags=re.DOTALL)
    return without_think.strip()


def load_json_lines(file_path):
    """Load a JSON array of records and convert each record to a Document.

    Despite the name (kept for backward compatibility with callers), the
    file is parsed as a single JSON list via ``json.load``, not as
    line-delimited JSON.

    :param file_path: path to a JSON file containing a list of objects,
        each with ``page_content``, ``id`` and ``authors`` keys
    :return: list of ``Document`` objects whose metadata carries the
        record's ``id`` and ``authors``
    :raises KeyError: if a record lacks one of the expected keys
    """
    # BUGFIX(docs): the old docstring's closing quotes had drifted to the
    # end of a dead, commented-out line-by-line loader, swallowing that
    # dead code into the docstring; the dead code is removed here.
    with open(file_path, 'r', encoding='utf-8') as file:
        json_objects = json.load(file)

    return [
        Document(page_content=obj['page_content'],
                 metadata={"id": obj['id'], "authors": obj['authors']})
        for obj in json_objects
    ]




def load_pdf(fpath="tikz.pdf"):
    """Load a single PDF file and return its pages as documents."""
    return PyPDFLoader(fpath).load()

def load_md(fpath):
    """Load a plain-text/markdown file and return it as documents."""
    return TextLoader(fpath).load()

def load_code(work_dir, sur_fix=".py"):
    """Recursively load every file under ``work_dir`` ending with ``sur_fix``."""
    code_loader = DirectoryLoader(work_dir, glob="**/*%s"%sur_fix)
    return code_loader.load()

def load_dir(work_dir, sur_fix=".pdf"):
    """Recursively load every file under ``work_dir`` ending with ``sur_fix``."""
    dir_loader = DirectoryLoader(work_dir, glob="**/*%s"%sur_fix)
    return dir_loader.load()

def load_pdfs(work_dir, sur_fix=".pdf"):
    """Load every PDF directly inside ``work_dir`` (non-recursive).

    Files failing with a "Multiple definitions in dictionary" parser error
    are only warned about; all other failures are recorded by name.

    :param work_dir: directory to scan
    :param sur_fix: filename suffix to match (default ``.pdf``)
    :return: tuple ``(loaded_docs, failed_files)``
    """
    loaded_docs = []
    failed_files = []
    for fname in os.listdir(work_dir):
        if not fname.endswith(sur_fix):
            continue
        try:
            loaded_docs.extend(PyPDFLoader(os.path.join(work_dir, fname)).load())
        except Exception as err:
            if "Multiple definitions in dictionary" in str(err):
                # Known recoverable parser complaint: warn, don't record.
                print(f"警告: 文件 {fname} 加载失败，原因: {err}")
            else:
                failed_files.append(fname)
    return loaded_docs, failed_files



def create_datebase_with_split(doc, dbname="mydb", persist_directory = 'docs/chroma/'):
    """Split pages into ~1024-char chunks, embed them with BGE-M3, and store
    them in a persistent Chroma collection.

    :param doc: iterable of page objects exposing ``page_content``
    :param dbname: name of the Chroma collection to create or reuse
    :param persist_directory: directory backing the persistent Chroma client
    """
    # use_fp16=True speeds up encoding with a slight accuracy trade-off.
    model = BGEM3FlagModel('bge-m3', use_fp16=True)
    r_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1024,
            chunk_overlap=20,
        )

    # BUGFIX: the old code also did `metas.extend(page.metadata)`, which
    # iterates the metadata *dict* and collects only its key strings; the
    # resulting `metas` list was never used, so the dead accumulation is
    # removed entirely.
    splits = []
    for page in tqdm(doc):
        splits.extend(r_splitter.split_text(page.page_content))

    client = chromadb.PersistentClient(path=persist_directory)
    collection = client.get_or_create_collection(name=dbname)
    collection.add(
            ids= ["id%s"%j for j in range(len(splits))],
            documents=splits,
            embeddings = model.encode(splits, 
                                batch_size=16, 
                                max_length=1024)['dense_vecs']
        )


def create_datebase(doc, dbname="mydb", persist_directory = 'docs/chroma/'):
    """Embed documents with BGE-M3 (content truncated to 2500 chars, no
    splitting) and add them to a persistent Chroma collection in batches.

    :param doc: iterable of Documents, or of lists of Documents
        (e.g. raw loader output); each item must expose ``page_content``
    :param dbname: name of the Chroma collection to create or reuse
    :param persist_directory: directory backing the persistent Chroma client
    """
    # use_fp16=True speeds up encoding with a slight accuracy trade-off.
    model = BGEM3FlagModel('bge-m3', use_fp16=True)

    # BUGFIX: the old code also built a `metas` list that was never stored
    # or returned; the dead accumulation is removed.
    splits = []
    for page in tqdm(doc):
        if hasattr(page, 'page_content'):
            # Truncate to the encoder's max_length budget used below.
            splits.append(page.page_content[:2500])
        else:
            # `page` may itself be a list of Documents; flatten it.
            for p in page:
                splits.append(p.page_content[:2500])

    client = chromadb.PersistentClient(path=persist_directory)
    collection = client.get_or_create_collection(name=dbname)
    batch_size = 41666  # maximum records per Chroma add() call
    # BUGFIX: the loop previously started at 20*batch_size — a leftover
    # one-off resume hack that silently skipped the first ~833k records on
    # every fresh run. Index from 0 so the whole corpus is ingested.
    for i in range(0, len(splits), batch_size):
        end = min(i + batch_size, len(splits))
        collection.add(
            ids=["id%s" % j for j in range(i, end)],
            documents=splits[i:end],
            embeddings=model.encode(splits[i:end], 
                                    batch_size=8, 
                                    max_length=2500)['dense_vecs']
        )


def query_doc(questions, dbname="mydb", n=1,
              persist_directory='docs/chroma/'):
    """Embed the question(s) and fetch the top-``n`` documents per question.

    :param questions: a question string or a list of question strings
    :param dbname: Chroma collection to query
    :param n: number of results to return per question
    :param persist_directory: directory backing the persistent Chroma client
    :return: tuple ``(documents, meta_data)``; metadata collection is a
        placeholder and ``meta_data`` is currently always empty
    """
    client = chromadb.PersistentClient(path=persist_directory)
    collection = client.get_or_create_collection(name=dbname)

    model = BGEM3FlagModel('bge-m3', use_fp16=True)

    if isinstance(questions, str):
        questions = [questions]

    question_vecs = model.encode(questions,
                                 batch_size=16,
                                 max_length=2500)['dense_vecs']

    # Chroma returns results already ordered by relevance.
    results = collection.query(
        query_embeddings=question_vecs,
        n_results=n)

    res = []
    meta_data = []  # placeholder: metadata is not collected yet
    for doc in results["documents"]:
        # Drop results judged contradictory to the question (stub for now).
        if not is_antonym(doc, questions):
            res.extend(doc)

    return res, meta_data

def is_antonym(doc, questions):
    """Placeholder contradiction check.

    Intended to decide whether ``doc``'s content contradicts ``questions``
    (e.g. via a lexicon or a model). The real logic is not implemented yet,
    so every document is accepted.

    :return: True if the document contradicts the questions, else False
             (currently always False)
    """
    return False

def query_sentence(questions, dbname="mydb", n=20,
              persist_directory = 'docs/chroma/'):
    '''Query the database for a list of questions, split the retrieved
    documents into ~256-char sentence chunks, then re-rank those chunks
    against the question embeddings and return the most relevant ones.

    :param questions: a question string or a list of question strings
    :param dbname: Chroma collection to query
    :param n: number of documents fetched per question, and number of
        top chunks kept per question in the re-ranking step
    :param persist_directory: directory backing the persistent Chroma client
    :return: tuple ``(sentences, meta_data)``; ``meta_data`` is currently
        always empty (metadata collection is not implemented yet)

    NOTE(review): ``res`` is passed through ``set()`` below, so chunk
    ordering — and hence the exact items returned when scores tie — is
    not deterministic across runs.
    '''
    client = chromadb.PersistentClient(path=persist_directory)
    collection = client.get_or_create_collection(name=dbname)
    
    model = BGEM3FlagModel('bge-m3', use_fp16=True) 

    if type(questions) == str:
        questions = [questions]

    embeddings = model.encode(questions,
                              batch_size=16,
                              max_length=1024)['dense_vecs']
    
    results = collection.query(
        query_embeddings = embeddings,
        n_results = n)

    r_splitter = RecursiveCharacterTextSplitter(
            chunk_size=256,
            chunk_overlap=20)

    res = []
    meta_data = []  # placeholder for metadata collection
    for doc in results["documents"]:
        res.extend(doc)
        #meta_data.extend(doc.metadata)  # collect metadata (not implemented)

    res = set(res)  # deduplicate retrieved documents (order becomes arbitrary)

    splits = []
    for doc in res:
        splits.extend(r_splitter.split_text(doc))

    retrieve_embeddings = model.encode(splits, batch_size=16, max_length=1024)['dense_vecs']

    # Dot-product relevance matrix of shape (num_questions, num_splits).
    retrieve_score = np.array(embeddings) @ np.array(retrieve_embeddings).T

    # argsort is ascending, so the last n columns index the highest scores.
    ind = np.argsort(retrieve_score, axis=1)

    return [splits[i] for i in set(ind[:, -n:].flatten())], meta_data  # top-n chunks plus (empty) metadata



def ai_suggest_queries(user_input, relevance_scores=None, json_loads=False, n=5):
    """Ask the agent to propose up to ``n`` follow-up search queries.

    :param user_input: the user's question
    :param relevance_scores: optional list of relevance-score strings fed
        back to the agent so it can generate fresh queries
    :param json_loads: when True, parse the agent's reply as a JSON list
        (repairing it with :func:`fix_json` on a decode error); when False,
        return the agent's streaming reply directly
    :param n: maximum number of suggested queries
    :return: a list of query strings when ``json_loads`` is True, otherwise
        the streaming reply object
    :raises ValueError: if the cleaned reply is empty and cannot be parsed
    """
    prompt = '''You are a physicist who is very strict and meticulous.
                 For user's question, you will first determine if there is enough information to answer,
                 if there is not enough information, you need to query these information from the internet or local database,
                 therefore, you need to provide %s or less query suggestions, 
                 and the query should be related to the user's question.
                 Each query should be as specific as possible and be independently queryable and cannot use pronouns,
                 in the following list format:
                 ["suggested query question 1",
                  "suggested query question 2",
                    ...,
                  "suggested query question n",
                 ]

                 Note: Return in English.'''%n

    agent = Agent(prompt)
    if relevance_scores is not None:
        reflect = "\n".join(relevance_scores)
        context = f"根据相关性得分，生成新的检索问题。得分: {reflect}"
    else:
        context = ""

    if json_loads:
        gen_text = agent.chat(user_input, context=context, stream=False)
        gen_text = clean_generated_text(gen_text)
        if not gen_text.strip():  # nothing left after stripping think-blocks
            raise ValueError("生成的文本为空，无法解析为 JSON。")
        try:
            return json.loads(gen_text)
        except json.JSONDecodeError as e:
            # Malformed JSON from the model: ask the repair agent, retry once.
            return json.loads(fix_json(gen_text, e))
        except Exception as e:
            print(f"JSON 解码错误: {e}，生成的文本: {gen_text}")
            raise  # re-raise so the caller can handle it
    else:
        # BUGFIX: removed an unused `import streamlit as st` that was here.
        gen_text = agent.chat(user_input, context=context, stream=True)

        return gen_text



def fix_json(json_string, error_msg, example='''["query1", "query2", ..., "queryn"]'''):
    """Use an LLM agent to repair a malformed JSON string.

    :param json_string: the broken JSON text to repair
    :param error_msg: the decode error (or message) that triggered repair,
        passed to the agent as context
    :param example: an example of the expected output format
    :return: the repaired JSON string (still needs ``json.loads`` by caller)
    """
    prompt = '''你是非常厉害的 json 修复专家。
                 你将修复一个 json 字符串，
                 修复后的 json 字符串应该符合 json 的格式，
                 并且应该符合给定的格式。
                 如果你判断 json 为列表，则返回一个列表，
                 如果你判断 json 为字典，则返回一个字典。
                 记得清除不必要的内容，比如开头的解释性、说明性文字。

                 注意: 不要输出任何除了 json 之外的解释性文字！
                 Example Output: %s'''%example

    agent = Agent(prompt)
    context = "Error message: %s"%error_msg
    print("json expert is fixing the json string")

    # Pre-process before handing off: strip Markdown code fences and
    # normalize characters that commonly break JSON parsing.
    json_string = json_string.replace("```json", "")
    json_string = json_string.replace("```", "")
    json_string = json_string.replace("“", '"')
    # BUGFIX: the right curly quote was never normalized, so strings using
    # “smart quotes” still failed to parse after "repair".
    json_string = json_string.replace("”", '"')
    json_string = json_string.replace('\\', '\\\\')  # escape backslashes

    gen_text = agent.chat_local(json_string, context=context, stream=False)
    gen_text = clean_generated_text(gen_text)
    return gen_text
 

def test_query_mechanics():
    """Smoke test: retrieve sentence-level context from the analytical
    mechanics database for a broad conceptual question."""
    dbname = "analytical_mechanics"
    # Source document for the database (built once, kept for reference):
    # docs/mechanics/0190c3a9-45ae-7913-9566-b31f23825bb9_md.md
    fname = "docs/mechanics/0190c3a9-45ae-7913-9566-b31f23825bb9_md.md"

    user_input = "分析力学中所有的概念以及解答"
    queries = [user_input]
    rag = query_sentence(queries, dbname)


def test_load_json_lines():
    """End-to-end smoke test: build the arxiv database from the processed
    metadata snapshot and run a sample query against it."""
    path = "arxiv/arxiv-metadata-oai-snapshot-processed.json"
    data = load_json_lines(path)
    dbname = "arxiv"
    create_datebase(data, dbname)
    print("create database done")
    queries = ["Ultra Central Puzzle in heavy ion collisions"]
    # BUGFIX: query_doc returns a (documents, meta_data) tuple; joining the
    # tuple directly raised TypeError. Unpack it first.
    gen_txt, _meta = query_doc(queries, dbname, n=10)
    print("\n".join(gen_txt))




# Script entry point: builds/queries the arxiv database by default.
# The other smoke tests are kept commented out for manual runs.
if __name__ == '__main__':
    #test_query_mechanics()
    #test_code_rag()
    #test_nlpcoding_rag()
    #test_funding_rag()
    test_load_json_lines()