from tool.tool import asearch_papers, aquery_by_chunk_contain, aquery_by_title
from tool.hybrid_search import hybrid_search
from tool.prompt import REWRITE_PROMPT
import json
from zhipuai import ZhipuAI
import re
import argparse
import asyncio
from FlagEmbedding import BGEM3FlagModel
import os
from dotenv import load_dotenv
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"
import time
from tqdm import tqdm
from gen_survey import get_related
from sklearn.cluster import KMeans
import hashlib
from generate_zhipu import generate_by_user_contents
import time
load_dotenv()

def calculate_file_hash(file_path):
    """Return the MD5 hex digest of the file at *file_path*.

    Reads in 8 KiB chunks so arbitrarily large files are hashed without
    loading them fully into memory.
    """
    hash_func = hashlib.md5()  # direct constructor; no need for getattr indirection
    with open(file_path, "rb") as f:
        while chunk := f.read(8192):
            hash_func.update(chunk)

    return hash_func.hexdigest()

# Maps source database filename -> publication year used for sorting/display.
# Journals (no single publication year per db) are mapped to 0 so they sort first.
conference_year = {
            "Conf_Paper_Meta_Data_SIGIR2023_with_whole_text.db": 2023,
            "Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db": 0,
            "Conf_Paper_Meta_Data_NeurIPS_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_AAAI_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_IJCAI2024_with_whole_text.db": 2024,
            "Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_ICML2024_with_whole_text.db": 2024,
            "Journal_Paper_Meta_Data_IEEE_Transactions_on_Knowledge_and_Data_Engineering_with_whole_text.db": 0,
            "Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db": 2024,
            "Conf_Paper_Meta_Data_WWW_2023__with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_IJCAI2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db": 2024,
            "Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db": 2024,
            "Conf_Paper_Meta_Data_ICML_2023_with_whole_text.db": 2023,
            "Journal_Paper_Meta_Data_Journal_of_Machine_Learning_Research_with_whole_text.db": 0,
            "Journal_Paper_Meta_Data_Artificial_Intelligence_with_whole_text.db": 0,
            "Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db": 2024,
            "Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db": 2023,
            "Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db": 0,
            "Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_ICLR_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db": 2024,
            "Conf_Paper_Meta_Data_ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_ECAI_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_Crypto_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_CCS_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_ECCV_2022_European_Conference_on_Computer_Vision_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_Crypto_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_CVPR_2022_IEEE_Conference_on_Computer_Vision_and_Pattern_Recognition_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_ICML_2022_International_Conference_on_Machine_Learning_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_ICLR_2022_International_Conference_on_Learning_Representation_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_ISSTA_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_MobiCom_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_KDD2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_SIGIR_2022_Special_Interest_Group_on_Information_Retrieval_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data__STOC_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_SP_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_SIGMOD_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_USENIX_Security_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_SP_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_VLDB2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_USENIX_Security_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_STOC_2023_with_whole_text.db": 2023,
            "Conf_Paper_Meta_Data_VLDB_2022_with_whole_text.db": 2022,
            "Conf_Paper_Meta_Data_WWW_2022_The_Web_Conference_with_whole_text.db": 2022
}

# Maps source database filename -> short venue label ("VENUE, YEAR" for
# conferences, acronym only for journals). Used for the reference list in
# the generated markdown.
conference_name = {
            "Conf_Paper_Meta_Data_SIGIR2023_with_whole_text.db": "SIGIR, 2023",
            "Journal_Paper_Meta_Data_IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_with_whole_text.db": "TPAMI",
            "Conf_Paper_Meta_Data_NeurIPS_2023_with_whole_text.db": "NeurIPS, 2023",
            "Conf_Paper_Meta_Data_AAAI_2023_with_whole_text.db": "AAAI, 2023",
            "Conf_Paper_Meta_Data_IJCAI2024_with_whole_text.db": "IJCAI, 2024",
            "Conf_Paper_Meta_Data_EMNLP_2023_with_whole_text.db": "EMNLP, 2023",
            "Conf_Paper_Meta_Data_ICML2024_with_whole_text.db": "ICML, 2024",
            # FIX: this db is TKDE, not TPAMI (was a copy-paste error).
            "Journal_Paper_Meta_Data_IEEE_Transactions_on_Knowledge_and_Data_Engineering_with_whole_text.db": "TKDE",
            "Conf_Paper_Meta_Data_ICLR2024_with_whole_text.db": "ICLR, 2024",
            "Conf_Paper_Meta_Data_WWW_2023__with_whole_text.db": "WWW, 2023",
            "Conf_Paper_Meta_Data_IJCAI2023_with_whole_text.db": "IJCAI, 2023",
            "Conf_Paper_Meta_Data_ECCV2024_with_whole_text.db": "ECCV, 2024",
            "Conf_Paper_Meta_Data_SIGIR2024_with_whole_text.db": "SIGIR, 2024",
            "Conf_Paper_Meta_Data_ICML_2023_with_whole_text.db": "ICML, 2023",
            "Journal_Paper_Meta_Data_Journal_of_Machine_Learning_Research_with_whole_text.db": "JMLR",
            "Journal_Paper_Meta_Data_Artificial_Intelligence_with_whole_text.db": "AI",
            "Conf_Paper_Meta_Data_AAAI2024_with_whole_text.db": "AAAI, 2024",
            "Conf_Paper_Meta_Data_CVPR_2023_with_whole_text.db": "CVPR, 2023",
            "Journal_Paper_Meta_Data_International_Journal_of_Computer_Vision_with_whole_text.db": "IJCV",
            "Conf_Paper_Meta_Data_ACL_2023_with_whole_text.db": "ACL, 2023",
            "Conf_Paper_Meta_Data_ICLR_2023_with_whole_text.db": "ICLR, 2023",
            "Conf_Paper_Meta_Data_ICCV_2023_with_whole_text.db": "ICCV, 2023",
            "Conf_Paper_Meta_Data_CVPR2024_with_whole_text.db": "CVPR, 2024",
            "Conf_Paper_Meta_Data_ACL_2022_Annual_Meeting_of_the_Association_for_Computational_Linguistics_with_whole_text.db": "ACL, 2022",
            "Conf_Paper_Meta_Data_ECAI_2023_with_whole_text.db": "ECAI, 2023",
            "Conf_Paper_Meta_Data_Crypto_2023_with_whole_text.db": "Crypto, 2023",
            "Conf_Paper_Meta_Data_CCS_2022_with_whole_text.db": "CCS, 2022",
            "Conf_Paper_Meta_Data_ECCV_2022_European_Conference_on_Computer_Vision_with_whole_text.db": "ECCV, 2022",
            "Conf_Paper_Meta_Data_Crypto_2022_with_whole_text.db": "Crypto, 2022",
            # FIX: this db is CVPR 2022, not TPAMI (was a copy-paste error;
            # conference_year also maps it to 2022).
            "Conf_Paper_Meta_Data_CVPR_2022_IEEE_Conference_on_Computer_Vision_and_Pattern_Recognition_with_whole_text.db": "CVPR, 2022",
            "Conf_Paper_Meta_Data_EMNLP_2022_Empirical_Methods_in_Natural_Language_Processing_with_whole_text.db": "EMNLP, 2022",
            "Conf_Paper_Meta_Data_ICML_2022_International_Conference_on_Machine_Learning_with_whole_text.db": "ICML, 2022",
            "Conf_Paper_Meta_Data_ICLR_2022_International_Conference_on_Learning_Representation_with_whole_text.db": "ICLR, 2022",
            "Conf_Paper_Meta_Data_ISSTA_2022_with_whole_text.db": "ISSTA, 2022",
            "Conf_Paper_Meta_Data_IJCAI_2022_International_Joint_Conference_on_Artificial_Intelligence_with_whole_text.db": "IJCAI, 2022",
            "Conf_Paper_Meta_Data_MobiCom_2023_with_whole_text.db": "MobiCom, 2023",
            "Conf_Paper_Meta_Data_NeurIPS_2022_Neural_Information_Processing_Systems_with_whole_text.db": "NeurIPS, 2022",
            "Conf_Paper_Meta_Data_KDD2023_with_whole_text.db": "KDD, 2023",
            "Conf_Paper_Meta_Data_SIGIR_2022_Special_Interest_Group_on_Information_Retrieval_with_whole_text.db": "SIGIR, 2022",
            "Conf_Paper_Meta_Data__STOC_2022_with_whole_text.db": "STOC, 2022",
            "Conf_Paper_Meta_Data_SP_2022_with_whole_text.db": "SP, 2022",
            "Conf_Paper_Meta_Data_SIGMOD_2023_with_whole_text.db": "SIGMOD, 2023",
            "Conf_Paper_Meta_Data_USENIX_Security_2023_with_whole_text.db": "USENIX, 2023",
            "Conf_Paper_Meta_Data_SP_2023_with_whole_text.db": "SP, 2023",
            "Conf_Paper_Meta_Data_VLDB2023_with_whole_text.db": "VLDB, 2023",
            "Conf_Paper_Meta_Data_USENIX_Security_2022_with_whole_text.db": "USENIX, 2022",
            "Conf_Paper_Meta_Data_STOC_2023_with_whole_text.db": "STOC, 2023",
            "Conf_Paper_Meta_Data_VLDB_2022_with_whole_text.db": "VLDB, 2022",
            "Conf_Paper_Meta_Data_WWW_2022_The_Web_Conference_with_whole_text.db": "WWW, 2022"
}

# Prompt template for summarizing one cluster of papers; the single {}
# placeholder is filled with the survey topic. The paper entries are
# appended after this template by gen_survey().
summary_prompt = "# Given the following papers (including paper id, title, publication time, main content of the paper), please summarize the papers related to {} into a paragraph. The summary should be able to summarize the core ideas of the given paper and have a certain logical relationship, such as subject classification, publication time sequence, etc.\n## Note: When the source of the summary is a certain (or multiple) given papers, it is necessary to cite it. The citation format is required to be [paper id] (for example, when the paper id of the source of part of the content is 1, it is necessary to add [1] after the quoted content to indicate the source of the content).\n ##Text\n"
def get_rewrite_query(query):
    """Expand *query* into a list of search queries via glm-4-plus.

    The model is prompted (REWRITE_PROMPT) to return a fenced ```json```
    object containing translations ("翻译"), paraphrases ("问题改写"),
    extracted entities ("实体抽取") and concept expansions
    ("概念关联及扩展"); all of them are flattened into one list headed by
    the original query.

    Raises:
        ValueError: when the response contains no ```json``` block.
    """
    client = ZhipuAI()
    response = client.chat.completions.create(
        model="glm-4-plus",
        messages=[
            {"role": "user", "content": REWRITE_PROMPT + query}
        ],
        extra_body={"temperature": 0}  # deterministic rewrites
    )
    response = response.choices[0].message.content
    print(response)
    match = re.search(r'```json([\s\S]*?)```', response)
    if not match:
        raise ValueError(
            "问题重写失败"
        )
    result = json.loads(match.group(1))
    result["原问题"] = query
    query_list = [result["原问题"]]
    # Translations and paraphrases are already lists of full queries.
    # Use .get() so a missing key degrades gracefully instead of KeyError.
    for rewrite_type in ("翻译", "问题改写"):
        query_list.extend(result.get(rewrite_type, []))
    # Entity extraction: first element is a comma-separated string.
    # Guard against an empty list (original indexed [0] unconditionally).
    entities = result.get("实体抽取", [])
    if entities:
        query_list.extend(entities[0].split(","))
    # Concept expansion may be a list of comma-separated strings or a
    # single string; cap the expansions at 15 entries.
    expansion = result.get("概念关联及扩展", [])
    extension_list = []
    if isinstance(expansion, list):
        for extension in expansion:
            extension_list.extend(extension.split(","))
    elif isinstance(expansion, str):
        extension_list.extend(expansion.split(","))
    query_list.extend(extension_list[:15])
    # Strip before filtering so whitespace-only entries are dropped too
    # (the original compared before stripping and could return "" items).
    return [q.strip() for q in query_list if q is not None and q.strip() != ""]

# Asynchronous multi-route query
def asyncio_query(query_list):
    """Run asearch_papers for every query, 3 concurrently per batch.

    Returns:
        tuple: (all_titles, unique_titles) — the full title list (with
        duplicates) and the de-duplicated list, so the caller can report
        the duplication rate.
    """
    # Acquire the loop once instead of once per batch; get_event_loop()
    # without a running loop is deprecated on Python 3.10+, so fall back
    # to creating one explicitly.
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    batches = [query_list[i:i+3] for i in range(0, len(query_list), 3)]
    results = []
    for batch in batches:
        tasks = [asyncio.ensure_future(asearch_papers(q, top_k=20)) for q in batch]
        results.extend(loop.run_until_complete(asyncio.gather(*tasks)))
    # De-duplicate by paper title; hits may carry the title either at the
    # top level or nested under "entity" depending on the search backend.
    result = []
    title_set = set()
    for res in results:
        for item in res:
            if "entity" in item:
                title = item["entity"]["paper_title"]
            elif "paper_title" in item:
                title = item["paper_title"]
            else:
                print(item)  # unexpected record shape — surface it for debugging
                continue
            title_set.add(title)
            result.append(title)
    return result, list(title_set)

def rerank(model, title_list, query):
    """Re-rank *title_list* against *query* using dense-only hybrid search.

    bm25_weight=0 disables the lexical (BM25) component, so the ranking
    comes purely from *model*'s embedding similarity.
    (Removed a large block of dead, commented-out per-title fetching code.)
    """
    return hybrid_search(model=model, query=query, docs=title_list, bm25_weight=0)

def get_abs(title: list):
    """Fetch chunk records for every paper title, 3 titles concurrently.

    Returns one result list per input title (whatever aquery_by_title
    yields with top_k=100), preserving input order.
    """
    # Batch the titles instead of rebinding the parameter name (the
    # original reassigned `title` to the list of batches, which was
    # confusing), and fetch the event loop once rather than per batch.
    batches = [title[i:i+3] for i in range(0, len(title), 3)]
    results = []
    loop = asyncio.get_event_loop()
    for batch in tqdm(batches):
        tasks = [asyncio.ensure_future(aquery_by_title(t, top_k=100)) for t in batch]
        results.extend(loop.run_until_complete(asyncio.gather(*tasks)))
    return results

def process_paper(paper: str):
    """Reduce a '#'-delimited paper chunk to its first two sections.

    Splitting on '#' and keeping pieces 1 and 2 drops everything after
    the second section; input with fewer than two '#' characters is
    returned unchanged.
    """
    sections = paper.split("#")
    if len(sections) < 3:
        return paper
    return f"#{sections[1]}#{sections[2]}"

def get_emb(paper, related, model):
    """Select the relevant papers and embed their processed abstracts.

    Args:
        paper: list where paper[i] is the chunk list for paper id i+1;
            each chunk is a dict with at least "chunk_text".
        related: list of relevance judgements, dicts with "id" and
            "relevant" keys (assumes ids are unique — TODO confirm).
        model: embedding model exposing encode(...)['dense_vecs'].

    Returns:
        tuple: (dense embeddings of the relevant abstracts,
        list of {"content": chunk_list} for the relevant papers).
    """
    # Index the judgements by id once instead of scanning `related` with
    # next() for every paper (O(n^2) -> O(n)).
    judgement_by_id = {d.get("id"): d for d in related}
    related_paper = []
    related_abs = []
    for i in range(len(paper)):
        result = judgement_by_id.get(i + 1)
        if result and result["relevant"]:
            related_paper.append({"content": paper[i]})
            related_abs.append(process_paper(paper[i][0]["chunk_text"]))
    print(len(related_abs))
    print(len(paper))
    related_abs_emb = model.encode(related_abs, batch_size=12, max_length=8192)['dense_vecs']
    return related_abs_emb, related_paper


def get_group(clusters_6, related_paper, n_groups=6):
    """Group papers by cluster label and assign 1-based citation ids.

    Args:
        clusters_6: cluster label (0..n_groups-1) per paper, aligned
            with related_paper.
        related_paper: list of {"content": chunk_list} entries as
            produced by get_emb.
        n_groups: number of clusters (defaults to 6, matching the
            hardcoded value the original used).

    Returns:
        dict mapping str(group_id) -> list of paper dicts, each paper
        sorted by year within the flat list before grouping and carrying
        a sequential "id" in group order (1-based).
    """
    paper_group = []
    for label, entry in zip(clusters_6, related_paper):
        chunks = entry["content"]
        paper_group.append({
            "group_id": label,
            "paper_title": chunks[0]["paper_title"],
            # First and last chunk give title/abstract plus the tail of the paper.
            "paper_abs": chunks[0]["chunk_text"] + "\n" + chunks[-1]["chunk_text"],
            "conference": chunks[0]["original_filename"],
            "chunk_id": [chunks[0]["chunk_id"], chunks[-1]["chunk_id"]],
            "year": chunks[0]["year"],
            "name": conference_name[chunks[0]["original_filename"]]
        })
    paper_group = sorted(paper_group, key=lambda x: x['year'])
    # NOTE: the original also assigned a 0-based "id" here, but it was
    # always overwritten by the 1-based assignment below, so that dead
    # pass has been removed.
    grouped_papers = {str(i): [] for i in range(n_groups)}
    for paper in paper_group:
        grouped_papers[str(paper['group_id'])].append(paper)
    k = 0
    for i in range(n_groups):
        for j in grouped_papers[str(i)]:
            k += 1
            j["id"] = k
    return grouped_papers


def gen_survey(grouped_papers, topic):
    """Generate a survey article about *topic* from the grouped papers.

    Each of the 6 groups is first summarized (generate_by_user_contents,
    48-way concurrency), then glm-4-plus merges the group summaries into
    a full review article.

    Returns:
        tuple: (survey text, concatenated group summaries).
    """
    client = ZhipuAI()
    # Build one summarization request per cluster: each paper contributes
    # its id, title, year and abstract so the model can cite [id].
    user_contents = []
    for i in range(6):
        content = ""
        for paper in grouped_papers[str(i)]:
            content = content + "# id:" + str(paper["id"]) + "\n" + paper["paper_title"] + "\n" + "Time of publication: " + str(paper["year"]) + "\n" + paper["paper_abs"] + "\n"
        content = summary_prompt.format(topic) + content
        user_contents.append(content)
    result = generate_by_user_contents(client, user_contents, False, 48)
    content = ""
    for i in result:
        content += i + "\n"
    prompt = """
I will provide you with some summary information, which will involve different aspects. Please generate a review article about {} in English based on the summary information provided below. If some summary information is not relevant to {}, you can discard this part of information. Please make sure that the review contains introduction, background, methods, results and discussion sections, and cite the corresponding summary information in each section.
#### **Generation requirements:**

1. **Introduction**:
- Provide background information to explain why these articles are worth reviewing.
- Describe the main problem or research field of the review.
- Outline the structure of the review.

2. **Background**:
- Briefly introduce the basic concepts and historical development of the relevant field.
- Describe current research trends and hot issues.

3. **Method**:
- Present the main findings of each article in a logical order (such as subject classification).
- If necessary, use charts or tables to present data (if applicable).
- Emphasize consistency and differences, and point out any controversial or unresolved issues.

5. **Discussion**:
- Analyze the similarities and differences between the articles.
- Discuss the significance and potential impact of the findings.
- Propose directions or suggestions for future research.

6. **Conclusion**:
- Summarize the main findings of the review.
- Emphasize the importance of the field and future research directions.
#### **Enter summary information:**
""".format(topic,topic)
    current_statu = client.chat.completions.create(
                model="glm-4-plus",
                messages=[
                    {"role": "user", "content": prompt + content}
                ],
                extra_body={"temperature": 0}  # deterministic generation
            )
    summary = current_statu.choices[0].message.content
    return summary, content
    # (Removed the dead, commented-out two-call head/tail generation variant.)

def k_means(k, related_abs_emb):
    """Cluster the embeddings into *k* groups; return one label per row."""
    # Use a distinct local name: the original shadowed the function name
    # with the estimator instance.
    clusterer = KMeans(n_clusters=k, random_state=0)  # fixed seed for reproducibility
    clusterer.fit(related_abs_emb)
    return clusterer.labels_.tolist()

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="接受一个 --topic 参数")
    parser.add_argument('--topic', type=str, required=True, help="请输入一个主题（topic）")
    args = parser.parse_args()
    topic = args.topic
    # exist_ok avoids the check-then-create race of the original exists()+makedirs().
    os.makedirs("./bge", exist_ok=True)
    start_time = time.time()
    model = BGEM3FlagModel('BAAI/bge-m3',  use_fp16=True, cache_dir="./bge")

    # Rewrite the topic into multiple search queries.
    rewrite_query = get_rewrite_query(topic)
    print("rewrite query", rewrite_query)
    # Multi-route retrieval.
    result_list, title_list = asyncio_query(rewrite_query)
    print(f"去重前paper数量：{len(result_list)}")
    print(f"去重后paper数量：{len(title_list)}")
    print(f"重复率：{1 - (len(title_list)/len(result_list))}")
    # Re-rank the de-duplicated titles against the original query.
    title_list = [doc for doc in title_list if doc is not None]
    rank_result = rerank(model=model, query=rewrite_query[0], title_list=title_list)
    paper_abs = get_abs(rank_result)
    # Judge whether each re-ranked result is actually relevant to the topic.
    related = get_related(paper_abs, topic=topic)
    print("related", related)
    # Embed the relevant papers' abstracts.
    related_abs_emb, related_paper = get_emb(paper=paper_abs, related=related, model=model)
    # Cluster into 6 thematic groups.
    clusters_6 = k_means(k=6, related_abs_emb=related_abs_emb)
    print("clusters_6", clusters_6)
    # Group papers by cluster label and assign citation ids.
    grouped_papers = get_group(clusters_6=clusters_6, related_paper=related_paper)
    # Generate the survey and write it out.
    summary, content = gen_survey(grouped_papers=grouped_papers, topic=topic)
    file_path = "4.md"
    # Explicit encoding: the content mixes CJK and English text.
    with open(file_path, "w", encoding="utf-8") as f:
        if "#### References" in summary:
            summary = summary.split("References")[0]
        f.write(summary.replace("[", "<sup>").replace("]", "</sup>") + "\n\n")
        for j in range(6):
            for i in grouped_papers[str(j)]:
                # Sort the de-duplicated chunk ids so the output is
                # deterministic and BOTH ids are printed when they differ
                # (the original iterated a set and printed only an
                # arbitrary one of the two ids).
                chunk_id = sorted(set(i["chunk_id"]))
                chunk_id_text = ", ".join(str(c) for c in chunk_id)
                f.write("[" + str(i["id"]) + "], " + i["paper_title"] + ", " + i["name"].split(",")[0] + ", " + str(i["year"]) + ", chunk " + chunk_id_text + "\n\n")

    hash_value = calculate_file_hash(file_path)
    print(f"当前文章对应的MD5码如下：{hash_value}")
    run_time = time.time() - start_time
    print(f"运行时间：{run_time}秒")