from research_agent.core.paper_review import PaperReviewer
from research_agent.core.rerank_with_LLM import RerankByLLM
from research_agent.core.writer import Writer
import logging
import pickle
import asyncio


def main() -> None:
    """Read a saved survey draft from disk and print its parsed section structure.

    Raises:
        FileNotFoundError: if the draft markdown file is not present at the
            expected path (the script is meant to be run from the repo root).
    """
    # Hardcoded output of a previous draft-iteration run; adjust as needed.
    draft_path = "./draft_iteration_output/20250306_160821/0.md"
    paper_reviewer = PaperReviewer()
    with open(draft_path, "r", encoding="utf-8") as f:
        paper = f.read()
    print(paper_reviewer.parse_draft(paper))


if __name__ == "__main__":
    main()
    # NOTE(review): the two string literals below are disabled experiment
    # snippets (LLM rerank demo; async review/rewrite pipeline) kept for
    # reference. They are never executed — consider moving them to separate
    # example scripts or deleting them.
    '''
        query = "如何学习人工智能"


        documents = [
            "# 4 Methods In this section we will first recap the mechanism behind current contrastive, multimodal representation learning methods that rely on clean data. We will then introduce our Context Adapter Module that allows learning from the auxiliary modality through an attention mechanism. Finally, we will describe how we can extend an existing backbone for images to videos and audio, to be able to leverage large, pretrained models.",
            "Python编程入门教程",
            "深度学习的数学基础",
            "人工智能发展历史概述",
            "如何准备AI面试"
        ]
        glm4_rerank = RerankByLLM()
        result = glm4_rerank.rerank_documents(query, documents, batch_size=2)
        if result:
            result = sorted(result,
                   key=lambda x: x['index'],
                   reverse=False)
            print("排序结果：")
            for item in result:
                print(f"文档 {item['index']} - 得分 {item['relevance_score']:.4f}: {documents[item['index']]}")


    asyncio.run(main())
    '''
    '''
    import nest_asyncio
    nest_asyncio.apply()
    async def main():
        topic = 'What does the technological roadmap of multi-model large model look like?'
        with open("./draft_iteration_output/20250301_064634/0.md","r",encoding="utf-8") as f:
            paper = f.read()
        with open("./survey.data","rb") as f:
            survey = pickle.load(f)
        paper_reviewer = PaperReviewer(logger = logging.getLogger("my_logger"))
        writer = Writer(logger=logging.getLogger("my_logger"))
        outline = survey.get_whole_outline()
        paper_draft = paper
        new_survey = await paper_reviewer.review_paper(topic, paper_draft, survey)
        max_section_code = new_survey.get_max_section_code()
        new_paper_content = new_survey.get_paper_content(except_sections = [str(1),max_section_code])
        tasks = [writer.write_title(topic, outline, new_paper_content),
                 writer.write_introduction(topic, outline, new_paper_content),
                 writer.write_conclusion(topic, outline, new_paper_content)]

        paper_title, introduction, conclusion = await asyncio.gather(*tasks, return_exceptions=True)
        # 生成文章全部内容
        paper_draft = f"# {paper_title}\n\n## 1 Introduction\n\n{introduction}\n\n {new_paper_content} \n\n## {max_section_code} Conclusion\n\n{conclusion}"
        # 为文章内容添加参考文献
        with open("./draft_iteration_output/20250301_064634/1.md","w",encoding="utf-8") as f:
            f.write(paper_draft)

    asyncio.run(main())
    '''




