from Searcher.llm import llm, super_eval, search_llm
from Searcher.api import search_papers
from collections import defaultdict
import pandas as pd
import concurrent.futures
from fuzzywuzzy import fuzz
from collections import Counter

def merge2df(res_list):
    '''
    Merge per-chunk LLM summaries into one DataFrame row per paper title.

    :param res_list: iterable of dicts with keys 'title', 'part', 'core'.
        Summaries for the same (title, part) pair are concatenated.
        Malformed entries are reported and skipped rather than aborting.
    :return: pandas.DataFrame with a 'Title' column plus one column per
        paper part ('Abstract', 'Introduction', ...).
    '''
    d = defaultdict(dict)

    for res in res_list:
        try:
            if d[res['title']].get(res['part']):
                # Same section seen again for this paper: append its summary.
                d[res['title']][res['part']] += res['core']
            else:
                d[res['title']][res['part']] = res['core']
        except (KeyError, TypeError):
            # Bad LLM output (missing key, or non-string 'core'): skip the
            # entry. Was a bare `except:` that also hid KeyboardInterrupt etc.
            print('===>', res)

    rows = []
    for paper_title, parts in d.items():
        parts['Title'] = paper_title
        rows.append(parts)

    return pd.DataFrame(rows)


def get_max_similar(title, title_list):
    '''
    Return the entry of title_list most similar to title by fuzzy ratio.

    Candidates scoring 0 are never selected; returns None when title_list
    is empty or nothing scores above 0.
    '''
    best = (0, None)  # (score, candidate); score must be strictly beaten
    for candidate in title_list:
        score = fuzz.ratio(title, candidate)
        if score > best[0]:
            best = (score, candidate)
    return best[1]



def get_summary_paper_df(search_list, title):
    '''
    Summarize retrieved paper chunks with an LLM and keep high-value hits.

    :param search_list: list of vector-search hits; each hit carries an
        'entity' dict with 'paper_title', 'year', 'chunk_id', 'chunk_text'.
    :param title: article title the chunks are judged against.
    :return: list of dicts {'title', 'part', 'core', 'citationValue'} whose
        citationValue is 'high'; 'title' is snapped back to a real chunk
        label via fuzzy matching (LLMs often paraphrase titles).
    '''
    def process_chunk(search):
        # Build one prompt document out of (up to) five chunks.
        doc = ''
        paper_title = []
        for hit in search:
            entity = hit['entity']
            paper_title.append(f'{entity["paper_title"]} {entity["year"]} chunk {entity["chunk_id"]}')
            doc += f"title:{entity['paper_title']}\n{entity['chunk_text']}\n---\n"
        PROMPT = '''
I will provide you with the titles and excerpts of five papers. Please carefully analyze them:
1. Determine if they are highly relevant to the title.
2. Determine which part of the paper they are suitable for citation: Abstract/Introduction/RelatedWork/Methodology/Experiment/Discussion/Conclusion, and summarize their core points in about 150 words.
3. Evaluate whether the paper has significant citation value. The standards for citation value are as follows:
   - Low value: Not closely related to the topic. For example, the title is "Applications of Reinforcement Learning in Natural Language," but the content mainly involves machine learning.
   - Medium value: Partially related. For example, the title is "Applications of Reinforcement Learning in Natural Language," but the content mainly involves reinforcement learning.
   - High value: Highly related. For example, the title is "Applications of Reinforcement Learning in Natural Language," and the content involves the combination of reinforcement learning and natural language processing.
4. Output in the following JSON format:
```json
[
    {"title":"xxx1","part":"Abstract/Introduction/RelatedWork/Methodology/Experiment/Discussion/Conclusion","core":"xxx","citationValue":"low/medium/high"},
    {"title":"xxx2","part":"Abstract/Introduction/RelatedWork/Methodology/Experiment/Discussion/Conclusion","core":"xxx","citationValue":"low/medium/high"},
    ...
]
```
JSON explanation: The "part" field can only be chosen from Abstract/Introduction/RelatedWork/Methodology/Experiment/Discussion/Conclusion. If the content involves multiple aspects, please use multiple dictionaries to summarize, meaning one paper can be summarized into multiple dictionaries.
`Core represents the core points of this section. If there are specific terms, such as method names, please keep the original names and summarize the core points in approximately 150 words, while appropriately retaining important details.
5. Only analyze relevant papers, do not analyze irrelevant papers.
'''
        messages = [{'role': 'system', 'content': PROMPT},
                    {'role': 'user',
                     'content': f"Document:\n{doc}\n\n"
                                f"Article Title: {title}\n"
                                f"If the content involves multiple aspects, please use multiple dictionaries to summarize, meaning one article can be summarized into multiple dictionaries."
                                f"Pay attention to the summarization requirements. "
                                "Please only output the JSON. JSON notes: Ignore subheadings in the original text,'part' can only be chosen from `Abstract/Introduction/RelatedWork/Methodology/Experiment/Discussion/Conclusion.` "

                     }]
        llm_cls = super_eval(llm(messages))
        if not isinstance(llm_cls, list):
            # Malformed LLM output (not a JSON list): drop this chunk quietly
            # instead of raising inside the worker and losing it anyway.
            return []
        for item in llm_cls:
            # Snap the (possibly paraphrased) title back to a real chunk label.
            item['title'] = get_max_similar(item['title'], paper_title)
        # Keep only papers the LLM graded highly relevant; .get() tolerates
        # entries where the LLM omitted the citationValue field.
        return [item for item in llm_cls if item.get('citationValue') == 'high']

    res = []
    # Five chunks per LLM call, matching the "five papers" wording in PROMPT.
    chunks = [search_list[i:i + 5] for i in range(0, len(search_list), 5)]

    with concurrent.futures.ThreadPoolExecutor(max_workers=50) as executor:
        future_to_chunk = {executor.submit(process_chunk, chunk): chunk for chunk in chunks}
        for future in concurrent.futures.as_completed(future_to_chunk):
            try:
                res.extend(future.result())
            except Exception as exc:
                # Best-effort: one failed chunk should not abort the summary.
                print(f'Generated an exception: {exc}')

    return res


def get_paper_df(title):
    '''
    Build a citation table for a topic.

    Pipeline: draft several structured abstracts for the title with an LLM,
    use every drafted section as a semantic query against the paper vector
    store, keep papers hit by more than one section (treated as highly
    relevant), have the LLM summarize and grade the surviving chunks, and
    merge everything into one DataFrame (row per paper, column per section).

    :param title: topic / article title to research.
    :return: pandas.DataFrame; also best-effort saved to paper_summary3.csv.
    '''
    prompt = '''
Please draft an abstract based on the provided topic, which should include the following sections: introduction, methodology, experiments, discussion, and conclusion.
Firstly, search for the latest research findings related to the given topic. Then, write down the methods, indicators,
And the evaluation part based on search results.
You should add more topic keywords in your content.
Ensure that your writing is concise and clear, focusing on the key aspects of each section. The output should follow the following structure:

```json
{
  "Introduction": "Please summarize the background and purpose of this topic in concise language (approximately 100 words).",
  "Methodology": "Please describe the methods used in the research, including data collection, analysis, and tools (approximately 150 words).",
  "Experiment": "Please explain the experiment conducted to validate the method, including the experimental design and steps (approximately 150 words).",
  "Discussion": "Please discuss the standards or indicators used to measure the effectiveness of the method and analyze the experimental results (approximately 150 words).",
  "Conclusion": "Please summarize the advantages and limitations of the applied method and provide recommendations for future research (approximately 150 words)."
}
```
Please query and output JSON:
'''
    messages = [{'role': 'system', 'content': prompt},
                {'role': 'user', 'content': title}]
    res_list = []
    exist_id = []
    paper_ids = []

    draft = []

    def process_paper(inner_messages):
        # One independent LLM draft; super_eval parses the JSON answer.
        return super_eval(search_llm(inner_messages))

    # Draft five candidate abstracts concurrently for query diversity.
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        futures = [executor.submit(process_paper, messages) for _ in range(5)]
        concurrent.futures.wait(futures)
        for future in futures:
            draft.append(future.result())

    # Use each drafted section as a query against the vector store.
    for answers in draft:
        section_hits = [
            search_papers(answers['Introduction'], top_k=20),
            search_papers(answers['Methodology'], top_k=30),
            search_papers(answers['Experiment'], top_k=30),
            search_papers(answers['Discussion'], top_k=30),
            search_papers(answers['Conclusion'], top_k=30),
        ]

        for hits in section_hits:
            # Deduplicate chunks across sections and drafts by hit id.
            fresh = [h for h in hits if h['id'] not in exist_id]
            exist_id.extend(h['id'] for h in fresh)
            paper_ids.extend(h['entity']['paper_id'] for h in fresh)
            res_list.extend(fresh)

    # A paper hit by multiple sections is deemed highly relevant.
    paper_count = Counter(paper_ids)
    # Counter[...] returns 0 for missing keys, so the comparison is always
    # safe (the previous .get() could yield None > 1 and raise TypeError).
    res_list = [h for h in res_list if paper_count[h['entity']['paper_id']] > 1]
    summary_list = get_summary_paper_df(res_list, title)
    paper_df = merge2df(summary_list)
    try:
        paper_df.to_csv('paper_summary3.csv')
    except Exception as exc:
        # Best-effort persistence: report instead of silently swallowing.
        print(f'Failed to write paper_summary3.csv: {exc}')
    return paper_df



if __name__ == '__main__':
    # Smoke test: query the vector store directly and show the raw hits.
    hits = search_papers('Summary', top_k=20)
    print(hits)
# title = 'Natural language reinforcement learning'
#
# get_paper_chuck(title)