#!/usr/bin/env python

from __future__ import annotations

import asyncio
from typing import Any, Callable, Optional, Union

from pydantic import TypeAdapter, model_validator, BaseModel

from metagpt.actions import Action
from metagpt.config2 import config
from metagpt.logs import logger
from metagpt.tools.search_engine import SearchEngine
from metagpt.tools.web_browser_engine import WebBrowserEngine
from metagpt.utils.common import OutputParser
from metagpt.utils.text import generate_prompt_chunk, reduce_message_length
from metagpt.utils.token_counter import count_string_tokens


from data_dyne_wrj.rag.kb_api import (
    KnowledgeBaseAPI,
    SimpleRelevantPaper,
    PaperChunkWithDistance,
)
from data_dyne_wrj.agent.utils import dump_list_obj, load_list_obj

# ---------------------------------------------------------------------------
# Prompt templates.
# NOTE(review): several constant names carry typos (SUERVY_SYSTEM,
# SUMMARIZE_KEYWORK_PROMPT, CLASIFY_TOPIC_PROMPT, SIMPLFY_RELEVANT_CHUNK_PROMPT,
# CONDUCT_SUERVY_PROMPT). They are referenced elsewhere in this module, so the
# names are kept as-is; only the prompt text itself has been proof-read.
# ---------------------------------------------------------------------------

# Appended to prompts to force the response language. Placeholder: {language}.
LANG_PROMPT = "Please respond in {language}."

# Generic system prompt for report writing.
RESEARCH_BASE_SYSTEM = """You are an AI critical thinker research assistant. Your sole purpose is to write well \
written, critically acclaimed, objective and structured reports on the given text."""

# System prompt that pins the research topic. Placeholder: {topic}.
RESEARCH_TOPIC_SYSTEM = (
    "You are an AI researcher assistant, and your research topic is:\n#TOPIC#\n{topic}"
)

# System prompt for survey-report generation. Placeholder: {topic}.
SUERVY_SYSTEM = "You are an AI research assistant, and you want to produce a survey report on the topic:\n#TOPIC#\n{topic}"

# Ask the LLM for initial search keywords. Placeholder: {keyword_nums}.
SEARCH_TOPIC_PROMPT = """Please provide up to {keyword_nums} necessary keywords related to your research topic for paper search.\
The keywords must be relevant to the research topic, should be the subject of the research, or should be the main focus of the research.\
The keywords should be sorted in order of importance.
Your response must be in JSON format, for example: ["keyword1", "keyword2", ...]."""

# Extract new keywords from a paper abstract.
# Placeholders: {paper_abstract}, {keyword_nums}.
KEYWORDS_SEARCH_PROMPT = """### Paper Abstract Information
{paper_abstract}

### Requirements
1. Provide up to {keyword_nums} keywords related to your research topic based on the search paper abstract.
2. Please respond in JSON format, for example: ["keyword1", "keyword2", ...].
"""

# Summarize one paper relative to the topic.
# Placeholders: {topic}, {paper_title}, {paper_abstract}, {paper_relevant}.
SUMMARIZE_PAPER_PROMPT = """### Topic
{topic}

### Paper Title
{paper_title}

### Paper Abstract
{paper_abstract}

### Paper Relevant Information
{paper_relevant}

### Requirements
1. Provide a comprehensive summary of the paper in 200 words.
2. If the paper is entirely unrelated to the research topic, please reply with a simple text "Not relevant."
"""

# Summarize a keyword from per-paper summaries.
# Placeholders: {keyword}, {paper_summary}.
SUMMARIZE_KEYWORK_PROMPT = """### Keyword
{keyword}

### Paper Summary
{paper_summary}

### Requirements
- Provide a simple summary of the keyword.
- The summary should have a max word count of 500.
"""

# Decompose search results into follow-up queries.
# Placeholders: {decomposition_nums}, {search_results}.
SUMMARIZE_SEARCH_PROMPT = """### Requirements
1. The keywords related to your research topic and the search results are shown in the "Search Result Information" section.
2. Provide up to {decomposition_nums} queries related to your research topic based on the search results.
3. Please respond in the following JSON format: ["query1", "query2", "query3", ...].

### Search Result Information
{search_results}
"""

# Filter and rank keywords by relevance; expects JSON list of indices back.
# Placeholders: {topic}, {keywords_summary}.
COLLECT_AND_RANKURLS_PROMPT = """### Topic
{topic}
### Keywords and Keywords Summary
{keywords_summary}

### Requirements
Please remove irrelevant keywords that are not related to the topic based on its summary. DO NOT forget the information above and remember the topic.\
Then, sort the remaining keywords based on its summary. \
If two results have equal credibility, prioritize them based on the relevance. Provide the
ranked results' indices in JSON format, like [0, 1, 3, 4, ...], without including other words.
"""

# Answer a query (or summarize) from reference text.
# Placeholders: {query}, {content}.
PAPER_AND_SUMMARIZE_PROMPT = """### Requirements
1. Utilize the text in the "Reference Information" section to respond to the question "{query}".
2. If the question cannot be directly answered using the text, but the text is related to the research topic, please provide \
a comprehensive summary of the text.
3. If the text is entirely unrelated to the research topic, please reply with a simple text "Not relevant."
4. Include all relevant factual information, numbers, statistics, etc., if available.

### Reference Information
{content}
"""

# Produce the final research report. Placeholders: {content}, {topic}.
CONDUCT_RESEARCH_PROMPT = """### Reference Information
{content}

### Requirements
Please provide a detailed research report in response to the following topic: "{topic}", using the information provided \
above. The report must meet the following requirements:

- Focus on directly addressing the chosen topic.
- Ensure a well-structured and in-depth presentation, incorporating relevant facts and figures where available.
- Present data and findings in an intuitive manner, utilizing feature comparative tables, if applicable.
- The report should have a minimum word count of 2,000 and be formatted with Markdown syntax following APA style guidelines.
- Include all source URLs in APA format at the end of the report.
"""

# Generate RAG search queries for a topic. Placeholder: {topic}.
SEARCH_PROMPT = """### Requirements
- You have a basic RAG search engine, it can find paper excerpts with vector distance search. \
Please generate several search queries based on the topic "{topic}".
- If the topic is a research review on a technical concept, such as "what is loss function?".
- If the topic is a research review on the current state of a research direction, such as "How is the current state of Text2SQL research, and what challenges does it face?".
- If the topic is a comparative analysis and review of multiple methods, such as "What methods can enhance the planning capabilities of large models, and what are their respective advantages and disadvantages?".
- If the topic is a research review on the evolution of a technical approach, such as "What is the technological development path of multimodal large models?".
- Your response must be in JSON format, for example: ["query1", "query2", ...]
"""

# Classify the topic into one of four survey categories. Placeholder: {topic}.
CLASIFY_TOPIC_PROMPT = """### Requirements
Please classify the topic "{topic}" into one of the following categories:
0. a technical concept, such as "what is loss function?".
1. the current state of a research direction, such as "How is the current state of Text2SQL research, and what challenges does it face?".
2. comparative analysis and review of multiple methods, such as "What methods can enhance the planning capabilities of large models, and what are their respective advantages and disadvantages?".
3. the evolution of a technical approach, such as "What is the technological development path of multimodal large models?".
Your response must be in JSON format, and contain only the number of the category, for example: [1].
"""

# Query generation tailored to category 0 (technical concept).
# Placeholders: {topic}, {keyword_nums}.
SEARCH_PROMPT_FOR_0 = """## Requirements
You have a basic RAG search engine, it can find paper excerpts with vector distance search. Please generate several search queries based on the topic "{topic}".
When searching for papers on a technical concept, crafting effective query questions can help you find relevant and high-quality literature. Here are some examples of query questions you might use:

### General Queries
- "What are the definitions and explanations of [technical concept] in the literature?"
- "What are the latest advancements in [technical concept]?"
- "How has [technical concept] evolved over the past decade?"
- "What are the current challenges and future directions in [technical concept]?"

### Methodological Queries
- "What methodologies are commonly used to study [technical concept]?"
- "How do researchers measure the effectiveness of [technical concept]?"
- "What are the best practices for implementing [technical concept] in real-world applications?"

### Application-Based Queries
- "How is [technical concept] applied in [specific industry or field]?"
- "What are the success stories of implementing [technical concept] in [specific context]?"
- "What are the limitations of using [technical concept] in [specific application]?"

### Comparative Queries
- "How does [technical concept] compare to [related concept] in terms of performance?"
- "What are the advantages and disadvantages of [technical concept] compared to traditional methods?"
- "What are the key differences between [technical concept] and its alternatives?"

### Theoretical and Conceptual Queries
- "What theoretical frameworks support the understanding of [technical concept]?"
- "How does [technical concept] relate to broader theoretical concepts in [related field]?"
- "What are the foundational principles of [technical concept]?"

### Research Gap Queries
- "What are the current research gaps in [technical concept]?"
- "What questions remain unanswered in the field of [technical concept]?"
- "What areas of [technical concept] need further exploration?"

These query questions can guide your literature search and help you identify relevant papers that address specific aspects of the technical concept you are interested in.
You should provide up to {keyword_nums} comprehensive queries to understand the technical concept based on the topic.
Your response must be in JSON format, for example: ["query1", "query2", ...]
"""

# Ask whether gathered information suffices; otherwise request follow-ups.
# Placeholders: {content}, {topic}.
INFORMATION_COMPREHENSIVE_PROMPT = """### Relevant Information
{content}

### Topic
{topic}

### Requirements
- Can the topic be sufficiently and comprehensively explained based on the above relevant information?
- If it is possible, please return "yes". If there is a lack of information, please provide additional questions for RAG search.
- If the reference information lacks sufficient and comprehensive explanations of information, such as concepts or definitions, corresponding questions can be raised for RAG search.
- You should respond in JSON format, for example: ["yes"] or ["question1", "question2", ...].
"""

# Answer a single question from relevant chunks.
# Placeholders: {content}, {question}.
ANSWER_PROMPT = """### Relevant Information
{content}

### Question
{question}

### Requirements
1. Provide a comprehensive answer to the question in 500 words.
2. Include all relevant factual information, numbers, statistics, etc., if available.
"""

# Extract only the question-relevant snippets from chunks.
# Placeholders: {paper_chunks}, {question}.
SIMPLFY_RELEVANT_CHUNK_PROMPT = """### Paper Chunks
{paper_chunks}

### Question
{question}

### Requirements
Extract the text relevant to the question from paper chunks. \
Ensure that the extraction is directly from the original text without including any unnecessary words.
If there is no relevant text, please reply with [].
Your response must be in JSON format, for example: ["relevant text1", "relevant text2", ...].
"""

# Produce the final survey report with numbered source citations.
# Placeholders: {content}, {topic}.
CONDUCT_SUERVY_PROMPT = """### Reference Information
{content}

### Topic
{topic}

### Requirements
Please provide a comprehensive and sufficient research report in response to the following topic: "{topic}", using the information provided \
above. The report must meet the following requirements:

- Ensure a well-structured and in-depth presentation, incorporating relevant facts and figures where available.
- The report must include an Introduction at the beginning, which briefly introduces the content of the report, and a Conclusion at the end, discussing future challenges.
- Present data and findings in an intuitive manner, utilizing feature comparative tables, if applicable.
- The report should have a minimum word count of 2,000 and be formatted with Markdown syntax following APA style guidelines.
- Include all source Global Indexes at the end of the report; if several points are from the same source, please use the same index.\
you should provide only the index of the source, format in JSON format, for example: [1, 2, 3, 4, ...].
"""


class SimplePaperChunkForAnswer(BaseModel):
    """A retrieved paper chunk plus an optional LLM-extracted snippet.

    Defined before SimpleQueryObj (which embeds it) so pydantic can resolve
    the field type without relying on a deferred forward-reference rebuild.
    """

    # Raw chunk with its vector-search distance, as returned by the KB API.
    paper_chunk: PaperChunkWithDistance
    # Question-relevant extract of the chunk; None until simplification runs.
    simplified_chunk: Optional[str] = None


class SimpleQueryObj(BaseModel):
    """A search query together with its retrieved chunks and final answer."""

    # The natural-language search query.
    query: str
    # Chunks retrieved for this query.
    papers: list[SimplePaperChunkForAnswer]
    # LLM answer synthesized from the chunks; None until answered.
    answer: Optional[str] = None


class SurveyKeyword(BaseModel):
    """A survey keyword with the papers retrieved for it and their summary."""

    keyword: str
    # NOTE(review): "relvent_papers" is a typo for "relevant_papers", but the
    # field name is part of the model's interface (used by PaperCollecter.run
    # in this module), so it is kept as-is.
    relvent_papers: Optional[list[SimpleRelevantPaper]] = None
    # LLM-written summary of the keyword, derived from the papers' summaries.
    summary: Optional[str] = None


class KeywordCollecter(Action):
    """Action that expands a research topic into keywords.

    For each seed keyword it retrieves relevant papers from the knowledge
    base, then asks the LLM to extract new keywords from each paper's
    abstract.
    """

    name: str = "KeywordCollecter"
    i_context: Optional[str] = None
    desc: str = "Collect keywords from a search engine."
    search_func: Optional[Any] = None
    search_engine: Optional[KnowledgeBaseAPI] = None
    rank_func: Optional[Callable[[list[str]], None]] = None

    @model_validator(mode="after")
    def validate_engine_and_run_func(self):
        """Create a default KnowledgeBaseAPI when none was injected."""
        if self.search_engine is None:
            self.search_engine = KnowledgeBaseAPI()
        return self

    async def run(
        self,
        topic: str,
        keywords: Optional[list[str]] = None,
        paper_per_query: int = 5,
        keyword_nums_per_paper: int = 5,
        system_text: str | None = None,
    ) -> dict[str, list[str]]:
        """Run the action to collect keywords.

        Args:
            topic: The research topic.
            keywords: Seed keywords; defaults to ``[topic]`` when empty.
            paper_per_query: Max papers to collect per search keyword; papers
                contain only title, abstract and relevant chunks.
            keyword_nums_per_paper: Max keywords to extract from each paper.
            system_text: Optional system prompt override.

        Returns:
            A dictionary mapping each search keyword to the keywords
            extracted from its retrieved papers.
        """
        if not keywords:
            keywords = [topic]
        # Stored on the instance because _get_paper_keywords reads it.
        self.system_text = (
            system_text if system_text else RESEARCH_TOPIC_SYSTEM.format(topic=topic)
        )
        relevant_papers_dict = await self._search_paper_chunk_with_abstract_by_keywords(
            keywords, paper_per_query
        )

        # Extract keywords for all keyword groups concurrently.
        analysis_keywords: list[list[str]] = await asyncio.gather(
            *(
                self._get_paper_keywords(papers, keyword_nums_per_paper)
                for papers in relevant_papers_dict.values()
            )
        )
        return dict(zip(relevant_papers_dict.keys(), analysis_keywords))

    async def _search_paper_chunk_with_abstract_by_keywords(
        self, query: list[str], top_k: int = 10
    ) -> dict[str, list[SimpleRelevantPaper]]:
        """Search paper chunks (with abstracts) for each keyword concurrently.

        Args:
            query: List of query strings.
            top_k: Number of results per query.

        Returns:
            Mapping of each query string to its relevant papers.
        """
        paper_list = await asyncio.gather(
            *(
                self.search_engine.query_simple_related_papers_by_keyword(i, top_k)
                for i in query
            )
        )
        return dict(zip(query, paper_list))

    async def _get_paper_keywords(
        self, relevant_papers: list[SimpleRelevantPaper], keyword_nums: int = 4
    ) -> list[str]:
        """Extract keywords from each paper's abstract via the LLM.

        Args:
            relevant_papers: Papers whose abstracts are mined for keywords.
            keyword_nums: Max number of keywords requested per paper.

        Returns:
            Deduplicated keywords collected across all papers (order is not
            guaranteed because a set is used for deduplication).
        """
        collected: list[str] = []
        for paper in relevant_papers:
            paper_abstract = f"\n#### Paper Title: {paper.paper_title}\n Paper Abstract: {paper.abstract_chunk.chunk_text}\n"
            prompt = KEYWORDS_SEARCH_PROMPT.format(
                keyword_nums=keyword_nums, paper_abstract=paper_abstract
            )
            logger.debug(prompt)
            raw_reply = await self._aask(prompt, [self.system_text])
            try:
                parsed = OutputParser.extract_struct(raw_reply, list)
                parsed = TypeAdapter(list[str]).validate_python(parsed)
            except Exception as e:
                # Best-effort: a malformed LLM reply skips this paper only.
                logger.exception(
                    f"fail to get keywords related to the research topic for {e}"
                )
                parsed = []
            collected.extend(parsed)

        return list(set(collected))


class PaperCollecter(Action):
    """Action that gathers papers per keyword, summarizes them, and ranks
    the keywords by relevance to the topic."""

    name: str = "KeywordOrganization"
    desc: str = "Organize the keywords and find the best relevant keywords, then search for the papers"
    search_engine: Optional[KnowledgeBaseAPI] = None
    rank_func: Optional[Callable[[list[str]], None]] = None

    @model_validator(mode="after")
    def validate_engine_and_run_func(self):
        """Create a default KnowledgeBaseAPI when none was injected."""
        if self.search_engine is None:
            self.search_engine = KnowledgeBaseAPI()
        return self

    async def run(
        self,
        topic: str,
        keywords: list[str],
        keyword_nums: int = 10,
        paper_num_per_keywords: int = 6,
        system_text: str | None = None,
    ) -> list[SurveyKeyword]:
        """Organize the keywords and return the best relevant ones.

        Args:
            topic: The research topic.
            keywords: The keywords related to the research topic.
            keyword_nums: The max number of keywords to keep after ranking.
            paper_num_per_keywords: The max number of papers per keyword.
            system_text: Optional system prompt override.

        Returns:
            The top-ranked SurveyKeyword objects (papers and summaries filled).
        """
        # NOTE(review): system_text is computed but not currently passed to
        # the LLM calls below; kept for interface compatibility.
        system_text = (
            system_text if system_text else RESEARCH_TOPIC_SYSTEM.format(topic=topic)
        )
        papers_per_keyword = await asyncio.gather(
            *(
                self._get_papers_by_keywords(topic, keyword, paper_num_per_keywords)
                for keyword in keywords
            )
        )

        surveykeyword_list = [
            SurveyKeyword(keyword=item["keyword"], relvent_papers=item["papers"])
            for item in papers_per_keyword
        ]

        # Summarize every paper, then every keyword (sequentially, to keep
        # LLM call ordering deterministic).
        for surveykeyword in surveykeyword_list:
            for paper in surveykeyword.relvent_papers:
                paper.summary = await self._get_paper_summary(paper, topic)

        for surveykeyword in surveykeyword_list:
            surveykeyword.summary = await self._get_keyword_summary(
                surveykeyword.relvent_papers, surveykeyword.keyword
            )

        return await self._rank_keywords(topic, surveykeyword_list, keyword_nums)

    async def _get_papers_by_keywords(
        self, topic: str, keyword: str, paper_per_query: int = 5
    ) -> dict[str, Any]:
        """Fetch papers for a keyword.

        Args:
            topic: The research topic (unused here; kept for call symmetry).
            keyword: Query string.
            paper_per_query: Top-k results.

        Returns:
            ``{"keyword": keyword, "papers": list[SimpleRelevantPaper]}``.
        """
        papers = await self.search_engine.query_simple_related_papers_by_keyword(
            keyword, paper_per_query
        )
        return dict(keyword=keyword, papers=papers)

    async def _get_paper_summary(self, paper: SimpleRelevantPaper, topic: str) -> str:
        """Summarize one paper with respect to the topic.

        Args:
            paper: The paper (title, abstract, relevant chunks).
            topic: The research topic.
        """
        paper_relevant = "\n".join(
            relevant_chunk.chunk_text for relevant_chunk in paper.relevant_chunks
        )
        prompt = SUMMARIZE_PAPER_PROMPT.format(
            topic=topic,
            paper_title=paper.paper_title,
            paper_abstract=paper.abstract_chunk.chunk_text,
            paper_relevant=paper_relevant,
        )
        return await self._aask(prompt)

    async def _get_keyword_summary(
        self, papers: list[SimpleRelevantPaper], keyword: str
    ) -> str:
        """Summarize a keyword from its papers' summaries.

        Args:
            papers: Papers retrieved for the keyword (summaries filled).
            keyword: The keyword to summarize.
        """
        prompt = SUMMARIZE_KEYWORK_PROMPT.format(
            keyword=keyword,
            paper_summary="\n".join(
                f"#### Paper Title: {paper.paper_title}\n Paper Summary: {paper.summary}\n"
                for paper in papers
            ),
        )
        return await self._aask(prompt)

    async def _rank_keywords(
        self, topic: str, surveykeyword_list: list[SurveyKeyword], keyword_nums: int = 4
    ) -> list[SurveyKeyword]:
        """Rank keywords by relevance and return the best ones.

        Args:
            topic: The research topic.
            surveykeyword_list: The keywords (with summaries) to rank.
            keyword_nums: The max number of keywords to keep.

        Returns:
            The top-ranked SurveyKeyword objects.
        """
        keyword_summary = "\n\n".join(
            f"{idx}: \n Keyword: {surveykeyword.keyword}\n Summary: \n{surveykeyword.summary}\n"
            for idx, surveykeyword in enumerate(surveykeyword_list)
        )

        prompt = COLLECT_AND_RANKURLS_PROMPT.format(
            topic=topic, keywords_summary=keyword_summary
        )
        logger.debug(prompt)
        raw_reply = await self._aask(prompt)
        try:
            indices = OutputParser.extract_struct(raw_reply, list)
            assert all(isinstance(i, int) for i in indices)
        except Exception as e:
            logger.exception(f"fail to rank results for {e}")
            # BUGFIX: was ``range(surveykeyword_list)`` (TypeError on a list);
            # fall back to the original order.
            indices = list(range(len(surveykeyword_list)))
        # Drop any out-of-range indices the LLM may have hallucinated.
        results = [
            surveykeyword_list[i] for i in indices if 0 <= i < len(surveykeyword_list)
        ]
        if self.rank_func:
            results = self.rank_func(results)
        return results[:keyword_nums]


class TestAction(Action):
    """Scratch driver action for experimenting with the survey pipeline.

    It loads previously persisted query/answer objects from disk, builds a
    globally indexed "Reference Information" section from their paper chunks,
    and asks the LLM to conduct a survey report on the topic.
    """

    name: str = "TestAction"
    desc: str = "Test action class"
    # BUGFIX: was annotated ``KnowledgeBaseAPI = None`` without Optional.
    search_engine: Optional[KnowledgeBaseAPI] = None

    @model_validator(mode="after")
    def validate_engine_and_run_func(self):
        """Create a default KnowledgeBaseAPI when none was injected."""
        if self.search_engine is None:
            self.search_engine = KnowledgeBaseAPI()
        return self

    async def run(self, topic: str, system_text: str | None = None) -> dict:
        """Run the experimental survey pipeline on previously cached data.

        Args:
            topic: The survey topic.
            system_text: Optional system prompt override.

        Returns:
            An empty dict (this action is used for its side effects/logs).
        """
        system_text = system_text if system_text else SUERVY_SYSTEM.format(topic=topic)

        # Load the query objects persisted by an earlier run.
        # NOTE(review): hard-coded path from the original experiment.
        simple_query_objs: list[SimpleQueryObj] = load_list_obj(
            "/app/data/tmp", SimpleQueryObj
        )

        # Build the reference section. Every paper chunk gets a unique
        # "Global Index" so the report can cite sources by number, as
        # required by CONDUCT_SUERVY_PROMPT.
        sections: list[str] = []
        global_index = 0
        for simple_query_obj in simple_query_objs:
            chunk_lines: list[str] = []
            for paper in simple_query_obj.papers:
                chunk_lines.append(
                    f"    Global Index:{global_index}\n    Paper Title: {paper.paper_chunk.paper_title}\n    Paper Chunk: {paper.paper_chunk.chunk_text}"
                )
                global_index += 1
            paper_content = "\n".join(chunk_lines)

            sections.append(
                f"####  Question: {simple_query_obj.query}\n  Paper Chunks: {paper_content}\n\n"
                + "-" * 50
                + "\n"
            )

        content = "\n".join(sections)

        prompt = CONDUCT_SUERVY_PROMPT.format(topic=topic, content=content)

        # Rough budget check: prompt + system prompt + response allowance.
        model_name = config.llm.model
        token_num = (
            count_string_tokens(prompt, model_name)
            + count_string_tokens(system_text, model_name)
            + config.llm.max_token
        )
        logger.info(f"estimated total tokens: {token_num}")

        answer = await self._aask(prompt, [system_text])
        logger.info(f"answer tokens: {count_string_tokens(answer, model_name)}")

        return dict()


def save_to_md(content: str, path: str) -> None:
    """Write *content* to *path* as a UTF-8 text file.

    Args:
        content: The Markdown text to write.
        path: Destination file path (overwritten if it exists).
    """
    # Explicit encoding avoids platform-dependent defaults (e.g. cp1252).
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)


if __name__ == "__main__":
    # asyncio is already imported at module level; no re-import needed.

    async def main() -> None:
        """Ad-hoc driver: run TestAction on a sample topic.

        Other example topics tried during development:
        - "How can we improve the accuracy of question-answering generation
          for table-based file retrieval?"
        - "What are the methods for fine-tuning large models, and what are
          the advantages and disadvantages of each method?"
        - "The development trajectory of text-to-image models."
        """
        topic = "What is Dataset Distillation?"

        ta = TestAction()
        res = await ta.run(topic)

        print(res)

    asyncio.run(main())
