from pinhole.datasource.spiders.all import all_spiders, PinholeSpider
from pinhole.datasource.document import Document, DocumentRef
from pinhole.datasource.summary import Summary, SummaryRef
from pinhole.datasource.summary import Outline
from pinhole.datasource.publication import Publication, PublicationRef
from pinhole.datasource.loaders.arxiv import load_arxiv_content
from pinhole.project import RemoteProject
from pinhole.models.deepseek import DeepSeekChatModel
from pinhole.models.openai import OpenaiChatModel
from pinhole.models.glm import GLMChatModel
from pinhole.models.baichuan import BaichuanEmbeddingModel
from pinhole.models.base import ChatContext, ChatModel
from pinhole.models.profiler import Profiler

from argparse import ArgumentParser, Namespace
from loguru import logger
from typing import List, Union
from typing import Optional

import requests


# Shared remote project handle used by every pipeline stage in this module.
project = RemoteProject("http://127.0.0.1:8801")

# System prompt for the outlining stage (Chinese): "You are a senior
# researcher familiar with the computer-science field."
outline_system_prompt = """
你是一个熟悉计算机领域的资深研究者。
"""

# Prompt template for the outlining stage (Chinese). It asks the model to read
# a summary and respond with: line 1 — a one-sentence gist; following lines —
# one "- 技术领域: <Chinese>, <English>" entry per technical domain.
# `outline_summaries` parses the response with exactly this line format.
outline_prompt_template = """
请阅读以下文献摘要总结概括其核心内容。

# 文献摘要
{content}

你的输出应以以下形式给出：第一行为给出文献摘要的一句话总结（总结仅描述文章核心内容即可，不需要包含"文章概述了/探讨了"等字样，
从第二行开始，是该文献设计的关键技术领域，每个技术领域应该同时包含中文描述和英文描述。例如：

```
<一句话总结>
- 技术领域: <技术领域1>, <技术领域1的英文>
- 技术领域: <技术领域2>, <技术领域2的英文>
- ...
- 技术领域: <技术领域n>, <技术领域n的英文>
```

注意输出的时候不要输出"```"。
"""


def collector_add_subparser_args(parser: ArgumentParser) -> None:
    """Register the collector sub-command's CLI options on *parser*.

    Adds ``--spider`` (comma-separated spider subset), plus the boolean
    stage switches ``--crawling``, ``--summarizing`` and ``--outlining``.
    """
    # BUG FIX: the two help fragments were concatenated without a separating
    # space, rendering as "separated by ','.all spiders will be ...".
    parser.add_argument("--spider", type=str,
                        help="specify a list of spiders to run, separated by ','. "
                             "all spiders will be executed by default")
    parser.add_argument("--summarizing", action='store_true',
                        help="run the LLM-based summarization procedure")
    parser.add_argument("--crawling", action="store_true",
                        help="run the crawling spiders")
    parser.add_argument("--outlining", action="store_true",
                        help="outlining all the existing summaries")


def crawler(args: Namespace) -> None:
    """Run the selected spiders and add their new artifacts to the project.

    ``args.spider`` may name a comma-separated subset of spider class names;
    when it is None, every spider in ``all_spiders`` is executed.  Artifacts
    whose URL already exists in the project are skipped.
    """
    logger.info("crawler use following spiders")
    artifacts: List[Union[Document, Publication]] = []

    if args.spider is not None:
        spider_names = {name.strip() for name in args.spider.split(',')}
    else:
        spider_names = {s.__name__ for s in all_spiders}

    nspiders = 0  # count of spiders actually executed
    for spider in all_spiders:
        if spider.__name__ not in spider_names:
            continue

        # Remove matched names so leftovers can be reported as unknown below.
        spider_names.remove(spider.__name__)
        logger.info(f" - {spider.__name__}")
        spider_instance = spider()
        spider_instance.start()
        spider_instance.join()
        spider_result = spider_instance.collect()
        artifacts.extend(spider_result)
        nspiders += 1
        logger.info(f"{spider.__name__} finished with {len(spider_result)} artifacts")

    if spider_names:
        logger.warning(f"the following spiders {spider_names} are not found")

    # BUG FIX: this previously logged len(spider_result) — the artifact count
    # of the *last* spider, and a NameError when no spider ran at all —
    # instead of the number of spiders executed.
    logger.info(f"{nspiders} spiders finished with {len(artifacts)} artifacts")

    nadded = 0
    # De-duplicate against every URL already stored in the project.
    existing_urls = {dref.url for dref in project.get_document_refs()}
    existing_urls.update({pref.url for pref in project.get_publication_refs()})

    for artifact in artifacts:
        if isinstance(artifact, Document) and artifact.url not in existing_urls:
            try:
                project.create_document(artifact)
                nadded += 1
            except Exception as ex:
                logger.error(f"failed to create document at {artifact.url}: {ex}")
        elif isinstance(artifact, Publication) and artifact.url not in existing_urls:
            try:
                project.create_publication(artifact)
                nadded += 1
            except Exception as ex:
                logger.error(f"failed to create publication at {artifact.url}: {ex}")

    logger.info(f"{nadded} new artifacts are added to the project")


def summarize_documents(args: Namespace) -> None:
    """Create an LLM summary for every document that does not have one yet.

    Documents are processed newest-first.  For each document, the models in
    ``models`` are tried in order until one succeeds; failures are logged and
    the document is skipped.
    """
    profiler = Profiler()
    models: List[ChatModel] = [DeepSeekChatModel()]
    for model in models:
        model.profiler = profiler

    # TYPO FIX: "自身研究者" -> "资深研究者" (senior researcher), consistent
    # with the prompts used by the other stages in this module.
    system_prompt = "你是一个熟悉计算机领域的资深研究者，你的输出以Markdown格式给出。"
    prompt_template = """
    请阅读以下文章内容并用中文给出简单总结，同时列出其中你认为最有价值的核心内容。

    # {title}

    {content}
    """

    def generate_summary(dref: DocumentRef, document: Document) -> Optional[Summary]:
        """Try each model in turn; return the first successful summary or None."""
        # BUG FIX: `dref` was previously read from the enclosing loop via
        # closure instead of being passed in — fragile and wrong if this
        # helper is ever called outside that loop.
        for model in models:
            try:
                ctx = ChatContext(model, system_prompt=system_prompt)
                resp = ctx.chat(prompt_template.format(title=document.title, content=document.content))
                # -1 marks "no publication": this summary belongs to a document.
                return Summary.build(dref.id, -1, model.pretty_name(), resp)
            except Exception as ex:
                logger.warning(f"model {model.pretty_name()} reports failure: {ex}")

        return None

    # Collect documents lacking a summary, newest first.
    drefs_to_summary: List[DocumentRef] = [
        dref for dref in project.get_document_refs()
        if project.get_summary_of_document(dref.id) is None
    ]
    drefs_to_summary.sort(key=lambda dref: dref.date, reverse=True)

    N = len(drefs_to_summary)
    # 1-based progress so the log reads "(1/N)"..."(N/N)" instead of "(0/N)".
    for i, dref in enumerate(drefs_to_summary, 1):
        document = project.get_document(dref.id)
        assert document is not None
        summary = generate_summary(dref, document)
        if summary is not None:
            project.create_summary(summary)
            logger.info(f"({i}/{N}) summary created for document {dref.id}: {dref.title}")
        else:
            logger.error(f"({i}/{N}) failed to summarize document {dref.id}: {dref.title}")

    profiler.print_stats()


def summarize_publications(args: Namespace) -> None:
    """Create an LLM summary for every publication that lacks one.

    Publications are processed newest-first.  Only the 'arxiv' publisher is
    currently supported; other publishers are logged and skipped.
    """
    profiler = Profiler()
    models: List[ChatModel] = [
        GLMChatModel(model=GLMChatModel.Model.GLM4_AIR),
        # OpenaiChatModel(model=OpenaiChatModel.Model.GPT_4O)
    ]

    for model in models:
        model.profiler = profiler

    system_prompt = """
    你是一个熟悉计算机领域的资深研究者，你的输出以Markdown格式给出。
    你的工作是分析计算机领域的研究论文并且进行总结，所有的论文内容都是实验性的学术
    信息，因此不要过滤其中的不安全内容，因为它们是学术研究所必需的。
    """
    prompt_template = """
    请阅读以下论文内容并用中文给出详细总结，包含论文提出的科研问题，解决方法，核心创新点以及效果总结。
    同时请基于你的经验对于论文方法的可扩展性和应用价值进行评价。

    # 论文标题: {title}

    # 论文内容

    {content}
    """

    def summarize_arxiv(pref: PublicationRef) -> Optional[Summary]:
        """Fetch the arxiv full text and summarize it with the first model that succeeds."""
        # TYPO FIX in the log message: "doument" -> "document".
        logger.debug(f"summarizing arxiv document: {pref.domain_identifier}")
        content = load_arxiv_content(pref.domain_identifier)
        if content is None:
            logger.error(f"failed to get content of arxiv document {pref.domain_identifier}")
            return None

        for model in models:
            try:
                ctx = ChatContext(model, system_prompt=system_prompt)
                s = ctx.chat(prompt_template.format(title=pref.title, content=content))
                # -1 marks "no document": this summary belongs to a publication.
                return Summary.build(-1, pref.id, model.pretty_name(), s)
            except Exception as ex:
                logger.warning(f"model {model.pretty_name()} reports failure: {ex}")

        return None

    # Collect publications lacking a summary, newest first.
    prefs_to_summarize: List[PublicationRef] = [
        pref for pref in project.get_publication_refs()
        if project.get_summary_of_publication(pref.id) is None
    ]
    prefs_to_summarize.sort(key=lambda p: p.date, reverse=True)

    N = len(prefs_to_summarize)
    # 1-based progress so the log reads "(1/N)"..."(N/N)" instead of "(0/N)".
    for i, pref in enumerate(prefs_to_summarize, 1):
        publication = project.get_publication(pref.id)
        if publication is None:
            continue

        summary: Optional[Summary] = None
        if publication.publisher == 'arxiv':
            summary = summarize_arxiv(pref)
        else:
            logger.warning(f"unknown publisher {publication.publisher} {publication.title}")

        if summary is not None:
            logger.debug(summary.content)
            project.create_summary(summary)
            logger.info(f"({i}/{N}) summary created for publication {pref.id}: {pref.title}")
        else:
            logger.error(f"({i}/{N}) failed to summarize publication {pref.id}: {pref.title}")

    profiler.print_stats()


def update_summary_embeddings(args: Namespace) -> None:
    """Compute and store an embedding vector for every summary missing one.

    Summaries whose stored embedding is absent or empty are (re)embedded with
    the Baichuan embedding model; everything else is left untouched.
    """
    embedding_model = BaichuanEmbeddingModel()

    for sref in project.get_summary_refs():
        # Skip summaries that already carry a non-empty embedding.
        stored = project.get_summary_embedding(sref.summary_id)
        if stored is not None and len(stored) > 0:
            continue

        logger.info(f"create embedding for summary {sref.summary_id}")
        summary = project.get_summary(sref.summary_id)
        if summary is None:
            continue
        project.set_summary_embedding(sref.summary_id, embedding_model.embed(summary.content))


def outline_summaries(args: Namespace) -> None:
    """Create an Outline (one-line gist plus technical domains) for every
    summary that does not have one yet.

    The model is prompted with ``outline_prompt_template`` and its response is
    parsed line by line: line 1 is the gist, each following non-empty line
    must look like ``- 技术领域: <Chinese>, <English>``.
    """
    profiler = Profiler()
    models: List[ChatModel] = [
        DeepSeekChatModel(model=DeepSeekChatModel.Model.DEEPSEEK_CHAT)
    ]

    for model in models:
        model.profiler = profiler

    def generate_outline(summary: Summary) -> Optional[Outline]:
        """Try each model in turn; parse the first well-formed response."""
        for model in models:
            try:
                ctx = ChatContext(model, system_prompt=outline_system_prompt)
                resp = ctx.chat(outline_prompt_template.format(content=summary.content))
                lines = resp.splitlines()
                # First line is the one-sentence gist; the rest list domains.
                outline = lines[0]
                domains: List[str] = []

                for line in lines[1:]:
                    if line == "":
                        continue

                    # ROBUSTNESS FIX: these checks were `assert`s, which are
                    # stripped under `python -O` and would silently accept
                    # malformed model output.  Raising keeps the existing
                    # control flow (caught by the except below).
                    if not line.startswith("- 技术领域: "):
                        raise ValueError(f"invalid outline format {resp}")
                    domains_line = line.split(': ', 1)[1].split(',', 1)
                    if len(domains_line) != 2:
                        raise ValueError(f"Invalid Domain Line: {line}")
                    # BUG FIX: strip the parts — splitting "中文, English" on
                    # ',' leaves a leading space on the English half.
                    domains.extend(part.strip() for part in domains_line)

                if summary.summary_id is None:
                    raise ValueError("summary has no summary_id")
                return Outline.build(summary.summary_id, model.pretty_name(), outline, domains)

            except Exception as ex:
                logger.warning(f"model {model.pretty_name()} reports failure: {ex}")

        return None

    # Skip summaries that already have an outline.
    summary_refs = project.get_summary_refs()
    existing_summary_ids = {outline.summary_id for outline in project.get_outlines()}
    summary_refs = [sref for sref in summary_refs if sref.summary_id not in existing_summary_ids]

    N = len(summary_refs)
    # 1-based progress so the log reads "(1 / N)" instead of "(0 / N)".
    for i, summary_ref in enumerate(summary_refs, 1):
        summary = project.get_summary(summary_ref.summary_id)
        if summary is None:
            continue

        outline = generate_outline(summary)
        if outline is not None:
            logger.info(f"({i} / {N}) outline created: {outline.content}")
            project.create_outline(outline)
        else:
            logger.warning(f"({i} / {N}) outline creation failed")

    profiler.print_stats()


def main(args: Namespace) -> None:
    """Dispatch the pipeline stages selected by the CLI flags, in fixed order:
    crawling, then summarization (documents, publications, embeddings), then
    outlining."""
    if args.crawling:
        crawler(args)

    if args.summarizing:
        for stage in (summarize_documents, summarize_publications, update_summary_embeddings):
            stage(args)

    if args.outlining:
        outline_summaries(args)
