import logging
from pathlib import Path
from typing import AsyncIterator, List, Optional
from pydantic import BaseModel
from gchat_processing.gch_chain_wrapper import GigaChainWrapper
from semscholar.semscholar import SemScholarHttpClient, PaperInfo
from .base import BaseBlock, ProgressChunk
from ...config import PaperAIConfig


class SourcesStartRequest(BaseModel):
    """Request body that starts (or resumes) the literature-sources block."""
    idea: str  # free-text research idea; drives the generated search queries
    state_id: Optional[str] = None  # presumably identifies persisted block state to resume — confirm against caller
    append: bool = False  # when True, newly found sources are appended to the existing list (see SourcesBlock.run)


class SourcesItem(BaseModel):
    """A single literature source produced by the sources block."""
    title: str  # paper title as returned by Semantic Scholar
    summary: str  # LLM-generated summary of the paper's abstract
    bibtex: str = ''  # citation string taken from paper.citation; may be empty
    disabled: bool = False  # presumably lets the user exclude a source from later steps — confirm


class SourcesUpdateRequest(BaseModel):
    """Request body carrying an edited list of sources back from the client."""
    sources: List[SourcesItem]  # full replacement list — not used in this file; verify handler semantics
    state_id: Optional[str] = None  # presumably the same state handle as in SourcesStartRequest — confirm


class SourcesBlock(BaseBlock):
    """Literature sources block.

    Generates search queries from a free-text research idea, fetches matching
    papers from Semantic Scholar, summarizes each abstract with the LLM
    wrapper, and streams the results as ProgressChunk items.
    """
    # NOTE(review): only annotated here, never assigned in this file —
    # presumably injected on the class by the application wiring before
    # __init__ runs (it is read via self.config below). TODO confirm.
    config: PaperAIConfig

    def __init__(self, idea: str, sources: List[SourcesItem], request: SourcesStartRequest):
        """
        Args:
            idea: free-text research idea used to derive search queries.
            sources: previously accumulated sources (reused when
                ``request.append`` is True).
            request: the start request that triggered this block.
        """
        self.idea = idea
        self.sources = sources
        self.request = request
        # TODO env variables
        self.sem_client = SemScholarHttpClient(
            api_key=self.config.semanticscholar_api_key,
        )
        self.gchain_wrapper = GigaChainWrapper(
            sql_cache_path=Path('gchat_processing/sqlite_cache.db'),
            giga_token=self.config.gigachat_token,
            giga_scope='GIGACHAT_API_CORP'
        )
        self.logger = logging.getLogger(__name__)

    @classmethod
    def from_state(cls, data: dict, request: SourcesStartRequest) -> 'SourcesBlock':
        """Rebuild a block from persisted state.

        The idea always comes from *request*, overriding whatever was stored;
        the stored ``data['idea']`` is updated in place to match (callers may
        rely on this side effect).
        """
        data['idea'] = request.idea
        # Use cls(...) so subclasses get instances of themselves.
        return cls(
            idea=request.idea,
            sources=[
                SourcesItem.model_validate(item)
                for item in data.get('sources', [])
            ],
            request=request,
        )

    async def run(self) -> AsyncIterator[ProgressChunk]:
        """Search, summarize, and stream sources.

        Yields one non-final ProgressChunk per query (carrying only the
        sources found for that query) and a final chunk with the complete
        list. When ``request.append`` is True, previously known sources are
        kept and new ones are added after them.
        """
        results: List[SourcesItem] = []
        if self.request.append:
            results = self.sources

        queries = await self.gchain_wrapper.get_search_queries(text=self.idea, n_queries=3)
        self.logger.info(queries)
        # TODO move repeats control higher on stack (another)
        all_papers: List[PaperInfo] = []
        for i, query in enumerate(queries):
            # Fetch papers for THIS query only, skipping any paper already
            # returned by an earlier query. (Bug fix: the previous version's
            # inner comprehension shadowed `query` and re-fetched ALL queries
            # on every outer iteration — N queries caused N*N API calls, with
            # later iterations yielding only duplicates.)
            query_papers: List[PaperInfo] = [
                paper
                for paper in await self.sem_client.get_papers(req=query, n_papers=3)
                if paper not in all_papers
            ]
            all_papers.extend(query_papers)
            new_items = [
                # Each paper becomes a SourcesItem with an LLM summary of its abstract.
                SourcesItem.model_validate({
                    'title': paper.title,
                    'summary': await self.gchain_wrapper.get_summary_simple(paper.abstract),
                    'bibtex': paper.citation
                })
                for paper in query_papers
            ]
            results.extend(new_items)
            yield ProgressChunk(
                done=False,
                total_steps=len(queries),
                current_step=i + 1,
                data={
                    'sources': new_items,
                }
            )
        self.sources = results
        yield ProgressChunk(
            done=True,
            total_steps=len(queries),
            current_step=len(queries),
            data={
                'sources': results,
            }
        )

    def to_state(self, data: dict):
        """Persist the current idea and sources into *data* (mutated in place)."""
        data['idea'] = self.idea
        data['sources'] = self.sources
        return
