from typing import Protocol, List, Literal
from parsel import Selector as Locator
from .core.models import (
    SimpleNewsItem,
    Selectors,
    Response,
    Request,
    Element,
)
from .utils import (
    is_css_or_xpath,
    simple_news_item_to_news_item,
    process_datetime_format,
    replace_relative_urls,
)
from playwright.async_api import TimeoutError


class Parser:
    """Extract structured news fields (title, time, author, content,
    breadcrumbs) from a ``Response`` using configurable CSS/XPath selectors.

    Works in two modes inside :meth:`_parse`:

    * static mode — ``response.page`` is absent; the raw HTML is parsed
      with parsel;
    * live mode — ``response.page`` is a Playwright page; selectors are
      resolved through Playwright locators.

    NOTE(review): this class previously inherited ``typing.Protocol``,
    which makes direct instantiation raise ``TypeError: Protocols cannot
    be instantiated`` — yet this module's ``__main__`` block instantiates
    it directly. It is a concrete implementation, so the Protocol base
    was removed.
    """

    # Fallback selectors tried when the configured ones yield nothing
    # (see fallback_default_selectors).
    default_selectors = {
        "time_selectors": [".date", ".publish-time"],
        "author_selectors": [
            "//div/*[(self::span or self::p) and (position()=1 or position()=last()) and contains(text(), '来源：')]"
        ],
    }

    def __init__(self, selectors: Selectors):
        self.selectors = selectors

    async def get_pub_time(self, response, selector=None) -> str:
        """Return the normalized publication time, or "" when not found."""
        selector = selector or self.selectors.time_selectors
        pub_date = await self._parse(response, selector, "text")
        return process_datetime_format(pub_date or "")

    async def get_title(self, response) -> str:
        """Return the article title, falling back to the ``<title>`` tag."""
        title = await self._parse(response, self.selectors.title_selectors, "text")
        if not title:
            title = await self._parse(response, ["title"], "text")
        return title

    async def get_author(self, response, selector=None) -> str:
        """Return the byline assembled from all matched text fragments."""
        selector = selector or self.selectors.author_selectors
        fragments = await self._parse(response, selector, "text-list") or []
        author = " ".join(fragment.strip() for fragment in fragments)
        # Post-processing: keep only the part before a "|" separator
        # (commonly site boilerplate after the author name).
        return author.split("|")[0].strip()

    async def get_content(self, response) -> str:
        """Return the article body HTML with relative URLs made absolute.

        May return ``None`` when no content selector matches.
        """
        content = await self._parse(response, self.selectors.content_selectors)
        if content:
            content = replace_relative_urls(content, str(response.url))
        return content

    async def get_navigators(self, response) -> List[str]:
        """Return breadcrumb texts with ">" separators filtered/stripped."""
        navs = await self._parse(
            response, self.selectors.nav_selectors, type="text-list"
        )
        if not navs:
            return []
        navs = [nav for nav in navs if nav not in (">",)]
        return [nav.strip(">") for nav in navs]

    async def _parse(self, response: Response, selectors: List[str], type="html"):
        """Try each selector in order; return the first non-empty result.

        ``type`` is one of "html" (default), "text" or "text-list".  The
        parameter name shadows the builtin but is kept because callers pass
        it by keyword (see get_navigators).

        Returns ``None`` when no selector produced a result.
        """
        if not response.page:
            # Static mode: parse the downloaded HTML with parsel.
            locator = Locator(text=response.content)
            for selector in selectors:
                kind = is_css_or_xpath(selector)
                if kind == "css":
                    result = locator.css(selector)
                elif kind == "xpath":
                    result = locator.xpath(selector)
                else:
                    # Fix: an unrecognized selector previously left
                    # ``result`` unbound (NameError on the first
                    # iteration) or reused the previous iteration's
                    # value; skip it instead.
                    continue
                if result:
                    return self._parse_inner(result, type)
        else:
            # Live mode: resolve selectors through the Playwright page.
            # NOTE(review): Playwright timeouts are in milliseconds, so
            # timeout=30 gives each selector only 30 ms — confirm this is
            # intended and not a typo for 30000.
            for selector in selectors:
                try:
                    if type == "text":
                        result = await response.page.locator(selector).inner_text(
                            timeout=30
                        )
                    elif type == "text-list":
                        result = await response.page.locator(selector).all_inner_texts()
                    else:
                        # "html" and any unknown type fall back to inner_html.
                        result = await response.page.locator(selector).inner_html(
                            timeout=30
                        )
                    if result:
                        return result
                except TimeoutError:
                    continue
            # NOTE(review): closing the page here (after one field finds no
            # match) makes every later _parse call on the same response fail,
            # e.g. parse() querying the remaining fields — confirm intended.
            await response.page.close()
        return None

    def _parse_inner(
        self, ele: Element, content_type: Literal["text", "html", "text-list"]
    ):
        """Extract text, a text list, or outer HTML from a parsel selection."""
        if content_type == "text":
            return "".join(ele.xpath(".//text()").getall())
        elif content_type == "text-list":
            return ele.xpath(".//text()").getall()
        else:
            return "\n".join(ele.getall())

    async def parse(self, response: Response) -> SimpleNewsItem:
        """Assemble a news item from the response.

        Precedence of field values: selector-parsed values, then
        default-selector fallbacks, then readability output (content
        fields only), then truthy ``meta`` overrides for known keys.
        """
        meta = response.request.meta or {"article": {}}
        # Fix: guard against a missing/None value — previously ``None``
        # was passed straight into process_datetime_format, unlike the
        # ``or ""`` guard used in get_pub_time.
        meta["published_time"] = process_datetime_format(
            meta.get("published_time") or ""
        )
        title = await self.get_title(response)
        published_time = await self.get_pub_time(response)
        url = str(response.url)
        raw_url = str(response.request.url)
        content = await self.get_content(response)
        byline = await self.get_author(response)
        navigators = await self.get_navigators(response)
        ret = {
            "title": title,
            "published_time": published_time,
            "url": url,
            "raw_url": raw_url,
            "content": content,
            "byline": byline,
            "navigators": navigators,
        }
        ret = await self.fallback_default_selectors(response, ret)
        if meta.get("use_readability") and not content:
            ret.update(meta.get("article", {}))
        # Finally, truthy meta values for known fields take precedence
        # over everything parsed above.
        known_fields = {
            "title",
            "published_time",
            "url",
            "raw_url",
            "content",
            "byline",
            "navigators",
        }
        for key, value in meta.items():
            if key in known_fields and value:
                ret[key] = value
        simple = SimpleNewsItem(**ret)
        return simple_news_item_to_news_item(simple)

    async def fallback_default_selectors(self, response: Response, ret: dict) -> dict:
        """Fill missing/templated time and byline using default selectors.

        A byline containing the literal "{{name}}" placeholder is treated
        as missing and re-queried.
        """
        if not ret["published_time"]:
            ret["published_time"] = await self.get_pub_time(
                response, self.default_selectors["time_selectors"]
            )
        if not ret["byline"] or "{{name}}" in ret["byline"]:
            ret["byline"] = await self.get_author(
                response, self.default_selectors["author_selectors"]
            )
        return ret


if __name__ == "__main__":

    async def test_parse():
        """Smoke-test: run a locally saved page through the Parser."""
        selectors = Selectors()
        parser = Parser(selectors)
        req = Request(url="https://www.huzheng.site", meta={})
        with open("baijiahao.html", encoding="utf-8") as f:
            content = f.read()
        resp = Response(
            request=req, url="https://www.huzheng.site", content=content
        )
        ret = await parser.parse(resp)
        print(ret)

    import asyncio

    asyncio.run(test_parse())
