# -*- coding: utf-8 -*-
# @Author   : SongLiangCheng
# @QQ       : 2192383945
# @Time     : 2022/11/29 10:17
# @File     : blog.py 
# @Project  : scraping
# @Desc     : this is a toy
import dataclasses
import logging
from concurrent.futures._base import Future
import requests
from common.blog import Blog
from common.http import get_page
from pathlib import Path
from pyquery.pyquery import PyQuery
from pyquery import PyQuery as pq

from .config import CnblogConfiguration

# Use the module path (`__name__`) rather than the file path (`__file__`) so
# the logger joins the standard dotted-name hierarchy and can be configured
# per-package (with `__file__` the logger name is an OS-specific file path).
logger = logging.getLogger(__name__)


@dataclasses.dataclass
class CnBlog(Blog):
    """Scraper for cnblogs.com blogs.

    Fetches listing pages, extracts an outline HTML snippet per post, and
    resolves the next listing page to crawl.  The crawl loop itself lives in
    the `Blog` base class (not visible here).
    """

    # Site-specific options; `only_outline` controls whether post URLs are
    # persisted via `write_content` (presumably defined on `Blog` — confirm).
    config: CnblogConfiguration

    # How many times get_doc retries a failed request before giving up.
    _MAX_RETRIES = 20

    def run(self, URL):
        """Entry point: delegate the crawl to the `Blog` base class."""
        super().run(URL)

    def get_doc(self, url: str) -> list:
        """Fetch *url* and parse it with PyQuery, retrying on failure.

        Returns a two-element list ``[doc, ok]``: ``[PyQuery, True]`` on
        success, ``[None, False]`` once all retries are exhausted.  A list
        (not a tuple) is kept for backward compatibility with existing
        callers; the original annotation ``list[PyQuery | None, bool]`` was
        invalid typing syntax.
        """
        for _attempt in range(self._MAX_RETRIES):
            try:
                # Short timeout + retry loop: the site occasionally stalls.
                res = requests.get(url, timeout=3)
                # Fix: HTTP error pages (404/5xx) were previously parsed and
                # reported as success; treat them as retryable failures.
                res.raise_for_status()
                return [pq(res.text), True]
            except requests.RequestException as e:
                # Narrowed from bare Exception: only network/HTTP errors
                # should trigger a retry.  Lazy %-args avoid formatting the
                # message when the level is disabled.
                logger.error("%s 请求失败 \n 报错: %s", url, e)
        return [None, False]

    def parse_content(self, doc: PyQuery) -> list[str]:
        """Extract one outline HTML snippet per post from a listing page.

        Each ``div.day`` holds one or more posts.  Both cases share the same
        extraction logic, so a single loop handles them (the original
        duplicated the logic per branch — with accidentally different
        template indentation — and also rebound its outer ``days`` loop
        variable).  Optionally persists the discovered URLs.
        """
        days_contents: list[str] = []
        for day in doc('#mainContent .forFlow div.day').items():
            pubdate = day('.dayTitle a').text()
            blogs_bydate_url = day('.dayTitle a').attr('href')
            # Descriptions are positionally aligned with the post titles.
            descs = list(day('.postCon').items())
            for i, post in enumerate(day('.postTitle').items()):
                posturl = post('a').attr('href')
                title = post('a').text()
                # Guard against a missing description block (the original
                # multi-post branch crashed with IndexError here).
                desc = descs[i].html() if i < len(descs) else None
                logger.info("%s %s %s %s %s", pubdate, blogs_bydate_url, posturl, title, desc)
                if not self.config.only_outline:
                    # Persist both the per-day archive URL and the post URL, e.g.
                    # https://www.cnblogs.com/zyyang1993/archive/2022/10/26.html
                    # https://www.cnblogs.com/zyyang1993/p/16829030.html
                    self.write_content(blogs_bydate_url)
                    self.write_content(posturl)
                days_contents.append(
                    self._render_line(posturl, title, blogs_bydate_url, pubdate, desc)
                )
        return days_contents

    @staticmethod
    def _render_line(posturl, title, blogs_bydate_url, pubdate, desc) -> str:
        """Render one post as an outline ``div.line`` HTML snippet.

        NOTE: the two original branches emitted the same markup with
        different incidental indentation; it is normalized here to the
        single-post template.
        """
        return f"""
    <div class="line">
    <p>
        <a href="{posturl}">{title}</a>
        <a href="{blogs_bydate_url}">{pubdate} </a>
    </p>
    <p>{desc}</p>
    </div>
        """

    def get_total_page(self, doc: PyQuery) -> str:
        """Return the href of the pager's '下一页' (next page) link.

        Raises ``Exception`` when the pager is empty/missing or its last
        link is not a next-page link (end of the blog reached).  The empty
        pager previously crashed with an unguarded ``IndexError``.
        """
        pager_links = list(doc('.pager a').items())
        if pager_links and pager_links[-1].text() == '下一页':
            return pager_links[-1].attr('href')
        raise Exception('页面不存在下一页')

    def parse_next_url(self, doc: PyQuery) -> str:
        """Return the URL of the next listing page.

        Prefers the in-page ``#nav_next_page`` link; falls back to the
        pager's next-page link, which raises when there is none.
        """
        if (nav_next_page := doc('#nav_next_page a')):
            return nav_next_page.attr('href')
        return self.get_total_page(doc)


if __name__ == '__main__':
    # Bug fix: the original passed get_doc/parse_content/parse_next_url as
    # bare module-level names (NameError — they are CnBlog methods) and
    # referenced an undefined `save_contents`.  Bind the callbacks through a
    # CnBlog instance instead.
    logging.basicConfig(level=logging.INFO)  # make logger output visible when run as a script
    logger.info(f'{__name__} start')
    URL = 'https://www.cnblogs.com/zyyang1993'
    blog = CnBlog(config=CnblogConfiguration())  # assumes a no-arg config — TODO confirm
    total_page = get_page(
        URL,
        get_doc=blog.get_doc,
        parse_content=blog.parse_content,
        parse_next_url=blog.parse_next_url,
        save_contents=blog.save_contents,  # presumably inherited from Blog — verify
    )
    logger.info(f'{__name__} end')
