# -*- coding: utf-8 -*-
# @Author   : SongLiangCheng
# @QQ       : 2192383945
# @Time     : 2022/12/13 8:38
# @File     : blog.py 
# @Project  : scraping
# @Desc     : this is a toy
import logging

import requests
from pyquery import PyQuery
from pyquery import PyQuery as pq

from common.blog import Blog
# Name the logger after the module, not the file path: getLogger(__file__)
# produces path-like logger names that break the logging hierarchy and
# handler configuration by name. getLogger(__name__) is the stdlib idiom.
logger = logging.getLogger(__name__)


# Import-time trace marker: emitted once when the module is loaded.
print(__name__, 'start')
class fiveoneCto(Blog):
    """Site-specific ``Blog`` scraper for 51CTO article listing pages.

    Fetches listing pages, extracts per-article metadata, renders each
    article as an HTML snippet, and resolves pagination links.
    """

    # How many times get_doc retries a failed HTTP request before giving up.
    MAX_RETRIES = 20

    def get_doc(self, url: str) -> list:
        """Fetch *url* and parse the response body with pyquery.

        Retries up to ``MAX_RETRIES`` times on request failure (network
        errors, timeouts, or HTTP 4xx/5xx statuses).

        Args:
            url: Absolute URL of the listing page to fetch.

        Returns:
            A 2-element list: ``[doc, True]`` on success, where *doc* is a
            ``PyQuery`` document, or ``[None, False]`` once all retries
            are exhausted.
        """
        for _attempt in range(self.MAX_RETRIES):
            try:
                res = requests.get(url, timeout=3)
                # Treat HTTP error statuses as failures so an error page
                # is retried instead of being parsed as a document.
                res.raise_for_status()
            except requests.RequestException as e:
                # Lazy %-args: the message is only rendered if emitted.
                logger.error("%s 请求失败 \n 报错: %s", url, e)
                continue
            return [pq(res.text), True]
        return [None, False]

    def parse_content(self, doc: PyQuery) -> list:
        """Extract every article on a listing page as an HTML snippet.

        Unless ``self.config.only_outline`` is set, each article's URL is
        also handed to ``self.write_content`` so the full post can be
        saved.

        Args:
            doc: Parsed listing page.

        Returns:
            List of HTML ``<div class="line">`` snippets, one per article.
        """
        articles = doc('.common-article-listbox .common-article-list')

        days_contents = []
        for article in articles.items():
            pubdate = article('.messages .actions').text()
            posturl = article('.title a').attr('href')
            title = article('.title a').text()
            desc = article('.dec a').text()
            logger.info("%s %s %s %s ", pubdate, posturl, title, desc)

            if not self.config.only_outline:
                # Persist the post URL so the article body is saved too
                # (outline-only mode keeps just the listing metadata).
                self.write_content(posturl)

            line = f"""
<div class="line">
<p>
    <a href="{posturl}">{title}</a>
    <a href="/">{pubdate} </a>
</p>
<p>{desc}</p>
</div>
    """
            days_contents.append(line)
        return days_contents

    def get_total_page(self, doc: PyQuery) -> str:
        """Return the href of the pagination "last page" link.

        Args:
            doc: Parsed listing page.

        Raises:
            Exception: if the page has no last-page link.
        """
        end_page = doc('.pagination .last a')
        if end_page:
            return end_page.attr('href')
        raise Exception('页面不存在下一页')

    def parse_next_url(self, doc: PyQuery) -> str:
        """Return the URL of the next listing page.

        Prefers the "next" pagination link; falls back to the last-page
        link (which raises when neither link exists).
        """
        if nav_next_page := doc('.pagination .next a'):
            return nav_next_page.attr('href')
        return self.get_total_page(doc)



# Import-time trace marker: emitted once the module body has executed.
print(__name__, 'end')
