# -*- coding: utf-8 -*-
# @Author   : SongLiangCheng
# @QQ       : 2192383945
# @Time     : 2022/12/2 14:52
# @File     : blog.py 
# @Project  : scraping
# @Desc     : this is a toy
import logging
import threading
import time
from concurrent.futures._base import Future
from concurrent.futures.thread import ThreadPoolExecutor
from dataclasses import dataclass
from pathlib import Path
from pyquery import PyQuery
from pyquery import PyQuery as pq
from common.blog import Blog
from .config import CsDnConfiguration,Configuration
import requests
from urllib import parse

# Module-level logger.
# NOTE(review): getLogger(__file__) keys the logger by file path;
# getLogger(__name__) is the stdlib convention — confirm before changing.
logger = logging.getLogger(__file__)


@dataclass
class CsDnBlog(Blog):
    """Scraper for CSDN blogs driven by CSDN's business-list JSON API.

    ``config.next_url`` points at the paginated API endpoint; each page's
    JSON carries the article entries under ``data.list``.
    """

    config: Configuration | CsDnConfiguration

    # Shared request headers; CSDN rejects requests without a browser UA.
    # (Plain class attribute without annotation — not a dataclass field.)
    _HEADERS = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36',
    }

    def get_doc(self, url: str) -> tuple[dict, bool]:
        """Fetch *url* and return its parsed JSON payload.

        :param url: CSDN business-list API URL.
        :return: ``(payload_dict, True)`` — callers unpack the pair.
        """
        response = requests.request("GET", url, headers=self._HEADERS)
        data: dict = response.json()
        # The original assigned data['data']['list'] back to itself (a no-op
        # left over from a removed rtype=='article' filter); dropped.
        return data, True

    def parse_content(self, doc: dict):
        """Render each article entry of *doc* as an HTML snippet.

        Unless ``config.only_outline`` is set, also downloads and saves the
        full article body for every entry.

        :param doc: JSON payload as returned by :meth:`get_doc`.
        :return: list of HTML fragments, one per article.
        """
        days_contents = []
        for line in doc.get('data').get('list'):
            logger.debug(line)
            pubdate = line.get('formatTime')
            title = line.get('title')
            desc = line.get('description')
            posturl = line.get('url')
            if not self.config.only_outline:
                self.write_content(posturl)
            line = f"""
    <div class="line">
    <p>
        <a href="{posturl}">{title}</a>
        <span>{pubdate} </span>
    </p>
    <p>{desc}</p>
    </div>
        """
            days_contents.append(line)
        return days_contents

    def parse_next_url(self, doc: dict | PyQuery) -> str:
        """Advance ``config.next_url`` to the next page and return it.

        :param doc: current page's payload (unused after the bug fix below).
        :raises Exception: when the next page holds no articles (last page).
        """
        # e.g. https://blog.csdn.net/community/home-api/v1/get-business-list?page=1&businessType=lately&noMore=false&username=...
        d = dict(parse.parse_qsl(parse.urlsplit(self.config.next_url).query))
        d['page'] = int(d['page']) + 1
        logger.debug(f"{d['page']}    this is page")
        p = parse.urlparse(self.config.next_url)
        self.config.next_url = f"{p.scheme}://{p.netloc}{p.path}?{parse.urlencode(d)}"

        content, _ = self.get_doc(self.config.next_url)
        # BUG FIX: probe the freshly fetched page and stop only when it is
        # EMPTY. The original tested `doc` (the previous page) and raised
        # whenever it still had articles — the exact inverse of the intent.
        if not content.get('data', {}).get('list'):
            raise Exception('已到最后一页')
        return self.config.next_url

    def get_real_doc(self, url):
        """Fetch *url* and return the HTML parsed as a PyQuery document."""
        response = requests.request("GET", url, headers=self._HEADERS)
        return pq(response.text)

    def write_content(self, url: str, is_img=False) -> None:
        """Download *url* and persist it at the path mapped by ``geturl``.

        Skips URLs whose target file already exists. For HTML pages, embedded
        images/scripts/css are handed to the ``*_save`` helpers first.

        :param url: page or asset URL.
        :param is_img: when True, save the raw response bytes verbatim.
        """
        # geturl() is a Blog helper; from usage here, urlobj exposes
        # `.fullpath` (target file path) and `.path` (target directory Path).
        urlobj = self.geturl(url)
        logger.debug(f'write_content 获取到的urlobj {urlobj}')
        target = Path(urlobj.fullpath)
        if target.exists():
            logger.debug(f'{url}对应的路径{urlobj.fullpath}已经存在，所以直接跳过解析')
            return

        # exist_ok=True replaces the race-prone bare `except: pass` the
        # original wrapped around mkdir.
        urlobj.path.mkdir(parents=True, exist_ok=True)

        if is_img:
            target.write_bytes(requests.get(url).content)
            return

        doc = self.get_real_doc(url)
        # Localize embedded assets so the saved page works offline.
        if doc('img'):
            doc = self.image_save(doc)
        if doc('script'):
            self.script_save(doc)
        if doc('link'):
            self.css_save(doc)
        target.write_text(doc.html(), encoding='utf8')