import json
import os
import re

import requests
from beeize.scraper import Scraper
from loguru import logger
from parsel import Selector

# Browser-like headers sent with every HTTP request to avoid trivial bot blocks.
headers = {
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 ('
                  'KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0',
}
# Maps the human-readable column name (from the 'column' input option)
# to the site's SPA route fragment for that column.
column_maps = {
    '第一关注': '/first/care/diyi',
    '中国': '/generalColumns/zhongguo',
    '国际': '/generalColumns/gj',
    '观点': '/generalColumns/guandian'
}
# Maps an SPA route fragment to the JSON list-API endpoint backing that column.
url_maps = {
    '/first/care/diyi': 'https://www.cankaoxiaoxi.com/json/channel/diyi/list.json',
    '/generalColumns/zhongguo': 'https://www.cankaoxiaoxi.com/json/channel/zhongguo/list.json',
    '/generalColumns/gj': 'https://www.cankaoxiaoxi.com/json/channel/gj/list.json',
    '/generalColumns/guandian': 'https://www.cankaoxiaoxi.com/json/channel/guandian/list.json',
}


class CanKaoXiaoXi:
    """Scraper for cankaoxiaoxi.com news columns.

    Reads its configuration from the beeize input store, enqueues one
    list-page request per configured column, then drains the request
    queue: list pages enqueue detail pages, detail pages extract the
    article text and push the result to the dataset.
    """

    def __init__(self):
        self.scraper = Scraper()
        self._input = self.scraper.input
        self.queue = self.scraper.request_queue  # request queue
        self.kv_store = self.scraper.key_value_store  # key-value store
        self.start_urls = self._input.get_list('start_urls')  # read from array input
        self.page_number = self._input.get_int('page_number')  # read from integer input
        self.download_media = self._input.get_bool('download_media')  # read from boolean input
        self.column = self._input.get_string('column')  # read from string input
        self.proxies = {
            'http': self._input.get_random_proxy(),
            'https': self._input.get_random_proxy(),
        }  # proxy configuration

    def fetch(self, url, retry_count=0, timeout=10):
        """GET *url* and return the response body as text.

        Retries up to 3 times on any request failure. Returns None when
        every attempt fails — callers must handle a None body.

        :param url: absolute URL to fetch.
        :param retry_count: current retry depth (internal).
        :param timeout: per-request timeout in seconds; without it a dead
            proxy would hang the scraper forever.
        """
        try:
            response = requests.get(
                url=url,
                headers=headers,
                proxies=self.proxies,
                timeout=timeout,
            )
            return response.text
        except requests.RequestException:
            if retry_count < 3:
                return self.fetch(url, retry_count + 1, timeout)
            return None  # all retries exhausted

    def add_task(self):
        """Seed the queue with one list-page request per configured column."""
        # URLs from the 'start_urls' array input: the column route is the
        # URL fragment (after '#'), which keys into url_maps.
        for start_url in self.start_urls:
            url = start_url.get('url')
            api_url = url_maps.get(url.split('#')[-1])
            if api_url:  # skip URLs that map to no known column API
                self.queue.add_request({'url': api_url, 'type': 'list_page', 'page_number': 1})

        # Column selected by name via the 'column' string input.
        api_url = url_maps.get(column_maps.get(self.column))
        if api_url:  # unknown column name -> nothing to enqueue
            self.queue.add_request({'url': api_url, 'type': 'list_page', 'page_number': 1})

    def run(self):
        """Drain the request queue, dispatching list and detail pages."""
        self.add_task()

        # BUG FIX: the original condition was `while self.queue.is_finished()`,
        # which never enters the loop while unprocessed tasks remain.
        while not self.queue.is_finished():
            request = self.queue.fetch_next_request()

            logger.info(request.get('url'))  # progress log

            if request['type'] == 'list_page':
                body = self.fetch(request.get('url'))
                if not body:
                    continue  # all retries failed; drop this page
                list_item = json.loads(body).get('list')
                for item in list_item or []:  # tolerate a missing 'list' key
                    data = item.get('data')
                    detail_request = {'url': data.get('url'), 'type': 'detail_page', 'data': data}
                    self.queue.add_request(detail_request)

                if request.get('page_number') < self.page_number:
                    # NOTE(review): the '#<page>' fragment appears to exist only
                    # to make the queued URL unique for deduplication; requests
                    # strips fragments, so the same endpoint is fetched — confirm
                    # against the site's pagination API.
                    request['page_number'] = request.get('page_number') + 1
                    request['url'] = request.get('url') + f"#{request['page_number']}"
                    self.queue.add_request(request)

            elif request['type'] == 'detail_page':
                body = self.fetch(request.get('url'))
                if not body:
                    continue  # all retries failed; drop this article
                # Article HTML is embedded as an escaped JS string literal.
                content_txt = re.findall(r'var contentTxt =\"(.*)\";', body)
                if not content_txt:
                    continue
                # Strip the JS escaping, then flatten the HTML to plain text.
                content_txt = re.sub(r'\\', '', content_txt[0])
                content = Selector(text=content_txt).xpath('string(.)').extract_first()
                result = request['data']
                result['content'] = content
                self.scraper.push_data(result)


if __name__ == '__main__':
    # Example local configuration (normally supplied by the beeize platform):
    # os.environ['START_URLS'] = '[{"url":"https://www.cankaoxiaoxi.com/#/first/care/diyi"},{"url":"https://www.cankaoxiaoxi.com/#/generalColumns/zhongguo"},{"url":"https://www.cankaoxiaoxi.com/#/generalColumns/gj"},{"url":"https://www.cankaoxiaoxi.com/#/generalColumns/guandian"}]'
    # os.environ['COLUMN'] = '中国'
    # os.environ['PAGE_NUMBER'] = '1'
    CanKaoXiaoXi().run()
