import logging
import time

from modules.browser_simulate.selenium_simulate import SeleniumSimulate
from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
from scrapy_main.base.base_scraper import BaseScraper
from utils.format import format_url_by_template

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')



class BrowserMiddle(BaseScraper):
    """Middle-layer scraper that crawls search-result list pages and their
    detail pages, with an optional Selenium browser for JS-heavy sites.

    Subclasses (or ``BaseScraper``) are expected to provide: ``proxy``,
    ``headers``, ``search_url``, ``paging_url``, ``search_keys``,
    ``get_pagination``, ``get_page_list``, ``get_detail_page_content``,
    and an implementation of :meth:`scrape_page_detail`.
    """

    def __init__(self, start_url=None):
        # URL the crawl starts from; may also be set later by a subclass.
        self.start_url = start_url
        # Pagination state; page_index is interpolated into paging_url templates.
        self.current_page = 1
        self.page_count = 1
        self.page_index = 1
        self.browser_simulate = None

        self.start_browser()

    def start_browser(self):
        """Create the Selenium simulation instance used for browser work."""
        self.browser_simulate = SeleniumSimulate()

    # Scrape a list page
    def scrape_page_list(self, url):
        """Fetch *url*, update pagination state, and return the parsed page list.

        Raises:
            Exception: when the request fails or returns a non-200 status.
        """
        logging.info(f"请求链接：{url}")
        request_data = req.request('get', url, proxy=self.proxy, headers=self.headers, verify=True)
        if request_data and request_data.status_code == 200:
            parse_html = ParseMain(request_data.text)
            self.get_pagination(parse_html)
            return self.get_page_list(parse_html)

        # BUG FIX: the original read ``request_data.code`` (a requests Response
        # exposes ``status_code``, not ``code``) and crashed with AttributeError
        # whenever request_data was None — exactly the case this branch handles.
        status = request_data.status_code if request_data else None
        raise Exception("请求异常：", f"{url}, Error Code: {status}")

    # Scrape a detail page — subclasses must implement this.
    def scrape_page_detail(self, page, counter, search_key=None):
        raise NotImplementedError

    # Scrape a whole page: the list page plus every detail page on it.
    def scrape_whole_page(self, search_key, counter=None):
        """Crawl the first search page, then every remaining paginated page.

        ``counter`` defaults to None so existing callers that pass only the
        search key (see :meth:`start_scraper`, which previously raised a
        TypeError for the missing positional argument) keep working.
        """
        self.search_key = search_key
        search_url = format_url_by_template(self.search_url, self.__dict__)
        self._scrape_page_list_and_details(search_url, counter, search_key)

        # Pages 2..page_count, discovered by get_pagination() on the first page.
        for i in range(2, self.page_count + 1):
            self.page_index = i
            page_url = format_url_by_template(self.paging_url, self.__dict__)
            self._scrape_page_list_and_details(page_url, counter, search_key)

    def _scrape_page_list_and_details(self, page_url, counter, search_key):
        """Scrape one list page and every detail page it links to."""
        page_list = self.scrape_page_list(page_url)
        if page_list:
            for page in page_list:
                # BUG FIX: the original called scrape_page_detail(page, search_key),
                # which fed search_key into the ``counter`` positional slot and left
                # search_key=None; pass each argument to its own parameter.
                self.scrape_page_detail(page, counter, search_key)

    # Start the crawl
    def start_scraper(self):
        """Run the whole crawl for every configured search key."""
        for search_key in self.search_keys:
            self.scrape_whole_page(search_key.value)

        return {
            'msg': '爬取完成',
            'code': 200
        }

    # Scrape the detailed content of a paginated detail page
    def scrapy_detail_page_content(self, url):
        """Fetch *url* after a polite delay and return its parsed detail content.

        Raises:
            Exception: when the request fails, instead of the original
                NameError on the unbound ``parse_html`` local.
        """
        time.sleep(2)  # throttle detail requests to avoid hammering the site
        request_detail = req.request('get', url, proxy=self.proxy, headers=self.headers, verify=True)
        # Consistency fix: use logging like scrape_page_list, not print().
        logging.info(f"请求链接：{url}")
        if not request_detail:
            # BUG FIX: the original fell through with parse_html unbound,
            # producing a NameError; fail with an explicit error instead.
            raise Exception("请求异常：", f"{url}")
        parse_html = ParseMain(request_detail.text)
        return self.get_detail_page_content(parse_html)
