import logging

from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
from scrapy_main.middleware.blog_middle import BlogMiddle
from utils.format import format_txt_eng, format_url_by_template

# Module-wide logging setup: timestamped INFO-level messages on the root logger.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=_LOG_FORMAT)


class TingclassScraper(BlogMiddle):
    """Scraper for tingclass.net listing pages and article bodies.

    Configuration attributes (``proxy``, ``headers``, ``save_path``,
    ``enable_save_database``, ``enable_save_local``, ``enable_multi_process``,
    ``saver``, ``resulter``) are presumably provided by ``BlogMiddle`` from
    the JSON script path passed to the constructor — TODO confirm against
    the middleware implementation.
    """

    def get_page_list(self, html):
        """Extract the article links from a listing page.

        :param html: parsed listing page (``ParseMain``) exposing
            ``find_all`` and ``get_by_attr``
        :return: list of dicts, each with ``name`` (link text) and
            ``url`` (value of the ``href`` attribute)
        """
        # Comprehension replaces the original manual append loop.
        return [
            {
                'name': ele.get_text(),
                'url': html.get_by_attr(ele, 'href'),
            }
            for ele in html.find_all('a', {'class': 'ell'})
        ]

    def get_page_detail(self, html):
        """Return the cleaned text of an article body.

        :param html: parsed detail page (``ParseMain``)
        :return: text of the ``div#arti_tab_1`` element, run through
            ``format_txt_eng``
        """
        element = html.find('div', {'id': 'arti_tab_1'})
        return format_txt_eng(element.get_text(' '))

    def get_pagination(self, html):
        """Read current page number and total page count from a listing page.

        Sets ``self.current_page`` and ``self.page_count`` only when the
        page carries an ``a.thisclass`` marker (i.e. pagination exists).
        """
        # Hoisted: the original queried 'a.thisclass' twice.
        current = html.find('a', {'class': 'thisclass'})
        if current:
            self.current_page = int(current.get_text())
            # Page-number items; the last one holds the total page count.
            pages = html.find_all('li', {'class': 'a'})
            if pages:  # guard against IndexError on a page with no numbered items
                self.page_count = int(pages[-1].get_text())

    # Scrape a single article detail page.
    def scrape_page_detail(self, page, counter=None, search_key=None):
        """Fetch one article, attach its body to ``page``, then persist it.

        :param page: dict with at least ``url``; ``detail`` is added when
            the request succeeds
        :param counter: shared multiprocessing counter; required when
            ``enable_multi_process`` is set (no fallback otherwise)
        :param search_key: key used when saving to the database
        """
        self.search_key = search_key

        request_detail = req.request('get', page['url'], proxy=self.proxy, headers=self.headers, verify=True)
        # Lazy %-formatting instead of an f-string (skips work when INFO is off).
        logging.info("请求链接：%s", page['url'])

        if request_detail:
            parse_html = ParseMain(request_detail.text)
            page['detail'] = self.get_page_detail(parse_html)
        # NOTE(review): on request failure the page is still saved below
        # without a 'detail' key — confirm this partial save is intended.

        if self.enable_save_database:
            self.saver.add_or_update_data(search_key, page)

        if self.enable_save_local:
            file_save_path = format_url_by_template(self.save_path, self.__dict__)
            self.saver.save_data_to_file(page, file_save_path)

        if self.enable_multi_process:
            # Bump the counter shared across worker processes under its lock.
            with counter.get_lock():
                counter.value += 1
        else:
            self.resulter.count += 1

        print(f"获取结果：{page}")


if __name__ == '__main__':
    # Build the scraper from its JSON script and run it in one step.
    TingclassScraper('scrapy_main/scripts/tingclass.json').start_scraper()
