import logging
import os

from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
from scrapy_main.middleware.blog_middle import BlogMiddle
from utils.os_main import create_directory, does_file_exist, get_abs_file_path

# Module-level side effect: configure the root logger once at import time.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class XlustScraper(BlogMiddle):
    """Scraper for the xlust blog.

    Collects post links from list pages, then downloads every image found
    on each post's detail page, saving either to a remote server, a local
    directory, or both (controlled by flags inherited from BlogMiddle).
    """

    # Parse the list page
    def get_page_list(self, html):
        """Parse a list page into post entries.

        Args:
            html: parsed page object (ParseMain) supporting ``find_all`` /
                  ``get_by_attr``.

        Returns:
            list[dict]: one ``{'name', 'url', 'details'}`` item per
            ``<a rel="bookmark">`` anchor; ``details`` starts empty and is
            filled later by ``scrape_page_detail``.
        """
        return [
            {
                'name': ele.get_text(),
                'url': html.get_by_attr(ele, 'href'),
                'details': [],
            }
            for ele in html.find_all('a', {'rel': 'bookmark'})
        ]

    # Parse the detail page
    def get_page_detail(self, html):
        """Return the ``src`` URL of every ``<img>`` on a detail page."""
        # Comprehension instead of the original manual append loop (PERF401).
        return [ele['src'] for ele in html.find_all('img', {'src': True})]

    # Parse the pagination widget
    def get_pagination(self, html):
        """Read the current page number and total page count from the
        ``<ul class="page-numbers">`` widget, storing them on ``self``.

        Leaves ``self.current_page`` / ``self.page_count`` untouched when the
        pagination widget or the "current" marker is absent.
        """
        pages_html = html.find_all('ul', {'class': 'page-numbers'})
        if not pages_html:
            return
        pages = pages_html[0].find_all('li')
        # Hoisted: the original performed this exact find() twice.
        current_span = html.find('span', {'class': 'page-numbers current'})
        if current_span:
            # Current page number
            self.current_page = int(current_span.get_text())
            # Total page count: the second-to-last <li> holds the last page
            # number (the final <li> is presumably the "next" arrow — confirm
            # against the site's markup).
            self.page_count = int(pages[len(pages) - 2].get_text())

    # Scrape one detail page
    def scrape_page_detail(self, page, counter=None, search_key=None):
        """Fetch one post's detail page and download every image on it.

        Args:
            page: dict with ``'url'`` and ``'details'`` keys (as built by
                  ``get_page_list``); ``'details'`` is filled in place with
                  the image URLs.
            counter: shared multiprocessing counter; required when
                     ``self.enable_multi_process`` is true.
            search_key: sub-directory name used when building save paths.
        """
        request_detail = req.request('get', page['url'], proxy=self.proxy, headers=self.headers, verify=True)
        logging.info(f"请求链接：{page['url']}")

        if not request_detail:
            return

        parse_html = ParseMain(request_detail.text)
        page['details'] = self.get_page_detail(parse_html)

        # No images: nothing to save (matches the original no-iteration path,
        # including performing no directory creation / config lookups).
        if not page['details']:
            return

        # Hoist loop-invariant work out of the per-image loop: the original
        # rebuilt the save paths and re-created the directories on every
        # iteration.
        if self.enable_save_database:
            if self.database_config_param['server_save_path']:
                server_save_path = self.database_config_param['server_save_path']
                db_save_path = f'{server_save_path}/{self.script_name}/{search_key}'
            else:
                db_save_path = f'{self.script_name}/{search_key}'

        if self.enable_save_local:
            create_directory(get_abs_file_path(self.target_dir))
            local_save_path = f'{self.target_dir}/{search_key}'
            create_directory(get_abs_file_path(local_save_path))

        for url in page['details']:
            file_name = os.path.basename(url)

            if self.enable_save_database:
                file_path = f'{db_save_path}/{file_name}'
                if not self.saver.check_file_exists(file_path):
                    self.saver.save_file_to_server(url, file_name, db_save_path)
                    self.resulter.count += 1
                # File already exists and "new only" mode is on: everything
                # older has been fetched, so stop early.
                elif self.enable_new_only:
                    break

            if self.enable_save_local:
                file_path = os.path.join(local_save_path, file_name)
                # Download only when the file is not already on disk
                if not does_file_exist(file_path):
                    self.saver.save_to_media(url, file_path)
                    if self.enable_multi_process:
                        # Update the shared cross-process counter atomically
                        with counter.get_lock():
                            counter.value += 1
                    else:
                        self.resulter.count += 1
                # File already exists and "new only" mode is on: stop early.
                elif self.enable_new_only:
                    break




if __name__ == '__main__':
    # Script entry point: build the scraper from its JSON config and run it.
    XlustScraper('scrapy_main/scripts/xlust.json').start_scraper()
