import logging
from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
import time
from scrapy_main.middleware.blog_middle import BlogMiddle
from utils.format import format_txt, format_url_by_template


class OtokakeScraper(BlogMiddle):
    """Scraper for the Otokake blog, built on the BlogMiddle pipeline.

    The base class is expected to supply the configuration-driven attributes
    used below (proxy, headers, URL templates such as ``detail_url`` and
    ``detail_paging_url``, save flags, ``saver``/``resulter`` helpers, ...),
    loaded from the JSON script path passed to the constructor.
    """

    def get_page_list(self, html):
        """Extract the article list from a parsed listing page.

        :param html: parsed listing page (ParseMain / BeautifulSoup-like object)
        :return: list of dicts, each with 'title' and 'url' keys
        """
        page_list = []

        # Every article on the listing page is headed by an
        # <h3 class="article-list__title"> wrapping an <a> link.
        elements = html.find_all('h3', {'class': 'article-list__title'})

        for element in elements:
            child_ele = element.find('a')
            detail_path = child_ele.attrs['href']
            # get_text(' ') joins nested text nodes with a space separator.
            title = child_ele.get_text(' ')
            page_item = {
                'title': format_txt(title),
                'url': detail_path
            }
            page_list.append(page_item)

        return page_list

    def get_page_detail(self, html):
        """Extract the article body (first page) from a parsed detail page.

        :param html: parsed detail page (ParseMain / BeautifulSoup-like object)
        :return: list of dicts with 'page_no' (always 1 here) and 'content'
        """
        detail_list = []
        elements = html.find_all('div', {'class': 'article-body'})

        for ele in elements:
            content = ele.get_text(' ')
            detail_page_item = {
                'page_no': 1,  # first page of the article; later pages are appended by the caller
                'content': format_txt(content)
            }
            detail_list.append(detail_page_item)
            print(f"获取结果：{detail_page_item}")

        return detail_list

    def get_detail_page_content(self, html):
        """Return the article body of a parsed page as one concatenated string.

        :param html: parsed detail page (ParseMain / BeautifulSoup-like object)
        :return: formatted text of all <div class="article-body"> elements
        """
        detail_content = ''
        elements = html.find_all('div', {'class': 'article-body'})

        for ele in elements:
            content = ele.get_text(' ')
            detail_content += format_txt(content)

        return detail_content

    def get_pagination(self, html):
        """Read pagination info into self.current_page / self.page_count.

        Both attributes are left untouched when the page has no pagination
        bar or no highlighted (active) page entry.

        :param html: parsed detail page (ParseMain / BeautifulSoup-like object)
        """
        pages = html.find('ul', {'class': 'pagination pagination'})
        if pages:
            lis = pages.find_all('li')
            # Hoist the lookup: the original code ran this same find() twice.
            active = pages.find('li', {'class': 'active'})
            if active:
                # The highlighted <li> carries the current page number.
                self.current_page = int(active.get_text())
                # The last <li> is assumed to be a "next" control, so the
                # total page count sits in the second-to-last <li>.
                self.page_count = int(lis[len(lis) - 2].get_text())

    # Scrape one article's detail page plus all of its paginated continuations.
    def scrape_page_detail(self, page, counter=None, search_key=None):
        """
        Fetch the detail page for one article, follow its pagination, and
        persist the combined result according to the configured save options.

        :param page: dict holding at least the article 'url'; mutated in place
                     (a 'details' key is added on success)
        :param counter: shared multiprocessing counter with get_lock()/value,
                        used only when self.enable_multi_process is set
        :param search_key: search keyword (optional); forwarded to the saver
        """
        # Log the link being requested.
        logging.info(f"请求链接：{page['url']}")

        # Throttle: wait 2 seconds between requests (tune as needed).
        time.sleep(2)

        # Expose the search keyword and detail path so the URL templates
        # below can pick them up from self.__dict__.
        self.search_key = search_key
        self.detail_href = page['url']

        # Build the concrete detail-page URL from the configured template.
        detail_url = format_url_by_template(self.detail_url, self.__dict__)

        # GET the detail page.
        request_detail = req.request('get', detail_url, proxy=self.proxy, headers=self.headers, verify=True)

        # Parse the HTML only when the request succeeded.
        if request_detail:
            parse_html = ParseMain(request_detail.text)

            # First page of the article body.
            detail_list = self.get_page_detail(parse_html)

            # Populate self.current_page / self.page_count.
            # NOTE(review): when no pagination bar is found these attributes
            # are assumed to be pre-initialized (presumably by BlogMiddle) —
            # confirm, otherwise the range() below raises AttributeError.
            self.get_pagination(parse_html)

            # Fetch every remaining page of the article.
            for i in range(self.current_page + 1, self.page_count + 1):
                # Update page_index so the paging URL template can use it.
                self.page_index = i
                detail_page_url = format_url_by_template(self.detail_paging_url, self.__dict__)
                # NOTE(review): scrapy_detail_page_content is expected to be
                # inherited from BlogMiddle (fetch + parse one paging URL) —
                # confirm it is not a typo for get_detail_page_content.
                page_detail_content = self.scrapy_detail_page_content(detail_page_url)
                detail_page_item = {
                    'page_no': i,
                    'content': page_detail_content
                }
                detail_list.append(detail_page_item)

            # Attach the collected details to the page record.
            # NOTE(review): detail_list is always a list here, so this check
            # is always true; use `if detail_list:` if empty results should
            # be skipped instead.
            if detail_list is not None:
                page['details'] = detail_list

        if self.enable_save_local:
            file_save_path = format_url_by_template(self.save_path, self.__dict__)
            # Persist the record to a local file.
            self.saver.save_data_to_file(page, file_save_path)
        if self.enable_save_database:
            self.saver.add_data(search_key, page)

        if self.enable_multi_process:
            # Bump the shared cross-process counter atomically.
            with counter.get_lock():
                counter.value += 1
        else:
            self.resulter.count += 1

        print(f"获取结果：{page}")


if __name__ == '__main__':
    # Entry point: build the scraper from its JSON config script and run the
    # full crawl (start_scraper is presumably provided by BlogMiddle).
    scraper = OtokakeScraper('scrapy_main/scripts/otokake.json')
    scraper.start_scraper()
