import logging
import os

from modules.html_parse.parse_main import ParseMain
from modules.request.request_main import request_main as req
from scrapy_main.middleware.blog_middle import BlogMiddle
from utils.os_main import create_directory, does_file_exist, get_abs_file_path

# Configure the root logger: timestamped INFO-level messages (used by the scraper below)
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class TiktokScraper(BlogMiddle):
    """Blog-style scraper: collects article links, extracts image URLs from
    detail pages, tracks pagination, and downloads media to a local directory.

    Relies on state provided by the BlogMiddle base class:
    self.proxy, self.headers, self.target_dir, self.saver, self.resulter,
    self.enable_multi_process, self.enable_new_only.
    """

    # Fetch the list page: one entry per article link
    def get_page_list(self, html):
        """Extract article entries from a list page.

        Args:
            html: parsed-HTML wrapper (ParseMain) exposing find_all / get_by_attr.

        Returns:
            list[dict]: items with 'name', 'url' and an empty 'details' list
            (filled in later by scrape_page_detail).
        """
        page_list = []
        # Article links on this site carry rel="bookmark"
        for ele in html.find_all('a', {'rel': 'bookmark'}):
            page_list.append({
                'name': ele.get_text(),
                'url': html.get_by_attr(ele, 'href'),
                'details': []
            })
        return page_list

    # Fetch the detail page: collect every image URL
    def get_page_detail(self, html):
        """Return the 'src' attribute of every <img> element on the page."""
        return [ele['src'] for ele in html.find_all('img', {'src': True})]

    # Extract pagination state
    def get_pagination(self, html):
        """Read the pagination widget and set self.current_page / self.page_count.

        Leaves both attributes untouched when the page has no pagination widget
        or no "current page" marker.
        """
        pages_html = html.find_all('ul', {'class': 'page-numbers'})
        if pages_html:
            pages = pages_html[0].find_all('li')
            # Look up the current-page marker once instead of twice
            current = html.find('span', {'class': 'page-numbers current'})
            if current:
                # Current page number
                self.current_page = int(current.get_text())
                # Total page count: the last <li> is the "next" link, so the
                # second-to-last <li> holds the highest page number
                self.page_count = int(pages[-2].get_text())

    # Scrape a detail page and download its media files
    def scrape_page_detail(self, page, counter=None, search_key=None):
        """Fetch one article page, extract its image URLs, and download them.

        Args:
            page: dict with 'url' and 'details' keys (as built by get_page_list);
                'details' is populated in place.
            counter: shared multiprocessing counter with get_lock()/.value —
                required when self.enable_multi_process is set.
            search_key: subdirectory name under target_dir for the downloads.
        """
        request_detail = req.request('get', page['url'], proxy=self.proxy, headers=self.headers, verify=True)
        logging.info(f"请求链接：{page['url']}")

        if request_detail:
            parse_html = ParseMain(request_detail.text)

            page['details'] = self.get_page_detail(parse_html)
            absolute_target_dir = get_abs_file_path(self.target_dir)
            create_directory(absolute_target_dir)
            # Keep path construction consistent with the join below
            save_path = os.path.join(absolute_target_dir, str(search_key))
            create_directory(save_path)
            # Post-processing once the links are collected
            for url in page['details']:
                file_name = os.path.basename(url)
                file_path = os.path.join(save_path, file_name)
                # Use the configured logger instead of a bare print
                logging.info(file_name)
                # Only download when the file does not exist yet
                if not does_file_exist(file_path):
                    self.saver.save_to_media(url, file_path)
                    if self.enable_multi_process:
                        # Update the counter shared across worker processes
                        with counter.get_lock():
                            counter.value += 1
                    else:
                        self.resulter.count += 1
                # File already exists: with the "new only" switch on, everything
                # past this point was downloaded in a previous run — stop early
                elif self.enable_new_only:
                    break


if __name__ == '__main__':
    # Smoke test: fetch a TikTok CDN video URL directly through the request helper.
    result = req.request('get',
                         "https://v16-webapp-prime.tiktok.com/video/tos/alisg/tos-alisg-pve-0037/c089d84547df48fcb1ea1305606cd75f/?a=1988&ch=0&cr=3&dr=0&lr=tiktok&cd=0%7C0%7C1%7C&cv=1&br=1228&bt=614&bti=ODszNWYuMDE6&cs=0&ds=3&ft=-Csk_mH1PD12NjTo_f-Uxq72hY6e3wv25IcAp&mime_type=video_mp4&qs=0&rc=ZGhnOzVoZWc8NjVnODw1ZkBpamlkMzg6ZndqZzMzODgzNEBjNWExMjJhX14xYi1hNV80YSNfbzYucjQwX15gLS1kLy1zcw%3D%3D&btag=e00088000&expire=1703254077&l=2023122208074148C9B8252B680304EB31&ply_type=2&policy=2&signature=719be991224a9dfd955e65ea48e73786&tk=tt_chain_token"
                         )
    print(result)
    # req.request can return a falsy result on failure (see scrape_page_detail);
    # guard before dereferencing .text to avoid an AttributeError
    if result:
        print(result.text)
