"""
爬取海贼王吧的漫画
tip: 遇到数据不完整情况，重新换cookies
"""

import requests
import json
from config import Config
from lxml import etree
import os
import re
from DrissionPage import WebPage
import time
import shutil


class OnePiece:
    """Scraper for One Piece manga pages posted in the Baidu Tieba forum.

    Workflow: search the forum -> collect the fan-translation main posts ->
    resolve each thumbnail to its high-resolution image URL -> download the
    images per episode into a local directory tree.
    """

    def __init__(self):
        # Config supplies the search URL, the request headers (including the
        # cookies persisted in Cookies.txt) and the forum's main domain.
        conf = Config()
        self.comic_search_url = conf.comic_search_url
        self.headers = conf.headers
        self.main_domain = conf.main_domain
        self.session = requests.Session()

    def get_response(self, url, type='Text'):
        """Fetch *url* and return decoded text or raw bytes.

        type='Text'    -> return ``response.text``. If Baidu serves its
                          anti-bot verification page instead, open a browser
                          window for manual verification, persist the fresh
                          cookies to Cookies.txt, re-initialize and retry.
        type='Content' -> return ``response.content`` (binary downloads).

        NOTE: the parameter name ``type`` shadows the builtin; it is kept
        unchanged for backward compatibility with existing callers.
        """
        if type == 'Text':
            # Loop until a real page is returned: on the anti-bot check,
            # verify manually, write back cookies, re-init, then retry.
            while True:
                text = self.session.get(url=url, headers=self.headers).text
                selector = etree.HTML(text)

                # Baidu's rate-limit interstitial carries exactly this title.
                # Guard against pages with no <title> at all (original code
                # would raise IndexError here).
                title = selector.xpath("//title/text()")
                if title and title[0] == '正在进行安全检测...':
                    print(text)
                    # The verification URL is the last quoted string on the page.
                    validate_url = text.split('"')[-2]
                    page = WebPage()
                    page.get(validate_url)
                    time.sleep(15)  # time window for manual verification
                    cookies_dict = page.cookies(as_dict=True)
                    # Serialize as "k1=v1;k2=v2;..." (same format as before).
                    cookies = ';'.join(f'{k}={v}' for k, v in cookies_dict.items())
                    with open('Cookies.txt', 'w', encoding='utf-8') as file:
                        file.write(cookies)
                    self.__init__()  # reload config so headers pick up the new cookies
                    page.close()
                else:
                    return text
        elif type == 'Content':
            return self.session.get(url=url, headers=self.headers).content
        else:
            print(f'''设置正确返回方式!''')

    def all_search_result(self, num='All'):
        """Crawl the forum search results and resolve image URLs.

        num='All' -> gather every episode found; any other value -> only the
        episode whose title contains *num* (e.g. ``'1115'``).

        Returns ``(total_url, comic_url, high_quality_url, final_url)``:
          total_url        {page label: search-result page URL}
          comic_url        [main-post URLs]
          high_quality_url {episode title: [preview-page URLs]}
          final_url        {episode title: [downloadable image URLs]}
        """
        total_url, comic_url, high_quality_url, final_url = {}, [], {}, {}

        # --- enumerate every search-result page ---
        response = self.get_response(self.comic_search_url)
        selector = etree.HTML(response)
        last_page = selector.xpath('//a[@class="last"]/@href')[0]
        total_page = int(last_page.split('&')[-1].split('=')[-1])
        # Strip the trailing "&pn=NN" robustly (the old fixed [:-6] slice
        # assumed the page number always had exactly two digits).
        common_url = self.main_domain + last_page.split('&pn=')[0]
        for i in range(1, total_page + 1):
            total_url[f'''第{i}页'''] = common_url + f'''&pn={i}'''

        # --- keep only main posts of the fan-translated scans (no replies) ---
        for page in total_url:
            print(f'''开始查取{page}的主题帖''')
            page_selector = etree.HTML(self.get_response(total_url[page]))
            main_post = page_selector.xpath(
                "//a[contains(text(), '野生吧友自汉化') and not(contains(text(), '回复')) and @class='bluelink']/@href")
            comic_url.extend(self.main_domain + href for href in main_post)
        print(f'''总共话数: {len(comic_url)}''')

        # --- build the high-quality preview links per episode ---
        for i, page in enumerate(comic_url):
            print(f'''开始遍历第{i}个主题帖的链接，{page}''')
            episode, preview_urls = self._preview_urls(page)
            if num == 'All':
                high_quality_url[episode] = preview_urls
            elif num in episode:
                high_quality_url[episode] = preview_urls
                break  # requested episode found, stop scanning
        print(f'''high_quality_url: {high_quality_url}''')

        if high_quality_url:
            for episode_i in high_quality_url:
                print(f'''开始遍历{episode_i}的高清图的链接，获取高清图地址''')
                final_url[episode_i] = [self._original_url(u)
                                        for u in high_quality_url[episode_i]]
            # Persist every resolved link so a later run can reuse them.
            with open('picture_url.json', 'w', encoding='utf-8') as file:
                json.dump(final_url, file, ensure_ascii=False)
        else:
            print(f'''贴吧没有找到第{num}话的主题帖！''')
        print(f'''final_url: {final_url}''')
        # Bug fix: count individual pictures; len(final_url.values())
        # counted episodes, not pictures.
        print(f'''共有图片：{sum(len(v) for v in final_url.values())} 张''')

        return total_url, comic_url, high_quality_url, final_url

    def _preview_urls(self, post_url):
        """Fetch one main post; return (episode title, preview-page URLs).

        Each preview URL embeds the thread id (tid), the post id and the
        picture id taken from a thumbnail's src; opening it yields JSON with
        the original image address.
        """
        tail = post_url.split('/')[-1]
        tid = tail.split('?')[0]
        pid = tail.split('#')[-1]
        selector = etree.HTML(self.get_response(post_url))
        title = selector.xpath("//title/text()")[0]
        # The episode name sits between a 4-char prefix and an 11-char
        # suffix of the <title> — assumes the forum's fixed title layout.
        episode = title[4:len(title) - 11]
        urls = []
        for src in selector.xpath("//img[@class='BDE_Image']/@src"):
            picture_id = src.split('/')[-1].split('.')[0]
            urls.append(
                f'''https://tieba.baidu.com/photo/p?kw=%E6%B5%B7%E8%B4%BC%E7%8E%8B&flux=1&tid={tid}&pic_id={picture_id}&pn=1&fp=2&see_lz=1&post_id={pid}''')
        return episode, urls

    def _original_url(self, preview_url):
        """Resolve one preview page to its original (watermarked) image URL."""
        selector = etree.HTML(self.get_response(preview_url))
        func = selector.xpath("//script[contains(text(), 'viewfloatPageData')]/text()")
        payload = func[0].split(';')[1][15:]  # the JS object literal
        # SECURITY NOTE: eval() on scraped remote content is risky. It is
        # kept because the blob is a JS literal (bare false/null/true), but
        # it now runs with builtins stripped and the JS literals mapped to
        # '' locally instead of via mutable globals. If the payload is
        # confirmed to be strict JSON, replace this with json.loads().
        js_literals = {'false': '', 'null': '', 'true': ''}
        img_dict = eval(payload, {'__builtins__': {}}, js_literals)
        return img_dict['img']['original']['waterurl']

    def all_comic(self, is_update=True):
        """Download every collected episode.

        is_update=True  -> re-crawl the forum for fresh links.
        is_update=False -> reuse the links cached in picture_url.json.
        """
        save_path = self.mk_save_dir()
        if is_update:
            picture = self.all_search_result()[-1]
        else:
            with open('picture_url.json', 'r', encoding='utf-8') as file:
                picture = json.load(file)
        for episode in picture:
            self._download_episode(save_path, episode, picture[episode])
        print(f'''所有爬取到的话数都下载完成！''')

    def single_comic(self, num):
        """Download exactly one episode whose title contains *num*."""
        save_path = self.mk_save_dir()
        picture = self.all_search_result(num=num)[-1]
        if picture:
            episode_name = next(iter(picture))
            self._download_episode(save_path, episode_name, picture[episode_name])
            print(f'''{episode_name}都下载完成！''')
        else:
            print(f'''主题贴没有对应的第{num}话！''')

    def _download_episode(self, save_path, episode_name, urls):
        """Save all pages of one episode under save_path/<episode_name>."""
        episode_path = os.path.join(save_path, episode_name)
        # Start from a clean directory (rmtree: os.remove only handles files).
        if os.path.exists(episode_path):
            shutil.rmtree(episode_path)
        os.mkdir(episode_path)
        for i, picture_url in enumerate(urls):
            response = self.get_response(picture_url, 'Content')
            picture_path = os.path.join(episode_path, f'''第{i}页.jpg''')
            with open(picture_path, 'wb') as file:
                print(f'''开始下载{episode_name}话，第{i}页''')
                file.write(response)

    def mk_save_dir(self):
        """Ensure the root save directory exists and return its path."""
        save_path = r'D:\pythonProject\网页爬取\爬取贴吧图片\海贼王'
        # makedirs also creates missing parent directories (os.mkdir would
        # fail if any ancestor of the hard-coded path is absent).
        os.makedirs(save_path, exist_ok=True)
        return save_path


if __name__ == '__main__':
    # Entry point: fetch a single episode by number (passed as a string).
    scraper = OnePiece()
    # scraper.all_comic()  # uncomment to download every episode found
    scraper.single_comic('1115')

