# coding=utf-8
# creator: Lu
# create_time: 2021-10-28 17:10
import time

import requests
from bs4 import BeautifulSoup

from log import write_report
from utils import user_agent

logger = write_report.WriteReport("picture")


class Wallhaven:
    """Crawler for wallhaven.cc search results.

    Walks a range of search-result pages, extracts the image id behind
    every preview thumbnail, and downloads the full-size wallpaper to a
    local directory (``.jpg`` first, ``.png`` as a fallback).
    """

    def __init__(self, host):
        """
        :param host: base URL of the site, e.g. "https://wallhaven.cc/"
        """
        self.WALLHAVEN_HOST = host
        # Alternative search, sorted by "hot" instead of monthly favorites:
        # "/search?categories=110&purity=110&atleast=1920x1080&sorting=hot&order=desc&page="
        self.PATH = "/search?categories=110&purity=110&atleast=1920x1080&topRange=1M&sorting=favorites&order=desc&page="
        self.PREFIX = "wallhaven-"
        self.DEFAULT_SUFFIX = ".jpg"  # tried first for every image
        self.SUFFIX = ".png"          # fallback when the .jpg URL is not found
        self.START_PAGE = 1355
        self.END_PAGE = 13369
        # Placeholders: 2-char shard directory, file prefix, image id.
        self.DOWNLOAD_ADDR = "https://w.wallhaven.cc/full/{}/{}{}"
        self.USER_AGENT = user_agent.UserAgent.randomUserAgent()

        # Local target directory for downloaded images.
        self.HOME_DIR = "F:\\picture\\19 期\\"

    # Build the absolute URL of one search-result page.
    def create_wallhaven_url(self, page):
        """Return WALLHAVEN_HOST + PATH + page as a single URL string."""
        return "{}{}".format(self.WALLHAVEN_HOST, "{}{}".format(self.PATH, page))

    # Request one search-result page.
    def request_index_html(self, page):
        """Fetch result page *page* and return it as a BeautifulSoup tree.

        Retries once after 5s on any request error; if the retry also
        fails, the exception propagates to the caller.
        """
        cookie = "_pk_id.1.01b8=37e837306653bc3b.1634541833.; _pk_ses.1.01b8=1; XSRF-TOKEN=eyJpdiI6Im5BVlVqaGtZeWJNRHhwRnhqcnVPY0E9PSIsInZhbHVlIjoibWp4RUtSU2hEeStpZnUyRGx3bWd6WW5ZRStISWlJNStMWE5yWFl3RGdpZ1BVQVhJbmE2TTlFT3ByMnYzTStycCIsIm1hYyI6IjAxOTNjZjU5MWNiYjIwNzk0MGNhMTdmZmFhNDBmNTgxNDRiNTIwZjM0ZWI4MWEyMDZlNDZkNzk5MWZjZjMyM2YifQ%3D%3D; wallhaven_session=eyJpdiI6IkxkbzVFT1NVcjRZUnZ4ZW1jNnNMYWc9PSIsInZhbHVlIjoiM1RsVFlkd3ErbnFvdHFxbFdTRjdNaHBTbzEwNm4wWjVXcmZ5VTVndVNUY3dVZE5xWVVSQ041Z2FaR3ZkY3R1cyIsIm1hYyI6IjRmZTA0OWVmMzNmNmNhYmQ0Y2Y5NGEwOGZiZmUxMTUzODhmOThkZjE4NTlkN2U5NzU1NGM5ZjdmZTU1MWU0NDgifQ%3D%3D"
        path = "{}{}".format(self.PATH, page)
        headers = {
            "authority": "wallhaven.cc",
            "method": "GET",
            "scheme": "https",
            "path": path,
            "cookie": cookie,
            "user-agent": str(self.USER_AGENT),
        }
        req_url = self.create_wallhaven_url(page)
        logger.write_info_log("开始请求页面：{}".format(req_url))
        try:
            download_req = requests.get(url=req_url, headers=headers, timeout=60)
        except Exception as e:
            # Log before retrying so the first failure is recorded even if
            # the retry raises as well (the original logged only after a
            # successful retry, losing the error on a double failure).
            logger.write_error_log("请求页面发生异常：{}，尝试重新请求".format(e))
            time.sleep(5)
            download_req = requests.get(url=req_url, headers=headers, timeout=60)
        return BeautifulSoup(download_req.text, 'html.parser')

    # Extract the download URL of every preview on one result page.
    def parse_html_2_get_href(self, html: "BeautifulSoup"):
        """Return a dict mapping file name ("wallhaven-<id>") to the
        full-size download URL (without suffix).

        Uses the parsed ``href`` attribute directly instead of splitting
        ``str(tag)`` — the old string surgery only worked because it
        chopped a trailing '"' off the last path segment.
        """
        preview_list = html.select('.thumb-listing-page ul li .preview')
        a_href_list = {}

        for a in preview_list:
            address = a.get("href")
            if not address:
                continue

            # Last path segment of the preview link is the image id; its
            # first two characters name the shard directory on the CDN.
            name_ = address.rstrip("/").split("/")[-1]
            req_address = self.DOWNLOAD_ADDR.format(name_[:2], self.PREFIX, name_)
            a_href_list["{}{}".format(self.PREFIX, name_)] = req_address
        logger.write_info_log("解析到 {} 张图片，开始下载".format(len(a_href_list)))
        return a_href_list

    # Download every image in href_list to HOME_DIR.
    def download_picture(self, href_list: dict):
        """Download each ``name -> url`` entry to disk.

        The ``.jpg`` URL is tried first; on a non-OK response the ``.png``
        URL is tried (the "brute-force" stage). Every network call is
        retried once; an image that still fails is logged and skipped.
        """
        headers = {
            "authority": "w.wallhaven.cc",
            "method": "GET",
            "scheme": "https",
            "user-agent": str(self.USER_AGENT),
        }

        # NOTE(review): the delay grows by 0.1s per image and is never
        # reset, so long runs slow down progressively — presumably a
        # deliberate throttling ramp; confirm before changing.
        sleep_time = 0.1
        for name, href in href_list.items():
            req_href = href + self.DEFAULT_SUFFIX
            try:
                logger.write_info_log("开始下载图片：{}".format(name))
                try:
                    download_req = requests.get(url=req_href, headers=headers, timeout=40)
                except Exception as e:
                    time.sleep(3)
                    logger.write_error_log("正常下载发生异常：{}，图片：{}，尝试重新下载".format(e, req_href))
                    download_req = requests.get(url=req_href, headers=headers, timeout=40)

                path = self.HOME_DIR + name + self.DEFAULT_SUFFIX
                if not download_req.ok:
                    # The .jpg does not exist on the CDN — fall back to .png.
                    logger.write_error_log("指定图片不存在，进入暴力阶段")
                    time.sleep(2)
                    req_href = href + self.SUFFIX
                    path = self.HOME_DIR + name + self.SUFFIX
                    try:
                        download_req = requests.get(url=req_href, headers=headers, timeout=40)
                        logger.write_error_log("暴力下载图片，服务器状态：{}".format(download_req.status_code))
                    except Exception as e:
                        logger.write_error_log("暴力下载发生异常：{}，图片：{}，尝试重新暴力".format(e, req_href))
                        time.sleep(3)
                        download_req = requests.get(url=req_href, headers=headers, timeout=40)

                if not download_req.ok:
                    logger.write_error_log("图片下载出错：{}，服务器状态：{}".format(req_href, download_req.status_code))
                    continue

                # Write the body to disk; the context manager guarantees close.
                with open(path, 'wb') as f:
                    f.write(download_req.content)

                logger.write_info_log("成功下载图片：{}".format(req_href))
                time.sleep(sleep_time)
                sleep_time += 0.1
            except Exception as e:
                # Last-resort guard: never let one bad image kill the run.
                logger.write_error_log("图片下载异常：{}，跳过此图片：{}".format(e, req_href))
                continue

    # Crawl every page in [START_PAGE, END_PAGE]: fetch, parse, download.
    def request_html(self):
        """Main loop: request each result page, parse it, download images."""
        for page in range(self.START_PAGE, self.END_PAGE + 1):
            index_html = self.request_index_html(page)
            href_list = self.parse_html_2_get_href(index_html)
            self.download_picture(href_list)


# Script entry point: crawl the configured page range and download
# every wallpaper found.
if __name__ == '__main__':
    Wallhaven(host="https://wallhaven.cc/").request_html()
