# coding=utf-8
import functools
import hashlib
from queue import Queue
import random
import time
from multiprocessing import Pool, Manager
from typing import Set, Union
from urllib.request import urlopen
import requests
import os
import re
import logging


# First sub-page index to crawl (page 1 is the landing page itself).
PAGE_NUMBER = 2
# Base URL of the target site; relative image paths are appended to it.
ENDPOINT = "http://www.xixi.cc"
# Extracts N from an inline `var totalpage = N;` script on the page.
# Raw strings so the regex escapes are not mangled by Python string escaping.
IMG_PATTERN = r"""var totalpage = ([\s\S]*?);"""
# Extracts the first <img src='...'> inside the arcBody div of a gallery page.
OBJ_PATTERN = r"""<div class="arcBody">[\s\S]*?href=[\s\S]*?<img src='([\s\S]*?)' id=[\s\S]*?"""
HEADER = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36",
    # Fixed: the original value contained stray spaces ("http: // www...")
    # which made the Referer an invalid URL.
    "Referer": "http://www.xixi.cc/riben/2418_10.html"}


def init_logger():
    """Create (or fetch) the "Meizi_img" logger writing DEBUG+ to Meizi_img.log.

    Safe to call more than once: the file handler is attached only on the
    first call, so repeated calls do not produce duplicated log lines
    (the original appended a new handler on every call).
    """
    logger = logging.getLogger("Meizi_img")
    logger.setLevel(logging.DEBUG)
    # Guard against handler duplication on re-initialisation.
    if not logger.handlers:
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        file_handler = logging.FileHandler("Meizi_img.log")
        file_handler.setFormatter(formatter)
        file_handler.setLevel(logging.DEBUG)
        logger.addHandler(file_handler)
    return logger


def despath(path: str = "images/") -> str:
    """Ensure *path* exists as a directory and return it unchanged.

    The default lets callers ask for the standard download directory with
    no argument. makedirs(exist_ok=True) creates nested paths and treats
    an existing directory as success (os.mkdir could do neither).
    Creation failures (e.g. permissions) are logged, not raised.
    """
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as e:
        logger.error(e)
    return path


def hashStr(strInfo):
    """Return the hex SHA-256 digest of the given string."""
    digest = hashlib.sha256(strInfo.encode("utf-8"))
    return digest.hexdigest()


def get_one_page(url: str) -> str:
    """Issue an HTTP GET for *url* and return the body text, or "" on failure.

    Sleeps 1-3 seconds first to throttle requests against the site.
    """
    time.sleep(random.randint(1, 3))
    response = requests.get(url, headers=HEADER)
    return response.text if response.status_code == 200 else ""


def get_img_url(html) -> str:
    """Extract the first gallery image URL from *html*.

    Returns the absolute URL (ENDPOINT + relative src) or "" when the
    page contains no matching <img> tag, so callers can test truthiness.
    """
    time.sleep(random.randint(1, 3))
    items = re.findall(OBJ_PATTERN, html)
    if not items:
        # No match -- the original's `except IndexError` handler was
        # unreachable after this guard and referenced a possibly-unbound
        # `url` (NameError); the guard alone is sufficient.
        return ""
    url = ENDPOINT + items[0]
    logger.info(url)
    return url


def download_images(url: str) -> None:
    """Download the image at *url* into the local "images/" directory.

    The file name is the SHA-256 of the URL, so the same image is never
    saved twice and downloads in the same second do not collide (the
    original time.ctime()-based name did). Failures are logged, not raised.
    """
    time.sleep(random.randint(1, 3))
    # The original called despath() with no argument, a guaranteed
    # TypeError; pass the target directory explicitly.
    path = despath("images/")
    img_name = hashStr(url) + '.jpg'
    local = os.path.join(path, img_name)
    try:
        data = urlopen(url.strip()).read()
        with open(local, "wb") as f:
            f.write(data)
    except Exception as e:
        logger.error(f"downloaded error in {url} - Error {e}")


def get_all_page(html) -> Union[int, None]:
    """Parse `var totalpage = N;` out of *html* and return N as an int.

    Returns None when the marker is absent or its value is not numeric.
    """
    items = re.findall(IMG_PATTERN, html)
    if not items:
        return None
    try:
        # int() is the call that can actually fail here (non-numeric text);
        # the original caught IndexError, which cannot occur after the
        # emptiness guard above.
        return int(items[0])
    except ValueError as e:
        logger.error(e)
        return None


def crawl_picture_info(queue: Queue, page: int) -> None:
    """Fetch sub-page *page*, extract its image URL and push it onto *queue*.

    Runs inside a pool worker. The result (possibly "") is always queued;
    the consumer decides what to keep.
    """
    time.sleep(random.randint(1, 3))
    new_url = ENDPOINT + str(page) + ".html"
    html = get_one_page(new_url)

    if not html:
        logger.error('not find html')

    result = get_img_url(html)

    if not result:
        # The original did 'not find the image!' % (result), which raises
        # TypeError (no % conversion in the format string) exactly when
        # this error path runs; use lazy %-style logging args instead.
        logger.error('not find the image! page=%s', page)

    queue.put(result)


def multiprocess_crawl_picture(image_url: str, pages: int) -> Set[str]:
    """Crawl pages PAGE_NUMBER..*pages* in a process pool; return image URLs.

    *image_url* (the landing page's image) is seeded into the queue first.
    Empty results from failed pages are filtered out so the caller never
    tries to download "".
    """
    pool = Pool()
    # A Manager queue is shareable between the pool's worker processes,
    # unlike a plain queue.Queue.
    queue = Manager().Queue()
    queue.put(image_url)
    worker = functools.partial(crawl_picture_info, queue)
    # map() blocks until every page has been processed, so the queue is
    # complete once it returns.
    pool.map(worker, range(PAGE_NUMBER, pages + 1))
    pool.close()
    pool.join()

    images = set()
    while not queue.empty():
        url = queue.get()
        if url:  # drop "" entries from pages where extraction failed
            images.add(url)

    return images


def main():
    """Crawl the site: landing page -> page count -> per-page images -> disk."""
    html = get_one_page(ENDPOINT)
    if not html:
        logger.error("get one page error!")
        return

    image_url = get_img_url(html)
    if not image_url:
        # The original logged but carried on, seeding "" into the crawl
        # queue; bail out like the other guards do.
        logger.error("Not found image url")
        return

    pages = get_all_page(html)
    if not pages:
        logger.error("not find pages")
        return

    logger.info(f"{pages}_page" + '\n\n')

    images = multiprocess_crawl_picture(image_url, pages)

    for url in images:
        download_images(url)
        logger.info(f"{url} page download ok!")


if __name__ == '__main__':
    # Module-level logger used by every function above.
    logger = init_logger()
    try:
        main()
    except Exception as e:
        # logger.exception records the full traceback, not just the message.
        logger.exception(e)
