# coding=utf-8
import hashlib
import os
import random
import threading
import time
from typing import Mapping, List
from urllib.request import urlopen
from loguru import logger
import requests
from bs4 import BeautifulSoup
from urllib.error import URLError


# Host that serves the actual image files; used to filter <img> src URLs.
IMAGE_ENDPOINT = "https://images.unsplash.com"
# Search-results page; the user's query term is appended as a path segment.
ENDPOINT = "https://unsplash.com/s/photos"
# Desktop browser User-Agent so the site serves the normal HTML page.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.75 Safari/537.36",
}


class GetImageHtmlError(Exception):
    """Raised when fetching the search-results HTML fails."""


class GetImageURLError(Exception):
    """Raised when the image URLs cannot be extracted from the page."""


class DownloadImageError(Exception):
    """Raised when the overall image download cannot proceed."""


# User-facing entry point.
def main():
    """Prompt for an image type and download matching images.

    Reads the search term from stdin, builds the search URL, and delegates
    the work to download_image(). On failure the error is logged and the
    function returns early.
    """
    input_image_type = input("Input Image Type => ")
    endpoint_url = f"{ENDPOINT}/{input_image_type}"

    try:
        download_image(endpoint_url)
    except DownloadImageError as err:
        logger.error(f"Download Image Failed, {err=}")
        # Bug fix: previously execution fell through and logged the
        # success message even after the download had failed.
        return

    logger.info("Download image successfully.")


# Collect the image URLs, then download them all concurrently.
def download_image(endpoint_url: str) -> None:
    """Fetch every qualifying image behind *endpoint_url*.

    One worker thread is started per image URL (the work is I/O-bound,
    so threads overlap the network waits), and all threads are joined
    before returning.

    Raises:
        DownloadImageError: when the image URLs cannot be collected.
    """
    try:
        images_url = get_images_url(endpoint_url)
    except GetImageURLError as err:
        # Chain the original exception so the root cause stays visible.
        raise DownloadImageError(f"{err=}") from err

    logger.info(f"The total number of images url is {len(images_url)}")
    start_time = time.time()
    logger.info(f"Start to download image, {start_time=}")

    threads: List[threading.Thread] = []
    for index, image_url in enumerate(images_url):
        t = threading.Thread(
            target=async_download_image, args=(image_url, index, images_url)
        )
        threads.append(t)
        t.start()
    for t in threads:
        t.join()

    logger.info(f"Elapsed time {time.time() - start_time}")


# Thread worker: download a single image and write it to ./images.
def async_download_image(image_url: str, index: int, images_url: set) -> None:
    """Download one image URL to a freshly generated file name.

    A short random sleep spreads the concurrent requests out a little.
    On a download failure the error is logged and the worker exits.

    Args:
        image_url: URL of the image to fetch.
        index: position of this URL, used only for progress logging.
        images_url: the full URL set, used only for the progress total.
    """
    time.sleep(random.randint(1, 3))

    try:
        image_data = urlopen(image_url.strip(), timeout=300).read()
    except URLError as err:
        logger.warning(f"Download {image_url=} failed!, {err=}")
        # Bug fix: the original fell through after the warning and then
        # crashed with a NameError because image_data was never assigned.
        return

    with open(generate_image_name(), "wb") as fp:
        fp.write(image_data)

    logger.info(f"{index+1}/{len(images_url)}, Download {image_url=} successfully.")


# Extract the image URL links from the search-results page.
def get_images_url(endpoint_url: str) -> set:
    """Return the set of 1000px-wide Unsplash image URLs on the page.

    Args:
        endpoint_url: URL of the search-results page to scrape.

    Raises:
        GetImageURLError: when the page cannot be fetched or parsed.
    """
    logger.info("Start to get image url.")
    try:
        image_html = get_image_html(endpoint_url)
    except GetImageHtmlError as err:
        raise GetImageURLError(f"{err=}") from err

    images_url = set()
    try:
        for image_url in extract_images_url(image_html):
            # <img> tags without a src attribute yield None; skip them
            # instead of aborting the whole extraction with a TypeError.
            if image_url is None:
                continue
            # Bug fix: the original `(IMAGE_ENDPOINT and "w=1000") in image_url`
            # only tested for "w=1000", because `and` returns its second
            # operand when the first is truthy. Test both substrings.
            if IMAGE_ENDPOINT in image_url and "w=1000" in image_url:
                images_url.add(image_url)
    except TypeError as err:
        raise GetImageURLError(f"{err=}") from err

    logger.info("Got images url successfully.")
    return images_url


def extract_images_url(image_html: str):
    """Yield the ``src`` attribute of every ``<img>`` tag in *image_html*.

    Tags without a ``src`` attribute yield ``None`` (the ``Tag.get``
    default), so callers must be prepared to filter those out.
    """
    parsed = BeautifulSoup(image_html, "html.parser")
    yield from (tag.get("src") for tag in parsed.find_all("img"))


# Fetch the raw HTML of the search-results page.
def get_image_html(endpoint_url: str, headers: Mapping = HEADERS) -> str:
    """Return the HTML body of *endpoint_url* as text.

    Args:
        endpoint_url: page to fetch.
        headers: HTTP request headers; defaults to the module HEADERS.

    Raises:
        GetImageHtmlError: on any non-200 HTTP status code.
    """
    logger.info("Start to get image html.")

    resp = requests.get(url=endpoint_url, headers=headers, timeout=300)

    if resp.status_code != 200:
        raise GetImageHtmlError(f"Got image html failed, {resp.status_code=}")

    logger.info("Got image html successfully.")
    # Annotation fix: resp.text is a str, not a dict as originally declared.
    return resp.text


def generate_image_name() -> str:
    """Return a fresh ``./images/<sha256-of-current-time>.jpg`` path."""
    target_dir = despath("./images")
    file_name = hash_str(time.ctime()) + ".jpg"
    return f"{target_dir}/{file_name}"


def hash_str(str_data):
    """Return the SHA-256 hex digest of *str_data* (UTF-8 encoded)."""
    return hashlib.sha256(str_data.encode("utf-8")).hexdigest()


def despath(path: str):
    """Ensure *path* exists as a directory and return it.

    Uses ``os.makedirs(..., exist_ok=True)`` so intermediate directories
    are created too, and a concurrent creation by one of the download
    worker threads no longer raises (the original exists-then-mkdir
    check was racy). Failures are logged and the path is still returned,
    preserving the original best-effort contract.
    """
    try:
        os.makedirs(path, exist_ok=True)
    except OSError as err:
        # Narrowed from the original bare `Exception`: filesystem
        # creation failures are OSError subclasses.
        logger.error(str(err))
    return path


# Run the interactive downloader only when executed as a script.
if __name__ == "__main__":
    main()
