import asyncio
import json
import logging
import os
from time import time

import aiohttp
from lxml import etree


class BingImagesSpiderAsync:
    """Asynchronously scrape Bing image-search results and save them to disk.

    Workflow (see :meth:`run`): concurrently fetch paginated result pages,
    parse per-image metadata from the ``m`` attribute of result anchors,
    de-duplicate by MD5, then concurrently download up to ``amount`` images
    into ``path`` as sequentially numbered files.
    """

    # Number of results requested per Bing "async" result page.
    PER_PAGE_IMAGES = 30
    # Characters replaced by spaces when sanitizing image titles.
    # (Two dead empty-string entries removed: "" can never equal a single char.)
    IGNORE_CHARS = ["|", ".", "，", ",", "/", "@", ":", "：", ";", "；", "[", "]", "+"]
    # File extensions accepted verbatim; anything else falls back to "jpg".
    IMAGE_TYPES = [
        "bmp",
        "jpg",
        "png",
        "tif",
        "gif",
        "pcx",
        "tga",
        "exif",
        "fpx",
        "svg",
        "psd",
        "cdr",
        "pcd",
        "dxf",
        "ufo",
        "eps",
        "ai",
        "raw",
        "WMF",
        "webp",
    ]
    # A desktop-browser UA; Bing serves a different (harder to parse) page otherwise.
    HEADERS = {
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36"
    }
    # q = query, first = result offset, count = page size.
    BING_IMAGE_URL_PATTERN = "https://www.bing.com/images/async?q={}&first={}&count={}&mmasync=1"

    def __init__(self, keyword, amount, path="./"):
        """Configure a crawl.

        Args:
            keyword: Search query string.
            amount: Maximum number of images to download.
            path: Destination directory (created by :meth:`run` if missing).
        """
        self.keyword = keyword
        self.amount = amount
        self.path = path
        self.count = 0          # images attempted; also used to number files
        self.success_count = 0  # images actually written to disk
        self.lock = asyncio.Lock()  # serializes counter updates across tasks

    async def request_homepage(self, session, url):
        """Fetch one search-result page; return its HTML text, or None on failure."""
        try:
            async with session.get(url, headers=self.HEADERS) as response:
                # Fix: treat HTTP error statuses as failures instead of
                # silently parsing an error page as search results.
                response.raise_for_status()
                return await response.text()
        except (asyncio.TimeoutError, aiohttp.ClientError) as e:
            logging.error(f"Request failed for {url}. Error: {str(e)}")
            return None

    def parse_homepage_response(self, response):
        """Extract image-metadata dicts from one result page's HTML.

        Each returned dict has keys ``image_title``, ``image_type``,
        ``image_md5`` and ``image_url``.
        """
        tree = etree.HTML(response)
        # Fix: lxml returns None for empty or unparseable documents.
        if tree is None:
            return []

        info_list = []
        for m in tree.xpath('//*[@class="imgpt"]/a/@m'):
            dic = json.loads(m)

            # Sanitize the title: characters in IGNORE_CHARS become spaces.
            image_title = "".join(
                " " if char in self.IGNORE_CHARS else char for char in dic["t"]
            ).strip()

            # Fix: drop any query string before taking the extension, and
            # match known extensions case-insensitively ("WMF" vs "wmf").
            extension = dic["murl"].split("?")[0].split(".")[-1]
            if extension.lower() not in {t.lower() for t in self.IMAGE_TYPES}:
                extension = "jpg"

            info_list.append(
                {
                    "image_title": image_title,
                    "image_type": extension,
                    "image_md5": dic["md5"],
                    "image_url": dic["murl"],
                }
            )
        return info_list

    async def request_and_save_image(self, session, info):
        """Download one image and write it to a sequentially numbered file."""
        # Atomically claim the next file number.
        async with self.lock:
            self.count += 1
            current_count = self.count

        filename = f"{current_count:08d}.{info['image_type']}"
        filepath = os.path.join(self.path, filename)

        try:
            async with session.get(info["image_url"], headers=self.HEADERS) as response:
                # Fix: don't save an HTTP error body as an image file.
                response.raise_for_status()
                image_content = await response.read()

            if os.path.exists(filepath):
                logging.warning(f"{current_count}: File {filepath} already exists. Skipping.")
                return

            with open(filepath, "wb") as fp:
                fp.write(image_content)

            # Safe without the lock: no await between the write and this
            # increment, so no other task can interleave here.
            self.success_count += 1
            logging.info(f"{current_count}: Saving {filepath} done.")

        except (asyncio.TimeoutError, aiohttp.ClientError) as e:
            logging.error(f"{current_count}: Saving {filepath} failed. URL: {info['image_url']}. Error: {str(e)}")

    def deduplication(self, info_list):
        """Return info_list with duplicate-MD5 entries removed, order preserved."""
        seen_md5 = set()
        unique = []
        for info in info_list:
            md5 = info["image_md5"]
            if md5 not in seen_md5:
                seen_md5.add(md5)
                unique.append(info)
        return unique

    async def run(self):
        """Crawl result pages, then download up to ``amount`` unique images."""
        # Fix: os.mkdir fails on nested paths and races if the directory
        # appears between the exists() check and the mkdir() call.
        os.makedirs(self.path, exist_ok=True)

        # Request ~1.5x the needed pages to compensate for duplicates/failures.
        page_count = int(self.amount / self.PER_PAGE_IMAGES * 1.5) + 1
        homepage_urls = [
            self.BING_IMAGE_URL_PATTERN.format(self.keyword, i * self.PER_PAGE_IMAGES, self.PER_PAGE_IMAGES)
            for i in range(page_count)
        ]

        async with aiohttp.ClientSession() as session:
            homepage_responses = await asyncio.gather(
                *(self.request_homepage(session, url) for url in homepage_urls)
            )

        info_list = []
        for response in homepage_responses:
            if response is not None:
                info_list += self.parse_homepage_response(response)
        info_list = self.deduplication(info_list)[: self.amount]

        async with aiohttp.ClientSession() as session:
            await asyncio.gather(
                *(self.request_and_save_image(session, info) for info in info_list)
            )

        logging.info(
            "All done. {} successfully downloaded, {} failed.".format(
                self.success_count, self.count - self.success_count
            )
        )


if __name__ == "__main__":
    # Script entry point: crawl 3000 images for the keyword into a
    # same-named directory and report the elapsed wall-clock time.
    logging.basicConfig(level=logging.INFO)
    started_at = time()
    spider = BingImagesSpiderAsync("物流仓库防火门", 3000, "物流仓库防火门")
    asyncio.run(spider.run())
    print(time() - started_at)
