# -*- coding: UTF-8 -*-
import pandas as pd
import os
import re
import base64
from DrissionPage import SessionPage
from DrissionPage.items import SessionElement
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import ProcessPoolExecutor
from PIL import Image
from io import BytesIO
import requests
from loguru import logger
from fake_useragent import UserAgent
from urllib.parse import urlparse


def add_cookie_test(sp: SessionPage, tag: str):
    """Re-send the current request with the session's cookies attached.

    Some sites return an empty page until cookies are echoed back. This
    copies the cookies the server set onto the session, re-fetches the
    current URL, and looks up elements matching *tag*.

    Returns the matched elements on success, or False when there are no
    cookies, the retry fails, or no elements are found.
    """
    try:
        jar = sp.cookies()
        if not jar:
            return False

        sp.set.cookies(cookies={c["name"]: c["value"] for c in jar})
        sp.get(sp.url)  # re-issue the request, now carrying the cookies

        resp = sp.response
        if resp is None or resp.status_code in [404, 403, 500, 501]:
            return False

        eles = sp.eles(f'@tag()={tag}')
        if eles:
            return eles
        logger.error(f'{sp.url} retry - link_eles:{eles}')
        return False

    except Exception as e:
        logger.error(e)
        return False


def read_excel():
    """Load (media name, media URL) pairs from website.xlsx beside this script."""
    xlsx_path = os.path.join(os.path.dirname(__file__), "website.xlsx")
    sheet = pd.read_excel(xlsx_path)
    return [pair for pair in zip(sheet['媒体名称'], sheet['媒体网址'])]


# Hosts whose pages never yield useful images (ICP-filing / government sites).
# Module-level frozenset: built once, O(1) membership (was a per-call list
# named "fileter_urls" with O(n) lookups).
_FILTERED_HOSTS = frozenset({
    "beian.miit.gov.cn",
    "beian.mps.gov.cn",
    "www.miibeian.gov.cn",
    "www.gxfda.gov.cn",
    "www.cpi.gov.cn",
    "www.moh.gov.cn",
    "www.satcm.gov.cn",
    "www.sda.gov.cn",
    "www.gxws.gov.cn",
})


def filter_url(urls):
    """Return the URLs from *urls* whose host is not on the block list.

    :param urls: iterable of absolute URL strings
    :return: list of URLs whose netloc is not a filtered host
    """
    return [url for url in urls if urlparse(url).netloc not in _FILTERED_HOSTS]


class Main:
    """Crawl one website: collect its outbound <a> links, scan each linked
    page for <img> tags, and download every image into output/<site name>/.
    """

    def __init__(self):
        self.page = SessionPage()
        self.page.set.headers({"User-Agent": UserAgent().random})
        # Accepts only absolute http/https URLs; relative links are dropped.
        self.url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        self.root_path = os.path.dirname(os.path.abspath(__file__))
        self.output_dir_path = os.path.join(self.root_path, "output")

    def parse_link_url(self, ele: "SessionElement"):
        """Return the element's href when it is an absolute URL, else None."""
        try:
            href = ele.attr('href')
            if href is not None and re.match(self.url_pattern, href):
                return href
            return None
        except Exception as e:
            logger.error(e)
            return None

    def get_image_type(self, resp):
        """Map the response's Content-Type header to a file extension.

        :param resp: an HTTP response object with a ``headers`` mapping
        :return: 'png'/'jpg'/'jpeg'/'gif'/'webp'/'svg', or None when the
                 header is missing or not a recognized image type
        """
        try:
            content_type = resp.headers.get('Content-Type', '')
            for marker, ext in (
                ('image/png', 'png'),
                ('image/jpg', 'jpg'),
                ('image/jpeg', 'jpeg'),
                ('image/gif', 'gif'),
                ('image/webp', 'webp'),
                ('image/svg', 'svg'),
            ):
                if marker in content_type:
                    return ext
            return None  # no recognizable image Content-Type
        except Exception as e:
            logger.error(e)
            return None

    def parse_image_url(self, ele: "SessionElement"):
        """Return the element's src when it is an absolute URL, else None."""
        try:
            img_url = ele.attr("src")
            if img_url is not None and re.match(self.url_pattern, img_url):
                return img_url
            return None
        except Exception as e:
            logger.error(e)
            return None

    def gather_image_url(self, url: str):
        """Fetch *url* and return the absolute image URLs found on it.

        If the URL points directly at an image, download it immediately and
        return None. Returns None on any failure.
        """
        try:
            # Per-call session so this is safe under ThreadPoolExecutor.
            cur_page = SessionPage()
            cur_page.set.headers({"User-Agent": UserAgent().random})
            try:
                cur_page.get(url)  # slowest step: full page download
                resp = cur_page.response
                if resp is None or resp.status_code in [404, 403, 500, 501]:
                    return None

                # The link may point straight at an image file.
                if resp.headers.get("Content-Type", '') in ["image/jpg", "image/jpeg", "image/png", "image/gif", "image/webp"]:
                    self.save_img(url)
                    return None

                img_eles = cur_page.eles('@tag()=img')
                if not img_eles:
                    logger.error(f'{url} - img_eles: {img_eles}')
                    # Some sites return an empty page until cookies are sent.
                    retried = add_cookie_test(cur_page, "img")
                    if not retried:
                        return None
                    img_eles = retried

                # May contain duplicates; the caller de-duplicates.
                return [u for u in map(self.parse_image_url, img_eles) if u is not None]
            finally:
                # Always release the session (was leaked on the direct-image
                # and exception paths).
                cur_page.close()

        except Exception as e:
            logger.error(f'{url} - {e}')
            return None

    def save_img(self, url: str):
        """Download the image at *url* (or decode a data: URI) into the
        current project directory. Errors are logged, never raised."""
        try:
            if url.endswith(('.png', '.jpg', '.gif', '.jpeg')):
                self._save_standard(url)
            elif url.endswith('.webp'):
                self._save_webp(url)
            elif 'data:image' in url:
                self._save_data_uri(url)
            else:
                self._save_by_content_type(url)
        except Exception as e:
            logger.error(f'{url} - {e}')

    def _fetch(self, url: str):
        """GET *url* with a random User-Agent; raise on an HTTP error status."""
        response = requests.get(url, headers={"user-agent": UserAgent().random}, timeout=6)
        response.raise_for_status()
        return response

    def _write_image(self, data: bytes, name: str):
        """Persist raw image bytes as *name* in the current project dir,
        flattening RGBA to RGB for JPEG targets (JPEG has no alpha)."""
        filename = os.path.join(self.current_dir, name)
        with Image.open(BytesIO(data)) as img:
            if name.endswith(('.jpg', '.jpeg')) and img.mode == 'RGBA':
                # convert() returns a new image; the original discarded it.
                img = img.convert("RGB")
            img.save(filename)

    def _save_standard(self, url: str):
        """Save a .png/.jpg/.jpeg/.gif URL under its original file name."""
        response = self._fetch(url)
        name = url.split("/")[-1]
        self._write_image(response.content, name)
        logger.success(f'{name} - OK', enqueue=True)

    def _save_webp(self, url: str):
        """Save a .webp URL re-encoded as .png (webp is handled specially)."""
        response = self._fetch(url)
        name = url.split("/")[-1].split(".webp")[0] + ".png"
        filename = os.path.join(self.current_dir, name)
        with Image.open(BytesIO(response.content)) as img:
            img.save(filename)
        logger.success(f'{url} - OK', enqueue=True)

    def _save_data_uri(self, url: str):
        """Decode a base64 data: URI and save it; webp payloads become .png."""
        data_str = url.split(",")[1]
        decoded = base64.b64decode(data_str)
        # Image.open needs a file-like object, not raw bytes (was a crash).
        with Image.open(BytesIO(decoded)) as img:
            img_type = img.format.lower()
            # First 8 base64 chars double as a (mostly) unique file name.
            # NOTE(review): they may contain '/' or '+'; pre-existing quirk.
            name = data_str[:8] + ('.png' if img_type == 'webp' else '.' + img_type)
            img.save(os.path.join(self.current_dir, name))
        logger.success(f'{name} - OK', enqueue=True)

    def _save_by_content_type(self, url: str):
        """Save a non-standard URL, choosing the extension from the response's
        Content-Type header. SVG and unknown types are skipped."""
        response = self._fetch(url)
        img_type = self.get_image_type(response)
        if img_type is None:
            # Was: empty name -> attempted to save into the directory itself.
            logger.error(f'{url} - unknown image Content-Type')
            return
        if img_type == 'svg':
            return  # SVG is skipped for now
        ext = '.png' if img_type == 'webp' else '.' + img_type
        name = "ABC" + url.split("?")[0].split("/")[-1] + ext
        self._write_image(response.content, name)
        logger.success(f'{name} - OK', enqueue=True)

    def main(self, name: str, url: str):
        """Crawl *url*: collect <a> links, scan each concurrently for image
        URLs, then download every distinct image into output/<name>/."""
        try:
            # Per-site output directory; makedirs also creates output/ if
            # missing (os.mkdir failed in that case).
            self.current_dir = os.path.join(self.output_dir_path, name)
            os.makedirs(self.current_dir, exist_ok=True)

            self.page.get(url)
            resp = self.page.response
            if resp is None or resp.status_code in [404, 403, 500, 501]:
                return

            link_eles = self.page.eles('@tag()=a')
            if not link_eles:  # possible bot detection: empty result
                logger.error(f'{url} - link_eles:{link_eles}')
                # retry with cookies attached
                ret = add_cookie_test(self.page, "a")
                if not ret:
                    return
                link_eles = ret

            # Absolute links only, de-duplicated, then block-list filtered.
            links = {a for a in map(self.parse_link_url, link_eles) if a is not None}
            link_urls = filter_url(links)

            with ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
                futures = [executor.submit(self.gather_image_url, u) for u in link_urls]
                image_urls = set()
                for future in futures:
                    result = future.result()  # call once (was called twice per future)
                    if result is not None:
                        image_urls.update(result)

                # Distinct loop variable: don't shadow the *url* parameter,
                # which the except clause below logs.
                for image_url in image_urls:
                    executor.submit(self.save_img, image_url)

        except Exception as e:
            logger.error(f'{url} - {e}')

    def __del__(self):
        # Best effort: the session may be gone already at interpreter exit,
        # or __init__ may have failed before self.page existed.
        try:
            self.page.close()
        except Exception:
            pass


def _worker(name: str, url: str):
    """Process-pool entry point: build the crawler inside the child process.

    A Main instance holds a live HTTP session (not picklable), so it must
    not be created in the parent and shipped across the process boundary —
    the original ``proc.submit(Main().main, ...)`` did exactly that.
    """
    Main().main(name, url)


if __name__ == '__main__':
    website_urls = read_excel()
    with ProcessPoolExecutor(max_workers=os.cpu_count()) as proc:
        futures = [proc.submit(_worker, name, url) for name, url in website_urls]
        # Surface worker failures: submit() alone drops exceptions silently
        # unless result() is called.
        for future in futures:
            try:
                future.result()
            except Exception as e:
                logger.error(e)
