# -*- coding: UTF-8 -*-
import pandas as pd
import os
import re

from DrissionPage import SessionPage
from DrissionPage.items import SessionElement
from concurrent.futures import ThreadPoolExecutor
from PIL import Image
from io import BytesIO
import requests

from loguru import logger
from fake_useragent import UserAgent
from urllib.parse import urlparse


def read_excel():
    """Load (media name, media URL) pairs from website.xlsx next to this script."""
    path = os.path.join(os.path.dirname(__file__), "website.xlsx")
    frame = pd.read_excel(path)
    # Pair each media name with its URL, preserving row order.
    return [pair for pair in zip(frame['媒体名称'], frame['媒体网址'])]


def filter_url(urls):
    """Drop URLs whose host is on the block list (e.g. the ICP-filing portal)."""
    blocked_hosts = {
        "beian.miit.gov.cn"
    }
    return [url for url in urls if urlparse(url).netloc not in blocked_hosts]


class Main:
    """Crawl a website's homepage, follow its absolute links, and download
    every image found on the linked pages into output/<site name>/.
    """

    def __init__(self):
        self.page = SessionPage()
        self.page.set.headers({"User-Agent": UserAgent().random})
        # Matches absolute http(s) URLs; used to reject relative / javascript: hrefs.
        self.url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
        self.executor = ThreadPoolExecutor(max_workers=os.cpu_count())
        self.root_path = os.path.dirname(os.path.abspath(__file__))
        self.output_dir_path = os.path.join(self.root_path, "output")

    def parse_link_url(self, ele: 'SessionElement'):
        """Return the element's href if it is an absolute http(s) URL, else None."""
        try:
            href = ele.attr('href')  # hyperlink URL
            if href is None:
                return None
            return href if re.match(self.url_pattern, href) else None
        except Exception as e:
            logger.info(e)
            return None

    def parse_image_url(self, ele: 'SessionElement'):
        """Return the element's src if it is an absolute http(s) URL, else None."""
        try:
            img_url = ele.attr("src")  # image URL
            if img_url is None:
                return None
            return img_url if re.match(self.url_pattern, img_url) else None
        except Exception as e:
            logger.info(e)
            return None

    def get_image_type(self, resp):
        """Infer the image type of an HTTP response.

        First trusts the Content-Type header; falls back to sniffing the
        file's magic bytes. Returns 'jpg'/'jpeg'/'png'/'gif'/'webp', or
        None when the type cannot be determined.
        """
        try:
            content_type = resp.headers.get('Content-Type', '')
            for marker, ext in (
                ('image/jpg', 'jpg'),
                ('image/jpeg', 'jpeg'),
                ('image/png', 'png'),
                ('image/gif', 'gif'),
                ('image/webp', 'webp'),
            ):
                if marker in content_type:
                    return ext

            # Header was inconclusive: sniff the file signature.
            # The WEBP check needs 12 bytes (RIFF....WEBP); the previous
            # [:8] slice made that branch unreachable.
            file_header = resp.content[:12]
            if file_header.startswith(b'\xff\xd8\xff'):
                return 'jpg'
            if file_header.startswith(b'\x89PNG'):
                return 'png'
            if file_header.startswith(b'GIF8'):
                return 'gif'
            if file_header.startswith(b'RIFF') and file_header[8:12] == b'WEBP':
                return 'webp'
            return None

        except Exception as e:
            logger.info(e)
            return None

    def save_img(self, url: str):
        """Download one image URL into self.current_dir.

        Best-effort: any failure is logged and swallowed. Files that
        already exist on disk are skipped. WEBP images are re-encoded
        and stored as PNG.
        """
        try:
            # Fetch the image.
            response = requests.get(url, headers={"user-agent": UserAgent().random}, timeout=10, allow_redirects=False)

            # Build the output file name.
            if url.endswith(('.jpg', '.png', '.gif', '.jpeg')):
                name = url.split("/")[-1]
            elif url.endswith(".webp"):
                # WEBP is stored as PNG (Pillow re-encodes on save).
                name = url.split("/")[-1].split(".webp")[0] + ".png"
            else:
                # Non-standard URL: determine the type from the response itself.
                img_type = self.get_image_type(response)
                if img_type is None:
                    return
                # WEBP again maps to PNG; every other type keeps its own suffix.
                suffix = 'png' if img_type == 'webp' else img_type
                name = url.split("?")[0].split("/")[-1] + "." + suffix

            # Full output path; skip files we already have.
            filename = os.path.join(self.current_dir, name)
            if not os.path.exists(filename):
                Image.open(BytesIO(response.content)).save(filename)
                logger.debug(f'{name} - OK', enqueue=True)

        except Exception as e:
            logger.info(e)
            return

    def gather_image_url(self, url: str):
        """Fetch one page and return its absolute image URLs (None on error)."""
        try:
            page = SessionPage()
            page.get(url, allow_redirects=False)

            # Parse every <img> and keep only valid absolute URLs.
            img_collect = [
                img_url
                for img_url in (self.parse_image_url(ele) for ele in page.eles('@tag()=img'))
                if img_url is not None
            ]

            page.close()
            return img_collect

        except Exception as e:
            logger.info(f'{url} - {e}')
            return None

    def main(self, name: str, url: str):
        """Crawl `url`, follow its links, and download all images into output/<name>."""
        try:
            self.page.get(url, allow_redirects=False)

            # Per-site output folder. makedirs also creates the parent
            # "output" dir on first run (os.mkdir would raise there).
            self.current_dir = os.path.join(self.output_dir_path, name)
            os.makedirs(self.current_dir, exist_ok=True)

            # Collect all absolute <a href> targets (dedup via set).
            link_eles = self.page.eles('@tag()=a')
            link_futures = [self.executor.submit(self.parse_link_url, ele) for ele in link_eles]
            link_urls = {future.result() for future in link_futures} - {None}
            # Drop blocked hosts.
            link_urls = filter_url(link_urls)

            # Collect image URLs from every linked page.
            image_futures = [self.executor.submit(self.gather_image_url, link) for link in link_urls]
            image_urls = set()
            for future in image_futures:
                result = future.result()
                if result is not None:
                    image_urls.update(result)

            # Download ALL images. (The previous one-off
            # `image_urls.pop()` was a debugging leftover: it fetched a
            # single image and raised KeyError on an empty set.)
            for img_url in image_urls:
                self.executor.submit(self.save_img, img_url)

        except Exception as e:
            logger.info(f'{url} - {e}')
            return

    def __del__(self):
        # Wait for pending downloads, then release the HTTP session.
        self.executor.shutdown()
        self.page.close()


if __name__ == '__main__':
    websites = read_excel()
    crawler = Main()
    media_name, media_url = websites[57]
    crawler.main(media_name, media_url)
