from datetime import datetime
import os
import re
import hashlib
import sys
from urllib.parse import urlparse, unquote

import requests
from bs4 import BeautifulSoup

from local_path_mapping import build_mapping
from logger import Logger
from util_store import append_url_with_subsidiary, read_url_list, append_comments

### JM: This main has no relation to process_html.py or process_metadata.py; it can be run standalone.

# Directory for downloaded images; created on first run if missing.
# Raw string: a plain "E:\..." literal is fragile (e.g. "\t", "\n" would be
# interpreted as escapes if the path ever changes).
images_base_dir = r"E:\PlayniteHtmlImages"
# Path of the key/value cache store (see util_store) mapping URL hashes to local paths.
images_cache_path = "cache"
# exist_ok avoids the check-then-create race of `if not exists: makedirs`.
os.makedirs(images_base_dir, exist_ok=True)

# Browser-like request headers so image hosts don't reject the downloads.
headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'text/html, application/xhtml+xml, */*',
        'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
    }

# Compiled once at import time: is_guid_html is called for every file found
# by os.walk, so hoisting the pattern avoids a per-call cache lookup in `re`.
_GUID_HTML_RE = re.compile(
    r'^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}\.html$'
)

def is_guid_html(filename):
    """Return True if *filename* is exactly '<GUID>.html' (case-insensitive hex)."""
    return _GUID_HTML_RE.match(filename) is not None

def extract_filename_from_url(url):
    """Return the final path segment of *url*, with percent-encoding decoded."""
    # Take only the path component (drops scheme, host, query, fragment),
    # then decode any %-escapes so the saved file gets a readable name.
    decoded_path = unquote(urlparse(url).path)
    # Everything after the last '/' is the file name.
    return decoded_path.rsplit('/', 1)[-1]

def index_img(src, local_path):
    """Record the mapping *src* URL -> *local_path* in the image cache store.

    The cache is keyed by the SHA-1 hex digest of the URL so arbitrary URLs
    become fixed-length, filesystem-safe keys.
    """
    digest = hashlib.sha1(src.encode("utf-8")).hexdigest()
    append_url_with_subsidiary(images_cache_path, digest, local_path, src)

# In-memory view of the image cache, loaded once at import time.
# Maps sha1(image URL) -> local relative path recorded by index_img().
img_cache_dict = read_url_list(images_cache_path)
def get_index_img(src):
    """Return the cached local relative path for image URL *src*, or None."""
    img_hash = hashlib.sha1(src.encode("utf-8")).hexdigest()
    # dict.get replaces the redundant `in d.keys()` test plus second lookup.
    return img_cache_dict.get(img_hash)

# Maps a cache-relative image path to its physical location on disk
# (see local_path_mapping.build_mapping); presumably this is what supports
# images spread across batch folders — TODO confirm against build_mapping.
local_file_mapping = build_mapping()

if __name__ == "__main__":
    # Mirror stdout into a log file (see logger.Logger).
    sys.stdout = Logger()
    directory = r"E:\PlayniteLibraryHtml"

    # Matches external (http/https) image links by extension.
    external_img_pattern = re.compile(r'http[s]?://[^"\']+\.(jpg|jpeg|png|gif)')

    # Collect every '<GUID>.html' file under the library directory.
    valid_file_path_list = []
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith("_updated_html_file.html"):
                continue  # output of an earlier run, skip
            if is_guid_html(file):
                valid_file_path_list.append(os.path.join(root, file))

    # Timestamp this run in the cache store for traceability.
    append_comments(images_cache_path, datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))

    maxProgress = len(valid_file_path_list)
    for currentProgress, file_path in enumerate(valid_file_path_list, start=1):
        print(f"{currentProgress}/{maxProgress} processing {file_path}")
        file = os.path.basename(file_path)
        file_name, file_extension = os.path.splitext(file)

        # Read and parse the HTML file.
        with open(file_path, 'r', encoding='utf-8') as f_handler:
            html_content = f_handler.read()
        soup = BeautifulSoup(html_content, 'html.parser')

        html_modified = False
        for img in soup.find_all('img'):
            try:
                src = img.get('src') or img.get('data-src')
                if not (src and external_img_pattern.match(src)):
                    continue

                use_cache_flag = False
                img_local_relative_path = get_index_img(src)
                if img_local_relative_path and img_local_relative_path in local_file_mapping:
                    # Map the cache-relative path to its real physical location.
                    # This is more general than os.path.join(images_base_dir, ...):
                    # it supports images_base_dir/<batch_folder>/<relative path>.
                    img_full_path = local_file_mapping[img_local_relative_path]
                    if os.path.exists(img_full_path):
                        print(f"use image cache {img_local_relative_path}...")
                        img['src'] = img_full_path
                        use_cache_flag = True
                        html_modified = True

                if not use_cache_flag:
                    print(f"requesting {src}...")
                    response = requests.request("GET", src, timeout=8, headers=headers)
                    if response.status_code == 200:
                        # Save under images_base_dir/<html GUID>/<original name>.
                        img_name = extract_filename_from_url(src)
                        img_path = os.path.join(file_name, img_name)
                        img_full_dir = os.path.join(images_base_dir, file_name)
                        img_full_path = os.path.join(img_full_dir, img_name)
                        os.makedirs(img_full_dir, exist_ok=True)
                        with open(img_full_path, 'wb') as f:
                            f.write(response.content)
                            print(f"save image to {img_full_path}")
                        # Record the download so later runs reuse the local copy.
                        # (index_img returns None; the original pointlessly
                        # assigned its result.)
                        index_img(src, img_path)
                        # Rewrite the HTML to point at the local file.
                        img['src'] = img_full_path
                        html_modified = True
                    else:
                        print(f"request failed: status: {response.status_code}")
            except Exception as e:
                # Best-effort: one bad image (timeout, DNS failure, disk error)
                # must not abort the whole run. The original special-cased
                # "Read timed out" but both branches were identical.
                print(e)

        if html_modified:
            # BUG FIX: the original wrote to f'{root}/{file_name}{file_extension}',
            # but `root` is a stale leftover from the os.walk loop above and may
            # point at a different directory than file_path for files found in
            # subdirectories. Write back to the exact file we read.
            with open(file_path, 'w', encoding='utf-8') as f_handler:
                f_handler.write(str(soup))
    print('图片下载和HTML更新完成。')