from datetime import datetime

import json

import os

import re
import requests

from bs4 import BeautifulSoup

from folder_scanner import read_valid_files
from local_path_mapping import build_mapping
from util_cache import init_img_index, get_index_img, index_img
from util_store import append_comments
from util_url import extract_filename_from_url

# Matches absolute http(s) URLs that end in a common raster-image extension.
# Only a prefix match is required (used with .match()), so trailing query
# strings such as ".jpg?source=..." are still accepted via backtracking.
external_img_pattern = re.compile(r'https?://[^"\']+\.(jpg|jpeg|png|gif|webp|avif)')
images_cache_path = "cache"  # directory holding the url -> local-path image index
replace_link_flag = False    # when True, rewrite <img src> to the local file path
enable_skip_gif = False      # when True, .gif images are not downloaded
# In-memory index mapping already-downloaded image URLs to relative paths.
img_cache_dict = init_img_index(images_cache_path)
# Browser-like request headers; some image hosts reject the default UA.
headers = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html, application/xhtml+xml, */*',
    'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
}


def process(file_path):  # metadata json file
    """Localize the external images referenced by one metadata JSON file.

    Loads *file_path*, parses its "Description" HTML, and for every <img>
    whose src matches ``external_img_pattern`` either reuses a cached local
    copy (via ``img_cache_dict`` + ``local_file_mapping``) or downloads the
    image into ``images_base_dir/batch_name/<json stem>/`` and records it in
    the cache index. When ``replace_link_flag`` is set and any link changed,
    the JSON file is rewritten with src pointing at the local path.

    Relies on module-level globals: images_base_dir, batch_name,
    local_file_mapping, img_cache_dict, headers, replace_link_flag,
    enable_skip_gif, images_cache_path.
    """
    # Stem of the JSON file name doubles as the per-game image folder name.
    file_name = os.path.splitext(os.path.basename(file_path))[0]
    try:
        with open(file_path, "r", encoding="utf-8") as fh:
            json_content = json.load(fh)
    except json.JSONDecodeError as e:
        print(f"解析文件 {file_path} 时出错：{e}")
        return  # without valid JSON there is nothing to process
    except Exception as e:
        print(f"读取文件 {file_path} 时出错：{e}")
        return

    html_content = json_content["Description"]
    # Parse the description HTML and collect every <img> tag.
    soup = BeautifulSoup(html_content, 'html.parser')
    img_tags = soup.find_all('img')
    html_modified = False
    if not img_tags:
        print("no images in description.")
    for img in img_tags:
        try:
            # Some sites lazy-load images via data-src; fall back to it.
            src = img.get('src') or img.get('data-src')
            if src and external_img_pattern.match(src):
                use_cache_flag = False
                img_local_relative_path = get_index_img(img_cache_dict, src)
                if img_local_relative_path and img_local_relative_path in local_file_mapping:
                    # Map the cached relative path to its real physical
                    # location; the mapping already accounts for the
                    # batch-folder layout under images_base_dir.
                    img_full_path = local_file_mapping[img_local_relative_path]
                    if os.path.exists(img_full_path):
                        print(f"use image cache {img_local_relative_path}...")
                        if replace_link_flag:
                            img['src'] = img_full_path  # ideally an image-hosting URL instead
                        use_cache_flag = True
                        html_modified = True

                if not use_cache_flag:  # not in cache: download it
                    print(f"requesting {src}...")
                    response = requests.get(src, timeout=8, headers=headers)
                    if response.status_code == 200:
                        # Derive the on-disk file name from the image URL.
                        img_name = extract_filename_from_url(src)
                        if enable_skip_gif and img_name.endswith(".gif"):
                            print(f"skipped gif file: {src}")
                            continue
                        img_path = os.path.join(file_name, img_name)
                        img_full_dir = os.path.join(images_base_dir, batch_name, file_name)
                        img_full_path = os.path.join(img_full_dir, img_name)
                        # exist_ok avoids the check-then-create race.
                        os.makedirs(img_full_dir, exist_ok=True)
                        with open(img_full_path, 'wb') as f:
                            f.write(response.content)
                            print(f"save image to {img_full_path}")
                        index_img(src, images_cache_path, img_path)
                        # Rewrite the HTML image link to the local path.
                        if replace_link_flag:
                            img['src'] = img_full_path  # ideally an image-hosting URL instead
                        html_modified = True
                    else:
                        print(f"request failed: status: {response.status_code}")
            else:
                print(f"skipped src: {src}")
        except Exception as e:
            # Best-effort per image: log the failure (timeouts included)
            # and continue with the next <img> instead of aborting the file.
            print(e)
    if replace_link_flag and html_modified:
        json_content["Description"] = str(soup)
        with open(file_path, "w", encoding="utf-8") as fh:
            # ensure_ascii=False keeps Chinese characters readable in the file
            json.dump(json_content, fh, ensure_ascii=False)
batch_name = "20250831"
folder_path = f"E:/PlayniteMetadata/{batch_name}/games"  # replace with your folder path
# build_mapping walks every batch folder itself, so batch_name is not embedded here.
images_base_dir = "E:/PlayniteHtmlImages"
local_file_mapping = build_mapping()
valid_file_path_list = read_valid_files(folder_path, ".json")
# Record a timestamped run marker in the cache index.
append_comments(images_cache_path, datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
max_progress = len(valid_file_path_list)
for current_progress, file_path in enumerate(valid_file_path_list, start=1):
    print(f"{current_progress}/{max_progress} processing {file_path}")
    process(file_path)