import hashlib
import re
import time
from datetime import datetime
import json
import os
import sys
import uuid

import requests
from lxml import etree

import config_setting
import util_store
from logger import Logger
from util import extract_filename_from_url, format_path

# Feature switches: crawl gallery pages, drive fetches through Selenium,
# and probe guessed high-resolution image URLs ("sniffer").
enable_gallery = True
enable_selenium = True
enable_sniffer = False

if enable_selenium:
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait
    from selenium.webdriver.chrome.service import Service
    from selenium.webdriver.chrome.options import Options

    # Local Chrome-for-Testing install; paths are machine-specific.
    chromedriver_path = "D:/webdriver/chromedriver/chromedriver.exe"
    chrome_path = "D:/webdriver/chrome-win64/chrome.exe"
    chrome_options = Options()
    chrome_options.binary_location = chrome_path
    prefs = {
        "profile.managed_default_content_settings.javascript": 2  # 2 means disabled
    }
    chrome_options.add_experimental_option("prefs", prefs)

    service = Service(executable_path=chromedriver_path)
    # Single shared browser session reused by all page fetches below.
    driver = webdriver.Chrome(service=service, options=chrome_options)

# Redirect stdout through the project Logger into a timestamped log file
# (presumably the Logger also echoes to console — see logger module).
sys.stdout = Logger(datetime.now().strftime("log/image_crawler_%Y%m%d_%H%M%S") + ".txt")
batch_name = config_setting.batch_name
enable_skip_gif = False

# Disk layout: crawled metadata JSON under base_path; downloaded images and
# the HTML cache under the image paths, all partitioned per batch.
base_path = f"E:/fitgirl_repack_data/{batch_name}"
image_base_path = f"E:/fitgirl_repack_images/{batch_name}"
html_cache_base_path = f"E:/fitgirl_repack_images/html_cache/{batch_name}"
# Cookies captured from the Selenium session and replayed on later page loads.
cookies11 = []
# Headers for direct image downloads (some hosts require the Referer).
img_headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br, zstd',
        'Accept-Language': 'zh-CN,zh;q=0.9,zh-TW;q=0.8,en;q=0.7',
        'Referer': 'https://imageban.ru/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36',
    }





# Headers for HTML page requests in the non-Selenium path. The Cookie value was
# captured from a real browser session (includes cf_clearance) — it expires and
# must be refreshed manually when requests start failing.
text_headers = {
    # ':authority': 'ru.riotpixels.com',
    # ':method': 'GET',
    # ':path': 'https',
    # ':scheme': '/games/tropico-6/screenshots/44/?utm_source=emb-gfx-html&utm_medium=image&utm_campaign=rp-mass',
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    # 'Accept-Encoding': 'gzip, deflate, br, zstd',
    'Accept-Language': 'zh-CN,zh;q=0.9,zh-TW;q=0.8,en;q=0.7',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36',
    'Cookie': '__gads=ID=46ddbdcfe51bb3b6:T=1745205106:RT=1745205106:S=ALNI_MZs3z1L3Los8C88JssC26jNURxxfQ; __gpi=UID=000010a80c18fd42:T=1745205106:RT=1745205106:S=ALNI_MZQQ4cWzOtgOYdJknarCI1VeoLtPw; __eoi=ID=e2acd5c4bb88ea80:T=1745205106:RT=1745205106:S=AA-AfjY69TdoaSNEi9SRqJ_aGtDr; popdown=0; rp_utc_offset=28800; rp_session=eyIuY3NyZiI6eyIgYiI6Ik5EVTJOMlpoT0dZeE16SXhNbUpqWWpBMk1EazJNR1ZpTmpVM1kyTTBNVGc9In19.GudJDA.aB1sAezikJJw6t-vYxh9vZOn9gk; __utma=95367967.514228594.1745205133.1745205133.1745205133.1; __utmc=95367967; __utmz=95367967.1745205133.1.1.utmcsr=emb-gfx-html|utmccn=rp-mass|utmcmd=image; __utmt=1; adtech_uid=0a8d25cf-c155-45ae-992c-cf51ababd098%3Ariotpixels.com; top100_id=t1.2946854.1465899886.1745205135183; cf_clearance=TsKKNz2_xcwFolywClUz4W8W_E.Uk_q6n.PmWnM8RPY-1745205135-1.2.1.1-BeS.nU4CQIoxO7ndexOiocJgjmfjrPpG2uQNXOuFoXtORX5xP8apm38WbAyYWyGMYlJ2pa0djNG6lFZnVZsNGnYkNdVNTeG_b34jMs0wIp9.SrFOHDhwTAu.cY0PCNNW9DE8PSeZQMSIVeyDtp4f7pZWONzL7f9uUPrZSLhtF3BOoqPveAut2BYuLdjQzzQhMOt7tSn0uHL..QQYaVwVXK65ieTN3Dg4oFPaXjrHZSSuiVxjVPYYUvXbSBFc9BDNYrnHyaCCxKq8PpcAIa1twvgDjYpwVr78NBpayiIrSHtgO.ajtn4V8Aq79XUoTmpOkYg.9RNN8OcnIae0WFfTKh82yq4lHq395eFQv_eO2IY; _ym_uid=1745205135415679508; _ym_d=1745205135; __utmb=95367967.6.7.1745205135428; rp_cdn_check=1; rp_cdn_tld=net; _ym_visorc=b; _ym_isad=1; FCNEC=%5B%5B%22AKsRol9x8l4sANT-K1sNNWabQb33P0bLXJGW6FbIoEy2lqRnWvaVq0OQJofgz14j9ZmMs7OOU_yQyHjJMMNkJHkMgjs-O-ZQZcpF4ym_KLeKorvArEFtd0BYXSaaKeQBq2uusC8fJz02GGDKuvIfhnnTgrZO9uyrWA%3D%3D%22%5D%5D; t3_sid_2946854=s1.127996673.1745205135184.1745205151099.1.3.1.1',
    # 'upgrade-insecure-requests': '1',
    # 'priority': 'u=0, i',
    # 'sec-ch-ua': '"Google Chrome";v="135", "Not-A.Brand";v="8", "Chromium";v="135"',
    # 'sec-ch-ua-mobile': '?0',
    # 'sec-ch-ua-platform': '"Windows"',
    # 'sec-fetch-dest': 'document',
    # 'sec-fetch-mode': 'navigate',
    # 'sec-fetch-site': 'none',
    # 'sec-fetch-user': '?1',
    # 'cache-control': 'max-age=0'
}
# serialNo -> metadata dict loaded from the batch's JSON files (filled below).
collection = dict()


def save_image(src, category, param_data):
    """Download the image at *src* into the game's *category* sub-folder.

    src        -- absolute image URL.
    category   -- sub-folder under the game directory (e.g. "screenshots").
    param_data -- per-game metadata dict; 'serialNo'/'title' build the path,
                  'filepath' is used in error messages.
    Returns True when the image is saved, already present, or permanently
    gone (HTTP 404, so retrying is pointless); False on any other failure.
    """
    img_name = extract_filename_from_url(src)
    if enable_skip_gif and img_name.endswith(".gif"):
        print(f"skipped gif file: {src}")
        return False
    if img_name.strip() == "":
        # URL carries no usable file name; synthesize one so the image still gets saved.
        img_name = str(uuid.uuid4()) + ".jpg"
    img_path = get_path(category, img_name, param_data)
    parent_dir = os.path.dirname(img_path)
    if parent_dir != "":
        # exist_ok avoids the check-then-create race of the old isdir()+makedirs() pair.
        os.makedirs(parent_dir, exist_ok=True)
    if os.path.exists(img_path):
        print(f"skipped, image already exists: {img_path}")
        return True
    local_path = img_path.replace('/', '\\')
    print(f"requesting image {src}, local path {local_path}")
    try:
        response = requests.get(src, timeout=30, headers=img_headers)
        if response.status_code == 200:
            with open(img_path, 'wb') as f:
                f.write(response.content)
                print(f"saved image to {img_path}")
            return True
        print(f"error: status code {response.status_code}")
        # 404 means the resource is gone for good; report success so it isn't retried.
        return response.status_code == 404
    except Exception as ex:
        print(f"error: {param_data['filepath']} {ex}")
        return False


def get_path(sub_folder, filename, param_data):
    """Return the local path for *filename* inside the game's image directory.

    The game directory is "<image_base_path>/<serialNo>__<title>" (both parts
    sanitized by format_path). When *sub_folder* is truthy the file lives one
    level deeper inside that sub-folder.
    """
    game_dir = "%s/%s__%s" % (
        image_base_path, format_path(param_data['serialNo']), format_path(param_data['title']))
    if not sub_folder:
        return f"{game_dir}/{filename}"
    return f"{game_dir}/{sub_folder}/{filename}"

def done_marker(param_data):
    """Write a done.txt marker into the game's directory.

    Its presence lets later runs skip the whole entry (see the main loop).
    """
    marker_path = get_path(None, "done.txt", param_data)
    with open(marker_path, 'w') as marker_file:
        marker_file.write("done")

def process_gallery_html(src, param_data):
    """Fetch a riotpixels screenshots gallery page and save every
    riotpixels-hosted screenshot it links, at the highest listed resolution.

    src        -- any screenshot URL on the gallery; it is normalized back to
                  the gallery root before fetching, so one call covers the
                  whole remote directory.
    param_data -- per-game metadata dict ('filepath' used in error messages).
    Returns True when every image was saved (or is permanently gone),
    False on any failure or when the page was previously marked unreachable.
    """
    global cookies11
    base_url = "https://en.riotpixels.com"
    success = True
    src = src.split("screenshots")[0] + "screenshots"  # e.g. https://en.riotpixels.com/games/antioma/screenshots/11/?utm_source=... is normalized to https://en.riotpixels.com/games/antioma/screenshots
    # Cache key is the SHA-1 of the normalized gallery URL.
    src_hash = hashlib.sha1(src.encode("utf-8")).hexdigest()
    html_cache_path = "%s/%s.html" % (html_cache_base_path, src_hash)
    if os.path.exists(html_cache_path):
        print(f"use cached html {html_cache_path} from {src}")
        with open(html_cache_path, 'r', encoding='utf-8') as html_file:
            content = html_file.read()
    elif os.path.exists("%s.empty" % (html_cache_path)):  # the page failed to load before; skip repeated attempts on the same URL
        return False
    else:
        print(f"requesting html {src} cache path {html_cache_path}")
        try:
            if enable_selenium:
                # Hide navigator.webdriver so the site does not detect automation.
                driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                    "source": """
                	Object.defineProperty(navigator, 'webdriver', {
                	  get: () => undefined
                	})
                  """
                })
                # Replay cookies captured from the previous page load.
                for cookie in cookies11:
                    driver.add_cookie(cookie)
                driver.get(src)
                cookies11 = driver.get_cookies()
                # Wait until the footer renders, i.e. the page finished loading.
                WebDriverWait(driver, 10, 0.5).until(
                    EC.presence_of_element_located((By.ID, "footer-area"))
                    # EC.presence_of_element_located((By.ID, "article_gfx"))
                )
                content = driver.page_source
                # cache html on disk
                with open("%s/%s.html" % (html_cache_base_path, src_hash), "w", encoding='utf-8') as html_file:
                    html_file.write(content)
            else:
                response = requests.request("GET", src, timeout=8, headers=text_headers)
                if response.status_code == 200:
                    content = response.text
                    # cache html on disk
                    with open("%s/%s.html" % (html_cache_base_path, src_hash), "w", encoding='utf-8') as html_file:
                        html_file.write(response.text)
                else:
                    print(f"error: status code {response.status_code}")
                    if response.status_code == 404:  # 404: the resource is gone for good; further requests would be wasted
                        return True
                    else:
                        return False
        except Exception as ex:
            # Mark this URL as unreachable so future runs skip it (see .empty check above).
            with open("%s/%s.html.empty" % (html_cache_base_path, src_hash), "w", encoding='utf-8') as html_file:
                html_file.write("empty!")
            print(f"error: {param_data['filepath']} {ex}")
            return False

    tree = etree.HTML(content)

    img_src_array = []
    # Each thumbnail's onclick is "return [ {u: url, w: ..., h: ...}, ... ]";
    # strip the "return" and parse the JSON list of size variants.
    img_src_data_list = tree.xpath("//article[@id='article_gfx']/section[1]/ul/li/a/@onclick")
    img_src_data_list = [item.replace("return", "") for item in img_src_data_list]
    for img_src_data_str in img_src_data_list:
        img_src_data = json.loads(img_src_data_str)
        if len(img_src_data) > 0:
            max_item = max(img_src_data, key=lambda x: x['h'])  # pick the variant with the greatest height
            img_src_url = max_item['u']
            img_src_array.append(img_src_url)
            # img_src_url = img_src_data[0]['u']  # the first entry is usually the highest resolution
            # print(img_src_url)
        # img_src_url = json.loads(img_src_data[0])[0]['u']
    # print(img_src_data)

    # img_src_array = tree.xpath("//article[@id='article_gfx']/section[1]/ul/li/a/@href")
    # img_src_array = [f"{base_url}{x}?utm_source=emb-gfx-html&utm_medium=image&utm_campaign=rp-mass" for x in img_src_array]
    for img_src in img_src_array:
        # Only download images hosted on riotpixels itself.
        if "riotpixels" in img_src:
            success &= save_image(img_src, "screenshots", param_data)
    return success


def process_html(src, param_data):
    """Fetch a single screenshot page and save the riotpixels images on it.

    Older variant of process_gallery_html: works on one screenshot page at a
    time instead of the whole gallery, and does not write .empty markers for
    unreachable pages.
    Returns True when every image was saved (or the page is permanently gone),
    False on any failure.
    """
    global cookies11
    success = True
    print(f"requesting html {src}")
    # Cache key is the SHA-1 of the page URL.
    src_hash = hashlib.sha1(src.encode("utf-8")).hexdigest()
    html_cache_path = "%s/%s.html" % (html_cache_base_path, src_hash)
    if os.path.exists(html_cache_path):
        print(f"use cached html {html_cache_path} from {src}")
        with open(html_cache_path, 'r', encoding='utf-8') as html_file:
            content = html_file.read()
    else:
        print(f"requesting html {src} cache path {html_cache_path}")
        try:
            if enable_selenium:
                # Hide navigator.webdriver so the site does not detect automation.
                driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
                    "source": """
                	Object.defineProperty(navigator, 'webdriver', {
                	  get: () => undefined
                	})
                  """
                })
                # Replay cookies captured from the previous page load.
                for cookie in cookies11:
                    driver.add_cookie(cookie)
                driver.get(src)
                cookies11 = driver.get_cookies()
                # Wait until the footer renders, i.e. the page finished loading.
                WebDriverWait(driver, 10, 0.5).until(
                    EC.presence_of_element_located((By.ID, "footer-area"))
                    # lambda d: d.find_element(By.XPATH, "//footer/section")
                )

                # time.sleep(10)

                content = driver.page_source
                # cache html on disk
                with open("%s/%s.html" % (html_cache_base_path, src_hash), "w", encoding='utf-8') as html_file:
                    html_file.write(content)
            else:
                response = requests.request("GET", src, timeout=8, headers=text_headers)
                if response.status_code == 200:
                    content = response.text
                    # cache html on disk
                    with open("%s/%s.html" % (html_cache_base_path, src_hash), "w", encoding='utf-8') as html_file:
                        html_file.write(response.text)
                else:
                    print(f"error: status code {response.status_code}")
                    if response.status_code == 404:  # 404: the resource is gone for good; further requests would be wasted
                        return True
                    else:
                        return False
        except Exception as ex:
            print(f"error: {param_data['filepath']} {ex}")
            return False

    tree = etree.HTML(content)
    img_parent_link_array = tree.xpath("//a[img]/@onclick")  # first look for <img> tags wrapped in an <a>
    img_src_array = list()
    if len(img_parent_link_array) > 0:
        # onclick is "return [ {u: url, ...}, ... ]"; take the first variant's URL.
        for link in img_parent_link_array:
            link_json = json.loads(link.replace("return ", ""))
            if len(link_json) > 0:
                img_src_array.append(link_json[0]['u'])
            else:
                print("error: no full image src found")
                continue
    else:
        # No anchor-wrapped images: fall back to plain <img> sources.
        img_src_array = tree.xpath("//img/@src")

    # img_src_array = [extract_filename_from_url(item) for item in tree.xpath("//img/@src")]
    for img_src in img_src_array:
        # img_name = extract_filename_from_url(img_src)
        # Only download images hosted on riotpixels itself.
        if "riotpixels" in img_src:
            success &= save_image(img_src, "screenshots", param_data)
    return success


# Ensure output directories exist; exist_ok replaces the old isdir()+makedirs()
# check-then-create pair (which was racy and needlessly guarded dirname != "").
os.makedirs(image_base_path, exist_ok=True)
os.makedirs(html_cache_base_path, exist_ok=True)
print(f"walking {base_path}")
# Load every repack metadata JSON under base_path into `collection`,
# keyed by the game's serial number.
for root, dirs, files in os.walk(base_path):
    for filename in files:
        if "_Updated__" in filename:
            # "_Updated__" variants duplicate a base release; crawl only the base entry.
            print(f"skipped {filename} cos it is Updated version.")
            continue
        # Only .json metadata files are of interest.
        if filename.endswith('.json'):
            file_path = os.path.join(root, filename)
            # NOTE: renamed from `file` — the original shadowed the loop
            # variable with the open file handle.
            with open(file_path, 'r', encoding='utf-8') as json_file:
                data = json.load(json_file)
                # Remember where the record came from for error reporting.
                data["filepath"] = file_path
                collection[data["serialNo"]] = data

# data = collection["#5010"]
# screenshot_links = data['screenshot_links']
# for index, screenshot_src in enumerate(screenshot_links):
#     process_html(screenshot_src)
#
# album_image_src = data['album_image_src']
# save_image(album_image_src, "album")

currentProgress = 0
maxProgress = len(collection.items())
# Main crawl loop: one iteration per game entry; failures never abort the run.
for key, data in collection.items():
    currentProgress += 1
    try:
        print(f"{currentProgress}/{maxProgress} processing {key}: {data['title']}")  # progress line: serial number and title
        # Skip entries that already finished downloading (done.txt marker present).
        if os.path.exists(get_path(None, "done.txt", data)):
            print(f"skipped {key}: {data['title']}")
            continue
        # Treat an existing game directory as already crawled — only used to get
        # past a stuck resource; normally disabled.
        # if os.path.isdir("%s/%s__%s" % (image_base_path, format_path(data['serialNo']), format_path(data['title']))):
        #     continue
        # if key != "#053":
        #     continue
        success = True
        # Save gallery screenshots.
        if enable_gallery:
            screenshot_links = data['screenshot_links']
            # Any single link would do, since one gallery fetch downloads every image
            # in the remote directory; loop over all of them anyway in case images are
            # spread across different remote directories. The HTML/image caches make
            # the extra iterations essentially free — no duplicate requests are made.
            for index, screenshot_link in enumerate(screenshot_links):
                success &= process_gallery_html(screenshot_link, data)
                # success &= process_html(screenshot_link, data)

        # Save the album thumbnails listed directly in the metadata.
        screenshot_img_srcs = data['screenshot_img_srcs']
        for index, screenshot_src in enumerate(screenshot_img_srcs):
            success &= save_image(screenshot_src, "screenshots_album", data)

        # Probe for higher-resolution variants by rewriting ".240p." in the URL.
        if enable_sniffer:
            image_resolution_sniffer = ['720p', '480p', '1080p']
            sniffer_list_path = get_path(None, "screenshots_sniffer_map.txt", data)
            sniffer_map = util_store.read_map(sniffer_list_path)
            for screenshot_img_src in screenshot_img_srcs:
                if screenshot_img_src in sniffer_map:
                    # Mapping already discovered on a previous run; download directly.
                    screenshot_high_resolution_img_src = sniffer_map[screenshot_img_src]
                    success &= save_image(screenshot_high_resolution_img_src, "screenshots_sniffer", data)
                else:
                    for resolution in image_resolution_sniffer:
                        screenshot_high_resolution_img_src = re.sub(r'\.240p\.', f'.{resolution}.', screenshot_img_src)
                        print(f"sniffering {resolution}...")
                        response = requests.request("GET", screenshot_high_resolution_img_src, timeout=10, headers=img_headers)
                        if response.status_code == 200:
                            print(f"sniffer success: {screenshot_high_resolution_img_src}")
                            # Sniffer hit: 1. save the image; 2. persist the low-res -> high-res
                            # URL mapping so future runs skip the probing.
                            success &= save_image(screenshot_high_resolution_img_src, "screenshots_sniffer", data)
                            util_store.append_with_subsidiary(sniffer_list_path, screenshot_img_src, screenshot_high_resolution_img_src)
                            break
                        elif response.status_code == 404:
                            pass
                        else:
                            print(f"sniffer error: status code {response.status_code}")

        # screenshot_high_resolution_img_srcs = [re.sub(r'\.240p\.', '.720p.', url) for url in screenshot_img_srcs]
        # for index, screenshot_src in enumerate(screenshot_high_resolution_img_srcs):
        #     success &= save_image(screenshot_src, "screenshots", data)
        # Save the cover/album image.
        album_image_src = data['album_image_src']
        success &= save_image(album_image_src, "album", data)
        # Only write the done marker when every download for this entry succeeded.
        if success:
            done_marker(data)
    except Exception as ex:
        # self.unknown_error_current_retry += 1
        print(f"error: {data['filepath']}, {ex}")
#     src = data['album_image_src']
#     response = requests.request("GET", src, timeout=8, headers=headers)
#     img_path = "E:/abc.jpg"
#     if response.status_code == 200:
#         with open(img_path, 'wb') as f:
#             f.write(response.content)
#             print(f"save image to {img_path}")
