import codecs
from datetime import datetime
import json
import os
import sys

import requests
from lxml import etree

import config_setting
from logger import Logger
from util import format_path

# Batch configuration from the project-level settings module.
date_line = config_setting.date_line
batch_name = config_setting.batch_name

# Redirect stdout through a timestamped log file.
# NOTE(review): presumably Logger tees output to both console and file — confirm in logger module.
sys.stdout = Logger(datetime.now().strftime("log/main_%Y%m%d_%H%M%S") + ".txt")

# With a date cut-off configured, records go into a per-batch subfolder;
# otherwise everything lands in the flat data directory.
base_path = f"E:/fitgirl_repack_data/{batch_name}" if date_line else "E:/fitgirl_repack_data"

# Browser-like request headers so the site serves the normal HTML pages.
headers = {
    'Connection': 'Keep-Alive',
    'Accept': 'text/html, application/xhtml+xml, */*',
    'Accept-Language': 'en-US,en;q=0.8,zh-Hans-CN;q=0.5,zh-Hans;q=0.3',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
}
# Index previously-scraped articles by serial number so re-runs skip them.
# Every *.json file under base_path is expected to hold one article dict
# with a "serialNo" key (written by the scraping loop below).
collection = dict()
for root, dirs, files in os.walk(base_path):
    for filename in files:
        # Only article records are stored as .json; skip everything else.
        if not filename.endswith('.json'):
            continue
        file_path = os.path.join(root, filename)
        # Renamed from `file`: the original reused the loop variable name as
        # the context-manager target, shadowing the filename string.
        with open(file_path, 'r', encoding='utf-8') as fp:
            data = json.load(fp)
            collection[data["serialNo"]] = data


# Crawl the configured listing pages and persist one JSON file per
# newly-seen repack article.
#
# Per page: fetch -> parse <article> nodes -> extract title/date/serial,
# album image, screenshots, magnet/torrent links, repack features and the
# game description -> register in `collection` and write the record to disk.
# When `date_line` is set, crawling stops at the first article older than it.
stop_flag = False
for page_num in range(11, 12):
    if date_line and stop_flag:
        break
    if page_num <= 0:
        continue
    # Page 1 lives at the site root; later pages under /page/<n>/.
    if page_num == 1:
        src = "https://fitgirl-repacks.site"
    else:
        src = f"https://fitgirl-repacks.site/page/{page_num}/"
    print(f"processing page_num: {page_num}")
    response = requests.request("GET", src, timeout=8, headers=headers)
    content = response.content
    tree = etree.HTML(content)
    articles = tree.xpath("//article")
    for article in articles:
        article_entity = dict()
        div_entry_headers = article.xpath("header[@class='entry-header']")
        title = div_entry_headers[0].xpath("h1/descendant::*/text()")[0]
        article_entity["title"] = title
        print(f"processing article: {title}")
        date = div_entry_headers[0].xpath("div[@class='entry-meta']/span[@class='entry-date']/descendant::*/text()")
        article_entity["date"] = date
        # With a date_line configured, stop at the first article older than
        # the cut-off (listing pages appear to run newest-first).
        if date_line and datetime.strptime(date[0], "%d/%m/%Y").date() < date_line.date():
            stop_flag = True
            break
        div_entry_contents = article.xpath("div[@class='entry-content']")
        # Posts without a '#<serial>' span in an <h3> are not repack
        # articles (announcements etc.); skip them.
        serialNo_array = div_entry_contents[0].xpath("h3/span[contains(text(), '#')]/text()")
        if len(serialNo_array) <= 0:
            continue
        serialNo = div_entry_contents[0].xpath("string(h3/span[contains(text(), '#')])")
        if serialNo in collection:
            print(f"existed, skipped {serialNo}")
            continue
        article_entity["serialNo"] = serialNo
        # First <p> after the <h3> that contains an <a>: the album/cover panel.
        album_panel = div_entry_contents[0].xpath("h3/following-sibling::p[a]")[0]
        album_image_src_array = album_panel.xpath("a/img/@src")
        if len(album_image_src_array) > 0:
            # Reuse the already-evaluated result (the original re-ran the
            # same xpath query a second time here).
            album_image_src = album_image_src_array[0]
        else:
            # Some posts place the cover <img> directly under <p>, not in <a>.
            album_image_src = album_panel.xpath("img/@src")[0]
        article_entity["album_image_src"] = album_image_src
        album_feature_text = album_panel.xpath("string()")
        article_entity["album_feature_text"] = album_feature_text

        # Locate the screenshots panel with progressively looser queries:
        # 1) <p> right after a top-level 'Screenshots' <h3>;
        # 2) same, but with the <h3> nested deeper in the content div;
        # 3) fallback: any <p> holding at least three <a><img> pairs.
        screenshot_panel_array = div_entry_contents[0].xpath("h3[contains(text(), 'Screenshots')]/following-sibling::p")
        if len(screenshot_panel_array) > 0:
            screenshot_panel = screenshot_panel_array[0]
        else:
            try_array = div_entry_contents[0].xpath("descendant::*/h3[contains(text(), 'Screenshots')]/following-sibling::p")
            if len(try_array) > 0:
                screenshot_panel = try_array[0]
            else:
                screenshot_panel_temp = div_entry_contents[0].xpath("p[count(a[img]) >=3]")
                if len(screenshot_panel_temp) > 0:
                    screenshot_panel = screenshot_panel_temp[0]
                else:
                    # Was an f-string with no placeholders (F541).
                    print("Warn: Screenshots panel not found")
                    screenshot_panel = None
        if screenshot_panel is not None:
            screenshot_links = screenshot_panel.xpath("a/@href")
            article_entity["screenshot_links"] = screenshot_links
            screenshot_img_srcs = screenshot_panel.xpath("a/img/@src")
            article_entity["screenshot_img_srcs"] = screenshot_img_srcs
        else:
            article_entity["screenshot_links"] = []
            article_entity["screenshot_img_srcs"] = []

        magnet_urls = div_entry_contents[0].xpath("descendant::*/a[contains(text(), 'magnet')]/@href")
        article_entity["magnet_urls"] = magnet_urls
        torrent_url_array = div_entry_contents[0].xpath("descendant::*/a[contains(text(), '.torrent file only')]/@href")
        if len(torrent_url_array) > 0:
            # NOTE(review): key is historically plural but holds a single URL
            # string — kept for backward compatibility with stored records.
            article_entity["torrent_urls"] = torrent_url_array[0]
        repack_features = div_entry_contents[0].xpath("string(h3[contains(text(), 'Repack Features')]/following-sibling::ul)")
        article_entity["repack_features"] = repack_features
        description = div_entry_contents[0].xpath("string(descendant::*/div[contains(text(), 'Game Description')]/following-sibling::div)")
        article_entity["description"] = description

        collection[serialNo] = article_entity
        # Persist the record. Mode changed 'ab+' -> 'w': appending a second
        # complete JSON document to an existing file would corrupt it;
        # overwriting yields one valid document per file.
        output_path = "%s/%s__%s.json" % (base_path, format_path(serialNo), format_path(title))
        if not os.path.isdir(os.path.dirname(output_path)) and os.path.dirname(output_path) != "":
            os.makedirs(os.path.dirname(output_path))
        with codecs.open(output_path, mode='w', encoding='utf-8') as f:
            line = json.dumps(article_entity, default=lambda o: o.__dict__, indent=2)
            print(f"output file to {output_path}...")
            f.write(line)
# Removed: commented-out screenshot-download experiment (which also wrote the
# wrong response object) and a leftover debug print("1").
print("done")
