import argparse
import codecs
import json
import os
import re
import shutil
import sys
import time
import traceback
from datetime import datetime

import requests
from lxml import etree

import config_setting
import util
from logger import Logger
from module_loader import discover_available_jobs, dynamic_module_loader
from proxy_pool import ProxyPool
# Whether HTTP requests go through the proxy pool; may be overridden by the
# dynamically loaded job module in __main__.
enable_proxy = True
# ProxyPool instance; created in __main__ only when enable_proxy is True.
pool = None
# Count of unexpected exceptions during processing; printed at script end.
error_count = 0

# Shared request headers for every HTTP call (listing referer is added later).
headers = {
        'Connection': 'Keep-Alive',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        # 'accept-encoding': 'gzip, deflate, br, zstd',
        'Accept-Language': 'zh-CN,zh;q=0.9,zh-TW;q=0.8,en;q=0.7',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36',
        # 'cookie': 'bbs_sid=uglvr5hago7ce2ec0qlmjko8ah; bbs_token=ouwgm2ZQjDISkvWRcBkat1YE9_2BpMp4cQnT2eqnuFOxnyG_2FDzorPScmB1vxZ9sY75IbcFlCuCGhAL61hlIHHEIoWegEw_3D; Hm_lvt_041715a44f3d6718f1e69867b53b8aa0=1760105869,1760156972,1760192379,1760229246; HMACCOUNT=0EF7018863B2E101; cf_clearance=72qQRqDvNXD5VvyxxAPFb6TNSwUlKYF1hTJv8x.kPNE-1760230184-1.2.1.1-c31fYWUqnte2G49Yc5bUVUPKIc7TvovUHTYzKOjycZicQaXXlNsNm.Xg6_1dwDVc3W87aFn5RIm3rfTw6idP9X5Y0tjQa5hcsaBJKK8ixuAamGdF7GvmPJVvjeKbpy0kHE4oyV89N9FvXkYdvh6PbAK6kDzTta1vpHoghq865bk_joIwnO58ImdpWaQWetlf50O0PQjG26E.w52XpTobSeiJ1KbHTfoQkfT2VwQHF0s; Hm_lpvt_041715a44f3d6718f1e69867b53b8aa0=1760230352'
    }

# Local root directories for crawler output (Windows drive paths).
listing_base_path = "E:/btbbt_data_listing"
detail_base_path = "E:/btbbt_data_detail"
torrent_base_path = "E:/btbbt_data_torrent"

# Site base URL used for detail pages and attachment downloads.
base_url = "https://www.1lou.info"
# Skip attachment downloads when a page lists more than this many;
# a falsy value disables the check (see process_detail_html).
skip_over_count_attach = 5

def save_attachment(id, name, forum, attach_id, attach_name, src):
    """Download one torrent attachment of thread *id* and persist it locally.

    Parameters:
        id: thread id (part of the persisted file name).
        name: thread title (legacy file-name component, used for renaming).
        forum: forum key, e.g. "forum-4" (sub-directory of the batch).
        attach_id: attachment id from the detail page.
        attach_name: display text of the attachment link.
        src: site-relative URL of the attachment.

    Returns True on success (downloaded now, already on disk, or renamed
    from the legacy name); False when the site reports the attachment as
    missing ("附件不存在") — in that case an empty marker file is created
    so the attachment is skipped on subsequent runs.

    Raises requests.HTTPError on an HTTP error response (the caller's
    try/except counts it), and requests exceptions on network failure.
    """
    persistence_name = util.format_path(f"#{id} {attach_id} {attach_name}")
    # Legacy file name (used *name* instead of *attach_name*); kept only so
    # files downloaded under the old scheme can be renamed, not re-fetched.
    old_persistence_name = util.format_path(f"#{id} {attach_id} {name}")
    old_persistence_path = f"{torrent_base_path}/{config_setting.batch_name}/{forum}/{old_persistence_name}.torrent"
    # Marker recording that the site reported this attachment missing.
    ignored_empty_file_path = f"{torrent_base_path}/{config_setting.batch_name}/{forum}/#{id}_{attach_id}_attachement_skipped"
    torrent_persistence_path = f"{torrent_base_path}/{config_setting.batch_name}/{forum}/{persistence_name}"
    target_dir = os.path.dirname(torrent_persistence_path)
    if target_dir:
        # exist_ok avoids the check-then-create race of isdir()+makedirs().
        os.makedirs(target_dir, exist_ok=True)

    if os.path.exists(torrent_persistence_path):
        print(f"skipped, torrent already exists: {torrent_persistence_path}")
    elif os.path.exists(old_persistence_path):  # a torrent saved under the legacy (wrong) name exists
        shutil.move(old_persistence_path, torrent_persistence_path)
        print(f"{old_persistence_path} renamed to {torrent_persistence_path}")
    elif os.path.exists(ignored_empty_file_path):
        print(f"skipped, torrent ignored.")
    else:
        if enable_proxy:
            proxies = pool.get_requests_proxy()
            print(f"requesting torrent {src} with proxy: {proxies}")
            response_attachment = requests.request("GET", base_url + "/" + src, proxies=proxies, timeout=18, headers=headers)
        else:
            print(f"requesting torrent {src}")
            response_attachment = requests.request("GET", base_url + "/" + src, timeout=18, headers=headers)
        # Fail loudly on HTTP errors so a 404/500 page is never written to
        # disk as a .torrent file and silently treated as success.
        response_attachment.raise_for_status()
        if "附件不存在" in response_attachment.text:
            print(f"附件不存在")
            # Create an empty marker so this attachment is not re-requested.
            with open(ignored_empty_file_path, 'w'):
                pass
            return False
        else:
            with open(torrent_persistence_path, 'wb') as f:
                f.write(response_attachment.content)
            print(f"torrent saved to {torrent_persistence_path}")
        # Throttle a little to stay polite to the remote server.
        time.sleep(0.3)
    return True


def process_detail_html(id, name, forum, page, src):
    """Fetch a thread detail page (with local HTML caching), persist the
    extracted content as JSON, and download its torrent attachments.

    Parameters:
        id: thread id.
        name: thread title.
        forum: forum key, e.g. "forum-4".
        page: listing page number the thread link came from (used to build
            the referer header), or None/falsy when unknown.
        src: absolute URL of the thread detail page.

    Returns True when the page and every processed attachment succeeded;
    False if any attachment was reported missing by the site.

    Raises requests.HTTPError on an HTTP error response and requests
    exceptions on network failure (handled by the caller's try/except).
    """
    success = True
    detail = {}
    persistence_name = util.format_path(f"#{id} {name}")
    persistence_path = f"{detail_base_path}/{config_setting.batch_name}/cache/{forum}/{persistence_name}.html"
    cache_dir = os.path.dirname(persistence_path)
    if cache_dir:
        # exist_ok avoids the check-then-create race of isdir()+makedirs().
        os.makedirs(cache_dir, exist_ok=True)
    if os.path.exists(persistence_path):
        print(f"use cached html {persistence_path} from {src}")
        with open(persistence_path, 'r', encoding='utf-8') as html_file:
            content = html_file.read()
    else:
        if page:
            # Pretend we navigated from the listing page the link came from.
            # NOTE: this mutates the shared module-level headers dict, so the
            # referer persists for subsequent requests (original behavior).
            page_src = f"{base_url}/{forum}-{page}.htm?orderby=tid&digest=0"
            headers["referer"] = page_src
        if enable_proxy:
            proxies = pool.get_requests_proxy()
            print(f"requesting html {src} with proxy: {proxies}")
            response = requests.request("GET", src, proxies=proxies, timeout=18, headers=headers)
        else:
            print(f"requesting html {src}")
            response = requests.request("GET", src, timeout=18, headers=headers)
        # Never cache an HTTP error page: the cache branch above would reuse
        # the poisoned entry on every later run.
        response.raise_for_status()
        content = response.content
        with open(persistence_path, "w", encoding='utf-8') as html_file:
            html_file.write(response.text)
        print(f"cached html {src} to {persistence_path}")
        # Throttle a little to stay polite to the remote server.
        time.sleep(0.3)

    tree = etree.HTML(content)
    # Extract the first post's text and embedded image links.
    detail_page_array = tree.xpath("//div[@class='card-body']/div[@isfirst='1']//text()")
    detail_page_image_links_array = tree.xpath("//div[@class='card-body']/div[@isfirst='1']//img/@src")
    detail["id"] = id
    detail["name"] = name
    detail["forum"] = forum
    detail["src"] = src
    detail["detail"] = "\n".join(detail_page_array)
    detail["images"] = detail_page_image_links_array

    output_path = f"{detail_base_path}/{config_setting.batch_name}/{forum}/{persistence_name}.json"
    output_dir = os.path.dirname(output_path)
    if output_dir:
        os.makedirs(output_dir, exist_ok=True)
    with codecs.open(output_path, mode='w', encoding='utf-8') as f:
        line = json.dumps(detail, indent=2, ensure_ascii=False)
        print(f"json saved to {output_path}...")
        f.write(line)

    # Download the attachments listed on the page.
    li_attach_list = tree.xpath("//ul[@class='attachlist']/li")
    if skip_over_count_attach and len(li_attach_list) > skip_over_count_attach:
        print(f"attach count {len(li_attach_list)} over count, skipped for temporary.")
        return success
    for li in li_attach_list:
        attach_id = li.attrib['aid']
        attach_href_array = li.xpath("descendant::a/@href")
        attach_name_array = li.xpath("descendant::a/text()")
        if attach_href_array:
            attach_href = attach_href_array[0]
            attach_name = "".join(attach_name_array).strip()
            if attach_name == "":
                # Fall back to the thread title when the link has no text.
                attach_name = name
            success &= save_attachment(id, name, forum, attach_id, attach_name, attach_href)
        else:
            print(f"attachment href not found!")
    return success


if __name__ == "__main__":
    # Default forum set; may be overridden by the job module loaded below.
    forum_dict = {
        # "forum-1": "最新电影",
        # "forum-2": "电视剧集",
        # "forum-3": "高清电影",
        "forum-4": "高清剧集",
        # "forum-9": "游戏专区",
        # "forum-17": "动漫专区",
        # "forum-8": "音乐专区",
    }
    # Resume point: number of already-processed entries to skip; may be
    # overridden by the job module below. NOTE(review): the offset is applied
    # to EVERY forum in forum_dict, not only the first — confirm intended.
    starting_offset = 39016

    parser = argparse.ArgumentParser(description='btbbt爬虫detail')
    parser.add_argument('--batch_name', type=str, required=False, help='批次名（例：20231231）')
    parser.add_argument('--job_name', type=str, required=False, help='任务名（如批次下仅有唯一任务，可省略）')
    args = parser.parse_args()
    if args.batch_name:
        config_setting.batch_name = args.batch_name
    else:
        # Interactive fallback; an empty answer falls back to the cwd path
        # (original behavior, odd as it looks).
        batch_name = input("input batch: ").strip()
        if not batch_name:
            batch_name = os.getcwd()
        config_setting.batch_name = batch_name

    # Ensure the per-batch log directory exists (creates parents too), then
    # tee stdout into a timestamped log file.
    os.makedirs(f"assets/{config_setting.batch_name}/log", exist_ok=True)
    sys.stdout = Logger(datetime.now().strftime(f"assets/{config_setting.batch_name}/log/%Y%m%d_%H%M%S") + ".txt")

    available_jobs = discover_available_jobs(f"./assets/{config_setting.batch_name}")
    if len(available_jobs) == 0:
        raise Exception("error: No job found!")
    elif len(available_jobs) == 1:
        config_setting.job_name = available_jobs[0]
    else:
        if args.job_name:
            config_setting.job_name = args.job_name
        else:
            job_name = input("input job: ").strip()
            if not job_name:
                job_name = os.getcwd()
            config_setting.job_name = job_name

    # The job module may override proxy usage, the forum set and the offset.
    job_module = dynamic_module_loader(config_setting.job_name, f"./assets/{config_setting.batch_name}")
    if hasattr(job_module, 'enable_proxy'):
        enable_proxy = job_module.enable_proxy
    if hasattr(job_module, 'forum_dict'):
        forum_dict = job_module.forum_dict
    if hasattr(job_module, 'starting_offset'):
        starting_offset = job_module.starting_offset

    if enable_proxy:
        pool = ProxyPool(max_size=10)

    # Extracts the listing page number from a listing file name; assumes
    # names like "forum-4_123.json" — TODO confirm against the listing
    # crawler. \d+ also matches multi-digit forum ids (e.g. forum-17),
    # which the previous single-digit pattern silently missed.
    page_pattern = re.compile(r'forum-\d+_(\d+)\.')

    for k, v in forum_dict.items():
        failure_count = 0
        collection = dict()
        forum = k

        listing_base_path = f"E:/btbbt_data_listing/{config_setting.batch_name}/{forum}"
        print(f"walking {listing_base_path}")
        # Gather every listing entry, keyed by thread id (later files win).
        for root, dirs, files in os.walk(listing_base_path):
            for file in files:
                if not file.endswith('.json'):
                    continue
                file_path = os.path.join(root, file)
                # Named json_file so it does not shadow the loop variable.
                with open(file_path, 'r', encoding='utf-8') as json_file:
                    data_list = json.load(json_file)
                # The page number depends only on the file path, so compute
                # it once per file instead of once per entry.
                match = page_pattern.search(file_path)
                page = match.group(1) if match else None
                for data in data_list:
                    data["page"] = page
                    data["filepath"] = file_path
                    collection[data["id"]] = data

        currentProgress = 0
        maxProgress = len(collection)  # no need to materialize .items() for a count
        for key, data in collection.items():
            currentProgress += 1
            if starting_offset and currentProgress <= starting_offset:
                continue
            try:
                print(f"{currentProgress}/{maxProgress} processing {key}: {data['name']}")
                if not process_detail_html(data["id"], data["name"], data["forum"], data["page"], data["link"]):
                    failure_count += 1
                if failure_count > 20:
                    print(f"exceed 20 failure, stopped!")
                    break
            except Exception as ex:
                error_count += 1
                print(f"error: {data['filepath']}, {ex}")
                traceback.print_exc()

# NOTE(review): runs at module level (outside the __main__ guard), so this
# also prints on import — kept as-is to preserve original behavior.
print(f"error count: {error_count}")