#!/usr/bin/env python
# coding=utf-8
from __future__ import division

import os
import re
import csv
import argparse
import json
import codecs
import requests
from bs4 import BeautifulSoup
import sys  # needed to detect PyInstaller's sys._MEIPASS temp directory

# Browser-like headers sent with every HTTP request, so sites that block the
# default "python-requests" User-Agent still respond normally.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}

# Version string displayed in the CLI --help description.
VERSION = "VERSION 2.0.0"

# --- Helper: resolve resource paths in both source and PyInstaller builds ---
def get_resource_path(relative_path):
    """Return the absolute path of a bundled resource file.

    Inside a PyInstaller one-file executable, resources live in the
    temporary extraction directory exposed as ``sys._MEIPASS``; when
    running as a plain script they sit next to this file.
    """
    base_path = getattr(sys, '_MEIPASS', None)
    if base_path is None:
        # Normal script execution: resources are siblings of this module.
        base_path = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(base_path, relative_path)


def get_text_from_element(element, selector):
    """Return the stripped text of the first descendant matching *selector*.

    Falls back to the string "N/A" when nothing matches.
    """
    found = element.select_one(selector)
    if not found:
        return "N/A"
    return found.get_text(strip=True)

def get_attribute_from_element(element, selector, attribute):
    """Safely read *attribute* from the first descendant matching *selector*.

    Returns "N/A" both when no element matches and when the matched element
    lacks the attribute. (Previously a present element with a missing
    attribute leaked ``None`` into the result dicts, e.g. the magnet link.)
    """
    el = element.select_one(selector)
    if not el:
        return "N/A"
    return el.get(attribute, "N/A")

def parse_total_results(soup, rule):
    """Extract the total-results count from a search page as a digit string.

    Uses the rule's ``total_results`` selector. An optional
    ``total_results_text_split`` hint supplies a (pattern, index) pair: the
    element text is regex-split (case-insensitively) by the pattern and the
    digits are taken from the indexed fragment. Returns "N/A" on failure.
    """
    node = soup.select_one(rule["selectors"]["total_results"])
    if not node:
        return "N/A"

    text = node.get_text(strip=True)
    split_hint = rule.get("parser_hints", {}).get("total_results_text_split")
    if not split_hint:
        # No hint: simply keep the digits of the whole text.
        return re.sub(r'\D', '', text) or "N/A"

    try:
        pieces = re.split(split_hint[0], text, flags=re.IGNORECASE)
        if len(pieces) > 1:
            return re.sub(r'\D', '', pieces[split_hint[1]]) or "N/A"
    except (IndexError, re.error):
        pass
    return "N/A"

def parse_total_pages(soup, rule):
    """Extract the total page count from a search page, defaulting to "1".

    Strategy is chosen by the rule's parser hints:
      * ``total_pages_is_last_link`` — use the text of the last match
        (typical pagination bars where the last link is the final page);
      * ``total_pages_text_split`` — split the first match's text by a
        (separator, index) pair and keep the digits of that fragment;
      * otherwise — keep the digits of the first match's text.
    """
    matches = soup.select(rule["selectors"]["total_pages"])
    if not matches:
        return "1"

    hints = rule.get("parser_hints", {})
    if hints.get("total_pages_is_last_link"):
        return matches[-1].get_text(strip=True) or "1"

    text = matches[0].get_text(strip=True)
    split_hint = hints.get("total_pages_text_split")
    if split_hint:
        try:
            fragment = text.split(split_hint[0])[split_hint[1]]
            return re.sub(r'\D', '', fragment) or "1"
        except (IndexError, re.error):
            return "1"

    # Fallback for simple "N pages"-style text.
    return re.sub(r'\D', '', text) or "1"

def run(query, sort_by_key, page, rule, cli_mode=False):
    """
    The main scraper function, driven by a JSON rule.

    Args:
        query: Search keyword(s); a list (only the first item is used) or a string.
        sort_by_key: Key into the rule's "sort_map" (e.g. 'date', 'size', 'peers').
        page: 1-based page number requested by the caller.
        rule: Rule dict describing the target site (URL template, selectors, hints).
        cli_mode: When True, prints user-facing progress messages and swallows
            request/parse errors; when False, those exceptions are re-raised.

    Returns:
        dict: {"magnets": [...], "stats": {"total_results": str, "total_pages": str}}
    """
    print(f"[INFO] Running search with rule: {rule['name']}")
    if cli_mode:
        print(f"Crawling data from {rule['name']} (Page {page}).....")

    # Translate the generic sort key into the site-specific URL parameter.
    sort_param = rule["sort_map"].get(sort_by_key, "")
    
    # Some sites count pages from 0; convert the caller's 1-based page if so.
    page_index = page
    if rule["page_index_type"] == "0-based":
        page_index = page - 1

    # The CLI passes keywords as a list; only the first entry is used then.
    query_str = query[0] if isinstance(query, list) else query
    search_url = rule["search_url_template"].format(
        domain=rule["domain"],
        query=query_str.replace(" ", "+"),
        sort=sort_param,
        page=page_index
    )
    
    print(f"[INFO] Constructed Search URL: {search_url}")

    magnets = []
    search_stats = {"total_results": "0", "total_pages": "1"}

    try:
        print("[INFO] Sending request...")
        resp = requests.get(search_url, headers=HEADERS, timeout=20)
        resp.raise_for_status()
        print(f"[INFO] Received response with status code: {resp.status_code}")

        # Branch 1: the site exposes a JSON API instead of an HTML page.
        if rule.get("parser_hints", {}).get("is_json"):
            data = resp.json()
            # Handle cases where API returns a "no results" message
            if isinstance(data, list) and len(data) > 0 and data[0].get('name') == 'No results returned':
                 print("[INFO] Scraper finished, returning 0 magnets (API returned no results).")
                 return {"magnets": [], "stats": search_stats}

            # Build magnet URIs from the API's info_hash/name fields.
            # NOTE(review): assumes each item has 'info_hash' and 'name' —
            # a KeyError here is caught by the outer except below.
            for item in data:
                magnets.append({
                    "magnet": f"magnet:?xt=urn:btih:{item['info_hash']}&dn={item['name']}",
                    "magnet_name": item['name'],
                    "magnet_date": item.get('added', 'N/A'),
                    "magnet_size": str(item.get('size', 'N/A')),
                    "magnet_rank": int(item.get('seeders', 0))
                })
            print(f"[INFO] Parsed {len(magnets)} items from JSON response.")

        # Branch 2: scrape the HTML result page with the rule's selectors.
        else:
            soup = BeautifulSoup(resp.text, "lxml")

            search_stats["total_results"] = parse_total_results(soup, rule)
            search_stats["total_pages"] = parse_total_pages(soup, rule)
            print(f"[INFO] Parsed search stats: Total Results: {search_stats['total_results']}, Total Pages: {search_stats['total_pages']}")

            results_container = soup.select_one(rule["selectors"]["results_container"])
            if not results_container:
                print(f"[DEBUG] Results container not found with selector: {rule['selectors']['results_container']}")
                if cli_mode: print("Sorry, found nothing.")
                return {"magnets": [], "stats": search_stats}

            result_items = results_container.select(rule["selectors"]["result_item"])
            print(f"[INFO] Found {len(result_items)} result items using selector: {rule['selectors']['result_item']}")

            for i, item in enumerate(result_items):
                # Each item is parsed independently so one malformed row
                # doesn't abort the whole page.
                try:
                    name = get_text_from_element(item, rule["selectors"]["name"])
                    magnet_link = get_attribute_from_element(item, rule["selectors"]["magnet_link"], "href")
                    peers_text = get_text_from_element(item, rule["selectors"]["peers"])
                    
                    size = "N/A"
                    date = "N/A"
                    
                    # Size/date selectors are optional per rule.
                    if "size" in rule["selectors"] and rule["selectors"]["size"]:
                        size = get_text_from_element(item, rule["selectors"]["size"])
                    if "date" in rule["selectors"] and rule["selectors"]["date"]:
                         date = get_text_from_element(item, rule["selectors"]["date"])

                    magnet_data = {
                        "magnet": magnet_link,
                        "magnet_name": name,
                        "magnet_date": date,
                        "magnet_size": size,
                        # Strip thousands separators etc. before converting.
                        "magnet_rank": int(re.sub(r'\D', '', peers_text) or 0)
                    }
                    magnets.append(magnet_data)
                    print(f"[DEBUG] Parsed item {i}: {magnet_data['magnet_name']}")
                except Exception as e:
                    print(f"[ERROR] Failed to parse item {i}: {e}")

    # In CLI mode errors are logged and an empty/partial result is returned;
    # library callers get the exception re-raised.
    except requests.exceptions.RequestException as e:
        print(f"[ERROR] An error occurred during the request: {e}")
        if not cli_mode: raise e
    except Exception as e:
        print(f"[ERROR] An error occurred during parsing: {e}")
        if not cli_mode: raise e
        
    print(f"[INFO] Scraper finished, returning {len(magnets)} magnets.")
    return {"magnets": magnets, "stats": search_stats}

def get_rules():
    """Load and validate all JSON rule files from the 'rules' directory.

    The directory is resolved via get_resource_path() so it is found both
    when running from source and inside a PyInstaller bundle.

    Returns:
        list: Parsed rule dicts containing every required key. Files that
        are unreadable, invalid JSON, or missing keys are skipped with a
        logged message naming the file.
    """
    rules = []
    # Resolve the 'rules' directory for both dev and bundled environments.
    rules_dir = get_resource_path('rules')

    # Guard against a missing directory (e.g. forgotten --add-data option).
    if not os.path.isdir(rules_dir):
        print(f"[ERROR] 'rules' directory not found at: {rules_dir}. Please ensure it's included with --add-data 'rules;rules' in PyInstaller.")
        return []

    # Keys run() relies on; hoisted out of the loop since they never change.
    required_keys = ["name", "domain", "search_url_template",
                     "sort_map", "page_index_type", "selectors"]

    for filename in os.listdir(rules_dir):
        if not filename.endswith('.json'):
            continue
        filepath = os.path.join(rules_dir, filename)
        try:
            with open(filepath, 'r', encoding='utf-8') as f:
                rule = json.load(f)
            if all(key in rule for key in required_keys):
                rules.append(rule)
            else:
                # Bug fix: these messages previously printed the literal
                # string '(unknown)' instead of the offending file's name.
                print(f"[WARNING] Rule file '{filename}' is missing one or more essential keys ({', '.join(required_keys)}). Skipping.")
        except json.JSONDecodeError as e:
            print(f"[ERROR] Invalid JSON in '{filename}': {e}")
        except IOError as e:
            print(f"[ERROR] Could not read rule file '{filename}': {e}")
        except Exception as e:
            print(f"[ERROR] An unexpected error occurred with '{filename}': {e}")
    return rules

def main():
    """CLI entry point.

    Parses command-line arguments, runs a search against the selected
    source rule, and either writes the results to a CSV/JSON file or
    pretty-prints them to the console.
    """
    parser = argparse.ArgumentParser(description=f"Multi-Site Magnet-Getter CLI Tools. {VERSION}")
    
    # Load site rules first: they drive both the --source choices and the search.
    rules = get_rules()
    if not rules:
        print("Error: No valid rule files found. Cannot proceed with CLI search.")
        return

    parser.add_argument('keyword', metavar="KEYWORD", type=str, nargs="+", help='Search keyword(s).') # Changed nargs="+ " to nargs="+" for standard argparse
    parser.add_argument('-s', '--source', type=str, default=rules[0]['name'] if rules else '',
                        help=f"Source to search from. Available: {[r['name'] for r in rules]}.")
    parser.add_argument('-o', '--output', type=str, help='Output file path (csv or json).')
    parser.add_argument('-p', '--page', type=int, default=1, help='Page number to fetch.')
    parser.add_argument('--sort', type=str, default='date', choices=['date', 'size', 'peers'], help='Sort by criteria.')

    args = parser.parse_args()

    # Match the requested source name case-insensitively against loaded rules.
    selected_rule = next((r for r in rules if r['name'].lower() == args.source.lower()), None)
    if not selected_rule:
        print(f"Error: Source '{args.source}' not found. Please choose from available sources.")
        return

    data = run(query=args.keyword, sort_by_key=args.sort, page=args.page, rule=selected_rule, cli_mode=True)
    
    if not data['magnets']:
        print("No magnets found for the given search criteria.")
        return

    if args.output:
        # Simplified output handling for CLI
        ext = os.path.splitext(args.output)[1].lower() # Convert to lower for consistent checking
        if ext == ".csv":
            try:
                # utf-8-sig adds a BOM so Excel opens the CSV correctly.
                with open(args.output, 'w', newline='', encoding='utf-8-sig') as f:
                    # Check if magnets list is not empty before accessing keys for fieldnames
                    if data['magnets']:
                        writer = csv.DictWriter(f, fieldnames=data['magnets'][0].keys())
                        writer.writeheader()
                        writer.writerows(data['magnets'])
                        print(f"Saved {len(data['magnets'])} results to {args.output}")
                    else:
                        print("No data to save to CSV.")
            except IOError as e:
                print(f"Error writing CSV file: {e}")
        elif ext == ".json":
            try:
                with open(args.output, 'w', encoding='utf-8') as f:
                    json.dump(data['magnets'], f, indent=2, ensure_ascii=False)
                    print(f"Saved {len(data['magnets'])} results to {args.output}")
            except IOError as e:
                print(f"Error writing JSON file: {e}")
        else:
            print(f"Unsupported output format: {ext}. Only .csv and .json are supported.")
    else:
        # Pretty print to console
        stats = data['stats']
        print(f"\n--- Page {args.page} of {stats.get('total_pages', 1)} | Total Results: ~{stats.get('total_results', 0)} ---")
        for row in data['magnets']:
            print("-" * 20)
            print("Name:", row.get("magnet_name", "N/A"))
            print("Size:", row.get("magnet_size", "N/A"))
            print("Date:", row.get("magnet_date", "N/A"))
            print("Peers:", row.get("magnet_rank", "N/A"))
            print("Magnet:", row.get("magnet", "N/A"))
        print("-" * 20)

# Allow importing this module (e.g. from a GUI wrapper) without running the CLI.
if __name__ == "__main__":
    main()
