#!/usr/bin/env python
# coding=utf-8
from __future__ import division

import os
import csv
import argparse
import json
import codecs

import requests
from bs4 import BeautifulSoup


# Browser-like User-Agent so the site serves the normal HTML page
# instead of rejecting an obvious script client.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'
}

# Version string printed for the -v/--version flag.
VERSION = "VERSION 1.0.0 (bitsearch.to)"
# Base URL of the site being scraped.
DOMAIN = "https://bitsearch.to"


def get_parser():
    """
    Build and return the argparse parser for the CLI.

    :return: Configured ``argparse.ArgumentParser`` instance.
    """
    parser = argparse.ArgumentParser(
        description='Magnets-Getter CLI Tools (bitsearch.to edition).')

    # Positional keyword argument (zero or more tokens, default "school").
    parser.add_argument('keyword', metavar="KEYWORD", type=str, nargs="*",
                        default=["school"],
                        help='magnet keyword. (default: school)')

    # Optional flags, registered from a spec table to keep them uniform.
    option_specs = (
        (('-n', '--num'),
         dict(type=int, default=10, help='magnet number.(default 10)')),
        (('-s', '--sort-by'),
         dict(type=int, default=0,
              help='0: Sort by date, 1: Sort by size, 2: Sort by peers.(default 0)')),
        (('-o', '--output'),
         dict(type=str, help='output file path, supports csv and json format.')),
        (('-p', '--pretty-oneline'),
         dict(action='store_true', help='show magnets info with one line.')),
        (('-v', '--version'),
         dict(action='store_true', help='version information.')),
    )
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)

    return parser


def command_line_runner():
    """
    Entry point for command line usage: parse arguments, run the crawler
    and dispatch the result to file output or console printing.
    """
    parser = get_parser()
    # The page option is registered here (not in get_parser) so the base
    # parser stays unchanged for other callers.
    parser.add_argument('-pg', '--page', type=int, default=1,
                        help='page number to fetch.(default 1)')
    args = vars(parser.parse_args())

    # Short-circuit: just print the version and exit.
    if args['version']:
        print(VERSION)
        return

    result = run(
        kw=args["keyword"],
        num=args["num"],
        sort_by=args["sort_by"],
        page=args["page"],
        cli_mode=True,
    )

    if args["output"]:
        # File output only needs the magnet list, not the stats.
        _output(result['magnets'], args["output"])
    else:
        _print(result, args["pretty_oneline"])


def run(kw, num, sort_by, page=1, cli_mode=False):
    """
    The main crawler function.

    :param kw: List of keyword tokens to search for (joined with '+').
    :param num: Maximum number of results to return from the page.
    :param sort_by: Sorting method. 0: date, 1: size, 2: peers.
    :param page: The page number to fetch.
    :param cli_mode: Flag to enable console printing for CLI usage.
    :return: A dict with "magnets" (list of magnet dicts) and "stats"
             (total_results / total_pages / current_page, all strings).
    """
    if cli_mode:
        print(f"Crawling data from {DOMAIN} for you (Page {page}).....")

    _kw = "+".join(kw)

    # Map the numeric CLI option to the site's sortBy query value.
    sort_options = {0: "created", 1: "size", 2: "seeders"}
    sort_str = sort_options.get(sort_by)
    if sort_str is None:
        if cli_mode:
            print("Unknown Sort Method, defaulting to sort by date.")
        sort_str = "created"

    url = f"{DOMAIN}/search?q={_kw}&sortBy={sort_str}&page={page}"

    magnets = []
    # current_page is recorded so consumers (e.g. the console printer) can
    # report the page actually fetched instead of a hard-coded default.
    search_stats = {
        "total_results": "0",
        "total_pages": "0",
        "current_page": str(page)
    }

    try:
        # A timeout keeps the call from hanging indefinitely on a stalled server.
        resp = requests.get(url, headers=HEADERS, timeout=30)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, "lxml")

        # Parse the "Found N results" banner for the total result count.
        stats_p = soup.find('p', text=lambda t: t and "Found" in t and "results" in t)
        if stats_p:
            found_span = stats_p.find_all('span', class_='font-semibold')
            if found_span:
                search_stats["total_results"] = found_span[0].text.strip()

        # Parse the "Showing page X of Y" line for the total page count.
        page_info_p = soup.find('p', text=lambda t: t and "Showing page" in t)
        if page_info_p:
            parts = page_info_p.get_text(strip=True).split()
            try:
                # The token right after 'of' is the total page number.
                search_stats["total_pages"] = parts[parts.index('of') + 1]
            except (ValueError, IndexError):
                # Fallback if the text format is unexpected.
                search_stats["total_pages"] = "1"

        results_container = soup.find('div', class_='space-y-4')
        if not results_container:
            if cli_mode:
                print("Sorry, found nothing :(")
            return {"magnets": [], "stats": search_stats}

        results = results_container.find_all('div', recursive=False)
        if not results:
            if cli_mode:
                print("Sorry, could not parse results from the page.")
            return {"magnets": [], "stats": search_stats}

        for result in results[:num]:
            # Guard against cards without an <h3> title; previously
            # result.find('h3').find('a') raised AttributeError here and
            # aborted parsing of all remaining results.
            title = result.find('h3')
            name_element = title.find('a') if title else None
            name = name_element.get_text(strip=True) if name_element else "N/A"

            size = "N/A"
            date = "N/A"
            peers = "0"

            stats_container = result.find('div', class_='mb-3')
            if stats_container:
                size_element = stats_container.find(
                    lambda tag: tag.name == 'span' and tag.find('i', class_='fa-download'))
                if size_element:
                    size = size_element.get_text(strip=True)

                date_element = stats_container.find(
                    lambda tag: tag.name == 'span' and tag.find('i', class_='fa-calendar'))
                if date_element:
                    date = date_element.get_text(strip=True)

            # Seeder count lives in the second flex-wrap block of the card.
            swarm_container_divs = result.find_all('div', class_='flex-wrap')
            if len(swarm_container_divs) > 1:
                peers_element = swarm_container_divs[1].find('span', class_='text-green-600')
                if peers_element:
                    peers_text = peers_element.find('span', class_='font-medium')
                    if peers_text:
                        peers = peers_text.get_text(strip=True)

            magnet_link_element = result.find(
                'a', href=lambda href: href and href.startswith('magnet:?'))
            magnet_link = magnet_link_element['href'] if magnet_link_element else "N/A"

            magnets.append({
                "magnet": magnet_link,
                "magnet_name": name,
                "magnet_date": date,
                "magnet_size": size,
                # Strip thousands separators before converting to int.
                "magnet_rank": int(peers.replace(',', ''))
            })

    except requests.exceptions.RequestException as e:
        # Network-level failure: report in CLI mode, propagate in library mode.
        if cli_mode:
            print(f"An error occurred during the request: {e}")
        else:
            raise e
    except Exception as e:
        # Unexpected markup change or parse failure.
        if cli_mode:
            print(f"An error occurred during parsing: {e}")
        else:
            raise e

    return {"magnets": magnets, "stats": search_stats}


def _print(data, is_show_magnet_only):
    """
    Print results to the console.

    :param data: Dictionary containing magnets and stats.
    :param is_show_magnet_only: One-line output flag.
    """
    magnets = data.get("magnets", [])
    stats = data.get("stats", {})

    if not magnets:
        print("No results found.")
        return
        
    print(f"\n--- Page {stats.get('current_page', 1)} of {stats.get('total_pages', 1)} | Total Results: {stats.get('total_results', 0)} ---")

    if is_show_magnet_only:
        for row in magnets:
            print(row["magnet"], row["magnet_size"], row["magnet_date"])
    else:
        for row in magnets:
            print("-" * 20)
            print("Name:", row["magnet_name"])
            print("Size:", row["magnet_size"])
            print("Date:", row["magnet_date"])
            print("Peers:", row["magnet_rank"])
            print("Magnet:", row["magnet"])
    print("-" * 20)


def _output(magnets, path):
    """
    Save data to a local file.

    :param magnets: List of magnet data.
    :param path: File path (supports .csv and .json).
    """
    if not magnets:
        print("No data to save.")
        return
        
    # Extract just the list of magnets if the whole data dictionary is passed
    if isinstance(magnets, dict) and 'magnets' in magnets:
        magnets_list = magnets['magnets']
    else:
        magnets_list = magnets

    if not magnets_list:
        print("No magnets in data to save.")
        return

    if path:
        _, extension = os.path.splitext(path)
        if extension == ".csv":
            with open(path, mode="w", encoding="utf-8-sig", newline="") as fout:
                fieldnames = ("magnet", "magnet_name", "magnet_size", "magnet_date", "magnet_rank")
                f_csv = csv.DictWriter(fout, fieldnames, extrasaction="ignore")
                f_csv.writeheader()
                f_csv.writerows(magnets_list)
            print(f"Saved results to {path}")
        elif extension == ".json":
            with codecs.open(path, mode="w", encoding="utf-8") as f:
                json.dump(magnets_list, f, indent=2, ensure_ascii=False)
            print(f"Saved results to {path}")
        else:
            print("Failed to save the file! Unsupported format.")


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == "__main__":
    command_line_runner()
