import os
from bs4 import BeautifulSoup, NavigableString, Comment
import requests
import pickle
import hashlib
import argparse
import shutil
import filecmp

def get_cache_dir(project_dir):
    """Return the project's cache directory, creating it on first use."""
    path = os.path.join(project_dir, "cache")
    os.makedirs(path, exist_ok=True)
    return path

def get_cache_file_path(project_dir, url):
    """Map *url* to its pickle cache file path (md5 of the URL as the name)."""
    digest = hashlib.md5(url.encode()).hexdigest()
    return os.path.join(get_cache_dir(project_dir), f"{digest}.pkl")

def load_from_cache(project_dir, url):
    """Return the cached object for *url*, or None when no cache file exists."""
    path = get_cache_file_path(project_dir, url)
    if not os.path.exists(path):
        return None
    # NOTE: pickle is acceptable here only because the cache is written
    # locally by save_to_cache — never point this at untrusted files.
    with open(path, 'rb') as fh:
        return pickle.load(fh)

def save_to_cache(project_dir, url, data):
    """Pickle *data* into the cache file derived from *url*."""
    with open(get_cache_file_path(project_dir, url), 'wb') as fh:
        pickle.dump(data, fh)

def get_website_root(url):
    """Return the scheme + domain portion of *url*, with no path.

    Args:
        url (str): A URL such as "http://host/path/page.html". A URL
            without an explicit protocol is treated as https.

    Returns:
        str: "<scheme>://<domain>", e.g. "http://host".
    """
    # Remember the scheme BEFORE stripping it. The previous implementation
    # checked the already-stripped string at the end, so the http branch was
    # unreachable and every input came back as "https://...".
    if url.startswith('http://'):
        scheme = 'http://'
        url = url[len('http://'):]
    elif url.startswith('https://'):
        scheme = 'https://'
        url = url[len('https://'):]
    else:
        scheme = 'https://'  # default, matching the original fallback

    # The domain ends at the first slash after the protocol (if any).
    first_slash = url.find('/')
    domain = url[:first_slash] if first_slash != -1 else url

    return scheme + domain

def parse_index_html(url=None, use_cache=True, project_dir=None):
    """Fetch and parse a book's index page into a metadata dict.

    Scrapes the cover image, title, category, author, description and
    chapter list from the site's index HTML.

    Args:
        url: Index page URL; defaults to a hard-coded novel on xyyuedu.com.
        use_cache: When True (and project_dir is given), read/write a
            pickle cache keyed by the URL.
        project_dir: Project root used to locate the cache directory.

    Returns:
        dict with keys 'Image' ({'Source', 'Alt'}), 'Title', 'Category',
        'Author', 'Description' and 'Chapters' (list of dicts with
        'Chapter', 'Link', 'Title').
    """
    # If no URL provided, use default
    if url is None:
        url = "https://xyyuedu.com/etdw/lvyexianzong/index.html"
    
    # Try to load from cache if enabled
    if use_cache and project_dir:
        cached_data = load_from_cache(project_dir, url)
        if cached_data is not None:
            return cached_data
    
    # Get content from URL
    response = requests.get(url)
    response.encoding = 'gb2312'  # site serves Simplified Chinese in GB2312
    html_content = response.text

    # Create BeautifulSoup object
    soup = BeautifulSoup(html_content, 'html.parser')

    # 1. Parse the 'jianjie' (introduction) div
    jianjie_div = soup.find('div', class_='jianjie')
    
    # Get cover image tag info
    img_tag = jianjie_div.find('img')
    img_src = img_tag['src']
    img_alt = img_tag['alt']
    
    # Get h1 content (the book title)
    h1_content = jianjie_div.find('h1').text
    
    # Get category and author
    b_tags = jianjie_div.find_all('b')
    # Category is the text of the <a> tag following the first <b>
    category = b_tags[0].find_next('a').text.strip()
    # Author is the bare text node immediately after the second <b>
    author = b_tags[1].next_sibling.strip()
    
    # Get description (second p tag in jianjie div)
    description = jianjie_div.find_all('p')[1].text.strip()

    # 2. Parse the 'zhangjie2' (chapter list) ul
    zhangjie2_ul = soup.find('ul', class_='zhangjie2')
    chapters = []
    for a_tag in zhangjie2_ul.find_all('a'):
        chapters.append({
            'Chapter': a_tag.text,
            'Link': a_tag['href'],
            'Title': a_tag['title']
        })

    # Combine all information into a dictionary
    book_info = {
        'Image': {
            'Source': img_src,
            'Alt': img_alt
        },
        'Title': h1_content,
        'Category': category,
        'Author': author,
        'Description': description,
        'Chapters': chapters
    }

    # Save to cache if enabled
    if use_cache and project_dir:
        save_to_cache(project_dir, url, book_info)

    return book_info

def parse_content_html(url=None, page=1, use_cache=True, project_dir=None):
    """Parse one chapter-content page, recursing over its pagination.

    Args:
        url: Chapter page URL; when None, parse the bundled local template
            'templates/content.html' instead of fetching.
        page: 1-based page number within the chapter. Pages after the first
            are fetched via derived "<base>_<n>.html" URLs.
        use_cache: When True (and project_dir is given), read/write a
            pickle cache; pages beyond the first get a distinct cache key.
        project_dir: Project root used to locate the cache directory.

    Returns:
        For a single-page chapter (or for page > 1): a dict with 'Chapter',
        'Content' and 'TotalPages'. For page 1 of a multi-page chapter:
        a list of such dicts, one per page, in order.
    """
    # If no URL provided, read from local file
    if url is None:
        with open('templates/content.html', 'r', encoding='gb2312') as file:
            html_content = file.read()
    else:
        # Try to load from cache if enabled
        if use_cache and project_dir:
            # Suffix the key with the page number so pages don't collide.
            cache_url = f"{url}_{page}" if page > 1 else url
            cached_data = load_from_cache(project_dir, cache_url)
            if cached_data is not None:
                return cached_data

        # Get content from URL
        response = requests.get(url)
        response.encoding = 'gb2312'  # site serves Simplified Chinese in GB2312
        html_content = response.text

    # Create BeautifulSoup object
    soup = BeautifulSoup(html_content, 'html.parser')

    # Get chapter title
    arcxs_title = soup.find('div', id='arcxs_title')
    chapter = arcxs_title.find('h1').text

    # Get content: page 1 uses div#readcon, later pages div#onearcxsbd.
    readcon = soup.find('div', id='readcon' if page==1 else 'onearcxsbd')
    if not readcon:
        # Fallback: match 'onearcxsbd' as a class rather than an id.
        readcon = soup.find('div', 'onearcxsbd')
    paragraphs = []
    for p in readcon.find_all('p'):
        # Stop if we reach the pagination div
        # NOTE(review): this searches for the pagination div *inside* a <p>;
        # html.parser may not nest block divs in <p> — confirm on real pages.
        next_div = p.find('div', class_='list-pages page-center')
        if next_div:
            # Get text content up to the pagination div
            text = p.text
            next_div_text = next_div.text
            text = text.split(next_div_text)[0]
            paragraphs.append(text.strip())
            break
        paragraphs.append(p.text.strip())

    # Fallback for pages whose text is bare nodes rather than <p> tags:
    # collect direct text children, stopping at the first HTML comment.
    if not paragraphs:
        for s in readcon.contents:
            if isinstance(s, Comment):
                break
            if isinstance(s, NavigableString):
                s = s.strip()
                if s:
                    paragraphs.append(s)

    content = '\n'.join(paragraphs)

    # Get total pages from pagination text of the form "共N页" ("N pages total").
    pages_div = soup.find('div', class_='list-pages page-center')
    total_pages = 1  # Default to 1 page
    if pages_div:
        ul = pages_div.find('ul')
        if ul and ul.find_all('li'):
            first_li = ul.find_all('li')[0]
            a_tag = first_li.find('a')
            if a_tag and '共' in a_tag.text and '页' in a_tag.text:
                # Extract number between '共' and '页'
                text = a_tag.text
                start = text.find('共') + 1
                end = text.find('页')
                if start < end:
                    total_pages = int(text[start:end])

    # On page 1 of a multi-page chapter, recursively fetch the remaining
    # pages ("<base>_2.html", ...) and return the chapter as a list.
    if page == 1 and total_pages > 1:
        rs = [{
            'Chapter': chapter,
            'Content': content,
            'TotalPages': total_pages
        }]
        for p in range(2, total_pages+1):
            assert url.endswith(".html")
            new_url = url[:-5] + "_" + str(p) + ".html"
            r = parse_content_html(new_url, p, use_cache, project_dir)
            rs.append(r)
        result = rs
    else:
        # Return chapter info
        result = {
            'Chapter': chapter,
            'Content': content,
            'TotalPages': total_pages
        }

    # Save to cache if enabled (never when parsing the local template)
    if use_cache and project_dir and url is not None:
        cache_url = f"{url}_{page}" if page > 1 else url
        save_to_cache(project_dir, cache_url, result)

    return result


def order_name(idx, count_digits):
    """Return *idx* as a zero-padded string exactly *count_digits* wide.

    The previous implementation prepended a fixed "000", which silently
    produced names shorter than requested once count_digits exceeded 4
    (e.g. 10000+ pages). str.zfill pads to any width.
    """
    return str(idx).zfill(count_digits)


def expand_pages(pages):
    """Flatten one level of nesting in *pages* and tag each page in order.

    Multi-page chapters arrive as lists of page dicts, single-page chapters
    as bare dicts. Each flattened page gets an 'OrderName' key: its 1-based
    position, zero-padded to the width of the total page count.
    """
    flat = []
    for item in pages:
        if isinstance(item, list):
            flat.extend(item)
        else:
            flat.append(item)

    width = len(str(len(flat)))
    for position, page in enumerate(flat, 1):
        page['OrderName'] = order_name(position, width)

    return flat


def write_index(base_dir, result, pages):
    """Write index.rst for the book: title, description, numbered toctree."""
    assert isinstance(result, dict)

    os.makedirs(base_dir, exist_ok=True)
    path = os.path.join(base_dir, "index.rst")

    title = result["Title"]
    lines = [
        title + "\n",
        # Double-width underline so full-width (CJK) titles stay covered.
        '=' * (len(title) * 2) + "\n\n",
        result['Description'] + "\n\n",
        ".. toctree::\n    :numbered:\n\n",
    ]
    lines.extend("    " + page['OrderName'] + "\n" for page in pages)

    with open(path, "w", encoding="utf-8") as fh:
        fh.writelines(lines)


def write_page(page_file_name, page):
    """Write one chapter as an RST page: title, underline, then paragraphs."""
    title = page["Chapter"]
    with open(page_file_name, "w", encoding="utf-8") as fh:
        fh.write(title + "\n")
        # Double-width underline covers full-width (CJK) characters.
        fh.write('=' * (len(title) * 2) + "\n")
        # Each content line becomes its own blank-line-separated paragraph.
        for paragraph in page['Content'].split("\n"):
            fh.write("\n" + paragraph + "\n")


def write_pages(base_dir, pages):
    """Write every page dict in *pages* to <base_dir>/<OrderName>.rst."""
    for page in pages:
        target = os.path.join(base_dir, page['OrderName'] + ".rst")
        write_page(target, page)


def copy_sphinx_templates(project_dir):
    """
    Copy sphinx template files from templates/sphinx_docs to project directory.
    Skip copying if target file exists and has same content as source file.

    Args:
        project_dir (str): The project root directory where files will be copied to

    Returns:
        bool: True on success, False if the template directory is missing
        or an error occurred while copying.
    """
    # Templates live next to this script.
    source_dir = os.path.join(os.path.dirname(__file__), "templates", "sphinx_docs")

    # Check if source directory exists
    if not os.path.exists(source_dir):
        print(f"Warning: Template directory not found: {source_dir}")
        return False

    def copy_files(src, dst):
        """Recursively mirror *src* into *dst*, copying only changed files."""
        os.makedirs(dst, exist_ok=True)

        for entry in os.listdir(src):
            src_path = os.path.join(src, entry)
            dst_path = os.path.join(dst, entry)

            if os.path.isdir(src_path):
                # Recursively copy directories
                copy_files(src_path, dst_path)
            elif not os.path.exists(dst_path) or not filecmp.cmp(src_path, dst_path, shallow=False):
                # copy2 preserves timestamps/permissions.
                shutil.copy2(src_path, dst_path)
                print(f"Copied: {src_path} -> {dst_path}")
            else:
                print(f"Skipped (identical): {src_path}")

    # NOTE: the previous version built an unused filecmp.dircmp object here;
    # per-file comparison below (filecmp.cmp) is what actually decides copies.
    try:
        copy_files(source_dir, project_dir)
        print(f"Successfully processed sphinx templates in {project_dir}")
        return True
    except Exception as e:
        # Best-effort step: report and signal failure rather than abort.
        print(f"Error processing sphinx templates: {e}")
        return False


def update_conf_py(project_dir, result, github_repo):
    """
    Fill in the conf.py placeholders with project-specific information.

    Replaces {PROJECT}, {AUTHOR} and {GITHUB_REPO} in
    <project_dir>/docs/source/conf.py in place.

    Args:
        project_dir (str): The project root directory
        result (dict): The parsed book information
        github_repo (str): The GitHub repository URL

    Returns:
        bool: True if the file was updated, False if it is missing or an
        error occurred.
    """
    conf_path = os.path.join(project_dir, "docs", "source", "conf.py")

    if not os.path.exists(conf_path):
        print(f"Warning: conf.py not found at {conf_path}")
        return False

    try:
        with open(conf_path, 'r', encoding='utf-8') as fh:
            text = fh.read()

        # Substitute each template placeholder with its project value.
        substitutions = {
            "{PROJECT}": result['Title'],
            "{AUTHOR}": result['Author'],
            "{GITHUB_REPO}": github_repo,
        }
        for placeholder, value in substitutions.items():
            text = text.replace(placeholder, value)

        with open(conf_path, 'w', encoding='utf-8') as fh:
            fh.write(text)

        print(f"Successfully updated {conf_path}")
        return True
    except Exception as e:
        print(f"Error updating conf.py: {e}")
        return False


def update_readme(project_dir, result, github_repo):
    """
    Prepend a title/link/description header to the project's README.rst.

    Any existing README content is preserved after the new header.

    Args:
        project_dir (str): The project root directory
        result (dict): The parsed book information
        github_repo (str): The GitHub repository URL

    Returns:
        bool: True on success, False on any I/O error.
    """
    readme_path = os.path.join(project_dir, "README.rst")

    # The readthedocs subdomain is the last path segment of the repo URL.
    repo_name = github_repo.split('/')[-1]
    title = result['Title']
    link = f"https://{repo_name}.readthedocs.io/zh-cn/latest/"
    desc = result['Description']

    # Double-width '#' overline so full-width (CJK) titles stay covered.
    header = f"""`{title} <{link}>`_
{'#' * (len(title) * 2)}

{desc}

"""

    try:
        previous = ""
        if os.path.exists(readme_path):
            with open(readme_path, 'r', encoding='utf-8') as fh:
                previous = fh.read()

        with open(readme_path, 'w', encoding='utf-8') as fh:
            fh.write(header + previous)

        print(f"Successfully updated {readme_path}")
        return True
    except Exception as e:
        print(f"Error updating README.rst: {e}")
        return False


if __name__ == "__main__":
    # CLI entry point: scrape a novel site and emit a Sphinx RST doc tree.
    parser = argparse.ArgumentParser(description='Parse and generate documentation from a website.')
    parser.add_argument('--url', required=True, help='The URL of the website to parse')
    parser.add_argument('--project_dir', required=True, help='The project root directory')
    parser.add_argument('--github_repo', required=True, help='The GitHub repository URL')
    
    args = parser.parse_args()
    
    url = args.url
    docs_dir = os.path.join(args.project_dir, "docs", "source")
    github_repo = args.github_repo
    
    # Copy sphinx templates first
    copy_sphinx_templates(args.project_dir)
    
    # Chapter links are site-relative, so keep the scheme+domain root handy.
    root_url = get_website_root(url)
    result = parse_index_html(url, project_dir=args.project_dir)
    
    # Update conf.py with project information
    update_conf_py(args.project_dir, result, github_repo)
    
    # Update README.rst with project information
    update_readme(args.project_dir, result, github_repo)
    
    # Print the result in a readable format
    print("Book Information:")
    print(f"Image Source: {result['Image']['Source']}")
    print(f"Image Alt: {result['Image']['Alt']}")
    print(f"Title: {result['Title']}")
    print(f"Category: {result['Category']}")
    print(f"Author: {result['Author']}")
    print(f"Description: {result['Description']}")
    
    # Fetch each chapter; multi-page chapters come back as lists and are
    # flattened later by expand_pages.
    print("\nChapters:")
    pages = []
    for chapter in result['Chapters']:
        print(f"Chapter: {chapter['Chapter']}")
        print(f"Link: {chapter['Link']}")
        print(f"Title: {chapter['Title']}")
        print("---")
        page = parse_content_html(root_url + chapter['Link'], project_dir=args.project_dir)
        pages.append(page)


    os.makedirs(docs_dir, exist_ok=True)
    expanded_pages = expand_pages(pages)
    
    for content_result in expanded_pages:
            print("\nContent Information:")
            print(f"Chapter: [{content_result['Chapter']}]")
            # print(f"Content: {content_result['Content']}")
            print(f"Total Pages: {content_result['TotalPages']}")

    # Emit index.rst plus one RST file per page.
    write_index(docs_dir, result, expanded_pages)
    write_pages(docs_dir, expanded_pages)