import ebooklib
from ebooklib import epub
from bs4 import BeautifulSoup, Tag # Import Tag
import os
import argparse # Added argparse

def epub_to_html(epub_path, output_dir, split_into_parts: bool = True):
    """
    Converts an EPUB file to HTML, extracting chapters and saving them as individual HTML files
    or a single HTML file.

    Args:
        epub_path (str): The path to the EPUB file.
        output_dir (str): The directory to save the HTML files.
        split_into_parts (bool): If True, splits into multiple files sized for
            roughly 5-10 minutes of reading each. If False, creates a single file.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    book = epub.read_epub(epub_path)
    # The book title is used only for the HTML <title> tag; output filenames
    # are derived from the EPUB filename below.
    book_title = book.get_metadata('DC', 'title')[0][0] if book.get_metadata('DC', 'title') else 'Ebook'

    # Use the original EPUB filename (without extension) as the filename prefix.
    base_name = os.path.basename(epub_path)
    epub_filename_no_ext, _ = os.path.splitext(base_name)
    # Sanitize the prefix for filesystem use and cap its length to avoid
    # excessively long filenames.
    safe_title_prefix = "".join(c if c.isalnum() or c in (' ', '.', '_') else '_' for c in epub_filename_no_ext)
    safe_title_prefix = safe_title_prefix[:50]

    # Target chunk sizes: 5-10 minutes of reading at an assumed ~300 chars/min.
    chars_per_file_min = 300 * 5   # 1500
    chars_per_file_max = 300 * 10  # 3000

    file_counter = 1       # 1-based index used in "_part_N" filenames
    parts_written = 0      # actual number of files written (for the summary)
    current_char_count = 0

    # Consolidate all document content into one soup, in reading order.
    consolidated_soup = BeautifulSoup(
        f"<html><head><title>{book_title}</title></head><body></body></html>", 'html.parser')
    consolidated_body = consolidated_soup.body

    # Iterate through items in the book's spine order for correct content sequence.
    ordered_items = []
    for item_id in book.spine:
        item = book.get_item_with_id(item_id[0])
        if item and item.get_type() == ebooklib.ITEM_DOCUMENT:
            ordered_items.append(item)

    # If the spine is empty or yields no documents, fall back to all documents.
    if not ordered_items:
        ordered_items = book.get_items_of_type(ebooklib.ITEM_DOCUMENT)

    for item in ordered_items:
        item_soup = BeautifulSoup(item.get_content(), 'html.parser')
        # Append the item's body content, or the whole item_soup if no body exists.
        content_to_append = item_soup.body if item_soup.body else item_soup
        if content_to_append:
            # Snapshot the children first: extract() mutates the tree while iterating.
            for child in list(content_to_append.children):
                if child is not None:
                    # Detach the child from its original parent before re-parenting.
                    if hasattr(child, 'extract') and callable(child.extract):
                        child.extract()
                    consolidated_body.append(child)

    if not split_into_parts:
        # Single-file output: write everything and stop.
        output_html_path = os.path.join(output_dir, f"{safe_title_prefix}.html")
        with open(output_html_path, 'w', encoding='utf-8') as f:
            # The html5 formatter decodes entities on output.
            f.write(consolidated_soup.prettify(formatter="html5"))
        print(f"Successfully created '{output_html_path}'")
        print(f"Successfully converted '{epub_path}' into a single HTML file in '{output_dir}'")
        return

    def write_chunk_to_file(content_soup, counter):
        # Write one chunk to "<prefix>_part_<counter>.html" and count it.
        nonlocal parts_written
        output_html_path = os.path.join(output_dir, f"{safe_title_prefix}_part_{counter}.html")
        with open(output_html_path, 'w', encoding='utf-8') as f:
            # Use prettify with html5 formatter to decode entities
            f.write(content_soup.prettify(formatter="html5"))
        parts_written += 1
        print(f"Successfully created '{output_html_path}'")

    def new_chunk_soup(counter):
        # Fresh per-part document shell with the part number in the <title>.
        return BeautifulSoup(
            "<html><head><title>{} - Part {}</title></head><body></body></html>".format(book_title, counter),
            'html.parser')

    # Split the content: iterate top-level elements of the consolidated body and
    # accumulate them until the character count falls within the desired range.
    current_chunk_soup = new_chunk_soup(file_counter)
    current_chunk_body = current_chunk_soup.body

    all_elements = list(consolidated_body.children)

    for element in all_elements:
        element_text_len = 0
        element_to_append = None

        if isinstance(element, Tag):
            element_text = element.get_text(separator=' ', strip=True)
            element_text_len = len(element_text)
            # Deep copy the element to preserve its structure and nested tags:
            # re-parsing str(element) yields a new, independent tree.
            try:
                temp_soup = BeautifulSoup(str(element), 'html.parser')
                # The actual tag is usually the first child of the parsed soup's
                # body (html.parser may or may not synthesize one) or contents.
                copied_element = None
                if temp_soup.body and temp_soup.body.contents:
                    copied_element = temp_soup.body.contents[0]
                elif temp_soup.contents:
                    copied_element = temp_soup.contents[0]

                if copied_element and hasattr(copied_element, 'extract'):
                    copied_element.extract()  # Detach from its temporary parent (temp_soup)
                    element_to_append = copied_element
                else:
                    # Fallback if parsing or extraction fails, or the element is empty/unexpected.
                    print(f"Warning: Could not properly deep copy Tag element (name: {element.name if hasattr(element, 'name') else 'Unknown Tag'}). Appending as string.")
                    if element_text_len > 0:  # Only append if there's text, to avoid empty strings
                        element_to_append = str(element)
                    else:
                        continue  # Skip if no text and copy failed
            except Exception as e:
                print(f"Error during deep copy of Tag element (name: {element.name if hasattr(element, 'name') else 'Unknown Tag'}): {e}. Appending as string.")
                if element_text_len > 0:
                    element_to_append = str(element)
                else:
                    continue
        elif isinstance(element, str):  # NavigableString subclasses str
            stripped_text = str(element).strip()
            element_text_len = len(stripped_text)
            if element_text_len > 0:
                element_to_append = stripped_text
        else:
            # Skip comments, doctypes, and other node types.
            continue

        if element_to_append is None:
            continue

        # If adding this element would exceed the max and the current chunk is
        # non-empty, flush the current chunk first.
        if current_char_count + element_text_len > chars_per_file_max and current_char_count > 0:
            write_chunk_to_file(current_chunk_soup, file_counter)
            file_counter += 1
            current_char_count = 0
            current_chunk_soup = new_chunk_soup(file_counter)
            current_chunk_body = current_chunk_soup.body

        current_chunk_body.append(element_to_append)
        current_char_count += element_text_len

        # If the current chunk is substantial enough, flush it.
        if current_char_count >= chars_per_file_min:
            write_chunk_to_file(current_chunk_soup, file_counter)
            file_counter += 1
            current_char_count = 0
            current_chunk_soup = new_chunk_soup(file_counter)
            current_chunk_body = current_chunk_soup.body

    # Flush any remaining content in the last chunk.
    # BUGFIX: the original referenced an undefined name `main_body` in the
    # branches below, raising NameError whenever they executed (e.g. a short
    # EPUB whose total text is below chars_per_file_min). The intended name is
    # `consolidated_body`.
    if current_char_count > 0 and len(current_chunk_body.get_text(strip=True)) > 0:
        write_chunk_to_file(current_chunk_soup, file_counter)
    elif file_counter == 1 and len(consolidated_body.get_text(strip=True)) > 0 and len(current_chunk_body.get_text(strip=True)) > 0:
        # Total content was below the minimum but still needs to be written.
        write_chunk_to_file(current_chunk_soup, file_counter)
    elif file_counter == 1 and not list(consolidated_body.children):
        # No content at all extracted from the EPUB.
        print("No content found in EPUB to write.")
    elif file_counter == 1 and len(current_chunk_body.get_text(strip=True)) == 0 and len(consolidated_body.get_text(strip=True)) > 0:
        # The consolidated body had content but everything was skipped (e.g.
        # whitespace-only strings). If the chunk still holds any nodes at all,
        # write them for safety.
        if list(current_chunk_body.children):
            write_chunk_to_file(current_chunk_soup, file_counter)
        else:
            print("No text content to write for the first part, though EPUB might have non-text elements.")

    # BUGFIX: report the number of parts actually written rather than
    # `file_counter`, which overcounts by one when the last chunk was flushed
    # inside the loop (the counter is pre-incremented for a part never written).
    print(f"Successfully converted '{epub_path}' into {parts_written} HTML part(s) in '{output_dir}'")

if __name__ == '__main__':
    # Command-line front end: parse arguments, prepare the output directory,
    # then hand off to epub_to_html.
    parser = argparse.ArgumentParser(description='Convert EPUB to HTML.')
    parser.add_argument('epub_file', type=str, help='Path to the EPUB file to convert.')
    parser.add_argument('--output-dir', type=str, default='output_html', help='Directory to save HTML files (default: output_html).')
    parser.add_argument('--single-file', action='store_true', help='Output as a single HTML file instead of splitting into parts.')
    args = parser.parse_args()

    # Prepare the output directory: create it if missing, otherwise remove any
    # stale top-level files (subdirectories are intentionally left alone; use
    # shutil.rmtree here if recursive cleanup ever becomes necessary).
    if not os.path.exists(args.output_dir):
        print(f"Creating output directory: {args.output_dir}")
        os.makedirs(args.output_dir)
    else:
        print(f"Cleaning existing output directory: {args.output_dir}")
        for entry in os.listdir(args.output_dir):
            target = os.path.join(args.output_dir, entry)
            try:
                if os.path.isfile(target) or os.path.islink(target):
                    os.unlink(target)
                elif os.path.isdir(target):
                    pass
            except Exception as e:
                print(f'Failed to delete {target}. Reason: {e}')

    # --single-file means "do not split into parts".
    epub_to_html(args.epub_file, args.output_dir, split_into_parts=not args.single_file)