# coding=utf-8
import sys
sys.path.append(r'C:\Users\ronaldo\AppData\Local\Programs\Python\Python35\Lib')
sys.path.append(r'C:\Users\ronaldo\AppData\Local\Programs\Python\Python35\Lib\site-packages')
sys.path.append(r'C:\Users\ronaldo\AppData\Local\Programs\Python\Python35\Lib\site-packages\setuptools-20.10.1.dist-info')
import pdfkit
import BeautifulParser as BP
import os
import re

def login():
    """Log in to the target website. Placeholder — not yet implemented (assigned to BigNikuNiku)."""

def fetchResponse():
    """Fetch a page response stream from the website. Placeholder — not yet implemented."""

def nextPage():
    """Advance to the next page of the document. Placeholder — not yet implemented."""

def generate_single_html(src_html_path, book_name):
    """Parse one saved source HTML file into a chapter page under *book_name*.

    Returns the chapter name reported by the parser (used by callers to
    build the output file path).
    """
    absolute_path = os.path.abspath(src_html_path)
    return BP.parseSourceStreamFromFile(absolute_path, book_name)

def generate_htmls(src_dir, book_name):
    """Parse every .html file in *src_dir* (oldest created first) into chapter pages.

    Returns a list of relative paths 'book_name/<chapter>.html' in creation order,
    ready to be handed to pdfkit.from_file.
    """
    # Collect (ctime, path) pairs in a list, NOT a dict keyed by ctime:
    # the original dict silently dropped files whose creation times collided.
    entries = []
    for file_name in os.listdir(src_dir):
        file_path = os.path.abspath(os.path.join(src_dir, file_name))
        entries.append((os.path.getctime(file_path), file_path))
    # Sort by creation time; ties fall back to path order instead of losing a file.
    entries.sort()

    # Characters illegal in Windows file names; stripped from chapter titles.
    illegal_chars = re.compile(r'[:\\/|?<>*"]')
    generated_html_list = []
    for _ctime, file_path in entries:
        if os.path.splitext(file_path)[-1] == '.html':
            chapter_name = generate_single_html(file_path, book_name)
            chapter_name = illegal_chars.sub('', chapter_name)
            generated_html_list.append(book_name + os.sep + chapter_name + '.html')
    return generated_html_list

def origin_plan():
    """Original single-chapter pipeline: parse one response stream, then render a PDF.

    Kept for reference; the __main__ entry point now drives the multi-file flow.
    """
    # Global prepare.
    response_list = []      # holds the web response streams (unused until fetching is done)
    parsed_html_list = []   # HTML files parsed from response_list; the counts must match

    # Step 1 ToDo: log in to the website.                      (ToDo by BigNikuNiku)
    # Step 2 ToDo: fetch data from the website.                (ToDo by BigNikuNiku)
    # Step 3 Done: integrate the template's <head> element (meta info + CSS) into the stream.
    # Step 4 Done: fetch data from the target nodes:
    #   <body class="reading sidenav  scalefonts library nav-collapsed">
    #   <div class="sbo-reading-menu sbo-menu-top">  -- holds the chapter title
    #   <div class="annotator-wrapper">              -- holds the real content
    # Step 5 Done: rewrite <p class="ChapterTitle"> as <h2 class="ChapterTitle"> so the
    #   book gets an index.
    book = 'C++ Primer 5th Adition'
    chapter_name = BP.parseSourceStream('', book)
    target_dir = os.curdir + os.sep + book + os.sep
    parsed_html_path = target_dir + chapter_name + '.html'
    parsed_html_list.append(parsed_html_path)
    # Done by Ronaldo Tong

    # Step 6 ToDo: fetch the next page; repeat steps 3-5 until the document ends.
    # (ToDo by BigNikuNiku)

    # Step 7 Done: convert the parsed HTML into PDF data.
    BP.copyResource(book)
    pdf_path = target_dir + chapter_name + '.pdf'
    pdfkit.from_file(parsed_html_path, pdf_path)
    # Done by Ronaldo Tong

if __name__ == "__main__":
    # Waiting for BigNikuNiku equals waiting for the doom of the world
    # Validate arguments up front instead of dying with an IndexError traceback.
    if len(sys.argv) < 3:
        sys.exit("Usage: python {} <src_html_dir> <book_name>".format(sys.argv[0]))
    src_html_dir = sys.argv[1]
    book_name = sys.argv[2]
    output_pdf_path = book_name + os.sep + book_name + '.pdf'
    BP.copyResource(book_name)  # copy CSS/static resources into the book directory
    html_list = generate_htmls(src_html_dir, book_name)
    # pdfkit fails unhelpfully on an empty file list; report the real cause instead.
    if not html_list:
        sys.exit("No .html files found in " + src_html_dir)
    pdfkit.from_file(html_list, output_pdf_path)
