#! /usr/bin/env python
import csv
import logging
import os
import sys
import time
from time import localtime
from time import strftime

from BeautifulSoup import BeautifulSoup
from prep_helper import GetInHMS
from prep_helper import PBwiki
from prep_helper import get_wiki_list_from_csv
from prep_helper import unescape_html
from prep_helper import extract_wiki_name_from_url

logger = logging.getLogger(__name__)

def main(argv=None):
    """Fetch every wiki listed in the input CSV and dump its text to output/.

    Writes per-wiki progress and timing to stdout as it goes.
    *argv* is accepted for conventional entry-point compatibility but unused.
    """
    # Safety cap on how many wikis we process in one run.
    LIMIT = 9999

    # We will write a set of .txt files to the output directory.
    out_dir = 'output'  # renamed from `dir`, which shadowed the builtin
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # Read wikis to analyze from the input csv.
    wiki_urls = get_wiki_list_from_csv()

    iteration = 0
    total = 0
    start = int(time.time())

    # Get data for each wiki, write it to file.
    for wiki_url in wiki_urls:

        # Time and loop tracking.
        iteration += 1
        before = int(time.time())
        sys.stdout.write("%s %s/%s: %s took: " % (
            GetInHMS(total),
            str(iteration).zfill(3),
            str(min(len(wiki_urls), LIMIT)).zfill(3),
            wiki_url))
        sys.stdout.flush()

        # The real work.
        write_to_txt_given_wiki_url(wiki_url)

        # More time tracking.
        after = int(time.time())
        sys.stdout.write(str(GetInHMS(after - before)) + "\n")
        sys.stdout.flush()
        total = after - start

        # Stop at the LIMIT.
        if iteration >= LIMIT:
            break

    # Single-argument parenthesized print works on both Python 2 and 3.
    print("done.")

def write_to_txt_given_wiki_url(wiki_url):
    """Fetch every page of the wiki at *wiki_url*, strip the HTML, and write
    the concatenated plain text to output/<wiki_name>.txt.

    Aborts without writing a file if any single page fetch fails.
    """
    api = PBwiki(wiki_url)

    # And here's where the real work begins.
    pages = api.api_call('GetPages')['pages']

    # Collect per-page text in a list and join once at the end —
    # avoids quadratic `str +=` accumulation over many pages.
    page_texts = []
    for page in pages:

        fetched_page = api.api_call('GetPage', page=page['name'])

        if not fetched_page:
            print("ERROR: failed fetching a page: %s" % fetched_page)
            return

        # Get and process html.
        page_html = unescape_html(fetched_page['html'])

        soup = BeautifulSoup(page_html)

        # Convert html to text and keep it for the final join.
        page_texts.append(''.join(soup.findAll(text=True)).strip())

    wiki_text = ''.join(page_texts)

    # Finally, write it to a file with the name of the wiki.
    # The text is encoded up front, so open in binary mode — correct on
    # both Python 2 and 3 (the original 'w+' text mode breaks on 3).
    encoded_text = wiki_text.encode('UTF-8')

    wiki_name = extract_wiki_name_from_url(wiki_url)
    output_file_path_name = os.path.join('output', wiki_name + '.txt')
    f = open(output_file_path_name, 'wb')
    try:
        f.write(encoded_text)
    finally:
        # Guarantee the handle is released even if the write fails.
        f.close()
    print("Wrote %s" % output_file_path_name)

# Script entry point: run the full scrape when executed directly.
if __name__ == "__main__":
    main()