#! /usr/bin/env python
import os
import csv
import logging
import time
from time import localtime
from time import strftime
import sys


from BeautifulSoup import BeautifulSoup
from text_helper import GetInHMS
from text_helper import get_wiki_list_from_csv
from text_helper import Row
#from text_helper import max_pages_users
#from text_helper import get_final_num_pages_users
from text_helper import unescape_html
from text_helper import UnicodeWriter
from text_helper import PBwiki
from text_helper import add_to_dictionary_if_key_not_exists
from text_helper import add_to_entity_timestamp_list
from text_helper import get_entity_count_up_to

logger = logging.getLogger(__name__)

def main(argv=None):

    #define globals
    global MAX_PAGES, MAX_USERS, LIMIT, ENABLE_TRUNCATION, TRUNCATE_TO

    MAX_PAGES = 104
    MAX_USERS = 167

    #Analayze a maximum of LIMIT wikis
    LIMIT = 4

    """
    Truncation is used to keep the number of chars in a cell under a reasonable
    limit. MS Excel and LibreOffice have difficulty rendering CSV files with
    cells that exceed about 30,000 characters.

    Keep truncation off to preserve all data.
    Turn it on to make the CSV files readable in most CSV readers.
    """
    ENABLE_TRUNCATION = 0
    TRUNCATE_TO = 30000

    #read wikis to analyze from input csv
    wiki_url_id_list = get_wiki_list_from_csv()

    #Create output file
    cur_date = strftime("%d-%m-%Y", localtime())
    num_wikis = str(min(len(wiki_url_id_list), LIMIT))

    #create an output directory, if needed. This is where we will save output.
    dir = 'output'
    if not os.path.exists(dir):
        os.makedirs(dir)

    #We will append "_trunc" to the end of the filename if truncation is on
    if ENABLE_TRUNCATION:
        filepath_name = dir + "/wiki_analysis_" + cur_date + "_" + num_wikis + "_wikis_trunc.csv"
        print "Truncation is On."
    else:
        filepath_name = dir + "/wiki_analysis_" + cur_date + "_" + num_wikis + "_wikis.csv"
        print "Truncation is Off."

    ofile  = open(filepath_name, "wb")
                          
    csv.field_size_limit(1131072)
    writer = UnicodeWriter(ofile)

    #create csv headers for page#wc and user#wc
    page_header_fragment = []
    for i in range(1, (MAX_PAGES + 1)):
        page_header_fragment.append("page" + str(i) + "wc")

    user_header_fragment = []
    for i in range(1, (MAX_USERS + 1)):
        user_header_fragment.append("user" + str(i) + "wc")

    #Static header for the Data
    csv_header = Row.getHeader()

    #Put the header together, and write it to the csv
    csv_header += page_header_fragment
    csv_header += user_header_fragment
    writer.writerow(csv_header)    
    
    iteration = 0
    start = int(time.time())
    total = 0
    #get data for each wiki, write it to file
    for wiki in wiki_url_id_list:

        wiki_url = wiki[0]
        dclc_id = wiki[1]

        #time and loop tracking
        iteration += 1
        before = int(time.time())
        sys.stdout.write(str(GetInHMS(total)) + " " + str(iteration).zfill(3) + "/" + str(min(len(wiki_url_id_list), LIMIT)).zfill(3) + ": " + wiki_url + " took: ")
        sys.stdout.flush()

        #the real work -- this is where we scrape + analyze a wiki
        write_to_csv_given_wiki_url(wiki_url, dclc_id, writer)

        #more time tracking
        after = int(time.time())
        diff = after - before
        sys.stdout.write(str(GetInHMS(diff)) + "\n")
        sys.stdout.flush()
        total = after - start

        #stop at the LIMIT
        if iteration >= LIMIT:
            break

    print "done."

    ofile.close()
    return

def write_to_csv_given_wiki_url(wiki_url, dclc_id, writer):
    """Scrape every revision of every page on one PBwiki and write one
    CSV row per human-authored revision via `writer`.

    wiki_url -- base URL of the wiki to scrape
    dclc_id  -- external id recorded in every output row
    writer   -- UnicodeWriter wrapping the already-open output CSV

    Reads module globals MAX_PAGES, MAX_USERS, ENABLE_TRUNCATION and
    TRUNCATE_TO (set in main). Returns None; on an API failure it prints
    an error and returns without writing any rows for this wiki.
    """

    #page#wc / user#wc column fragments, initialized to empty strings
    pages_fragment_list = [""] * MAX_PAGES
    users_fragment_list = [""] * MAX_USERS

    #list holding every row to be outputted
    rows = []

    #WikiEntity lists mapping pages/users to their creation timestamp.
    #creation timestamp = first revision # (aka unix time)
    page_timestamps = []
    user_timestamps = []

    api = PBwiki(wiki_url)

    wiki_info = api.api_call('GetWikiInfo')

    #check for failure BEFORE dereferencing: api_call returns 0 on error,
    #and 0['usercount'] would raise instead of reporting the failure
    if wiki_info == 0:
        print "ERROR: failed on wiki-meta-data-fetch: %s" % wiki_url
        return

    final_num_users = wiki_info['usercount']
    final_num_pages = wiki_info['pagecount']
    wiki_name = wiki_info['wikiname']

    #and here's where the real work begins -- Let's scrape some data
    pages_container = api.api_call('GetPages')
    pages = pages_container['pages']
    for page in pages:
        kwarg = {'page': page['name'], 'verbose': 'true'}
        revisions_container = api.api_call('GetPageRevisions', **kwarg)

        #again: test the error sentinel before subscripting the response
        if revisions_container == 0:
            print "ERROR: failed on wiki-pages-fetch: %s" % wiki_url
            return
        page_revisions = revisions_container['revisions']

        #sort revisions in time order ('revision' is actually a unix time)
        page_revisions.sort(key=lambda rev: rev['revision'])

        previous_word_count = 0

        first_revision = 1
        for revision in page_revisions:

            kwarg = {'page': page['name'], 'revision': revision['revision']}
            fetched_page = api.api_call('GetPage', **kwarg)

            if fetched_page == 0:
                print "ERROR: failed on wiki-revisions-fetch: %s" % wiki_url
                return

            #Get page id
            page_id = fetched_page['oid']

            #get revision id, which is, by definition, the unix creation time
            revision_unix_time = int(revision['revision'])

            #Get and process html
            revision_html = unescape_html(fetched_page['html'])

            soup = BeautifulSoup(revision_html)

            #extract number of links and images
            num_links = len(soup.findAll('a'))
            num_images = len(soup.findAll('img'))

            #words per paragraph: only paragraphs with more than one word count
            num_paragraphs = 0
            total_words = 0

            for paragraph in soup.findAll('p', text=True):
                #skip empty / whitespace-only paragraph nodes
                if paragraph and paragraph.strip():
                    num_words_in_paragraph = len(paragraph.split())
                    if num_words_in_paragraph > 1:
                        num_paragraphs += 1
                        total_words += num_words_in_paragraph

            #NOTE: integer division under Python 2, preserved from original
            try:
                words_per_paragraph = total_words / num_paragraphs
            except ZeroDivisionError:
                words_per_paragraph = ''

            #convert html to text
            revision_text = ''.join(soup.findAll(text=True)).strip()
            word_count = len(revision_text.split())

            local_time = time.localtime(revision_unix_time)
            revision_time = time.strftime("%d %b %Y %H:%M:%S", local_time)
            revision_day = time.strftime("%a", local_time)

            #author fields are not always present in the API response
            try:
                user_id = revision['author_uid']
            except KeyError:
                user_id = "unknown"

            try:
                user_name = revision['author_name']
            except KeyError:
                try:
                    user_name = revision['author_username']
                except KeyError:
                    user_name = "unknown"

            if first_revision:
                #page creation time = timestamp of its earliest revision
                add_to_entity_timestamp_list(revision_unix_time, page_id, page_timestamps)
                first_revision = 0

            #populate user timestamps
            add_to_entity_timestamp_list(revision_unix_time, user_id, user_timestamps)

            #truncate, if enabled. If a cell exceeds truncation length, put
            #"[[[TRUNCATED]]]" at the end, so it's obvious truncation occurred
            if ENABLE_TRUNCATION:
                revision_html = revision_html[:TRUNCATE_TO] + (revision_html[TRUNCATE_TO:] and '[[[TRUNCATED]]]')
                revision_text = revision_text[:TRUNCATE_TO] + (revision_text[TRUNCATE_TO:] and '[[[TRUNCATED]]]')

            #save row, unless the revision is system-generated: we are
            #analyzing human behavior, not computer behavior
            if user_id != "system-uid":
                row = Row(wiki_name, dclc_id, 0, final_num_pages, 0, 0, final_num_users, page_id, page['name'], revision_unix_time, user_id, user_name, revision_time, revision_day, num_links, num_images, word_count, words_per_paragraph, revision_text, revision_html)

                #words added since the previous revision; deletions count as 0
                row.num_words_added = max(0, word_count - previous_word_count)

                rows.append(row)

                previous_word_count = word_count

    ##Data Gathering Complete
    #--------------------------------------------------------------------------#
    ##Now we must sort the data, and write it to a .csv file

    #a wiki with no human-authored revisions produces nothing to write
    #(this also guards the rows[0] access below against IndexError)
    if not rows:
        return

    #sort all revisions/pages on a wiki in unix time order
    rows.sort(key=lambda r: r.revision_number)
    page_timestamps.sort(key=lambda entity: entity.time)
    user_timestamps.sort(key=lambda entity: entity.time)

    page_numbers = {} #mapping of page id to number in page#wc
    user_numbers = {} #mapping of user id to number in user#wc

    #set wiki creation time as the time of the first revision
    wiki_create_unix_time = rows[0].revision_number

    #First Pass: assign each page and user a column number, and record
    #the incremental page/user counts as of each revision's timestamp
    for row in rows:
        add_to_dictionary_if_key_not_exists(row.page_id, page_numbers)
        add_to_dictionary_if_key_not_exists(row.user_id, user_numbers)

        #get the count as of that timestamp, save it to the row
        row.page_count = get_entity_count_up_to(row.revision_number, page_timestamps)
        row.contributor_count = get_entity_count_up_to(row.revision_number, user_timestamps)
        row.final_contributor_count = len(user_timestamps)
        row.wiki_create_unix_time = wiki_create_unix_time

    #Second Pass: fill in all the user#wc's and page#wc's, write each row
    for row in rows:
        user_index = int(user_numbers[row.user_id])
        if users_fragment_list[user_index] == "":
            users_fragment_list[user_index] = int(row.num_words_added)
        else:
            users_fragment_list[user_index] += int(row.num_words_added)
        row.user_word_counts = users_fragment_list

        page_index = int(page_numbers[row.page_id])
        pages_fragment_list[page_index] = int(row.word_count)
        row.page_word_counts = pages_fragment_list

        writer.writerow(row.getList())

    return
