import codecs
import cStringIO
import csv
import htmlentitydefs
import json
import re
import sys
from urllib import urlopen

def unescape_html(text):
    """
    Replace HTML entities in text with their unicode characters.

    Handles decimal ("&#38;") and hexadecimal ("&#x26;") character
    references as well as named entities ("&amp;").  Entities that
    cannot be decoded are left exactly as they appeared.
    """
    def _decode(match):
        entity = match.group(0)
        if entity.startswith("&#"):
            # numeric character reference (decimal or hex)
            try:
                if entity.startswith("&#x"):
                    return unichr(int(entity[3:-1], 16))
                return unichr(int(entity[2:-1]))
            except ValueError:
                # malformed number: leave the entity untouched
                return entity
        # named entity, e.g. "&amp;"
        try:
            return unichr(htmlentitydefs.name2codepoint[entity[1:-1]])
        except KeyError:
            # unknown name: leave the entity untouched
            return entity
    return re.sub(r"&#?\w+;", _decode, text)

class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Python 2 recipe: the csv module cannot write unicode directly, so
    each row is first written UTF-8-encoded into an in-memory queue,
    then decoded and re-encoded into the target encoding before being
    written to the real stream.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        """
        f        -- target file-like object (should be opened in binary mode)
        dialect  -- csv dialect, passed through to csv.writer
        encoding -- final encoding written to f
        kwds     -- extra keyword args passed through to csv.writer
        """
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # incremental encoder keeps its state across writerow() calls
        self.encoder = codecs.getincrementalencoder(encoding)()

    def writerow(self, row):
        """Write one row to the target stream, encoding string cells."""
        encoded_row = []
        for s in row:
            try:
                # "utf-8" here is deliberate even when the target encoding
                # differs: the queue is always UTF-8, and the re-encoding
                # into the target encoding happens further down
                encoded_row.append(s.encode("utf-8"))
            except AttributeError:
                # non-string cells (ints, None, ...) have no .encode();
                # let csv.writer stringify them itself
                encoded_row.append(s)

        self.writer.writerow(encoded_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write every row in rows via writerow()."""
        for row in rows:
            self.writerow(row)


class PBwiki(object):
    """
    An object that:
    1) stores the url of a wiki
    2) can call the pbwiki api, and returns data from that api call
    """

    def __init__(self, url):
        # Base url of the wiki, used to build every api call later
        self.url = url

    def api_call(self, op, **kwargs):
        """
        Calls the pbwiki api, using url, and returns the decoded JSON data.

        - op is pbwiki operator
        - kwargs are kv options for ops, appended to the url as /key/value

        Raises IOError if the url cannot be opened, and ValueError if the
        response body is not valid JSON.
        """
        if kwargs:
            url_args = '/'.join(["%s/%s" % (k, v) for k, v in kwargs.iteritems()])
            call_url = '%s/api_v2/op/%s/%s' % (self.url, op, url_args)
        else:
            call_url = '%s/api_v2/op/%s' % (self.url, op)

        #replace whitespace with %20
        call_url = call_url.replace(' ', '%20')

        resp = urlopen(call_url)
        try:
            raw = resp.read()
        finally:
            # close the connection even if read() fails; the original
            # closed it only after json.loads, leaking it on parse errors
            resp.close()

        # The api wraps the JSON payload: drop the first line and the
        # last two lines before decoding
        json_string = '\n'.join(raw.split('\n')[1:-2])
        return json.loads(json_string)

def GetInHMS(seconds):
    """
    Converts seconds to an "HH:MM:SS" string.

    The hour field is omitted when it is zero ("MM:SS").  divmod uses
    floor division, so the result is correct under both Python 2 and
    Python 3 (where "/" on ints would produce floats and break "%02d").
    """
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, seconds)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)

def get_wiki_list_from_csv():
    """
    Gets a list of wikis from input.csv

    input.csv is tab-delimited: column 0 is the wiki url, column 1 is
    Justin's DCLC id.  Returns a list of [wiki_url, dclc_id] pairs.
    Raises IOError if input.csv is missing and IndexError if a row has
    fewer than two columns.
    """
    # "with" guarantees the file is closed even if a row is malformed;
    # the original leaked the handle on any exception before close()
    with open('input.csv', "rb") as ifile:
        reader = csv.reader(ifile, delimiter="\t")
        return [[row[0], row[1]] for row in reader]


class Row:
    """
    One row in the output csv file
    does not include page#wc or user#wc
    (those extra word-count columns are appended from page_word_counts /
    user_word_counts at the end of getList())
    """

    def __init__(self, wiki_name, dclc_id, page_count, final_page_count,
                 contributor_count, final_contributor_count, final_user_count,
                 page_id, page_name, revision_number, user_id, user_name,
                 datetime, day, link_count, image_count, word_count,
                 words_per_paragraph, text, raw_html, page_word_counts=None,
                 user_word_counts=None, num_words_added=0):
        # BUG FIX: the defaults used to be "=[]", which made every Row
        # constructed without explicit lists share the same two list
        # objects (mutable-default-argument pitfall).  None is the
        # sentinel; a fresh list is created per instance.
        if page_word_counts is None:
            page_word_counts = []
        if user_word_counts is None:
            user_word_counts = []
        self.wiki_name = wiki_name
        self.dclc_id = dclc_id
        self.page_count = page_count
        self.final_page_count = final_page_count
        self.contributor_count = contributor_count
        self.final_contributor_count = final_contributor_count
        self.final_user_count = final_user_count
        self.page_id = page_id
        self.page_name = page_name
        # always starts at 0; callers fill this in later if known
        self.wiki_create_unix_time = 0
        self.revision_number = revision_number
        self.user_id = user_id
        self.user_name = user_name
        self.datetime = datetime
        self.day = day
        self.link_count = link_count
        self.image_count = image_count
        self.word_count = word_count
        self.words_per_paragraph = words_per_paragraph
        self.text = text
        self.raw_html = raw_html
        self.page_word_counts = page_word_counts
        self.user_word_counts = user_word_counts
        self.num_words_added = num_words_added

    @staticmethod
    def getHeader():
        """Column names, in the same order getList() emits values."""
        return ['Wiki', 'DCLCID', 'PageCount', 'FinalPageCount', 'ContributorCount', 'FinalContributorCount', 'FinalUserCount', 'Page ID', 'Page Name', 'Wiki Create Unix Time','Revision # (Unix Time)', 'User ID', 'User Name', 'Datetime', 'Day', 'Link Count', 'Image Count', 'Word Count', 'Words Added', 'Avg Words/Paragraph', 'Text', 'Raw HTML']

    def getList(self):
        """
        Return this row as a flat list in getHeader() order, with the
        per-page and per-user word counts appended after the fixed columns.
        """
        output_list = [
            self.wiki_name,
            self.dclc_id,
            self.page_count,
            self.final_page_count,
            self.contributor_count,
            self.final_contributor_count,
            self.final_user_count,
            self.page_id,
            self.page_name,
            self.wiki_create_unix_time,
            self.revision_number,
            self.user_id,
            self.user_name,
            self.datetime,
            self.day,
            self.link_count,
            self.image_count,
            self.word_count,
            self.num_words_added,
            self.words_per_paragraph,
            self.text,
            self.raw_html,
        ]
        output_list += self.page_word_counts
        output_list += self.user_word_counts

        return output_list

def get_entity_count_up_to(revision_time, entity_list):
    """
    Count how many entities in entity_list have a timestamp at or before
    the unix time "revision_time".

    entity_list is assumed to be ordered by .time ascending: the scan
    stops at the first entity strictly newer than revision_time.
    """
    seen = 0
    for entity in entity_list:
        if entity.time > revision_time:
            return seen
        seen += 1
    return seen

def max_pages_users():
    """
    Call me from the command line to discover the maximum number of pages and users across all wikis
    You would do this if Justin gives you a new set of wikis, and you want to know the max users and max pages
    """
    max_pages = 0
    max_users = 0
    wiki_url_id_list = get_wiki_list_from_csv()
    print "Checking " + str(len(wiki_url_id_list)) + " wikis"

    for wiki in wiki_url_id_list:
        #make the call
        wiki_url = wiki[0]
        dict = get_final_num_pages_users(wiki_url)

        #calc maxes
        max_pages = max(dict['pagecount'], max_pages)
        max_users = max(dict['usercount'], max_users)

        #draw a dot
        sys.stdout.write(".")
        sys.stdout.flush()

    print "\nMax Pages = " + str(max_pages)
    print "Max Users = " + str(max_users)
    print "done."

def get_final_num_pages_users(wiki_url):
    """
    Look up the number of pages and users registered to a wiki.

    Returns a dictionary with integer 'pagecount' and 'usercount' keys,
    taken from the wiki's GetWikiInfo api call.
    """
    wiki_info = PBwiki(wiki_url).api_call('GetWikiInfo')
    return {
        'pagecount': int(wiki_info['pagecount']),
        'usercount': int(wiki_info['usercount']),
    }

class WikiEntity:
    """
    A generic class to store various mappings and wiki entities
    It's used to:
     - store incremental word and page counts
     - store a mapping between a user id and user#wc
     - store a mapping between a page id and page#wc
    """

    def __init__(self, id, time=0, word_count=0):
        # id comes first in the signature; time/word_count default to 0
        self.id = id
        self.time = time
        self.word_count = word_count

    def addWords(self, words_added):
        """Accumulate words_added into this entity's running word count."""
        self.word_count = self.word_count + words_added
        return

def add_to_entity_timestamp_list(unix_time, entity_id, entity_list):
    """
    Record the earliest unix_time seen for entity_id in entity_list.

    Falsy ids or timestamps and the placeholder id 'unknown' are
    ignored.  If the id is already tracked, its timestamp is lowered
    when unix_time is earlier; otherwise a new WikiEntity is appended.
    """
    # skip missing/placeholder identifiers and missing timestamps
    if not entity_id or not unix_time or entity_id == 'unknown':
        return

    found = False
    for known in entity_list:
        if known.id == entity_id:
            found = True
            # keep the earliest timestamp for this entity
            if unix_time < known.time:
                known.time = unix_time
                break

    if not found:
        entity_list.append(WikiEntity(entity_id, time=unix_time))
    return

def add_to_list_if_element_not_exists(element, list):
    """
    Append element to list unless an equal element is already present.

    NOTE: the second parameter is named "list", shadowing the builtin;
    the name is kept so existing keyword callers don't break.
    """
    # "in" performs the same ==-based scan the original flag loop did
    if element not in list:
        list.append(element)
    return

def add_to_dictionary_if_key_not_exists(key, dictionary):
    """
    Assign key the next sequential index (the dictionary's size at call
    time) unless the key already exists, in which case nothing changes.
    """
    # setdefault evaluates len() eagerly, exactly as the original did
    # before its try/except lookup
    dictionary.setdefault(key, len(dictionary))
    return