from urllib import urlopen
import csv
import re
import htmlentitydefs
import json
import cStringIO
import codecs

def unescape_html(text):
    """
    Replace HTML entities in *text* with the characters they encode.

    Handles decimal (&#65;), hexadecimal (&#x41;) and named (&amp;)
    references. Unknown entity names and malformed numeric references
    are left in the text untouched. Runs on both Python 2 and Python 3.
    """
    # name2codepoint moved to html.entities in Python 3
    try:
        from htmlentitydefs import name2codepoint
    except ImportError:
        from html.entities import name2codepoint
    # unichr does not exist on Python 3, where plain chr covers all codepoints
    try:
        _chr = unichr
    except NameError:
        _chr = chr

    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # numeric character reference
            try:
                if text[:3] == "&#x":
                    return _chr(int(text[3:-1], 16))
                else:
                    return _chr(int(text[2:-1]))
            except ValueError:
                pass  # malformed number: fall through, keep raw text
        else:
            # named entity
            try:
                text = _chr(name2codepoint[text[1:-1]])
            except KeyError:
                pass  # unknown entity name: keep raw text
        return text  # leave as is
    # raw string so \w is a regex class, not a string escape
    return re.sub(r"&#?\w+;", fixup, text)

class PBwiki(object):
    """
    An object that:
    1) stores the url of a wiki
    2) can call the pbwiki api, and returns data
    """

    def __init__(self, url):
        # Base url of the wiki; every api call is built on top of it
        self.url = url

    def api_call(self, op, **kwargs):
        """
        Calls the pbwiki api, using url, and returns the decoded JSON data.

        - op is pbwiki operator
        - kwargs are kv options for ops, appended as /key/value path
          segments (sorted by key so the generated url is deterministic)
        """
        if kwargs:
            url_args = '/'.join(["%s/%s" % (k, v) for k, v in sorted(kwargs.items())])
            call_url = '%s/api_v2/op/%s/%s' % (self.url, op, url_args)
        else:
            call_url = '%s/api_v2/op/%s' % (self.url, op)

        # replace whitespace with %20
        call_url = call_url.replace(' ', '%20')

        resp = urlopen(call_url)
        try:
            raw = resp.read()
        finally:
            # close the connection even if the read fails
            resp.close()

        # NOTE(review): the first line and last two lines of the response
        # appear to be a non-JSON wrapper added by pbwiki -- confirm against
        # an actual api response
        json_string = '\n'.join(raw.split('\n')[1:-2])
        return json.loads(json_string)

def get_wiki_list_from_csv(path='control.csv'):
    """
    Gets a list of wiki urls from the control file.

    - path: tab-delimited control file (defaults to 'control.csv');
      expected heading: filename,truth,trainingset

    Returns the first column of every non-empty row, header excluded.
    """
    # the csv module wants binary mode on Python 2 but text mode on Python 3
    mode = 'rb' if bytes is str else 'r'

    # with-statement guarantees the file is closed even on a parse error
    with open(path, mode) as ifile:
        reader = csv.reader(ifile, delimiter="\t")
        wiki_urls = [row[0] for row in reader if row]

    # shave off the header row
    return wiki_urls[1:]


def GetInHMS(seconds):
    """
    Converts seconds to an "HH:MM:SS" string ("MM:SS" when under an hour).

    Uses divmod (floor division) so the arithmetic stays integral on
    Python 3 as well; the original "/" is only floor division on Python 2.
    """
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    if hours == 0:
        return "%02d:%02d" % (minutes, seconds)
    return "%02d:%02d:%02d" % (hours, minutes, seconds)

def extract_wiki_name_from_url(wiki_url):
    """
    Pull the wiki name out of a pbworks-style url, e.g.
    'http://www.englishdca.pbworks.com/whatever' -> 'englishdca'.
    """
    # strip out the leading/trailing whitespace
    wiki_url = wiki_url.strip()

    # Dots are escaped: the original pattern used a bare '.', which matches
    # any character -- e.g. an optional "www" prefix with no dot after it
    # could swallow the first letter of the wiki name.
    pattern = r"""(https?://)?
    (www\.)?
    (?P<wikiname>[\-,\w]*)
    (\.\S*)?"""

    # the pattern can match the empty string, so a match always exists
    match_obj = re.search(pattern, wiki_url, re.IGNORECASE | re.VERBOSE)
    return str(match_obj.group('wikiname'))