# -*- coding: utf-8 -*-

# Copyright (C) 2012 Maira Carvalho <hello@mairacarvalho.com>
#
# This file is part of Kapybara's Imuri module, and is provided to you as is under the terms
# and conditions of the GPL v3 license. The license should have been provided to
# you with this package via the file gpl-3.0.txt, but if not you can see it
# online at https://www.gnu.org/licenses/gpl-3.0.html

''' This module offers Imuri's word downloader functions.
    It downloads word definitions from Wiktionary's API.
'''

import urllib
import re
import htmlentitydefs


def unescape(text):
    '''Replace HTML or XML character references and named entities in
    ``text`` with the corresponding Unicode characters.

    @param text The HTML (or XML) source text.
    @return The plain text, as a Unicode string if necessary. Malformed
            references and unknown entity names are left untouched.
    '''
    def _decode_entity(match):
        entity = match.group(0)
        if entity.startswith("&#"):
            # Numeric character reference: hexadecimal (&#x41;) or
            # decimal (&#65;).
            try:
                if entity.startswith("&#x"):
                    return unichr(int(entity[3:-1], 16))
                return unichr(int(entity[2:-1]))
            except ValueError:
                # Not a valid number -- keep the original text.
                return entity
        # Named entity such as &amp; -- look up its code point.
        try:
            return unichr(htmlentitydefs.name2codepoint[entity[1:-1]])
        except KeyError:
            return entity  # unknown name: leave as is
    return re.sub(r"&#?\w+;", _decode_entity, text)


class Imuri(object):

    '''Downloader for word definitions from Wiktionary's API.

    Inform the language pair in the following format:
    languages = {
        "source":{'code':'en','name':'English'},
        "target":{'code':'fi','name':'Finnish'}
    }
    '''
    def __init__(self, languages):
        # Keep the language pair; the source language selects which
        # Wiktionary site is queried, the target language selects the
        # section extracted from the page.
        self.languages = languages
        # action=parse returns the rendered HTML of the requested page.
        self.base_url = "http://%s.wiktionary.org/w/api.php?action=parse&prop=text&format=txt&page=" % self.languages['source']['code']

    def get_raw_content(self, word):
        '''Fetch the wiki page for ``word`` and extract the section for
        the target language.

        @param word The word to look up.
        @return dict with keys:
                'exit_status' -- 0 on success, -1 on failure;
                'message'     -- human-readable status text;
                'content'     -- the raw HTML fragment of the target-language
                                 section, or False when it was not found.
        '''
        # Percent-encode the word so spaces, diacritics and characters such
        # as '&' or '#' survive inside the URL (previously the raw word was
        # concatenated, which broke such queries).
        query_url = self.base_url + urllib.quote(word)
        full_content = unescape(urllib.urlopen(query_url).read())

        # From the complete wiki page, keep only the target-language
        # section: from its headline up to the next <h2> heading or the
        # parser's "NewPP limit report" comment, whichever comes first.
        # re.escape() protects language names that contain regex
        # metacharacters, e.g. "Norwegian (Bokmål)".
        regex_string = "(<span class=\"mw-headline\" id=\"%s\">.*?)(NewPP limit report|<h2)" % re.escape(self.languages['target']['name'])

        regex_section = re.compile(regex_string, re.I | re.M | re.S)
        content = regex_section.search(full_content)

        results = {}

        if content:
            results['exit_status'] = 0
            results['message'] = 'Successfully retrieved raw content from URL: %s' % query_url
            results['content'] = content.group(1)
        else:
            results['exit_status'] = -1
            results['message'] = "Could not retrieve word \"%s\" for language \"%s\". URL: %s" % (word, self.languages['target']['name'], query_url)
            results['content'] = False

        return results

    def clean_raw_content(self, content):
        '''Strip unnecessary HTML from a raw section fragment.

        Removes all anchor tags (keeping their inner text) and the
        "[edit]" section links added by MediaWiki.

        @param content Raw HTML fragment as returned by get_raw_content().
        @return The cleaned HTML string.
        '''
        regex_links = re.compile('</?a[^>]*>', re.I | re.S | re.M)
        content = regex_links.sub('', content)
        content = content.replace('<span class="editsection">[edit]</span>', '')
        return content
