# -*- coding: UTF-8 -*-
# @author: Marcin Harasimczuk

import morfeusz
import sys
import codecs
import os
import math

from collections import defaultdict
import xml.parsers.expat
from HTMLParser import HTMLParser

# Globals:
# --- State for the alias (XML) parser ---
# True while the expat cursor is inside a <name> element.
isNumber = False
# True while the expat cursor is inside an <alias> element.
isAlias = False
# Text of the most recent <name> element (presumably a contact number).
number = ""
# Text of the most recent <alias> element.
alias = ""
# --- State for the log (HTML) parser ---
# True once enough header tags have passed and real log content follows.
isBody = False
# Number of non-<html> start tags seen in the current document.
tagCount = 0
# Distance (running token count) from the beginning of all logs.
distance = 0
# Contact (log directory name) currently being parsed.
currentContact = ""

# Main data structures:
# Token dictionary: maps a word form (Morfeusz tag) to a list of
# (word, distance, contact) triples.
forms = defaultdict(list)
# Frequency dictionary: occurrence count keyed by the basic (lemma)
# form of each word, UTF-8 encoded.
freqs = {}
# Alias dictionary: number -> alias mapping built from the alias XML.
aliases = {}

# Functions:
# For alias parser:
def start_element(name, attrs):
        """Expat start-tag handler: flag which alias-file element is open."""
        global isNumber
        global isAlias

        if name == "name":
                isNumber = True
        elif name == "alias":
                isAlias = True


def end_element(name):
        """Expat end-tag handler: clear flags and, when an <alias>
        element closes, store the collected number -> alias mapping."""
        global isNumber
        global isAlias
        global aliases

        if name == "name":
                isNumber = False
        elif name == "alias":
                isAlias = False
                aliases[number] = alias


def char_data(data):
        """Expat character-data handler: capture the text of whichever
        alias-file element is currently open."""
        global isNumber
        global isAlias
        global number
        global alias

        if isNumber:
                number = data
        if isAlias:
                alias = data

# LogParser is used to build two data structures: Token dictionary and Frequency dictionary
# from data in logs.
class LogParser(HTMLParser):
        """Builds the Token dictionary (forms) and the Frequency
        dictionary (freqs) from the HTML chat logs."""

        def handle_starttag(self, tag, attrs):
                """Track position in the document and enable token
                collection once the boilerplate header tags have passed."""
                global isBody
                global tagCount

                if tag == "html":
                        # A new document begins: reset the header counter.
                        isBody = False
                        tagCount = 0
                else:
                        tagCount = tagCount + 1
                        # Try to omit useless and confusing header data:
                        # heuristically, real content starts after the 6th tag.
                        if tagCount == 6:
                                isBody = True

        def handle_data(self, data):
                """Morphologically analyse body text and record each
                accepted token in forms/freqs as a side effect."""
                global distance
                global currentContact

                if not isBody:
                        return

                # Morfeusz is a lot faster with single words.
                for chunk in data.split():
                        analyseResult = morfeusz.analyse(chunk.decode('UTF-8'), expand_tags=False)

                        for rawTokens in analyseResult:
                                for rawToken in rawTokens:
                                        # Filter out unwanted Morfeusz tags here
                                        # (u"None" covers a missing tag).
                                        tag = unicode(rawToken[2])
                                        if tag == u"None" or tag == u"interp" or tag == u"conj":
                                                continue

                                        # Token = (word, distance from beginning, history contact)
                                        forms[rawToken[2]].append((rawToken[0], distance, currentContact))
                                        distance = distance + 1

                                        # Update frequency dictionary using the basic
                                        # (lemma) form of the word; encode the key once
                                        # instead of three times as before.
                                        baseWord = rawToken[1].encode('UTF-8')
                                        freqs[baseWord] = freqs.get(baseWord, 0) + 1
                                                
# Build Token dictionary and Frequency dictionary
def parseHistory(pathToLogs):
        """Feed every chat log below pathToLogs into LogParser.

        Layout: pathToLogs/<contact>/<logfile>.  The contact directory
        name is recorded with every token via the currentContact global.
        ".svn" entries are skipped (tolerated if absent, unlike the
        original list.remove which raised ValueError).
        """
        global currentContact

        parser = LogParser()

        for logDir in os.listdir(pathToLogs):
                if logDir == ".svn":
                        continue
                currentContact = logDir
                for logFileName in os.listdir(pathToLogs + '/' + logDir):
                        if logFileName == ".svn":
                                continue
                        logFile = open(pathToLogs + '/' + logDir + '/' + logFileName, "r")
                        try:
                                # Skip the first line: it is not part of the log body.
                                logFile.readline()
                                parser.feed(logFile.read())
                        finally:
                                # The original leaked the handle; always close it.
                                logFile.close()

# Build Alias dictionary
def parseAliases(pathToAliases):
        """Parse the alias XML file at pathToAliases with expat.

        The module-level start_element/end_element/char_data handlers
        fill the global aliases dictionary as a side effect.
        """
        aliasFile = open(pathToAliases, 'r')
        try:
                aliasParser = xml.parsers.expat.ParserCreate()
                aliasParser.StartElementHandler = start_element
                aliasParser.EndElementHandler = end_element
                aliasParser.CharacterDataHandler = char_data
                aliasParser.ParseFile(aliasFile)
        finally:
                # The original leaked the file handle; always close it.
                aliasFile.close()

# Get all tokens of a given form
def getTokens(form):
        """Return every (word, distance, contact) triple stored for the
        given word form (empty list for an unseen form, since forms is
        a defaultdict)."""
        return forms[form]

# Return Alias
def getAlias(number):
        """Return the alias mapped to the given number (KeyError if unknown)."""
        return aliases[number]

# Return number of parsed tokens (aka maximum distance)
def getMaxDistance():
        """Return the number of parsed tokens so far (the maximum distance)."""
        return distance

# Analyze Frequency table to choose best token. If "firstGap" is not true
# "previous" = (contact, distance) must be given. 
def fillTheGap(form, firstGap = True, previous = None):
        """Choose the best token of the given form to fill a gap.

        form     -- word form to look up in the Token dictionary
        firstGap -- True: pick the token whose basic (lemma) form is most
                    frequent in the whole history; False: pick the token
                    nearest to "previous", preferring the same contact
        previous -- (contact, distance) of the previously chosen token;
                    required when firstGap is False.  The default is now
                    None instead of a shared mutable [] (it is never read
                    when firstGap is True, so callers are unaffected).

        Returns the chosen (word, distance, contact) token, or [] when
        no candidate qualifies.
        """
        tokens = getTokens(form)

        if firstGap:
                maxFreq = 0
                maxFreqToken = []
                for token in tokens:
                        analyseResult = morfeusz.analyse(token[0], expand_tags=False)
                        # Encode the lemma once; the original re-encoded it
                        # on every dictionary access.
                        baseWord = analyseResult[0][0][1].encode('UTF-8')
                        if freqs[baseWord] > maxFreq:
                                maxFreq = freqs[baseWord]
                                maxFreqToken = token
                return maxFreqToken

        previousContact = previous[0]
        previousDistance = previous[1]

        # Prefer tokens from the same contact whenever any exist.
        contactTokens = [token for token in tokens if token[2] == previousContact]
        if contactTokens:
                tokens = contactTokens

        # Start from the largest possible distance and shrink.
        minimalDistance = getMaxDistance()
        nearestToken = []
        for token in tokens:
                tokenDistance = math.fabs(token[1] - previousDistance)
                if tokenDistance < minimalDistance:
                        minimalDistance = tokenDistance
                        nearestToken = token
        return nearestToken

