#!/usr/bin/env python
# encoding: utf-8
"""
greeklishout.py

Created by Orestis Markou on 2007-11-01.
Copyright (c) 2007 Orestis Markou. All rights reserved.
"""

import sys
import os
import getopt
import fileutils
import re
import numpy

from new_hmm import HMM


def print_usage():
    """docstring for print_usage"""
    print """
    Usage: python greeklishout.py [options] input_phrase
           python greeklishout.py [options] < <stdin>
           python greeklishout.py [options]
           
    Options: -s, --settings: the settings file. Default: settings.py
             -i, --input: a text file containing greeklish
             -o, --output: the file where output will be written
             -t, --timeit: print timing information
             -h, --help: print help
    """

def main(_argv): 
    """Command-line entry point.

    _argv is sys.argv[1:].  Reads greeklish text from the positional
    arguments, from an input file (-i), or from stdin; writes the
    converted Greek text to an output file (-o) or stdout.
    Exits with status 2 on unrecognised options.
    """
    
    try:                                
        opts, args = getopt.getopt(_argv, "s:i:o:th", 
            ["settings=", "input=", "output=","timeit","help"]) 
    except getopt.GetoptError:           
        print_usage()                          
        sys.exit(2)
    
    # Defaults for the command-line options.
    timing = False
    settings_module = 'settings.py'
    input_filename = None
    output_filename = None
    USING_NUMPY = True  # NOTE(review): assigned but never read in this function
    # OPTIMI
    
    for opt, arg in opts:                  
        if opt in ("-t", "--timeit"):      
            print "Timing!" 
            import time  # imported lazily: only needed when timing is requested
            timing = True 
        elif opt in ("-h", "--help"):
            print_usage()
            sys.exit(0)
        elif opt in ("-s", "--settings"): 
            settings_module = arg 
        elif opt in ("-i", "--input"): 
            input_filename = arg
        elif opt in ("-o", "--output"): 
            output_filename = arg  
    

    # Import the settings module by name (the '.py' suffix is optional).
    try:
        if settings_module[-3:]=='.py':
            settings_module = settings_module[:-3]
        settings_module = __import__(settings_module, {}, {}, [''])
    except ImportError, e:
        raise EnvironmentError, "Could not import settings '%s' (Is it on sys.path? Does it have syntax errors?): %s" % (settings_module, e)
    
    # Positional args may contain literal "\n" escapes; turn them into newlines.
    args_input = [arg.replace('\\n', '\n') for arg in args]
    
    # Input precedence: positional args, then -i file, then stdin.
    input_text = None
    if args_input:
        input_text = ''.join(args_input)
    elif input_filename:
        # NOTE(review): input_file is never explicitly closed - relies on GC.
        input_file = open(input_filename, 'rb')
        contents = input_file.read() #.decode('utf-8')
        if contents.startswith('\xef\xbb\xbf'):
            contents = contents[3:] #strip BOM       
        input_text = unicode(contents, 'utf-8')
    else:
        input_text = sys.stdin.read()
    
    if not input_text: 
        raise EnvironmentError, "You have to either supply text in the arguments, in an input file, or the stdin."
    
    output_file = None
    if output_filename:
        output_file = open(output_filename, 'wb')
    else:
        output_file = sys.stdout
    
    #now we have input_text, output_file, timing, and settings_module.
    #we pass them to the actual function
    
    configure_settings(settings_module)
    
    converter = get_converter(settings_module)
    print converter  # NOTE(review): looks like leftover debug output
    if(timing):
        t1 = time.clock()
    # Convert word tokens; punctuation / digit tokens pass through unchanged.
    for word, mask, original in each_word_with_mask(input_text):
        if word:
            conv = converter.convert(word, join =True)
            # print converter.sequence(original), conv
            if not conv: conv = original  # conversion failed: keep the original token
            else: conv = case_from_mask(conv, mask)  # restore the original casing

            output_file.write(conv.encode('utf-8'))
        else:
            output_file.write(original.encode('utf-8'))
    if(timing):
        t2 = time.clock()
        print 'Took',round(t2-t1, 3),'seconds.'
    
    
    # for setting in dir(settings_module):
    #     setting_value = getattr(settings_module, setting)
    #     print setting,':',setting_value
    
    
def get_converter(settings):
    """Build and return a Converter for the given settings module.

    Returns a single-model Converter when NUMBER_OF_CLUSTERS == 1, or
    None for the (not yet implemented) multi-cluster case.
    Raises EnvironmentError when NUMBER_OF_CLUSTERS < 1.
    """
    if settings.NUMBER_OF_CLUSTERS == 1:
        # Single cluster: one HMM model file covers all words.
        # NOTE(review): the 'test' key into MODEL_FILE_FORMAT looks hard-coded.
        return Converter(settings.MODEL_FILE_FORMAT % 'test', settings)
    elif settings.NUMBER_OF_CLUSTERS > 1:
        # TODO: return a cluster converter once it is implemented.
        return None
    else:
        raise EnvironmentError('You must set at least 1 cluster in NUMBER_OF_CLUSTERS')
    
    
def configure_settings(settings_module):
    """Resolve dotted-path settings on *settings_module* in place.

    Every ALL-CAPS attribute whose name ends in _FUNCTION, _DICT or
    _LIST and whose value is a dotted string ('pkg.mod.name') is
    replaced by the imported object it names.  None values are left
    untouched; other attributes are ignored.
    Raises EnvironmentError when the referenced module cannot be imported.
    """
    for setting in dir(settings_module):
        # Only ALL-CAPS names are treated as settings.
        if setting != setting.upper():
            continue
        if not (setting.endswith('_FUNCTION') or setting.endswith('_DICT')
                or setting.endswith('_LIST')):
            continue
        setting_value = getattr(settings_module, setting)
        if setting_value is None:
            continue
        # Split 'pkg.mod.attr' into the module path and the attribute name.
        mod, _sep, func = setting_value.rpartition('.')
        try:
            # fromlist [''] makes __import__ return the leaf module itself.
            setattr(settings_module, setting,
                    getattr(__import__(mod, {}, {}, ['']), func))
        except ImportError as e:
            raise EnvironmentError(
                "Could not import function '%s' (Is it on sys.path? Does it have syntax errors?): %s"
                % (setting_value, e))
                
                

class Converter(object):
    """Converts greeklish words to Greek using a trained HMM.

    Loads a serialized model from *data_file*; if none exists, trains a
    fresh one from the word files named in *settings* and saves it.
    """

    def __init__(self, data_file, settings):
        self.data_file = data_file
        self.settings = settings
        # Try to load a previously trained model; train from scratch otherwise.
        self.model = HMM.read_model(data_file)
        if self.model is None:
            print('No model...')
            trainer = Trainer(settings.GREEK_WORDS_FILE,
                              settings.GREEKLISH_WORDS_FILE, settings)
            self.model = trainer.train()
            HMM.write_model(self.model, data_file)
        # Observation parts sorted longest-first, for greedy splitting.
        self.parts = sorted(self.model.omega_O, key=len, reverse=True)

    def convert(self, word, join=True):
        """Convert one greeklish word.

        Returns the joined Greek string (join=True), the raw state list
        (join=False), or None when the word cannot be split.
        """
        seq = self.sequence(word)
        if not seq:
            return None
        result = self.model.viterbi(seq)
        return ''.join(result) if join else result

    def sequence(self, word):
        """Split *word* into the model's observation parts."""
        return self.settings.SPLIT_FUNCTION(
            word, self.parts, self.settings.GREEKLISH_SPLIT_LIST)

    def __str__(self):
        return "Converter using a %s" % str(self.model)

class Trainer(object):
    """Trains an HMM from two word files.

    greek_file:     greek words with frequency counts ("word-count" lines).
    greeklish_file: greeklish/greek word pair lines ("greeklish - greek").

    The aligned pairs provide the emission matrix B and the observation
    alphabet; the frequency-counted greek words, split into the
    discovered states, provide the initial distribution pi and the
    transition matrix A.
    """


    def __init__(self, greek_file, greeklish_file, settings):
        super(Trainer, self).__init__()
        self.greek_file = greek_file          # path to counted greek words
        self.greeklish_file = greeklish_file  # path to greeklish/greek pairs
        self.settings = settings
        
        self._parse_words()
        self._import_settings(settings)

    def train(self):
        """Run the full training pipeline and return the trained HMM."""
        seq_observations = self._process_observations()
        states, observations = self.extract_states_observations(seq_observations)
        B = self.train_observations(seq_observations, states, observations)
        seq_states = self._process_states(states)
        pi, A = self.train_states(seq_states, states)
        self.hmm = HMM(states,observations,A,B,pi)
        return self.hmm
    
    def _parse_words(self):
        """Parse both word files into lists of field dicts."""
        # matches e.g. "abc-12": a greek word followed by its count
        re_greek = re.compile(r'(?P<greek>\w+)-(?P<count>\d+).*', re.UNICODE)
        # matches "greeklish - greek" pair lines (flexible whitespace)
        re_greeklish = re.compile(r'(?P<greeklish>\w+)\s*-\s*(?P<greek>\w+).*', re.UNICODE)
        self.words_greek = fileutils.parse_word_file(self.greek_file, re_greek, ['greek', 'count'])
        self.words_greeklish = fileutils.parse_word_file(self.greeklish_file, re_greeklish, ['greeklish', 'greek'])
    
    def _import_settings(self, settings):
        """Cache the settings values this trainer uses as plain attributes."""
        self.align = settings.ALIGN_FUNCTION
        self.split = settings.SPLIT_FUNCTION
        self.simple_words = settings.SIMPLE_WORDS
        self.simple_chars_list = settings.SIMPLE_CHARS_LIST
        self.greek_part_list = settings.GREEK_PART_LIST
        self.greek_split_list = settings.GREEK_SPLIT_LIST
        self.optimize = settings.OPTIMIZE    
        # self.numpy = settings.NUMPY
    
    def extract_states_observations(self, seq_observations):
        """Collect the distinct greek states and greeklish observations
        seen in the aligned sequences; returns (states, observations)."""
        obs, states = set(),set()
        for seq, prob in seq_observations:
            for grkl,gr in seq:
                obs.add(grkl)
                states.add(gr)
        obs, states = list(obs),list(states)
        return states,obs
    
    def train_observations(self, seq_observations,states,observations):
        """Build the (observations x states) emission matrix B by
        accumulating aligned-pair weights, normalized by total weight."""        
        #Train, use normalization, and add new states to the HMM states
        # self.hmm.train_observations(seq_observations, normalize=True, addStates=True)
        # index lookups: observation/state value -> matrix row/column
        obs_ind, state_ind = {},{}
        for i,ob in enumerate(observations):
            obs_ind[ob]=i
        for i,s in enumerate(states):
            state_ind[s]=i
        B = numpy.zeros((len(observations),len(states)))
        total = 0
        for seq, prob in seq_observations:
            for grkl,gr in seq:
                B[obs_ind[grkl],state_ind[gr]]+=prob
            total+=prob
        # Normalized by the total sequence weight, not per-column sums.
        B /=total
        return B
    

    def _process_observations(self):
        """Align each greeklish/greek pair into a sequence of
        (greeklish_part, greek_part) tuples, each weighted 1."""
        #align the greeklish - greek words into sequences
        seq_observations = []
        for record in self.words_greeklish:
            gr_word = record['greek']
            grkl_word = record['greeklish']
        
            #simplify word
            if self.simple_words:
                gr_word = gr_word.lower().translate(self.simple_chars_list)
                grkl_word = grkl_word.lower()
            #align the two words - see comments on alignment module about this    
            aligned_sequence = self.align(grkl_word, gr_word)
            # Pairs that cannot be aligned are silently skipped.
            if aligned_sequence is not None:
                # aligned_sequence.append(('!','!'))
                seq_observations.append((aligned_sequence,1))

        return seq_observations
    
    def _expand_states(self, states):
        """Return states plus any greek parts missing from them, sorted
        longest-first so greedy splitting prefers longer parts."""
        parts_list = states+[i for i in self.greek_part_list if i not in states]
        parts_list.sort(key=len, reverse=True)
        return parts_list

    def _process_states(self, states):
        """Split each counted greek word into state parts; returns a list
        of (parts, count) pairs for transition training."""
        # we use the states gathered from above to split the greek words according to the above sequences
        #we add all the greek chars to avoid un-split sequences...
        parts_list = self._expand_states(states)
        seq_states = []
        for record in self.words_greek:
            gr_word = record['greek']
            count = record['count']
            #simplify word
            if self.simple_words:
                gr_word = gr_word.lower().translate(self.simple_chars_list)
        
            parts_gr = self.split(gr_word, parts_list, self.greek_split_list)
            if parts_gr is not None:
                # parts_gr.append('!')
                seq_states.append((parts_gr,count))
        return seq_states
    
    def train_states(self,seq_states, states):
        """Build the initial distribution pi and transition matrix A from
        the split state sequences, weighted by word counts."""
        # self.hmm.train_states(seq_states, normalize=True)
        state_ind={}
        for i,s in enumerate(states):
            state_ind[s]=i
        pi = numpy.zeros(len(states))
        A = numpy.zeros((len(states),len(states)))
        total = 0
        for seq, prob in seq_states:
            prev = None
            for part in seq:
                if not prev:
                    pi[state_ind[part]]+=prob  # first part of the sequence
                else:
                    A[state_ind[prev],state_ind[part]]+=prob
                prev = part
            total+=prob
        # A is normalized by total weight; pi to a proper distribution.
        A/=total
        pi/=sum(pi)
        return pi,A

def case_from_mask(word, mask):
    """Return *word* with each character's case set from *mask*.

    mask is a list of booleans (True = uppercase).  If mask is shorter
    than word it is padded with its last flag (False when empty); if
    longer, the excess is ignored.  Unlike the previous version, the
    caller's mask list is NOT mutated, and an empty mask no longer
    raises IndexError.
    """
    # Work on a trimmed copy so the caller's list is untouched.
    mask = list(mask)[:len(word)]
    pad = mask[-1] if mask else False
    mask += [pad] * (len(word) - len(mask))
    chars = []
    for ch, upper in zip(word, mask):
        chars.append(ch.upper() if upper else ch.lower())
    return u''.join(chars)
        
    
def mask_from_case(word):
    """Return the case mask of *word*: a list with True wherever the
    character is uppercase (i.e. changed by lower()), for case_from_mask."""
    return [ch != ch.lower() for ch in word]

# Patterns used by each_word_with_mask, compiled once at import time.
# bounds splits text on single non-word characters, keeping them (capture group);
# digits (raw string fixes the non-raw "\d+$" escape) matches all-digit tokens.
bounds = re.compile(r'(\W)', re.UNICODE)
digits = re.compile(r"\d+$")

# Generator over word / punctuation tokens.
def each_word_with_mask(text):
    """Yield (lowercased_word, case_mask, original_token) for each word
    token in *text*, or (None, None, token) for punctuation and
    pure-digit tokens between words."""
    for token in bounds.split(text):
        if token == '':
            continue
        if token.isalnum() and not digits.match(token):
            yield token.lower(), mask_from_case(token), token
        else:
            yield None, None, token

class FreqDist(object):
    """
    A frequency distribution for the outcomes of an experiment.  A
    frequency distribution records the number of times each outcome of
    an experiment has occurred.  For example, a frequency distribution
    could be used to record the frequency of each word type in a
    document.  Formally, a frequency distribution can be defined as a
    function mapping from each sample to the number of times that
    sample occurred as an outcome.

    Frequency distributions are generally constructed by running a
    number of experiments, and incrementing the count for a sample
    every time it is an outcome of an experiment.  For example, the
    following code will produce a frequency distribution that encodes
    how often each word occurs in a text:

        >>> fdist = FreqDist()
        >>> for word in tokenize.whitespace(sent):
        ...    fdist.inc(word)
    """
    def __init__(self):
        """
        Construct a new empty, C{FreqDist}.  In particular, the count
        for every sample is zero.
        """
        self._count = {}        # sample -> count
        self._N = 0             # total number of outcomes recorded
        self._Nr_cache = None   # lazily computed list: r -> Nr(r)
        self._max_cache = None  # lazily computed most frequent sample

    def inc(self, sample, count=1):
        """
        Increment this C{FreqDist}'s count for the given sample.

        @param sample: The sample whose count should be incremented.
        @type sample: any
        @param count: The amount to increment the sample's count by.
        @type count: C{int}
        @rtype: None
        """
        if count == 0: return

        self._N += count
        self._count[sample] = self._count.get(sample, 0) + count

        # Invalidate the Nr cache and max cache.
        self._Nr_cache = None
        self._max_cache = None

    def N(self):
        """
        @return: The total number of sample outcomes that have been
          recorded by this C{FreqDist}.  For the number of unique
          sample values (or bins) with counts greater than zero, use
          C{FreqDist.B()}.
        @rtype: C{int}
        """
        return self._N

    def B(self):
        """
        @return: The total number of sample values (or X{bins}) that
            have counts greater than zero.  For the total
            number of sample outcomes recorded, use C{FreqDist.N()}.
        @rtype: C{int}
        """
        return len(self._count)

    def samples(self):
        """
        @return: A list of all samples that have been recorded as
            outcomes by this frequency distribution.  Use C{count()}
            to determine the count for each sample.
        @rtype: C{list}
        """
        # Wrapped in list() so the documented list return type also holds
        # on Python 3, where dict.keys() returns a view.
        return list(self._count.keys())

    def Nr(self, r, bins=None):
        """
        @return: The number of samples with count r.
        @rtype: C{int}
        @type r: C{int}
        @param r: A sample count.
        @type bins: C{int}
        @param bins: The number of possible sample outcomes.  C{bins}
            is used to calculate Nr(0).  In particular, Nr(0) is
            C{bins-self.B()}.  If C{bins} is not specified, it
            defaults to C{self.B()} (so Nr(0) will be 0).
        """
        if r < 0:
            # Call-style raise: the "raise E, msg" form is deprecated.
            raise IndexError('FreqDist.Nr(): r must be non-negative')

        # Special case for Nr(0):
        if r == 0:
            if bins is None: return 0
            else: return bins - self.B()

        # Finding Nr requires a scan of the whole distribution; the
        # result is cached until the next inc() invalidates it.
        if self._Nr_cache is None:
            self._cache_Nr_values()

        if r >= len(self._Nr_cache): return 0
        return self._Nr_cache[r]

    def _cache_Nr_values(self):
        # Build a list where index c holds the number of samples with count c.
        Nr = [0]
        for sample in self.samples():
            c = self._count.get(sample, 0)
            if c >= len(Nr):
                Nr += [0] * (c + 1 - len(Nr))
            Nr[c] += 1
        self._Nr_cache = Nr

    def count(self, sample):
        """
        Return the count of a given sample.  The count of a sample is
        defined as the number of times that sample outcome was
        recorded by this C{FreqDist}.  Counts are non-negative
        integers.

        @return: The count of a given sample.
        @rtype: C{int}
        @param sample: the sample whose count
               should be returned.
        @type sample: any.
        """
        return self._count.get(sample, 0)

    def freq(self, sample):
        """
        Return the frequency of a given sample.  The frequency of a
        sample is defined as the count of that sample divided by the
        total number of sample outcomes that have been recorded by
        this C{FreqDist}.  Frequencies are always real numbers in the
        range [0, 1].

        @return: The frequency of a given sample.
        @rtype: float
        @param sample: the sample whose frequency
               should be returned.
        @type sample: any
        """
        # '== 0', not 'is 0': identity comparison with an int literal only
        # works by accident of CPython's small-integer caching.
        if self._N == 0: return 0
        return float(self._count.get(sample, 0)) / self._N

    def max(self):
        """
        Return the sample with the greatest number of outcomes in this
        frequency distribution.  If two or more samples have the same
        number of outcomes, return one of them; which sample is
        returned is undefined.  If no outcomes have occurred in this
        frequency distribution, return C{None}.

        @return: The sample with the maximum number of outcomes in this
                frequency distribution.
        @rtype: any or C{None}
        """
        if self._max_cache is None:
            best_sample = None
            best_count = -1
            for sample in self._count.keys():
                if self._count[sample] > best_count:
                    best_sample = sample
                    best_count = self._count[sample]
            self._max_cache = best_sample
        return self._max_cache

    def sorted_samples(self):
        """
        Return the samples sorted in decreasing order of frequency.  Instances
        with the same count will be arbitrarily ordered.  Instances with a
        count of zero will be omitted.

        @return: The set of samples in sorted order.
        @rtype: sequence of any
        """
        items = [(-count, sample) for (sample, count) in self._count.items()]
        items.sort()
        return [sample for (neg_count, sample) in items]

    def __repr__(self):
        """
        @return: A string representation of this C{FreqDist}.
        @rtype: string
        """
        return '<FreqDist with %d samples>' % self.N()

    def __str__(self):
        """
        @return: A string representation of this C{FreqDist}.
        @rtype: string
        """
        samples = self.sorted_samples()
        items = ['%r: %r' % (s, self._count[s]) for s in samples]
        return '<FreqDist: %s>' % ', '.join(items)

    def __contains__(self, sample):
        """
        @return: True if the given sample occurs one or more times in
            this frequency distribution.
        @rtype: C{boolean}
        @param sample: The sample to search for.
        @type sample: any
        """
        # 'in' replaces dict.has_key(), which was removed in Python 3.
        return sample in self._count


if __name__ == '__main__':
    # Profiling harness, left disabled; uncomment to profile with hotshot.
    # import hotshot
    # prof = hotshot.Profile("hotshot_edi_stats")
    # prof.runcall(main, sys.argv[1:])
    # prof.close()
    main(sys.argv[1:])
    
    


