#!/usr/bin/env python

# This module takes a piece of text and returns that text in Unisyn's accent
# NEUTRAL lexical keysymbols.


import re

import packages
import nltk

import dicts
import taggers


def tokenize(text):
    """Split *text* into a list of tokens, separating punctuation Unisyn-style.

    This tokenizer changes crucial parts of the NLTK tokenizer.  It is
    designed specifically to work with the UNISYN 1.3 text-speech synth
    system.  If you are not using that, don't use this.

    Returns a list of token strings (whitespace-split after padding
    punctuation with spaces).
    """

    # Separate most punctuation: everything except word characters and
    # . ' - / , & which may be word-internal (e.g. "2,500", "don't", "a/b").
    text = re.sub(r"([^\w\.\'\-\/,&])", r' \1 ', text)

    # Separate commas only when followed by whitespace,
    # so numbers like "2,500" stay intact.
    text = re.sub(r"(,\s)", r' \1', text)

    # Separate single quotes followed by whitespace (trailing quotes),
    # leaving contractions like "don't" alone.
    text = re.sub(r"('\s)", r' \1', text)

    # Separate periods that come before a newline or end of string.
    # Raw string here: '\.' in a plain literal is an invalid escape
    # sequence (DeprecationWarning; a SyntaxError in future Pythons).
    text = re.sub(r'\. *(\n|$)', ' . ', text)

    return text.split()


class Pronouncer(object):
    """Convert raw text into accent-neutral Unisyn keysymbol pronunciations.

    A POS tagger is used to disambiguate noun/verb homographs (words such
    as "record" with one noun and one verb pronunciation entry in the
    General American (gam) pronunciation dictionary).
    """

    def __init__(self):
        # POS tagger plus the gam part-of-speech / pronunciation dicts.
        self._tagger = taggers.SimpleTagger.factory()
        self._gam_pos = dicts.GamPOS()
        self._gam_pron = dicts.GamPron()

    def _convert_prons(self, _tokens, _tagged_sents):
        """Map every token to a pronunciation string.

        _tokens: list of sentences, each a list of token strings.
        _tagged_sents: parallel structure of POS tags, indexable as
            _tagged_sents[sent_index][word_index].

        Tokens absent from the pronunciation dictionary pass through
        unchanged.  Returns a flat list of pronunciations/tokens.
        """
        # Words with exactly two dictionary entries, one noun ('NN') and one
        # verb ('VB/VBP') -- the classic "record" (noun) vs. "record" (verb)
        # homographs.  NOTE: the previous condition
        # `'NN' and 'VB/VBP' in self._gam_pos[key]` only tested for
        # 'VB/VBP' because of operator precedence; both tags are required.
        noun_verb_pairs = [
            key for key in self._gam_pos.keys()
            if len(self._gam_pos[key]) == 2
            and 'NN' in self._gam_pos[key]
            and 'VB/VBP' in self._gam_pos[key]
        ]
        prons = []
        # enumerate() (rather than list.index()) keeps the indices correct
        # when the same sentence or token occurs more than once.
        for num_sent, sent in enumerate(_tokens):
            for num_word, token in enumerate(sent):
                if token not in self._gam_pron:
                    # Unknown word: pass it through as-is.
                    prons.append(token)
                    continue
                entries = self._gam_pron[token]
                if len(entries) == 1:
                    # Unambiguous: single pronunciation entry.
                    prons.extend(entries)
                elif token in noun_verb_pairs:
                    tag = _tagged_sents[num_sent][num_word]
                    if 'N' in tag:
                        pron_position = self._gam_pos[token].index('NN')
                        prons.append(entries[pron_position])
                    elif 'V' in tag:
                        pron_position = self._gam_pos[token].index('VB/VBP')
                        prons.append(entries[pron_position])
                    else:
                        # Tag disambiguates neither reading.  Previously the
                        # token was silently dropped here, shortening the
                        # output; fall back to the first entry instead.
                        prons.append(entries[0])
                else:
                    # Part of speech does not disambiguate the pronunciation;
                    # just choose the first entry.
                    prons.append(entries[0])
        return prons

    def get_prons(self, raw):
        """Return pronunciations for *raw* text as a flat list of strings."""
        raw = raw.lower()
        sentences = nltk.sent_tokenize(raw)
        _tokens = [tokenize(sent) for sent in sentences]
        _tagged_sents = self._tagger.tag(raw)
        return self._convert_prons(_tokens, _tagged_sents)
