#!/usr/bin/env python
# -*- coding: utf-8 -*- 
#utility functions and methods for opening urls and cleaning text
import re
import nltk
import codecs
import phon_transcript
import phon_data
import phon_corpus
from urllib import urlopen

#list of urls being used
#http://childes.psy.cmu.edu/browser/index.php?url=Eng-NA/Goad/Julia/
#http://childes.psy.cmu.edu/browser/index.php?url=Eng-NA/Goad/Sonya/
#http://childes.psy.cmu.edu/browser/index.php?url=Eng-UK/Smith/
#http://childes.psy.cmu.edu/browser/index.php?url=Romance/French/GoadRose/Clara/
#http://childes.psy.cmu.edu/browser/index.php?url=Romance/French/GoadRose/Theo/
#-----------------------------#
# Regular Expression Patterns #
#-----------------------------#

# Finds "(1) X --> Y: weight" transition/weight lines.
# NOTE(review): not referenced anywhere in this file -- presumably consumed
# by a sibling module that parses trained-model output; confirm before removing.
GET_TRANS = re.compile(r'(?<=\(1\) )(?P<transition>. --\> .): (?P<weight>-*[0-9]+\.[0-9]+)')
#pattern that finds all the sub urls of a corpus index page
#these are generally of the format: url/xxxx-xx-xx.cha
GET_URLS = re.compile(r'(?<=href=").*?\.cha(?=\">)')


#these patterns are used to parse the transcripts and collect actual data
#globals: these are at the top of each transcript, and do not vary within
#gets the name of the transcript, location and year (the @Loc header field)
GET_TITLE = re.compile(r'(?<=@Loc: )[^ @]*')
#gets the author/researcher of the transcript
#(applied to the @Loc title string, not the full text -- see url_to_phon_transcript)
GET_AUTHOR = re.compile(r'\A[a-z]+-?[a-z]*/[a-z]+', re.IGNORECASE)
#gets the language being used, e.g. ENG or FR (from the @ID header)
#NOTE(review): '[a-z]*[^\|]' also consumes one trailing non-pipe character
GET_LANG = re.compile(r'(?<=@ID: )[a-z]*[^\|]')
#gets the name of the child (a capitalized field between '|' separators)
GET_NAME = re.compile(r'(?<=\|)[A-Z][a-z]*(?=\|)')
#gets the age of the child at time of production, e.g. 1;02.15
GET_AGE = re.compile(r'(?<=\|)[0-9]+;[0-9]*.?[0-9]*')
#gets the gender of the child (probably not relevant)
GET_GENDER = re.compile(r'(?<=\|)[a-z]*[^\|]')

#tokens: they actually return a 2-tuple of (x,y), where y is the relevant data
#gets the child's production (the %pho tier)
GET_CHILD = re.compile(r'(?P<name>%pho:)(?P<word>[^\|]*?(?=%))')
#gets the adult production/target form (the %mod tier)
GET_ADULT = re.compile(r'(?P<name>%mod:)(?P<word>[^\|]*?(?=%))')
#gets the orthographic representation of the child's utterance
GET_WORD = re.compile(r'(?P<name>\*[A-Z][a-z]*: )(?P<word>.*?)(?P<punctuation> \.|\?|!)')
#gets the type of the utterance, e.g. imitation.
GET_TYPE = re.compile(r'(?P<name>%x(type|stage|segtyp): )(?P<word>[^\|1234567890]*)')

# One full utterance record: the speaker line followed by the %pho (child)
# and %mod (adult target) tiers.  Matched against text flattened by
# url_cleaner (whitespace collapsed to single spaces); the [0-9]+ runs look
# like the CHILDES browser's line numbers -- TODO confirm against a raw page.
GET_PHON_DATA = re.compile(r"""
                \*[A-Z][a-zé]*:[ ](?P<word>[^\.\?!]*?)[ ](?P<punct>\.|\?|!)
                [ ][0-9]+[ ]%pho:[ ](?P<child>[^1234567890]*)
                [ ][0-9]+[ ]%mod:[ ](?P<adult>[^1234567890]*)""", re.X)

#used for format of the Genesee corpus (child form only, on an %xpho tier)
#http://childes.psy.cmu.edu/browser/index.php?url=Biling/Genesee/
GET_PHON_DATA_GENESEE = re.compile(r"""
                \*CHI:[ ](?P<word>[^\.\?!]*?)[ ](?P<punct>\.|\?|!)
                [ ][0-9]+[ ]%xpho:[ ](?P<child>[^1234567890]*)""", re.X)

# Like GET_PHON_DATA, but additionally captures an utterance-type tier
# (%xsegtyp or %xstage) into the 'type' group.
GET_PHON_DATA_WITH_SEGTYP = re.compile(r"""
                \*[A-Z]+[a-zé]*:[ ](?P<word>[^\.\?!]*?)[ ](?P<punct>\.|\?|!)
                [ ][0-9]+[ ]%pho:[ ](?P<child>[^1234567890]*)
                [ ][0-9]+[ ]%mod:[ ](?P<adult>[^1234567890]*)
                [ ][0-9]+[ ]%x(segtyp|stage):[ ](?P<type>[^%\*]*)""", re.X)

#-----------------------------#
# Utility Based Functions     #
#-----------------------------#

# Compiled once at import time.  url_cleaner is called once per transcript
# when walking a whole corpus (see url_to_phon_transcripts), so recompiling
# these four patterns on every call was wasted work.  The patterns and the
# order in which they are applied are unchanged.
_TAG_PATTERN = re.compile(r'<[^<]*?>')        # markup tags, e.g. <a href="...">
_DECL_PATTERN = re.compile(r'<![^<]*?>')      # declarations, e.g. <!DOCTYPE ...>
_WHITESPACE_PATTERN = re.compile(r'\s+')      # any whitespace run
_ENTITY_PATTERN = re.compile(r' &#[0-9]+;')   # numeric character references

def url_cleaner(url):
    """Fetch the page at `url` and return its body with markup stripped.

    Tags and declarations are deleted, runs of whitespace are collapsed to
    a single space, and space-prefixed numeric character entities are
    removed.  The result is one long whitespace-normalized string, which is
    the form the GET_PHON_DATA* patterns expect.
    """
    text = urlopen(url).read()
    text = _TAG_PATTERN.sub("", text)
    text = _DECL_PATTERN.sub("", text)
    text = _WHITESPACE_PATTERN.sub(" ", text)
    text = _ENTITY_PATTERN.sub("", text)
    return text

def url_to_phon_transcript(url):
    """Fetch one CHILDES .cha transcript from `url` and parse it into a
    phon_transcript object whose .data list holds phon_data tokens.

    Three transcript layouts are tried in order: %pho/%mod with an
    utterance-type tier (%xsegtyp/%xstage), plain %pho/%mod, and the
    Genesee %xpho-only format."""
    errors = []          # NOTE(review): never used in this function
    hasType = True
    isGenesee = False
    text = url_cleaner(url)
    # Header metadata; each findall(...)[0] raises IndexError if the field
    # is missing from the transcript.
    language = GET_LANG.findall(text)[0]
    age = GET_AGE.findall(text)[0]
    name = GET_NAME.findall(text)[0]
    title = GET_TITLE.findall(text)[0]
    # the author is embedded in the @Loc title, not in the full text
    author = GET_AUTHOR.findall(title)[0]
    transcript = phon_transcript.phon_transcript()
    transcript.url = url
    transcript.lang = language
    transcript.subject = name
    transcript.age = age
    transcript.title = title
    transcript.author = author
    transcript.data = []
    transcript.text = text
    # NOTE(review): words/childs/adults, data_size and the itr_words/
    # itr_childs/itr_adults/itr_types iterators below are computed but never
    # used afterwards.
    words = GET_WORD.findall(text)
    childs = GET_CHILD.findall(text)
    adults = GET_ADULT.findall(text)
    data_size = len(words)
    data_size2 = len(GET_PHON_DATA.findall(text))
    # Pick whichever record pattern actually matches this transcript.
    if GET_PHON_DATA_WITH_SEGTYP.findall(text) != []:
        itr_tokens = GET_PHON_DATA_WITH_SEGTYP.finditer(text)
        # NOTE(review): this is the only pattern that captures a 'type'
        # group, yet hasType is turned off here, so p.type is never filled
        # for these records -- looks inverted; confirm intent.  (Leaving
        # hasType True for plain GET_PHON_DATA is harmless because the
        # try/except below absorbs the missing group.)
        hasType = False
        data_size2 = len(GET_PHON_DATA_WITH_SEGTYP.findall(text))
    elif GET_PHON_DATA.findall(text) != []:
        itr_tokens = GET_PHON_DATA.finditer(text)
        data_size2 = len(GET_PHON_DATA.findall(text))
    else:
        # Fall back to the Genesee layout: child form only, no %mod tier.
        itr_tokens = GET_PHON_DATA_GENESEE.finditer(text)
        data_size2 = len(GET_PHON_DATA_GENESEE.findall(text))
        hasType = False
        isGenesee = True
        transcript.title_to_subject()
    itr_words = GET_WORD.finditer(text)
    itr_childs = GET_CHILD.finditer(text)
    itr_adults = GET_ADULT.finditer(text)
    itr_types = GET_TYPE.finditer(text)
    # Build one phon_data token per matched record.
    for x in range(0, data_size2):
        m = itr_tokens.next()  # Python 2 iterator protocol
        p = phon_data.phon_data()
        p.title = transcript.title
        p.age = age
        p.name = transcript.subject
        p.lang = language
        p.word = m.group('word')
        p.child = m.group('child').decode("utf8")
        if not isGenesee:
            p.adult = m.group('adult').decode("utf8")
            p.trim() #trim the token to reduce number of states/complexity
            p.get_symbols()
        if isGenesee:
            p.genesee_clean()
        if hasType:
            try:
                p.type = m.group('type')
            except Exception as e:
                # plain GET_PHON_DATA has no 'type' group
                p.type = None
        # a record may split into several tokens; collect them all
        new_tokens = p.split()
        for token in new_tokens:
            transcript.data.append(token)
    return transcript

def url_to_urls(url):
    """Return the list of .cha transcript links found on the
    CHILDES corpus index page at `url`."""
    page = urlopen(url).read()
    return GET_URLS.findall(page)

def url_to_phon_transcripts(url):
    """Download and parse every transcript listed on the CHILDES corpus
    index page at `url`, returning them as a list of phon_transcript
    objects (in page order)."""
    return [url_to_phon_transcript(child_url) for child_url in url_to_urls(url)]

#-----------------------------#
# Child Age Conversions       #
#-----------------------------#
#four functions to convert between the year;month;day format
#allows conversion to a raw integer value, and also back to the string format
def convert_ymd(ymd):
    """Convert a CHILDES age string 'year;month.day' to an integer day count.

    `ymd` looks like '2;03.14'; the month and day parts are optional.  Years
    count as 365 days and months are weighted by their non-leap calendar
    length via months_value().  Raises IndexError when `ymd` does not start
    with 'year;' (unchanged from the original behaviour).

    Bug fix: the year field is now [0-9]+ instead of a single [0-9], so ages
    of ten years or more (which GET_AGE happily extracts) no longer fail to
    match and crash.
    """
    b = re.compile(r'\A(?P<year>[0-9]+);(?P<month>[0-9][0-9]?)?\.?(?P<day>[0-9][0-9]?)?')
    values = b.findall(ymd)
    # findall() yields one (year, month, day) tuple; unmatched optional
    # groups come back as '' (IndexError here if ymd is malformed, as before).
    year, month, day = values[0]
    date = 0
    if year != '':
        date += int(year) * 365
    if month != '':
        date += months_value(int(month))
    if day != '':
        date += int(day)
    return date


def deconvert_ymd(value):
    """Inverse of convert_ymd: render an integer day count as the
    'year;month.day' age string (365-day years, non-leap months)."""
    years, leftover_days = divmod(value, 365)
    return str(years) + ";" + month_subtract(leftover_days)


def months_value(months):
    """Return the total number of days in months 1 through `months` of a
    non-leap calendar year (0 when `months` is less than 1)."""
    return sum(month_value(m) for m in range(1, months + 1))

def month_value(month):
    """Days in calendar month `month` of a non-leap year.  Any value that
    is not 2, 4, 6, 9 or 11 -- including out-of-range ones -- yields 31,
    matching the original if/elif chain's fall-through."""
    lengths = {2: 28, 4: 30, 6: 30, 9: 30, 11: 30}
    return lengths.get(month, 31)

def month_subtract(value):
    """Render a leftover day count (< 365) as a 'month.day' string.

    Walks months 1..12 in order, consuming a month's length whenever
    strictly more than that many days remain; the number of months consumed
    and the remaining days form the result."""
    whole_months = 0
    for m in range(1, 13):
        days = month_value(m)
        if value - days > 0:
            value -= days
            whole_months += 1
    return str(whole_months) + "." + str(value)

def find_division(phon_corpus, delta = .2, sigma = .1, theta = 3.0):
    """
    Scan a corpus for points where an HMM trained on earlier material stops
    predicting later material well -- candidate divisions in the data.

    An HMM tagger is trained on an initial slice of labelled sequences and
    its accuracy on the following test slice becomes the baseline; the test
    window then grows one sequence at a time until accuracy drops more than
    `theta` below the baseline, at which point a division is recorded and
    training restarts after it.

    Args:
        phon_corpus: a corpus of child language productions
            (NOTE(review): this parameter shadows the imported phon_corpus
            module inside this function)
        delta: real in [0,1) which specifies size of initial training set
        sigma: real in [0,1) which specifies size of initial testing set
        theta: real which specifies threshold from which an accuracy can deviate
    Returns:
        a list of ((train_start, train_end), (test_start, test_end)) index
        pairs, one per division found
    """
    # a list of divisions by indices
    divisions = []
    training_bound = int(len(phon_corpus.labelled_sequences) * delta)
    testing_bound = int(((len(phon_corpus.labelled_sequences) - training_bound) * sigma) + training_bound)
    training_start = 0
    training_set = phon_corpus.labelled_sequences[training_start:training_bound]
    testing_set = phon_corpus.labelled_sequences[training_bound:testing_bound]
    hmm_trainer = nltk.HiddenMarkovModelTrainer(phon_corpus.child_symbols, phon_corpus.adult_symbols)
    hmm_tagger = hmm_trainer.train(training_set)
    # NOTE(review): assumes .test() returns an accuracy value; in current
    # nltk, HiddenMarkovModelTagger.test prints and returns None, so this
    # code is tied to the older nltk version it was written against.
    baseline_accuracy = hmm_tagger.test(testing_set)
    new_accuracy = 0
    condition = True
    while condition:
        # re-test the grown window that follows the training slice
        new_accuracy = hmm_tagger.test(phon_corpus.labelled_sequences[training_bound:testing_bound])
        print baseline_accuracy - theta
        #the new_accuracy is lower than the threshold allows
        #so we have found our division
        if new_accuracy < (baseline_accuracy - theta):
            print "finally"
            divisions.append(((training_start, training_bound), (training_bound, testing_bound)))
            print divisions
            if testing_bound < int(len(phon_corpus.labelled_sequences) - 1):
                # restart: retrain on a fresh slice beginning after the division
                hmm_trainer = nltk.HiddenMarkovModelTrainer(phon_corpus.child_symbols, phon_corpus.adult_symbols)
                training_bound = int(((len(phon_corpus.labelled_sequences) - testing_bound) * delta) + testing_bound)
                training_start = testing_bound
                training_set = phon_corpus.labelled_sequences[training_start:training_bound]
                hmm_tagger = hmm_trainer.train(training_set)
                testing_bound = int(((len(phon_corpus.labelled_sequences) - training_bound) * sigma) + training_bound)
                testing_set = phon_corpus.labelled_sequences[training_bound:testing_bound]
                baseline_accuracy = hmm_tagger.test(testing_set)
                new_accuracy = 0
            else:
                break
        #the accuracy stayed within the threshold, so we move on
        else:
            testing_bound += 1
            baseline_accuracy = new_accuracy
        if testing_bound == len(phon_corpus.labelled_sequences) - 1:
            condition = False
    return divisions
    
    
    
    





#unused now
def table_minimum(table, indices):
    """Given a 2-D table and a list of (row, col) index pairs, return the
    pair(s) -- in input order -- whose cell holds the smallest value among
    the listed cells.  Ties all make it into the returned list."""
    # seed the running minimum with the first listed cell
    smallest = table[indices[0][0]][indices[0][1]]
    winners = []
    for pair in indices:
        cell = table[pair[0]][pair[1]]
        if cell < smallest:
            # strictly better: previous winners are obsolete
            smallest = cell
            winners = [pair]
        elif cell == smallest:
            # another cell tied with the current minimum
            winners.append(pair)
    return winners
    
        
#sources of error
#http://childes.psy.cmu.edu/browser/index.php?url=Eng-NA/Goad/Julia/1996-11-14.cha
#100	*Julia:	um . ▶
#101	*Julia:	um a deer . ▶
#two names in a row
