#encoding=utf-8
'''
Created on 2011-12-9

@author: Sunny
'''
from nltk.parse import ViterbiParser
from nltk.grammar import parse_pcfg
import re
import sys
from nltk.internals import NLTK_JAR
import nltk


# Locations of the grammar/corpus data files used throughout this script.
# NOTE(review): "floder" is a typo for "folder", but the name is kept
# unchanged in case other modules import it.
data_floder_name = 'data/'
rule_file = data_floder_name+'Rule.txt'  # raw whitespace-delimited PCFG rules
test_file = data_floder_name+'test.txt'
test_sentence_file = data_floder_name+'testsentence.txt'  # tagged test sentences (word/TAG)
parsed_sentence_file = data_floder_name+'testParses.txt'  # parser output, one tree per line
corpus_file = data_floder_name+'corpus.txt'
cnf_rule_file = data_floder_name+'ChomskyRule.txt'  # Chomsky-normal-form rules

def NomalizeRuleFile(filename):
    """Convert a whitespace-delimited PCFG rule file into NLTK grammar syntax.

    Each non-blank input line is expected to look like::

        <probability> <LHS> <sep> <RHS-symbol> [<RHS-symbol> ...]

    where the third field (e.g. an arrow) is ignored.  A symbol is treated
    as a nonterminal iff it appears as a LHS somewhere in the file; every
    other RHS symbol is a terminal and gets quoted.  One rule per line is
    written to ``<filename>_n.txt`` in the form ``LHS ->  rhs...  [prob]``.
    """
    with open(filename) as infile:
        lines = infile.readlines()
    # Every symbol occurring in the LHS position is a nonterminal; blank
    # lines are skipped so they cannot crash the set construction.
    nonternimals = set(s.split()[1] for s in lines if s.split())

    with open(filename + "_n.txt", 'w') as outfile:
        for line in lines:
            parses = line.strip().split()
            if not parses:
                continue  # ignore blank lines
            pieces = [parses[1] + ' -> ']
            for p in parses[3:]:
                # Quote terminals for NLTK; leave nonterminals bare.
                if p not in nonternimals:
                    pieces.append(" '" + p + "' ")
                else:
                    pieces.append(' ' + p + ' ')
            pieces.append('[%.25f]\n' % float(parses[0]))
            outfile.write(''.join(pieces))

def IsInSent(word,tokens):
    """Return True when *word* occurs among *tokens*, else False."""
    for candidate in tokens:
        if candidate == word:
            return True
    return False
    
    
def NomalizeTree(par,tokens):
    """Flatten a parse tree string down to its bracket skeleton.

    ``par`` is a parse tree (or its string form) whose leaves are the POS
    tags listed in ``tokens``.  Everything except parentheses and leaf
    tokens is discarded, then each ``( <token>)`` group is replaced, left
    to right, by a positional placeholder ``_<i>_`` so a caller can later
    substitute the surface words back in.

    NOTE(review): relies on the Python 2-only ``basestring`` builtin.
    """
    li=[]
    # Accept a ready-made string or anything str()-convertible
    # (presumably an NLTK tree object -- confirm against callers).
    if isinstance(par, basestring):
        words = par.split()
    else:
        words=str(par).split()
    
    # The last whitespace-separated chunk is dropped -- presumably a
    # trailing probability annotation in the parser's str() output;
    # TODO(review): confirm against the actual format.
    for word in words[:-1]:
        word=word.strip()
        tword=word
        tword=tword.replace("(","")
        tword=tword.replace(")","")
        if not IsInSent(tword,tokens):
            # Not a leaf token: keep only its bracket structure.
            if word[0]=='(':
                li.append('(')
            elif word[-1]==')':
                li.append(')')
        else:
            # Leaf token, kept verbatim (it may carry a trailing ')').
            li.append(word)
    s=""
    for l in li:
        s+=l+" "
    # Replace each "( <token>)" group with its placeholder; the pattern's
    # spacing matches how s was assembled above (leaves keep their ')').
    for i in range(len(tokens)):
        ts="( "+tokens[i]+")"
        s=s.replace(ts,"_"+str(i)+"_",1)
    #print s
    return s


def GenerateOne(sent,parser):
    """Parse one tagged sentence and return the tree as a one-line string.

    ``sent`` is a double-space separated sequence of ``word/TAG`` items;
    a word may itself contain ``/`` -- the tag is always the component
    after the final slash.  Only the tag sequence is handed to ``parser``.
    """
    tokens = []
    words = []
    for chunk in sent.split("  "):
        parts = chunk.split("/")
        tokens.append(parts[-1])
        words.append("".join(parts[:-1]))

    tree = parser.parse(tokens)
    return str(tree).replace('\n', '')

def GenerateAll(lines,parser,st=0):
    """Parse every tagged sentence in *lines*, one result line per input.

    Output goes to ``Data//testParses<st>``.  *st* is the absolute index
    of the first sentence, used for progress numbering and to give each
    slice its own output file so several runs can proceed side by side.
    A sentence that fails to parse is reported and skipped (best-effort).
    """
    counter = 1
    with open("Data//testParses" + str(st), "w") as outfile:
        for line in lines:
            try:
                # Progress indicator: absolute sentence number.
                print(counter + st)
                counter += 1
                result = GenerateOne(line.strip(), parser)
                outfile.write(result + "\n")
                # Flush so partial results survive a crash or kill.
                outfile.flush()
            except Exception:
                # Best-effort run: log the failure and continue with the
                # next sentence (narrowed from the original bare except,
                # which also swallowed KeyboardInterrupt/SystemExit).
                import traceback
                traceback.print_exc()
            

def get_token_word(sent):
    """Split a double-space separated ``word/TAG`` sentence.

    Returns ``(tokens, words)``: for each item the token is the component
    after the last ``/`` and the word is everything before it, re-joined
    (a word may itself contain slashes).
    """
    pairs = [item.split("/") for item in sent.split("  ")]
    tokens = [parts[-1] for parts in pairs]
    words = ["".join(parts[:-1]) for parts in pairs]
    return tokens,words


def strify(t):
    """Serialize a parse tree: each subtree becomes a ``( ... )`` group
    and each leaf is wrapped in ``||`` markers (stripped by callers)."""
    if not isinstance(t, nltk.Tree):
        # Leaf node: mark it so callers can find and unwrap it.
        return '||' + t + '||'
    pieces = [' (']
    pieces.extend(strify(child) for child in t)
    pieces.append(') ')
    return ''.join(pieces)
    
def replace_tags_words(parsed,tags,words):
    """Re-insert surface words into a parse tree whose leaves are POS tags.

    ``parsed`` is the parser output (tree or string); ``words`` holds the
    surface words aligned with the tree's leaves, left to right.  Returns
    a bracketed string built by ``strify`` with its leaf markers removed.

    NOTE(review): ``tags`` is never used -- kept for signature
    compatibility with callers.
    """
    str_tree = str(parsed)
#    print tags
#    print ','.join(words).decode('gbk')

    # Chop the string at the last '(' -- presumably dropping a trailing
    # probability annotation in the parser's str() output; TODO(review):
    # confirm against the actual format.
    if str_tree.rfind('(')!=-1:
        str_tree = str_tree[:str_tree.rfind('(')]
    else:
        # No bracket structure at all: fall back to the bare words.
        return ''.join(words)
    
    # Rebuild a tree from the bracket string (legacy NLTK API), then
    # overwrite each leaf (a POS tag) with its surface word by position.
    out = nltk.tree.bracket_parse(str_tree)
    leaves = out.leaves()
    for index in range(len(leaves)):
        path = out.leaf_treeposition(index)
        out[path] = words[index]
    
    # strify wraps leaves in '||'; strip the markers next to brackets and
    # collapse the double spaces its ' (' / ') ' padding produces.
    out = strify(out).replace('(||','').replace('||)','').replace('  ',' ')
    
#    out = re.sub(r'\((.*?) ', '',out)
#    print out.decode('gbk').replace('\n','')
    return out


def nomalize_result():
    """Post-process parser output: replace POS-tag leaves with words.

    Reads parse trees from ``parsed_sentence_file`` and the original
    tagged sentences from ``test_sentence_file`` (aligned line by line),
    and writes the word-level trees to ``Data//testParses_.txt``.
    """
    with open(parsed_sentence_file) as parsedfile:
        lines = parsedfile.readlines()
    with open(test_sentence_file) as goldfile:
        golden = goldfile.readlines()
    with open("Data//testParses_.txt", 'w') as outfile:
        # zip pairs each parse with its source sentence and also guards
        # against one file being shorter than the other (the original
        # indexed golden[i] and could raise IndexError).
        for parsed_line, gold_line in zip(lines, golden):
            tokens, words = get_token_word(gold_line)
            s = replace_tags_words(parsed_line, tokens, words)
            outfile.write(s + '\n')

def main():
    file="Data//traned_rules10.cfg"
#    NomalizeRuleFile(file)
    if len(sys.argv)==4:
        tid = int(sys.argv[1])
        st = int(sys.argv[2])
        end = int(sys.argv[3])
    else:
        tid = 0
        st = 0
        end = 2000
        
    grammar = parse_pcfg(open(file+"_n.txt").read())
    testfilename=test_sentence_file
    
    #resultfilename="Data//ParseResults.txt"
#    GenerateAll(testfilename,grammar)
    infile=open(testfilename)
    lines=infile.readlines()
    print "total:",len(lines)
    parser=ViterbiParser(grammar)
#    parser=nltk.InsideChartParser(grammar)
    if end > len(lines): end = len(lines)
    GenerateAll(lines[st:end], parser,st)
#    MultipleThreadRunner(GenerateAll,lines,parser,)
    #WriteResult(resultfilename,lines)        
if __name__ == '__main__':
    # Entry point: run the parsing pipeline.  Switch to nomalize_result()
    # to post-process an existing parse output file instead.
#    nomalize_result()
    main()