import os, re, pickle
# from nltk.etree import ElementTree as ET
from nltk.chunk.api import *
from nltk.chunk.util import *
import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.tree import Tree
from nltk.chunk import RegexpChunkParser
from nltk.chunk.regexp import RegexpParser
import cPickle as pickle
import datetime
from nltk.tag.hunpos import HunposTagger
import StringIO
from Person import Person

# Pickle filenames for the trained NE models (stored under binary/).
_BINARY_NE_CHUNKER = 'ne_chunker_en.pickle'
_BINARY_NER_PER = 'ne_per_en.pickle'
_BINARY_NER_ORG = 'ne_org_en.pickle'
_BINARY_NER_LOC = 'ne_loc_en.pickle'

# Maps an entity label to the pickle file holding its trained chunker.
pickle_data = { 'PER' : _BINARY_NER_PER,
                'ORG' : _BINARY_NER_ORG,
                'LOC' : _BINARY_NER_LOC }

class NEChunkParserTagger(nltk.tag.ClassifierBasedTagger):
    """IOB named-entity tagger backed by a MaxEnt (megam) classifier.

    Features combine orthographic shape, POS context, previously assigned
    tags and membership in gazetteer word lists loaded from dictionary/.
    """

    def __init__(self, train):
        # Tell nltk where the megam binary lives.
        nltk.config_megam('/opt/mm-ner-english/megam.opt')
        # Gazetteers: one entry per line.
        self.short_en_wordlist = set(open("dictionary/en-basic").read().splitlines())
        self.person_name_list = set(open("dictionary/persons.txt").read().splitlines())
        self.organization_name_list = set(open("dictionary/organizations.txt").read().splitlines())
        self.organization_suffix_list = set(open("dictionary/organizations-suffix.txt").read().splitlines())
        self.conjunction_list = set(["and", "for", "of", "the"])
        self.location_name_list = set(open("dictionary/locations.txt").read().splitlines())
        nltk.tag.ClassifierBasedTagger.__init__(
            self, train=train,
            classifier_builder=self._classifier_builder)

    def _classifier_builder(self, train):
        """Train the MaxEnt classifier with the megam backend."""
        return nltk.MaxentClassifier.train(train, algorithm='megam',
                                           gaussian_prior_sigma=1,
                                           trace=2)

    def _organization_suffix(self, tokens, index):
        """Return True if the run of capitalized tokens starting at
        ``index`` reaches a known organization suffix (e.g. "Inc")
        before hitting a lowercase word, punctuation or a number."""
        limit = len(tokens)
        i = index
        while i < limit:
            token = tokens[i][0]
            if token.islower() or shape(token) == "punct" or shape(token) == "numbr":
                return False
            if token in self.organization_suffix_list:
                return True
            i += 1
        return False

    def _feature_detector(self, tokens, index, history):
        """Build the feature dict for ``tokens[index]``.

        ``tokens`` is a list of (word, pos) pairs; ``history`` holds the
        IOB tags already assigned to the preceding tokens.
        """
        word = tokens[index][0]
        pos = tokens[index][1].lower()
        if index == 0:
            prevword = prevprevword = None
            prevpos = prevprevpos = None
            prevshape = prevtag = prevprevtag = None
        elif index == 1:
            prevword = tokens[index-1][0]
            prevprevword = None
            prevpos = tokens[index-1][1].lower()
            prevprevpos = None
            # BUG FIX: was ``history[index-1][0]``, which used only the
            # first character of the previous tag; use the whole tag as
            # the general branch below does.
            prevtag = history[index-1]
            prevshape = shape(prevword)
            prevprevtag = None
        else:
            prevword = tokens[index-1][0]
            prevprevword = tokens[index-2][0].lower()
            prevpos = tokens[index-1][1].lower()
            prevprevpos = simplify_pos(tokens[index-2][1])
            prevtag = history[index-1]
            prevprevtag = history[index-2]
            prevshape = shape(prevword)
        if index == len(tokens)-1:
            nextword = nextnextword = None
            nextpos = nextnextpos = None
        elif index == len(tokens)-2:
            nextword = tokens[index+1][0]
            nextpos = tokens[index+1][1].lower()
            nextnextword = None
            nextnextpos = None
        else:
            nextword = tokens[index+1][0]
            nextpos = tokens[index+1][1].lower()
            nextnextword = tokens[index+2][0].lower()
            nextnextpos = tokens[index+2][1].lower()

        organization_suffix = self._organization_suffix(tokens, index)

        features = {
            'bias': True,
            'shape': shape(word),
            'wordlen': len(word),
            'prefix3': word[:3],
            'suffix3': word[-3:],
            'pos': pos,
            'word': word,
            'en-wordlist': (word in self.short_en_wordlist),
            'person-name': (word in self.person_name_list),
            'organization-name': (word in self.organization_name_list),
            'organization-suffix': organization_suffix,
            'location-name': (word in self.location_name_list),
            'prevtag': prevtag,
            'prevpos': prevpos,
            'nextpos': nextpos,
            'prevword': prevword,
            'nextword': nextword,
            'word+nextpos': '%s+%s' % (word, nextpos),
            'pos+prevtag': '%s+%s' % (pos, prevtag),
            'prevshape+prevtag': '%s+%s' % (prevshape, prevtag),
            }
        return features

class NEChunkParser(ChunkParserI):
    """Named-entity chunker wrapping a NEChunkParserTagger trained on IOB data."""

    def __init__(self, train):
        self._train(train)

    def parse(self, tokens):
        """Tag (word, pos) pairs and return a flat [(word, iob_tag), ...] list."""
        tagged = self._tagger.tag(tokens)
        return [(token[0], iob) for token, iob in tagged]

    def _train(self, corpus):
        # Materialize the (possibly lazy) training corpus before training.
        self._tagger = NEChunkParserTagger(train=list(corpus))

def shape(word):
    """Classify the orthographic shape of ``word``.

    Returns one of 'numbr', 'punct', 'title', 'lower', 'mixed' or 'other'.
    """
    # BUG FIX: the number pattern was '^A|B$' -- alternation binds looser
    # than the anchors, so each branch carried only one anchor and e.g.
    # '12abc' matched the unanchored first branch and came back 'numbr'.
    # Group the alternation so both branches are fully anchored.
    if re.match(r'^([0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+)$', word):
        return 'numbr'
    elif re.match(r'^\W+$', word):
        return 'punct'
    elif re.match(r'^[A-Z][a-z]+$', word):
        return 'title'
    elif re.match(r'^[a-z]+$', word):
        return 'lower'
    elif re.match(r'^\w+$', word):
        return 'mixed'
    else:
        return 'other'

def simplify_pos(s):
    """Collapse a POS tag: any verb tag becomes 'V'; otherwise keep only
    the part before the first '-'."""
    return "V" if s.startswith('V') else s.split('-')[0]
    
def load_ner_data(path, search_token=('PER', 'ORG', 'LOC', 'TITLE')):
    """Yield tagged sentences from every *.txt file under ``path``.

    ``search_token`` lists the entity labels to keep; all other tokens are
    tagged 'O' downstream.  The default is now a tuple instead of a list
    so the shared default object cannot be mutated between calls.
    """
    # ``dirpath`` instead of ``path``: the original shadowed the argument.
    for dirpath, dirs, files in os.walk(path):
        for f in files:
            if not f.endswith('txt'):
                continue
            for sent in load_ner_file(os.path.join(dirpath, f), search_token):
                yield sent

def load_ner_file(fname, search_token):
    """Parse one inline-annotated training file.

    Entities are marked with <LABEL:START> ... <LABEL:END> tokens inside
    the text.  For every line of the file, yields a list of
    ([word, pos], tag) items where tag is 'B-<label>' for words inside a
    wanted entity (labels listed in ``search_token``) and 'O' otherwise.
    """
    text = open(fname).read()
    # Strip quotes and pad punctuation so it splits into its own tokens.
    text = text.replace('"','')
    text = text.replace(',',' , ')
    #text = text.replace('.',' . ')
    text = text.replace('/',' / ')
    text = text.replace('(',' ( ')
    text = text.replace(')',' ) ')
    #tokenizer = RegexpTokenizer("[\w'-<:>.,/]+")
    tokenizer = RegexpTokenizer("\s+", gaps=True)
    # Entity markers: group 1 = opening label, group 2 = closing label.
    pattern = re.compile('\<(.*):START\>|\<(.*):END\>')

    #text2 = nltk.sent_tokenize(text) # should not be needed if the training data is guaranteed to be one sentence per line
    buffer = StringIO.StringIO(text)
    text2 = buffer.readlines()
    for s in text2:
        raw_words = tokenizer.tokenize(s)
        #print raw_words
        # POS-tag the sentence with all <...> markers removed.
        clean_sent = re.sub('<.*?>','',s)

        text = nltk.word_tokenize(clean_sent)
        tag_iter = (pos for (word, pos) in nltk.pos_tag(text))
        # NOTE(review): this assumes the non-marker whitespace tokens align
        # one-to-one with nltk.word_tokenize's tokens; a mismatch would
        # mis-assign POS tags or raise StopIteration -- confirm on the
        # actual training data.

        label = ''
        begin_tag = True
        toks = [] 
        for w in raw_words:
#            print w
            res = pattern.findall(w)
            if res:
                if res[0][0]:
                    # Opening marker: start tagging following words.
                    label = res[0][0]
                    begin_tag = True
                else:
                    # Closing marker: back to outside-entity.
                    label = ''
            else:
                if (label=='') or (label not in search_token):
                    toks.append(([w,tag_iter.next()], "O"))
                elif begin_tag:
                    toks.append(([w,tag_iter.next()], "B-%s" % label))
                    begin_tag = False
                else:
                    # NOTE(review): continuation tokens also get "B-", not
                    # "I-"; the downstream chunk grammars match <B-X>+ only,
                    # so this may be intentional -- confirm before changing.
                    toks.append(([w,tag_iter.next()], "B-%s" % label))
#            print toks
        yield toks

def create_pickle(train_path=None):
    if train_path==None:
        train_path = "training"
    for label, fname in pickle_data.iteritems():
        train_data = load_ner_data(train_path, [label])
        print 'Creating pickle for %s...' % label
        cp = NEChunkParser(train_data)
        outfilename = 'binary/%s' % fname
        print 'Saving chunker to %s...' % outfilename
        out = open(outfilename, 'wb')
        pickle.dump(cp, out, -1)
        out.close()

def load_pickle(filename):
    """Unpickle and return the object stored in ``filename``."""
    with open(filename, 'rb') as pkl_file:
        return pickle.load(pkl_file)

def condition_measure(label='O', test_path=None):
    if test_path==None:
        test_path = "test"
    true_positive = 0
    true_negative = 0
    false_positive = 0
    false_negative = 0
    train_data = load_ner_data(test_path, search_token=[label])
    pkl_file = 'binary/%s' % pickle_data[label]
    cp = load_pickle(pkl_file)

    label_param = label
    #if label_param != "O":
    #    label_param = "B-" + label

    rerr = open(label + "_recall.txt", 'w')
    perr = open(label + "_precision.txt", 'w')
    for tok in train_data:
#        words = []
#        for (word,b),label in tok:
#            words.append(word)
#        text = nltk.word_tokenize(' '.join(words))
#        tokens = nltk.pos_tag(text)
        
        tokens = [(w,b) for (w,b),l in tok]
        result = cp.parse(tokens)
        i=0
        for (word,b),label in tok:
            w,l = result[i]
            label = label[-3:]
            l = l[-3:]
            if w==word and l==label:
                if l==label_param:
                    true_positive+=1
                else:
                    true_negative+=1
            elif w==word and not l==label:
                if l==label_param:
                    false_positive+=1
                    # perr.write("word: " + w + "\n\n")
                elif label==label_param:
                    false_negative+=1
                    # rerr.write("word: " + w + "\n\n")
                else:
                    true_negative+=1
            i+=1
    print 'accuracy %s'%label_param
    #print 'true positive : %d'%true_positive
    #print 'true negative : %d'%true_negative
    #print 'false positive : %d'%false_positive
    #print 'false negative : %d'%false_negative
    rerr.close()
    perr.close()

    precision = 100
    if true_positive+false_positive>0:
        precision = (float(true_positive))/float(true_positive+false_positive)*100
    recall = 100
    if true_positive+false_negative>0:
        recall = (float(true_positive))/float(true_positive+false_negative)*100
    f_measure = 2 * (precision * recall) / (precision+recall)
    print 'precission : %f percent'% precision
    print 'recall     : %f percent'% recall
    print 'f-measure  : %f percent'% f_measure

def accuracy(test_path = None, binary_path = None, classifier = None, label_param='O'):
    if test_path==None:
        train_data = load_ner_data("test", search_token=[label_param])
    else:
        train_data = load_ner_data(test_path, search_token=[label_param])
    if binary_path==None:
        pkl_file = 'binary/%s' % pickle_data[label_param]
        cp = load_pickle(pkl_file)
    else:
        cp = load_pickle(binary_file)
        
    if not classifier==None:
        cp = classifier
    sum = 0
    correct = 0
    reflist = []
    testlist = []
    rerr = open("recall.txt", 'w')
    perr = open("precision.txt", 'w')
    for tok in train_data:
#        words = []
#        for (word,b),label in tok:
#            words.append(word)
#
#        text = nltk.word_tokenize(' '.join(words))
#        #tokens = nltk.pos_tag(text)
        tokens = [(w,b) for (w,b),l in tok]
        result = cp.parse(tokens)
        sum +=1
        miss = 0
        i=0
        for (word,b),label in tok:
            w,l = result[i]
            if w==word and not l==label:
                miss +=1
#                if label=='B-PER' and l=='O':
#                    print word
                    #print '=== line %d ==='%sum
                    #print 'word : %s'%word
                    #print 'correct : %s'%label
                    #print 'guess : %s'%l
                    #print 'sent : %s '%' '.join(words)
                    #print 'train : %s '%tok
                    #print 'result : %s '%result
                    # rerr.write('=== line %d ==='%sum + "\n")
                    # rerr.write('word : %s'%word + "\n")
                    # rerr.write('correct : %s'%label + "\n")
                    # rerr.write('guess : %s'%l + "\n")
#                elif label!='B-PER' and l=='B-PER':
                    # perr.write('=== line %d ==='%sum + "\n")
                    # perr.write('word : %s'%word + "\n")
                    # perr.write('correct : %s'%label + "\n")
                    # perr.write('guess : %s'%l + "\n")
            i+=1
            reflist.append(label)
            testlist.append(l)
        if not miss>0:
            correct +=1
    print '\naccuracy total '
    print 'correct per sent : %d '%correct
    print 'total sentences : %d '%sum
    print 'accuracy : %f percent'%((float(correct))/float(sum)*100)
    cm = nltk.ConfusionMatrix(reflist, testlist)
    print cm.pp(sort_by_count=True, show_percents=False)
    rerr.close()
    perr.close()
    
def test(text):
    pkl_file = 'binary/%s' % pickle_data['PER']
    cp = load_pickle(pkl_file)
    
    tokens = nltk.pos_tag(nltk.word_tokenize(text))
    print tokens
    result = cp.parse(tokens)
    print result

def sub_leaves(tree, node):
    """Return the leaf lists of every subtree of ``tree`` labelled ``node``."""
    matching = tree.subtrees(lambda s: s.node == node)
    return [subtree.leaves() for subtree in matching]

def chunking(parsed, chunker, node):
    """Chunk ``parsed`` (a list of (word, tag) pairs) with ``chunker``
    and return the leaves of every ``node`` chunk."""
    chunk_result = chunker.parse(Tree('S', parsed))
    return sub_leaves(chunk_result, node)

def getPerson(parsed, person_so_far):
    """Extract PER chunks (optionally preceded by TITLE chunks) from
    ``parsed``.

    ``person_so_far`` is a running string of names already seen; a name
    found in it is treated as an alias rather than a new Person.
    Returns (list_of_new_Person, updated_person_so_far).
    """
    chunker = RegexpParser(r'''
                            PER:
                                {<B-TITLE>*<B-PER>+}
                            ''')
    result = chunking(parsed,chunker,'PER')
    person_list = []

    for chunk in result:  # renamed from ``tuple`` (shadowed the builtin)
        name,title = splitNameTitle(chunk)
        # BUG FIX: str.find returns 0 for a match at the very start of
        # person_so_far; the original ``> 0`` treated that as "not found".
        if person_so_far.find(name) >= 0:
            # NOTE(review): this only updates persons collected earlier in
            # this same call (person_list starts empty) -- confirm intended.
            for person in person_list:
                person.cek_for_aliases(name)
        else:
            person_so_far = person_so_far + ' ' + name
            newPerson = Person(name, title)
            person_list.append(newPerson)
    return person_list,person_so_far

def splitNameTitle(person):
    """Split a chunk of (token, label) pairs into a name and its titles.

    B-PER tokens are joined into a single name string; each consecutive
    run of B-TITLE tokens becomes one title string.  Returns
    (name, list_of_titles).
    """
    name_parts = []
    pending_title = []
    titles = []
    for token, label in person:
        if label == 'B-PER':
            name_parts.append(token)
            # A name token flushes any title run collected so far.
            if pending_title:
                titles.append(' '.join(pending_title))
                pending_title = []
        elif label == 'B-TITLE':
            pending_title.append(token)
    # Flush a trailing title run with no following name token.
    if pending_title:
        titles.append(' '.join(pending_title))
    return ' '.join(name_parts), titles

def getOrganization(parsed):
    """Return the ORG chunk strings found in ``parsed``."""
    chunker = RegexpParser(r'''
                            ORG:
                                {<B-ORG>+}
                            ''')
    org = []
    for chunk in chunking(parsed, chunker, 'ORG'):
        org.append(' '.join(word for word, label in chunk))
    return org

def getLocation(parsed):
    """Return the LOC chunk strings found in ``parsed``."""
    chunker = RegexpParser(r'''
                            LOC:
                                {<B-LOC>+}
                            ''')
    return [' '.join(word for word, label in chunk)
            for chunk in chunking(parsed, chunker, 'LOC')]

def get_aliases(listPerson):
    """Map every alias -- and each canonical name itself -- to the
    person's canonical name.

    (Fixed the original's mixed tab/space indentation, which is fragile
    under Python 2 and a hard error under Python 3.)
    """
    aliases = {}
    for person in listPerson:
        for alias in person.person_alias:
            aliases[alias] = person.person_name
        aliases[person.person_name] = person.person_name
    return aliases

if __name__ == '__main__':
    # Other entry points (training / evaluation):
    #   create_pickle()
    #   condition_measure(label='PER', test_path="FOLD/F0/test")
    # Quick smoke test of the PER chunker on one sentence.
    # (Replaced the original tab indentation with spaces to match the
    # rest of the file.)
    test("Calling  Ambiga a \"dangerous Hindu woman\", Perkasa leaders backed by pro-UMNO  members of parliament also raised the spectre of a racial riot should  organisers press on with the rally.")
