#encoding=utf-8
'''
Created on 2011-11-13

@author: guanyw

python version:2.6


'''
from corpus import rte_pairs_from_train_pairs, rte_pairs_from_1ton_pairs, \
    tagged_train_1ton_file_name, tagged_train_pairs_file_name, \
    tagged_test_1ton_file_name
from corpus.pair_writer import write_pairs_1toN
from corpus.postprocess import is_number_match, is_first_ne_match
from corpus.preprocess import sentence_to_triple_list
import nltk
import random


class RTEFeatureExtractorZh:
    """
    This builds a bag of words for both the text and the hypothesis after
    throwing away some stopwords, then calculates overlap and difference.
    """
    
    def __init__(self, rtepair, stop=True):
        """
        @param rtepair: a L{RTEPair} from which features should be extracted
        @param stop: if C{True}, stopwords are thrown away.
        @type stop: C{bool}
        """
        self.stop = stop
        self.stopwords = open('stoplist.txt').read().split('\n')
        self.stopwords = set([unicode(wd) for wd in self.stopwords])
#        self.stopwords = set([])
        
        self.negwords = set([u'不是', u'并非',u'未',u'不',u'非',u'否'])
        self.negwords = set([])
        #Get the set of word types for text and hypothesis
#        self.txt_tokens,self.txt_nes = get_words_and_nes(rtepair.text)
#        self.hyp_tokens,self.hyp_nes = get_words_and_nes(rtepair.hyp)
#        self.text_tokens = list(rtepair.text)
#        self.hyp_tokens = list(rtepair.hyp)

        stop_tag_list = ['/wp','/u','/x']
#        stop_tag_list = []
        self.text_tokens = [wd for wd,pos,nes in sentence_to_triple_list(rtepair.text) 
                            if pos not in stop_tag_list]
        self.hyp_tokens = [wd for wd,pos,nes in sentence_to_triple_list(rtepair.hyp) 
                           if pos not in stop_tag_list]
        self.text_words = set(self.text_tokens)
        self.hyp_words = set(self.hyp_tokens)
        
        self.txt_nes = [wd for wd,pos,nes in sentence_to_triple_list(rtepair.text) if nes!='/O' ]
        self.hyp_nes = [wd for wd,pos,nes in sentence_to_triple_list(rtepair.hyp) if nes!='/O' ]
#        self.txt_nes = merge_ne(sentence_to_triple_list(rtepair.text))
#        self.hyp_nes = merge_ne(sentence_to_triple_list(rtepair.hyp))

#        self.text_nes = set([])
#        self.hyp_nes = set([])
                
        self.text_nes = set(self.txt_nes)
        self.hyp_nes = set(self.hyp_nes)
        
        if self.stop:
            self.text_words = self.text_words - self.stopwords
            self.hyp_words = self.hyp_words - self.stopwords
            
        self._overlap = self.hyp_words & self.text_words
        self._hyp_extra = self.hyp_words - self.text_words
        self._txt_extra = self.text_words - self.hyp_words
        
        
        self._ne_overlap = self.hyp_nes & self.text_nes
        self._hyp_ne_extra = self.hyp_nes - self.text_nes
#        self._txt_ne_extra = self.text_nes - self.hyp_nes
        
            
    
    def overlap(self, toktype, debug=False):
        """
        Compute the overlap between text and hypothesis.
        
        @param toktype: distinguish Named Entities from ordinary words
        @type toktype: 'ne' or 'word'
        """
        if toktype == 'ne':
            if debug: print "ne overlap", self._ne_overlap
            return self._ne_overlap
        elif toktype == 'word':
            if debug: print "word overlap", self._overlap - self._ne_overlap
            return self._overlap - self._ne_overlap
        else:
            raise ValueError("Type not recognized:'%s'" % toktype)
    
    def hyp_extra(self, toktype, debug=True):
        """
        Compute the extraneous material in the hypothesis.
        
        @param toktype: distinguish Named Entities from ordinary words
        @type toktype: 'ne' or 'word'
        """
        ne_extra = self._hyp_ne_extra
        if toktype == 'ne':
            return ne_extra
        elif toktype == 'word':
            return self._hyp_extra - ne_extra
        else:
            raise ValueError("Type not recognized: '%s'" % toktype)
        
def rte_features(rtepair):
    """Map an RTE pair to its dictionary of numeric entailment features."""
    ex = RTEFeatureExtractorZh(rtepair)
    return {
        'word_overlap': len(ex.overlap('word')),
        'word_hyp_extra': len(ex.hyp_extra('word')),
        'ne_overlap': len(ex.overlap('ne')),
        'ne_hyp_extra': len(ex.hyp_extra('ne')),
        # Negation-word counts in text and hypothesis.
        'neg_txt': len(ex.negwords & ex.text_words),
        'neg_hyp': len(ex.negwords & ex.hyp_words),
    }
           

def show_errors(classifier,test_data,features=rte_features):
    errors = []
    for (pair,val) in test_data:
        guess = classifier.classify(features(pair))
        if guess != val:
            errors.append((guess,pair))
            
    print "errors:%d/total:%d"%(len(errors),len(test_data))
    for (guess,pair) in sorted(errors):
        print "guess:%d txt:%s\thyp:%s" % (guess,pair.text,pair.hyp)

        
def show_accuracy(guess_list, labeled_list):
    """
    Print a confusion matrix plus precision, recall and F-score for two
    parallel label lists (truthy value = positive class).

    @param guess_list: predicted labels
    @param labeled_list: gold labels, same length as C{guess_list}
    @raise AssertionError: if the two lists differ in length
    """
    # BUG FIX: the original line was ``assert len(guess_list),len(labeled_list)``
    # which asserts a single value with a message, so unequal lengths
    # were never detected.  Compare the lengths explicitly.
    assert len(guess_list) == len(labeled_list)
    tp = fp = tn = fn = 0
    for guess, labeled in zip(guess_list, labeled_list):
        if guess and labeled:
            tp += 1
        elif guess and not labeled:
            fp += 1
        elif not guess and labeled:
            fn += 1
        else:
            tn += 1

    print('    T  \t    F')
    print('P:%4s\t|%4s'%(tp,fp))
    print('N:%4s\t|%4s'%(tn,fn))

    try:
        pcs = float(tp)/(tp+fp)
        rcl = float(tp)/(tp+fn)

        print('Precision: %6.4f'%pcs)
        print('Recall:    %6.4f'%rcl)
        # Harmonic mean of precision and recall.
        print('F-score:   %6.4f'%(2*pcs*rcl/(pcs+rcl)))
    except ZeroDivisionError:
        # Metrics are undefined when there are no predicted or no gold
        # positives; report and continue (narrowed from a bare except).
        import traceback;traceback.print_exc()
        
        
def classify_test(classifier, paires, features=rte_features, is_show_accuracy=True):
    """
    Classify each pair in place (overwriting C{pair.value}), then run the
    number-match and first-NE-match post-filters on positive verdicts.
    Optionally prints accuracy against the original labels and returns
    the (mutated) pair list.
    """
    gold = [p.value for p in paires]
    for pair in paires:
        pair.value = classifier.classify(features(pair))
        # Positive verdicts must also survive both post-filters.
        if pair.value == 1:
            pair.value = is_number_match(pair.text, pair.hyp)
        if pair.value == 1:
            pair.value = is_first_ne_match(pair.text, pair.hyp)
    guesses = [p.value for p in paires]

    if is_show_accuracy:
        show_accuracy(guesses, gold)
    return paires
    
    
def classify_and_save(classifier, pairs, fname_pre):
    """
    Classify C{pairs} and, when a filename prefix is given, write the
    labelled pairs out in full / positive-only / pure variants.
    """
    labelled = classify_test(classifier, pairs)
    if not fname_pre:
        return
    for suffix, verbosity in (('_full.xml', 'full'),
                              ('_pos.xml', 'positive'),
                              ('_pure.xml', 'pure')):
        write_pairs_1toN(labelled, fname_pre + suffix, verbose=verbosity)

def rte_classifier(trainer, with_test=True, features=rte_features):
    """
    Train a classifier for RTE pairs from the two tagged training corpora.

    @param trainer: callable taking [(feature_dict, label), ...] and
        returning a trained classifier, e.g. nltk.NaiveBayesClassifier.train
    @param with_test: if C{True}, train on 60% of each corpus and print
        accuracy on the remaining 40%; otherwise train on everything.
    @param features: feature-extraction function applied to each pair.
    @return: the trained classifier
    """
    # (Removed an unused ``paires = []`` local from the original.)
    paires1 = [(pair, pair.value)
               for pair in rte_pairs_from_train_pairs.pairs(tagged_train_pairs_file_name)]
    paires2 = [(pair, pair.value)
               for pair in rte_pairs_from_1ton_pairs.pairs(tagged_train_1ton_file_name)]

    if with_test:
        # 60/40 split taken per-corpus so both corpora contribute to
        # train and test.  NOTE(review): the pairs are not shuffled, so
        # the split depends on corpus file order — confirm intended.
        train_len1 = int(len(paires1) * 0.6)
        train_len2 = int(len(paires2) * 0.6)

        train = paires1[:train_len1] + paires2[:train_len2]
        test = paires1[train_len1:] + paires2[train_len2:]

        # Train up a classifier.
        classifier = trainer([(features(pair), label) for (pair, label) in train])
        # Run the classifier on the held-out pairs (prints a confusion
        # matrix via classify_test/show_accuracy).
        test_pairs = [pair for pair, label in test]
        classify_test(classifier, test_pairs, features)
    else:
        classifier = trainer([(features(pair), label)
                              for (pair, label) in paires1 + paires2])

    return classifier


if __name__ == '__main__':
    # Train on the tagged training corpora (with a held-out evaluation
    # printed along the way), then label the 1-to-N test set and write
    # the positively-labelled pairs to ../result.xml.
    classifier = rte_classifier(nltk.NaiveBayesClassifier.train)

    test_pairs = rte_pairs_from_1ton_pairs.pairs(tagged_test_1ton_file_name)
    labelled = classify_test(classifier, test_pairs, is_show_accuracy=False)
    write_pairs_1toN(labelled,'../result.xml',verbose='pure')