#!/usr/bin/env python

from utils import *
#from neg_words import *
from jwords import INTERACTION_STEMS
from triggers import TRIGGER_STEMS_LOGARITHMIC as TRIGGER_STEMS, TARGET_ROLES, TARGET_ROLES_dict, TRIGGER_STEMS_SIMPLE, NOT_TRIGGER_STEMS_SIMPLE
from freqs import FREQUENCIES_LOGARITHMIC as FREQUENCIES

from numpy import array
from monte.arch.crf import ChainCrfNNIsl #all ready-to-use models reside in monte.arch

# TODO
# One CRF output tag per trigger role, plus tag 0 for "not a trigger".
NUMBER_OF_TAGS = len(TARGET_ROLES) + 1
# Per-token feature vector length: 4 scalar features (protein flag, stem
# log-frequency, interaction-stem flag, proteins-in-sentence count), a
# len(TARGET_ROLES)-long trigger weight vector, and a POS_size one-hot POS
# encoding.  See Bionlp09_1.get_feature_vector_list for the exact layout.
FEATURE_SIZE = 4 + len(TARGET_ROLES) + POS_size

def _evaluator(args):
    return (args[0][0] == "T" and args[1].lower() != "entity") or args[0][0] == "E"

class Bionlp09_1:
    """BioNLP'09 task-1 pipeline: CRF-based trigger detection followed by
    rule-based argument attachment.

    Each sentence token gets a FEATURE_SIZE-long feature vector; a chain CRF
    tags tokens with trigger roles (0 = no trigger), and the tag sequence is
    then turned into .a2-style ("T..." trigger / "E..." event) tuples.
    """

    def __init__(self, crf=None):
        # Reuse a previously trained (e.g. unpickled) CRF when given;
        # otherwise build a fresh one sized by the module-level constants.
        if crf:
            self.crf = crf
        else:
            self.crf = ChainCrfNNIsl(FEATURE_SIZE, NUMBER_OF_TAGS, RADIUS)
        self.trainer = TRAINER(self.crf, *TRAINER_PARAMS)
        # Pre-analyse every known trigger phrase once so sentences can be
        # searched for them in get_feature_vector_list().
        self.verbs = []
        for v in TRIGGER_STEMS.keys():  # TODO
            self.verbs.append(analysis_sentence(v))

        # SAMPLE_SIZE, evaluator, etc. are globals injected by the profile
        # (see apply_profile in the __main__ section).
        if SAMPLE_SIZE:
            self.train_files = evaluator.get_sample()
        else:
            self.train_files = evaluator.train_files
        

    def get_feature_vector_list(self, sentence):
        """Return one FEATURE_SIZE-long feature list per word of *sentence*.

        Index layout (all remaining slots are 0):
          0                        protein-mention flag
          1 .. len(TARGET_ROLES)   per-role trigger weight vector, copied from
                                   TRIGGER_STEMS for tokens covered by a
                                   matched trigger phrase
          1 + len(TARGET_ROLES)    log-frequency of the token's stem
          2 + len(TARGET_ROLES)    interaction-stem flag
          3 + len(TARGET_ROLES)    number of protein mentions in the sentence
          4 + len(TARGET_ROLES)+i  one-hot POS tag (i = POS_dict[POS])

        Side effect: stamps a 'verb_vector' attribute on tokens covered by a
        trigger phrase match.
        """
        result = []

        sentence.prepare_for_unordered_find()
        
        # Mark every token covered by an occurrence of a known trigger phrase
        # with that phrase's per-role weight vector.
        for v in self.verbs:
            mention_index_list = sentence.find_unordered_subtext_all(v)
            for mention, index in mention_index_list:
                v_ch_idx = sentence.chunks_offset_to_index(index, index+len(mention))
                for k in range(v_ch_idx[0], v_ch_idx[1]):
                    sentence[k].attributes['verb_vector'] = TRIGGER_STEMS[v.render_raw()]
                

        words = sentence.get_words()

        # Sentence-level feature: how many protein mentions it contains.
        proteins_in_sentence = 0
        for token in words:
            if 'protein' in token.attributes:
                proteins_in_sentence += 1

        for token in words:
            stem = token.attributes['stem']
            feature = [0] * FEATURE_SIZE
            if 'protein' in token.attributes:
                feature[0] = 1
            if 'verb_vector' in token.attributes:
                for i in range(len(token.attributes['verb_vector'])):
                    feature[i+1] = token.attributes['verb_vector'][i]
            if stem in FREQUENCIES:
                feature[1+len(TARGET_ROLES)] = FREQUENCIES[stem]
            if stem in INTERACTION_STEMS:
                feature[2+len(TARGET_ROLES)] = 1
            feature[3+len(TARGET_ROLES)] = proteins_in_sentence

            if 'POS' in token.attributes and token.attributes['POS'] in POS_dict:
                feature[4+len(TARGET_ROLES)+POS_dict[token.attributes['POS']]] = 1
                        
            result.append(feature)
            
        return result
    

    def train(self):
        """Extract (features, gold tag sequence) pairs from every training
        abstract, then run TRAIN_STEPS optimisation steps of the trainer,
        logging the CRF cost after each step."""
        input_output_pairs = []
        cnt = 0
        for pmid in self.train_files:
            cnt += 1
            print >> OUT, cnt, "Extracting features from", pmid
            proteins = readData(pmid + 'a1')
            target = readData(pmid + 'a2')
            # Keep only genuine trigger terms ("T..." lines that are not bare
            # Entity annotations), sorted by start offset so they can be
            # consumed sentence by sentence below.
            target = [t for t in target if t[0][0] == "T" and t[1].lower() != "entity"]
            target = sorted(target, cmp=lambda x,y: x[2]-y[2])
            tr_idx = 0
            
            abstract_ = open(pmid + 'txt')
            abstract = abstract_.read()
            abstract_.close()
            
            sentences = analysis_abstract(pmid, proteins)
            
            for sentence in sentences:
                input = array(self.get_feature_vector_list(sentence))

                output = [0]*len(input)
                # Consume every gold trigger that starts before this
                # sentence's end offset and stamp the covered tokens with the
                # trigger's role id.
                l_s_idx = sentence.offset + sentence.number_of_characters()
                while tr_idx < len(target) and target[tr_idx][2] < l_s_idx:
                    tr_ch_idx = sentence.chunks_offset_to_index(target[tr_idx][2]-sentence.offset, target[tr_idx][3]-sentence.offset)
                    for k in range(*tr_ch_idx):
                        sentence[k].attributes['CRF_OUT'] = TARGET_ROLES_dict[target[tr_idx][1]]
                    tr_idx += 1

                # Shift role ids by one: output tag 0 means "no trigger".
                index = 0
                for token in sentence.get_words():
                    if 'CRF_OUT' in token.attributes:
                        output[index] = token.attributes['CRF_OUT'] + 1
                    index += 1

                output = array(output)

                input_output_pairs.append((input, output))

        print >> OUT, "Feature extraction over, start training." # :p:*
        OUT.flush()
        for c in range(TRAIN_STEPS):
            self.trainer.step(input_output_pairs, 0.001) # TODO: tune
            print >> OUT, self.crf.cost(input_output_pairs, 0.001)
            OUT.flush()


    @evaluable(lambda args: args[0][0] == "T" and args[1].lower() != "entity", '.t1')
    def test(self, abstract, proteins, triggers=None, ms=None):
        """Predict triggers and events for one abstract.

        abstract  -- iterable of analysed sentences
        proteins  -- .a1 protein annotations (used for numbering and Themes)
        triggers  -- optional gold triggers; when given they replace the
                     CRF's Viterbi predictions (oracle mode)
        ms        -- unused here; presumably kept for interface compatibility
                     with other models — TODO confirm

        Returns a list of .a2-style tuples: ("T<k>", type, start, end, text)
        for triggers and ("E<k>", "Type:T<k>", "Theme:...") for events.
        """
        result = []

        # Continue term numbering after the last protein's T-id.
        t_seq = 1
        e_seq = 1
        if proteins:
            t_seq = int(proteins[-1][0][1:]) + 1

        pr_idx = 0 # Actually trigger_index but :P
        # NOTE(review): sdfdf is a debug counter (sentences with at least one
        # predicted trigger) printed at the end — consider removing.
        sdfdf = 0
        for sentence in abstract:
            same_tag_parts = []

            raw = sentence.render_raw()
            words = sentence.get_words()
            n = len(words)
 
            if triggers:
                # Oracle mode: project the supplied gold triggers onto token
                # positions instead of running the CRF.
                predictions = [0]*n
                l_s_idx = sentence.offset + sentence.number_of_characters()
                while pr_idx < len(triggers) and triggers[pr_idx][2] < l_s_idx:
                    pr_ch_idx = sentence.chunks_offset_to_index(triggers[pr_idx][2]-sentence.offset, triggers[pr_idx][3]-sentence.offset, False)
                    # NOTE(review): the -1 looks like a compensation for the
                    # non-strict mode of chunks_offset_to_index — confirm it
                    # is not an off-by-one.
                    for k in range(pr_ch_idx[0]-1, pr_ch_idx[1]):
                        predictions[k] = TARGET_ROLES_dict[triggers[pr_idx][1]] + 1
                    pr_idx += 1

            else: # Use the CRF
                input = array(self.get_feature_vector_list(sentence))
                predictions = self.crf.viterbi(input)
            
            if sum(predictions): sdfdf += 1
               #print ["%s:%d" % (str(w), w.offset+sentence.offset) for w in words]
               #print predictions
               #print triggers
               #print "--------------------------"
 
            # Collapse the per-token tag sequence into maximal runs of one
            # non-zero tag: (first_token, last_token, tag).
            first_same_tag = 0
            for i in range(1,n):
                if predictions[i] == predictions[first_same_tag]:
                     continue
                if predictions[first_same_tag] != 0:
                    same_tag_parts.append((first_same_tag, i-1, predictions[first_same_tag]))

                first_same_tag = i

            if predictions[first_same_tag] != 0:
                same_tag_parts.append((first_same_tag, n-1, predictions[first_same_tag]))

            trigger_types = {}                  # T-id -> predicted role (1-based)
            trigger_protein_in_subtree = {}     # T-id -> {protein: bool}
            trigger_protein_path_length = {}    # T-id -> {protein: tree distance}

            # Drop predicted spans that contain a known false-positive
            # ("not a trigger") phrase of the same predicted type.
            i = 0
            to_be_deleted = []
            for start, end, prediction in same_tag_parts:
                for simple_trigger in NOT_TRIGGER_STEMS_SIMPLE.keys():
                    if NOT_TRIGGER_STEMS_SIMPLE[simple_trigger] != prediction:
                        continue
                    split_trigger = simple_trigger.split()
                    mm = len(split_trigger)
                    for j in range(end-start-mm+2):
                        # NOTE(review): words[j] is indexed from the sentence
                        # start, not from `start` — looks like it should be
                        # words[start+j]; confirm.
                        if split_trigger[0] == words[j].render_raw().lower():
                            skip = False
                            # NOTE(review): fa_start is not assigned anywhere
                            # in this loop (it is only set in the later
                            # TRIGGER_STEMS_SIMPLE loop), so this raises
                            # NameError the first time a multi-word phrase
                            # matches — likely a copy-paste bug; confirm.
                            for fa_end in range(fa_start+1, fa_start+mm):
                                if split_trigger[fa_end-fa_start] != words[fa_end].render_raw().lower():
                                    skip = True
                            if not skip:
                                to_be_deleted.append(i)

                i += 1                

            # Delete from the back so earlier indices stay valid.
            for h in reversed(to_be_deleted):
                # NOTE(review): debug print — consider removing.
                print same_tag_parts[h]
                del same_tag_parts[h]
            
            # Dictionary lookup pass: add spans for simple trigger phrases
            # found verbatim in the sentence, unless they overlap an existing
            # predicted span.
            for simple_trigger in TRIGGER_STEMS_SIMPLE.keys():
                split_trigger = simple_trigger.split()
                mm = len(split_trigger)
                for j in range(len(words)-mm+1):
                    if split_trigger[0] == words[j].render_raw().lower():
                        fa_start = fa_end = j
                        skip = False
                        # Verify the remaining words of a multi-word phrase.
                        for fa_end in range(fa_start+1, fa_start+mm):
                            if split_trigger[fa_end-fa_start] != words[fa_end].render_raw().lower():
                                skip = True
                        if skip:
                            continue
                        collision = False
                        for start, end, prediction in same_tag_parts:
                            if ((fa_start >= start and fa_start <= end) or \
                                (start >= fa_start and start <= fa_end) or \
                                (fa_end >= start and fa_end <= end)):
                                collision = True
                        if not collision:
                            same_tag_parts.append((fa_start, fa_end, TRIGGER_STEMS_SIMPLE[simple_trigger] + 1))
                        # NOTE(review): `do` is never read — dead assignment.
                        do = False
                        fa_start = fa_end = -1
                        

            # Emit a "T..." tuple for every surviving span and precompute the
            # trigger/protein tree relations used for argument attachment.
            for start, end, prediction in same_tag_parts:

                # Trim stop-words (MOZAKHRAF) off the end of the span.
                while end > start and words[end].render_raw().lower() in MOZAKHRAF:
                    # NOTE(review): debug print — consider removing.
                    print words[end].render_raw().lower()
                    end -= 1

                ss = words[start].offset
                ee = words[end].offset + words[end].number_of_characters()
                s = sentence.offset + ss
                e = sentence.offset + ee
                # NOTE(review): this appends a debug record into the returned
                # result list; verify the downstream .a2 writer filters it
                # out, otherwise it corrupts the output.
                result.append(("start=%d, ss=%d, s=%d, end=%d, ee=%d, e=%d" % (start,ss,s,end,ee,e), words[start].attributes['debug']))
                result.append(("T%d" % t_seq, TARGET_ROLES[prediction-1], s, e, raw[ss:ee]))

                trigger_types[t_seq] = prediction
                trigger_protein_in_subtree[t_seq] = {}
                trigger_protein_path_length[t_seq] = {}

                trigger_set = set(words[start:end+1])

                # For every protein in the sentence record (a) whether the
                # trigger dominates it in the parse tree and (b) the shortest
                # tree path from any trigger token to the protein token.
                for node in words:
                    if 'protein' in node.attributes:
                        fathers = set(node.get_path_to_root())
                        pr = node.attributes['protein']
                    
                        trigger_protein_in_subtree[t_seq][pr] = \
                            (pr in trigger_protein_in_subtree[t_seq] and trigger_protein_in_subtree[t_seq][pr]) \
                            or ((trigger_set & fathers) and True)
                    
                        length = n 
                        if pr in trigger_protein_path_length[t_seq]:
                            length = trigger_protein_path_length[t_seq][pr]
                        for tr in trigger_set:
                            length = min(length, tr.path_length(node))
                        trigger_protein_path_length[t_seq][pr] = length


                t_seq += 1


            protein_event = {}
            for pr_line in proteins:
                pr = pr_line[0]
                protein_event[pr] = []

            event_trigger = {}
        
            # Argument attachment: per trigger type, METHOD_FLAGS selects a
            # Theme-selection strategy (1 = dominated-and-near proteins,
            # 2 = proteins within a distance margin of the nearest,
            # 4 = Binding variant with separate down/up thresholds).
            for trigger_num, trigger_type in trigger_types.items():
                protein_in_subtree = trigger_protein_in_subtree[trigger_num]
                protein_path_length = trigger_protein_path_length[trigger_num]
                protein_path_rank = sorted(protein_path_length.items(), cmp=lambda x,y: x[1]-y[1])
                m = len(protein_path_rank)
                
                pairs = set([])

                # all in the subtree
                if METHOD_FLAGS[trigger_type - 1][0] == 1:
                    tr = METHOD_FLAGS[trigger_type - 1][1]
                    for pr, l in protein_path_rank:
                        if protein_in_subtree[pr] and l <= protein_path_rank[min(tr, m-1)][1]:
                            result.append(("E%d" % e_seq,
                                           "%s:T%d" % (TARGET_ROLES[trigger_type-1], trigger_num), 
                                           "Theme:%s" % pr))
                            protein_event[pr].append(e_seq)
                            event_trigger[e_seq] = trigger_num
                            e_seq += 1
            
                # nearest
                elif METHOD_FLAGS[trigger_type - 1][0] == 2:
                    if protein_path_rank:
                        nearest_l = protein_path_rank[0][1]

                        for pr, l in protein_path_rank:
                            result.append(("E%d" % e_seq,
                                           "%s:T%d" % (TARGET_ROLES[trigger_type-1], trigger_num), 
                                           "Theme:%s" % pr))
                            protein_event[pr].append(e_seq)
                            event_trigger[e_seq] = trigger_num
                            e_seq += 1
                            # Stop once a protein is too much farther than
                            # the nearest one (ranks are sorted by distance).
                            if l - nearest_l > METHOD_FLAGS[trigger_type - 1][1]:
                                break

                # Binding
                elif METHOD_FLAGS[trigger_type - 1][0] == 4:
                    tr_down = METHOD_FLAGS[trigger_type - 1][1]
                    tr_up = METHOD_FLAGS[trigger_type - 1][2]
                    for pr, pr_d in protein_path_rank:
                        if (protein_in_subtree[pr] and 
                            pr_d <= protein_path_rank[min(tr_down, m-1)][1]) or \
                           pr_d <= protein_path_rank[min(tr_up, m-1)][1]:
                            result.append(("E%d" % e_seq,
                                           "%s:T%d" % (TARGET_ROLES[trigger_type-1], trigger_num),
                                           "Theme:%s" % pr))
                            protein_event[pr].append(e_seq)
                            event_trigger[e_seq] = trigger_num
                            e_seq += 1
                            # for pr2, pr2_d in protein_path_rank:
                            #     if pr != pr2 and (not ((pr2, pr) in pairs)) and \
                            #         ((protein_in_subtree[pr] and 
                            #           pr_d <= protein_path_rank[min(tr_down, m-1)][1]) or \
                            #          pr_d <= protein_path_rank[min(tr_up, m-1)][1]):
                            #         result.append(("E%d" % e_seq,
                            #                "%s:T%d" % (TARGET_ROLES[trigger_type-1], trigger_num),
                            #                "Theme:%s" % pr, "Theme2:%s" %pr2))
                            #         pairs.add((pr, pr2))
                            #         protein_event[pr2].append(e_seq)
                            #         event_trigger[e_seq] = trigger_num
                            #         e_seq += 1
 
 
            # Second pass for regulation-style triggers (METHOD_FLAGS code 5):
            # attach every already-emitted event as a Theme; fall back to the
            # nearest-protein strategy when no events exist.
            for trigger_num, trigger_type in trigger_types.items():
                protein_in_subtree = trigger_protein_in_subtree[trigger_num]
                protein_path_length = trigger_protein_path_length[trigger_num]
                protein_path_rank = sorted(protein_path_length.items(), cmp=lambda x,y: x[1]-y[1])
            
                if METHOD_FLAGS[trigger_type - 1][0] == 5:
                    fnd = False
                    for event in event_trigger.keys():
                        result.append(("E%d" % e_seq,
                                       "%s:T%d" % (TARGET_ROLES[trigger_type-1], trigger_num), 
                                       "Theme:E%d" % event))
                        # TODO: bunch of variables for the next round
                        e_seq += 1
                        fnd = True

                    if protein_path_rank and not fnd:
                        nearest_l = protein_path_rank[0][1]

                        for pr, l in protein_path_rank:
                            result.append(("E%d" % e_seq,
                                           "%s:T%d" % (TARGET_ROLES[trigger_type-1], trigger_num), 
                                           "Theme:%s" % pr))
                            protein_event[pr].append(e_seq)
                            event_trigger[e_seq] = trigger_num
                            e_seq += 1
                            if l - nearest_l > METHOD_FLAGS[trigger_type - 1][1]:
                                break                    

        # NOTE(review): debug print to stdout (not OUT) — consider removing.
        print sdfdf
        return result

    
if __name__ == "__main__":
    from time import time

    st = time()
    # Inject the experiment configuration (OUT, CRF, DO_TRAIN, TRAIN_STEPS,
    # FSCORE, OUTPUT_PATH, MAKE_ARCHIVE, ...) into this module's globals.
    apply_profile(globals(), True)
    # CRF is either None (train from scratch) or a previously pickled model.
    b = Bionlp09_1(CRF)
    if DO_TRAIN:
        print >> OUT, ". Training ...................................."
        b.train()
        print >> OUT, ". Saving the CRF .............................."
        import pickle
        # NOTE(review): pickle file opened in text mode 'w'; binary mode 'wb'
        # is normally required for pickles — presumably protocol 0 on a
        # Unix-only setup makes this work; confirm.
        crf_f = open("%s/crf_model.obj" % OUTPUT_PATH, 'w')
        pickle.dump(b.crf, crf_f)
        crf_f.close()
    

    end = time()
    print >> OUT, "Train Time: %d" % (end-st)
    if not FSCORE:
        # b.test may carry an output-extension attribute set by @evaluable.
        ext = hasattr(b.test, "ext") and b.test.ext or ""
        evaluator.test(b.test, os.path.join(OUTPUT_PATH, "CRF"), False, ext)
    else:
        module_evaluate({"CRF": b.test})
    
    # Optionally bundle the produced .t* files into a submission archive.
    if MAKE_ARCHIVE:
        os.chdir(os.path.join(OUTPUT_PATH, MAKE_ARCHIVE))
        if not FSCORE:
            os.system('cp ../../../questionnaire.txt .')
        os.system('tar cf ../archive.tar.gz -z *.t*')
    OUT.close()
