#!/usr/bin/env python
import os
import sys

import profiles
from text_utils import *
from random import sample

from modifiers import NEG_WORDS, SPEC_WORDS
from triggers import TARGET_ROLES, TARGET_ROLES_dict

def readData(filename):
    """Parse a BioNLP shared-task annotation file (.a1/.a2).

    Returns a list of records, one per recognised line:
      * "T" (text-bound) lines -> [id, type, start(int), end(int), mention]
      * "E"/"M" (event/modifier) lines -> [id, arg1, arg2, ...]
    Blank lines and lines of any other kind (e.g. comments) are skipped.
    """
    records = []
    with open(filename) as file_:   # 'with' guarantees the file is closed
        for line in file_:
            line = line.strip()
            if not line:
                # BUG FIX: a blank line used to crash on line[0] (IndexError).
                continue
            rec = []
            if line[0] == "T":
                id_, space_sep, mention = line.split('\t')
                rec.append(id_)
                rec.extend(space_sep.split())
                rec.append(mention)
                rec[2] = int(rec[2])   # character start offset
                rec[3] = int(rec[3])   # character end offset
            elif line[0] in "EM":
                id_, space_sep = line.split('\t')
                rec.append(id_)
                rec.extend(space_sep.split())
            if rec:
                records.append(rec)
    return records

def writeData(filename, data):
    """Write records (the shape produced by readData) in annotation format.

    "T" records are emitted as 'id<TAB>type start end<TAB>mention',
    "E"/"M" records as 'id<TAB>space-joined args', and anything else as
    a '#'-prefixed, tab-joined comment line.
    """
    with open(filename, 'w') as file_:   # 'with' guarantees the file is closed
        for rec in data:
            if rec[0][0] == "T":
                # BUG FIX: %-formatting needs a tuple; applying it to a
                # list raised "TypeError: not enough arguments".
                file_.write("%s\t%s %d %d\t%s\n" % tuple(rec))
            elif rec[0][0] in "EM":
                file_.write("%s\t%s\n" % (rec[0], " ".join(rec[1:])))
            else:
                file_.write("#%s\n" % "\t".join(rec))


def compare(results, criterion):
    """Score predicted "T" records against the gold criterion.

    A prediction matches when some gold "T" record has the same start
    and end offsets and (when the prediction carries a type) the same
    type, case-insensitively.  Returns (found, false_found): two lists
    of per-role counts indexed through TARGET_ROLES_dict.
    """
    found = [0 for _ in TARGET_ROLES]
    false_found = [0 for _ in TARGET_ROLES]
    for predicted in results:
        if predicted[0][0] != "T":
            continue
        _id, p_type, p_start, p_end, _mention = predicted
        matched = False
        for gold in criterion:
            if gold[0][0] != "T":
                continue
            g_type, g_start, g_end = gold[1:4]
            if int(g_start) != p_start or int(g_end) != p_end:
                continue
            # FIXME: What if the type_ is empty?
            if p_type and g_type.lower() != p_type.lower():
                continue
            matched = True
            break
        bucket = found if matched else false_found
        bucket[TARGET_ROLES_dict[predicted[1]]] += 1

    return (found, false_found)


def analysis_abstract(pmid, proteins):
    """Parse one abstract into a list of LiveSentence objects.

    ``pmid`` is a path prefix to which 'txt' (raw abstract text) and
    'CoNLL' (tokeniser output: one token per line, 8 tab-separated
    columns, sentences separated by blank lines) are appended.
    ``proteins`` are "T" records sorted by start offset; tokens whose
    span a protein covers get a 'protein' attribute set to its id.

    Returns the list of sentences.
    """
    # input data for the function
    abstract_ = open(pmid + 'txt')
    abstract = abstract_.read()
    abstract_.close()

    genia_ = open(pmid + 'CoNLL')

    pr_idx = 0  # cursor into the (offset-sorted) proteins list

    result = []
    # last_index: char offset inside the current sentence;
    # l_s_idx: char offset of the current sentence within the abstract.
    last_index = l_s_idx = 0
    pr = None   # most recently built token (passed as 'previous' back-link)

    sent_list = []
    for line in genia_:
        line = line.strip()
        if line:
            sent_list.append(line)
        elif sent_list:
            # Blank line: flush the accumulated token lines as one sentence.
            s = LiveSentence(offset=l_s_idx)
            last_index = 0
            chunk_nums = {}   # token number -> LiveWord/LiveSymbol
            parent_dic = {}   # token number -> parent token number (column 7)
            for l in sent_list:
                (num, word, base, POS, chunk, NE, unk1, unk2) = l.split('\t')
                word = word.replace('``', '"')
                num = int(num)
                unk1 = int(unk1)
                # Re-locate the token in the raw text to get exact offsets.
                new_index = abstract.find(word, last_index+l_s_idx) - l_s_idx
                if new_index > last_index:
                    # Gap between tokens: keep the skipped characters as a space node.
                    s.append(LiveSpace(abstract[l_s_idx+last_index:l_s_idx+new_index], 
                             previous=pr, offset=last_index))
                if POS == 'O' and unk2 == 'P':
                    # NOTE(review): presumably punctuation tokens — confirm
                    # against the tagger's column documentation.
                    pr = LiveSymbol(word, offset=new_index)
                else:
                    neg =  word.lower() in NEG_WORDS
                    stem = stemmer.stemWord(base.lower())
                    # NOTE(review): 'spec' is computed but never used below.
                    spec = stem in SPEC_WORDS
                    pr = LiveWord(word,
                                  attrs={'word': word, 
                                         'base': base, 'POS': POS,
                                         'chunk': chunk, 'NE': NE,
                                         'unk2': unk2,
                                         'stem': stem,
                                         'neg': neg,
                                         'debug': 'last_index=%d, l_s_idx=%d' % (last_index, l_s_idx)},
                                  offset=new_index)
                                  # We will probably need unk1 and unk2 for tree parsing as a main attribute.
                s.append(pr)
                chunk_nums[num] = pr
                parent_dic[num] = unk1
                last_index = new_index + len(word)

            # Wire up parent links; parent number 0 means root / no parent.
            for num, p in parent_dic.items():
                if p > 0:
                    chunk_nums[num].set_parent(chunk_nums[p])
            
            l_s_idx += s.number_of_characters()
            sent_list = []
            
            # Attach protein ids to the tokens of every protein whose
            # start offset falls before the end of this sentence.
            while pr_idx < len(proteins) and proteins[pr_idx][2] < l_s_idx:
                pr_ch_idx = s.chunks_offset_to_index(proteins[pr_idx][2]-s.offset, proteins[pr_idx][3]-s.offset)
                for k in range(*pr_ch_idx):
                    s[k].attributes['protein'] = proteins[pr_idx][0]
                pr_idx += 1
    
            result.append(s)
        
    genia_.close()
    return result


class Evaluator:
    def __init__(self, train_directoris=[]):
        self.train_files = []
        self.sample_files = []
        for directory in train_directoris:
            self.add_train_director(directory)

    def add_train_director(self, directory):
        for f in os.listdir(directory):
            if not f.endswith('.a1'): continue
            self.train_files.append(os.path.join(directory, f[:-2]))

    def get_sample(self):
        if not self.sample_files:
            self.sample_files = sample(self.train_files, SAMPLE_SIZE)
        return self.sample_files


    def test(self, func, output_path, use_sample=False, ext=""):
        if use_sample:
            pmids = self.sample_files
        else:
            pmids = self.train_files

        os.mkdir(output_path)

        for pmid in pmids:

            print >> OUT, "**** PMID = %s ******************************" % pmid[-9:-1]

            proteins = readData(pmid + 'a1')
            proteins.sort(cmp=lambda x,y: x[2]-y[2])
            analysed_abstract = analysis_abstract(pmid, proteins)
            
            transcriptions = ()
            if os.path.exists(pmid + 'a2.t'):
                transcriptions = readData(pmid + 'a2.t')

            results = func(analysed_abstract, proteins, transcriptions) # calling the function
            writeData(os.path.join(output_path, "%sa2%s" % (pmid.split('/')[-1], ext)), results)


    def evaluate(self, func, output_path, use_sample=False, ext=""):
        """
        Returns a tuple; (recall, precision, fscore, total, found, false_found)
        """
        total = [0] * len(TARGET_ROLES)
        found = [0] * len(TARGET_ROLES)
        false_found = [0] * len(TARGET_ROLES)

        if use_sample:
            pmids = self.sample_files
        else:
            pmids = self.train_files

        os.mkdir(output_path)

        for pmid in pmids:
            
            print >> OUT, "**** PMID = %s ******************************" % pmid[-9:-1]
            
            criterion = []
            for line in readData(pmid + 'a2'):   # The .a2 files
                if func.validator_func(line):
                    criterion.append(line)
                    total[TARGET_ROLES_dict[line[1]]] += 1
            n = len(criterion)

            proteins = readData(pmid + 'a1')
            proteins.sort(cmp=lambda x,y: x[2]-y[2])
            analysed_abstract = analysis_abstract(pmid, proteins)

            triggers = []
            ms = []
            if USE_GOLD_TRIGGERS and os.path.exists(pmid + 'a2'):
                for l in readData(pmid + 'a2'):
                    if l[0][0] == 'T' and l[1].lower() != "entity":
                       triggers.append(l)
                    elif l[0] == 'M':
                       ms.append(l)

            results = func(analysed_abstract, proteins, triggers, ms) # calling the function
            writeData(os.path.join(output_path, "%sa2%s" % (pmid.split('/')[-1], ext)), results)
            
            # compare the result of the function with the criterion
            pmid_found, pmid_false_found = compare(results, criterion)
            for i in range(len(TARGET_ROLES)):
                found[i] += pmid_found[i]
                false_found[i] += pmid_false_found[i]


        for i in range(len(TARGET_ROLES)):
            print TARGET_ROLES[i], ":", total[i], found[i], false_found[i]


        return (0, 0, 0, 0, 0, 0)
        # if total == 0: return(0, 0, 0, total, found, false_found)
        # recall = float(found) / total
        # if (found + false_found) == 0: return (recall, 0, 0, total, found, false_found)
        # precision = float(found) / (found + false_found)
        # if (precision + recall) == 0: return (recall, precision, 0, total, found, false_found)
        # fscore = 2 * precision * recall / (precision + recall)
        # 
        # return (recall, precision, fscore, total, found, false_found)


def evaluable(validator_func, ext=""):
    """Decorator factory marking a function as evaluable.

    Tags the decorated function with ``validator_func`` (the predicate
    that selects the gold lines it is scored against) and ``ext`` (the
    output-file extension suffix), then returns it unchanged.
    """
    def decorate(func):
        func.validator_func = validator_func
        func.ext = ext
        return func
    return decorate


def apply_profile(globs, do_print=False):
    """Load a settings profile into the namespace *globs*.

    The profile name comes from sys.argv[1] (falling back to "DEFAULT")
    and is looked up as an attribute of the ``profiles`` module; the
    resulting dict is merged into *globs*.  With *do_print* set, the
    settings are echoed to OUT (values truncated to 70 characters).
    Returns the profile dict.
    """
    profile = "DEFAULT"
    if len(sys.argv) > 1:
        profile = sys.argv[1]
    profile_dict = getattr(profiles, profile)
    globs.update(profile_dict)
    if do_print:
        print >> OUT, "Profile:", profile
        for k, v in profile_dict.items():
            vv = str(v)
            if len(vv) > 70:
                # Truncate long values to keep the report readable.
                vv = vv[:70] + "..."
            print >> OUT, "    %s: %s" % (k, vv)
        print >> OUT, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
    return profile_dict

def module_evaluate(globs):
    """Evaluate every function in *globs* tagged by @evaluable.

    For each attribute carrying a ``validator_func``: score it on the
    random sample first (if one has been drawn), then — unless TEST is
    set — on the full training set, printing a timing/score table to
    OUT each time.  NOTE(review): Evaluator.evaluate currently returns
    all zeros, so the printed percentages are placeholders.
    """
    from time import time
    for attr_name, attr in globs.items():
        if hasattr(attr, "validator_func"):
            
            # Output-file extension attached by @evaluable (may be absent).
            ext = hasattr(attr, "ext") and attr.ext or ""

            if evaluator.sample_files:
                st = time()
                recall, precision, fscore, total, found, false_found = \
                                evaluator.evaluate(attr, os.path.join(OUTPUT_PATH, attr_name+"_smpl"), True, ext)
                end = time()
                print >> OUT, "        Function    Recall Precision   F-Score     Total     Found    -Found   Time(s)"
                print >> OUT, "----------------   -------   -------   -------   -------   -------   -------   -------"
                print >> OUT, "%16s   %6.1f%%   %6.1f%%   %6.1f%%   %7d   %7d   %7d   %7d" % (
                    attr_name+"_smpl", recall*100, precision*100, fscore*100, total, found, false_found, end-st)
            OUT.flush()
            if TEST: continue
            st = time()
            recall, precision, fscore, total, found, false_found = \
                                evaluator.evaluate(attr, os.path.join(OUTPUT_PATH, attr_name), False, ext)
            end = time()
            print >> OUT, "        Function    Recall Precision   F-Score     Total     Found    -Found   Time(s)"
            print >> OUT, "----------------   -------   -------   -------   -------   -------   -------   -------"
            print >> OUT, "%16s   %6.1f%%   %6.1f%%   %6.1f%%   %7d   %7d   %7d   %7d" % (
                attr_name, recall*100, precision*100, fscore*100, total, found, false_found, end-st)
            OUT.flush()

                
# Module initialisation: merge the selected profile into this module's
# globals (expected to supply names such as TRAIN_DIR, OUT, SAMPLE_SIZE),
# then build the shared Evaluator over the configured training directory.
apply_profile(globals())
evaluator = Evaluator([TRAIN_DIR])
