import re, custom_utils

class Candidate:
    """
    Contains information about a candidate set of location
    references. A list of these will get processed and modified by a
    set of heuristics within this classifier.
    """
    def __init__(self):
        """
        interval is a pair of co-ordinates between which the candidate
        words are present within the main string.

        This function also creates internal data structures with empty
        values (which will later be set by other functions directly).
        """
        #character (start, end) intervals at which the candidate
        #words occur within the main string
        self.interval_list = []
        #list of words (original capitalization) in candidate place name
        self.words = []
        #maps NE class name (e.g. "Location") -> accumulated score;
        #filled in externally by the classifier
        self.score = {}
        #one list of in-context words per occurrence in interval_list
        self.context_list = []

    def __repr__(self):
        """
        Debug representation: the occurrence intervals followed by the
        Location score (None when the classifier has not scored this
        candidate yet).
        """
        #use .get() so repr() cannot raise KeyError on an unscored
        #candidate -- a repr that raises is useless for debugging
        return str(self.interval_list) + " " + str(self.score.get("Location"))



class Classifier:
    """
    Does candidate generation and rule based classification.

    Candidates are maximal runs of capitalized words in the input
    string.  classify() scores each candidate against the trigger-term
    tables (before_set / after_set) and deletes every candidate that
    does not end up with a positive "Location" score.
    """

    def __init__(self, stringdata):
        """
        Input stringdata is the string in which candidate generation
        and classification of named entity recognizers will be done.
        """
        self.data = stringdata
        self.candidates = {} #tuple of lowercased words -> Candidate
        self.candidate_ranking = [] #populated (currently unsorted) by classify()
        #Keeping words and ratings in source code for now, will be
        #moving to a data file later.
        self.class_types = {"Location":"Location", "Organisation":"Organisation"}
        #Trigger tables: each entry is [term, NE class, score weight].
        #before_set terms add weight when found in the window BEFORE a
        #candidate occurrence (or inside the candidate itself);
        #after_set likewise for the window AFTER.
        #NOTE(review): multi-word terms such as "present at" can never
        #match -- classify() looks terms up one token at a time.
        self.before_set = [["present at", "Location", 1000],
                           ["present", "Location", 800],
                           ["at", "Location", 900],
                           ["to", "Location", 500],
                           ["University", "Organisation", 700],
                           ["Island", "Location", 700],
                           ["Islands", "Location", 700],
                           ["From", "Location", 700] ]
        self.after_set = [["city", "Location", 800],
                          ["area", "Location", 600],
                          ["Lake", "Location", 700],
                          ["vicinity", "Location", 600],
                          ["Hotel", "Organisation", 800],
                          ["Island", "Location", 700],
                          ["Islands", "Location", 700],
                          ["River", "Location", 700]]

        self.generate_candidates()

        return

    def generate_candidates(self):
        """
        Find all strings of capitalized words, merge adjacent ones and
        create an intervals list.

        Populates self.candidates keyed by the tuple of lowercased
        words, so repeated occurrences of the same name share a single
        Candidate and accumulate intervals in its interval_list.
        """
        #a "capitalized word" is an initial capital plus at least one
        #more letter, so single-letter tokens like "A" are skipped
        cap_word = re.compile(r"[A-Z][a-zA-Z]+")
        iterator = cap_word.finditer(self.data)
        cap_words_list = []
        prev = None #interval of the previous match, for merging
        for each in iterator:
            L = list(each.span())
            if prev!=None:
                #merge with the previous interval when only whitespace
                #separates the two capitalized words
                if self.data[prev[1]:L[0]].strip() == "":
                    cap_words_list[-1][1] = L[1]
                    prev = cap_words_list[-1]
                else:
                    prev = L
                    cap_words_list.append(L)
            else:
                cap_words_list.append(L)
                prev = list(L)
        for each in cap_words_list:
            words = self.data[each[0]:each[1]].split()
            lower_words = []
            for i in words:
                lower_words.append(i.lower())
            #tuple so the lowercased words can key self.candidates
            lower_words = tuple(lower_words)
            try:
                k = self.candidates[lower_words]
            except KeyError:
                self.candidates[lower_words] = Candidate()
                self.candidates[lower_words].words = words
                k = self.candidates[lower_words]
            k.interval_list.append(tuple(each))

            #NOTE(review): this re-zeroes the candidate's scores on
            #every repeated occurrence; harmless here only because
            #classify() runs after generation is complete.
            for i in self.class_types:
                k.score[i] = 0 #set each type of score to 0

        return

    def get_tokens_from_interval(self, interval):
        """
        Return the whitespace-split tokens of self.data covered by the
        (start, end) character interval.
        """
        return self.data[interval[0]:interval[1]].split()

    def get_tokens_from_position(self, position, num_tokens, get_left = True):
        """
        Note: Excludes the character at position "position" and does
        the fetching.

        Collects up to num_tokens cleaned tokens to the left (default)
        or right of "position" in self.data.  Tokens for which
        custom_utils.clean_word returns an empty string (presumably
        pure punctuation -- TODO confirm against custom_utils) are
        skipped and do not count towards num_tokens.  Fewer than
        num_tokens tokens may be returned near the string edges.
        """
        if get_left:
            data_string = self.data[0:position].split()
            i,L = len(data_string),[]
            #walk backwards so the nearest words are found first,
            #prepending to keep the result in document order
            while i>0:
                i -= 1
                s = custom_utils.clean_word(data_string[i])
                if len(s) >= 1:
                    L = [s]+L
                    if len(L) == num_tokens:
                        break
        else:
            data_string = self.data[position+1:].split()
            i,L = 0, []
            while i<len(data_string):
                s = custom_utils.clean_word(data_string[i])
                if len(s) >= 1:
                    L += [s]
                    if len(L) == num_tokens:
                        break
                i += 1

        return L


    def classify(self):
        """
        heuristic to classify capitalized words as named
        entities.

        Three passes over self.candidates:
          1. scoring: add trigger-term weights for words inside each
             candidate and in a 3-word window before/after each of its
             occurrences;
          2. delete candidates that scored only in non-Location classes;
          3. delete candidates with no Location score at all.
        Survivors are copied (unsorted) into self.candidate_ranking.
        """

        #flatten the trigger tables into term -> [class, weight] dicts
        before_dic = {}
        for each in self.before_set: before_dic[each[0].lower()] = [each[1], each[2]]
        after_dic = {}
        for each in self.after_set: after_dic[each[0].lower()] = [each[1], each[2]]

        print "before: ", before_dic
        print "after: ", after_dic

        for each in self.candidates:
            cap_tokens = self.candidates[each].words
            #processing the capitalized words for trigger terms
            #(a word inside the candidate may match either table,
            #e.g. "Island" appears in both)
            for it in cap_tokens:
                try:
                    p = before_dic[it.lower()]
                    self.candidates[each].score[p[0]] += p[1]
                except KeyError: pass
                try:
                    p = after_dic[it.lower()]
                    self.candidates[each].score[p[0]] += p[1]
                except KeyError: pass
            #Now process words surrounding the capitalized words
            N_WORDS = 3 #context window size on each side
            print "Now processing: ", self.candidates[each].words
            for item in self.candidates[each].interval_list:
                words_before = self.get_tokens_from_position(item[0], N_WORDS)
                words_after = self.get_tokens_from_position(item[1]-1,N_WORDS, False)
                self.candidates[each].context_list.append(words_before+cap_tokens+words_after)
                for word in words_before:
                    try:
                        p = before_dic[word.lower()]
                        self.candidates[each].score[p[0]] += p[1]
                    except KeyError: pass
                for word in words_after:
                    try:
                        p = after_dic[word.lower()]
                        self.candidates[each].score[p[0]] += p[1]
                    except KeyError: pass

        #scrap candidates with other types of classifications.
        #(positive score for some non-Location class, zero for Location)
        todelete = []
        for each in self.candidates:
            f = 0 #flag: candidate has some non-Location score
            for clas in self.candidates[each].score:
                if clas != "Location" and self.candidates[each].score[clas] > 0:
                    f = 1
            if f == 1 and self.candidates[each].score["Location"] == 0:
                todelete.append(each)

        prev_size = len(self.candidates)

        """print "Before deletion:"
        for each in self.candidates:
            print self.candidates[each].words, self.candidates[each].context_list
            """
        for each in todelete:
            del self.candidates[each]

        print len(todelete), " candidates, out of a total of", prev_size, "were deleted because they had non-location type classifications."

        #strip candidates with no classification
        todelete = []
        for each in self.candidates:
            if self.candidates[each].score["Location"] == 0:
                todelete.append(each)

        prev_size = len(self.candidates)

        print "Before deletion:"
        for each in self.candidates:
            print self.candidates[each].words, self.candidates[each].context_list


        for each in todelete:
            del self.candidates[each]

        print len(todelete), " candidates, out of a total of", prev_size, "were deleted because they had no classifications."

        print "What remains is:"
        count = 0
        for each in self.candidates:
            print count, self.candidates[each].words#, self.candidates[each].context_list
            count += 1


        #Now consider the scores and rank each candidate
        #NOTE(review): the ranking is currently UNSORTED -- the
        #score-based sort below is commented out.
        self.candidate_ranking = []
        for each in self.candidates:
            self.candidate_ranking.append(self.candidates[each])

        """print "before: ", self.candidate_ranking
        self.candidate_ranking.sort(lambda x, y: cmp(y.score["Location"], x.score["Location"]))
        print " after: ", self.candidate_ranking
        """
        return


if __name__ == "__main__":
    # Demo driver: run candidate generation and rule-based
    # classification over a single XML document.
    # Works reasonably only with english language documents, some
    # examples of which are: 10342_tx1.xml, 21294_tx1.xml, etc
    import TaxonX

    source_path = "../data/xmldocs/10342_tx1.xml"
    document = TaxonX.TaxonX(source_path)
    classifier = Classifier(document.xmldata)
    classifier.classify()
