'''
Created on Apr 22, 2012

@author: samindaw
'''

from nltk.corpus import wordnet as wn
from os.path import basename

def get_word_list(words):
    """
        Given a string holding a comma separated list of words,
        return the words as a Python list.

        Each element is stripped of surrounding whitespace.
        Note: an empty input string yields [''] (split semantics),
        matching the original behavior.
    """
    return [w.strip() for w in words.split(",")]

def remove_formatting(word):
    """
        get rid of user formatting added to a word
    """
    return word.strip().replace("_"," ").lower()

def read_data_from_file(filename, obj):
    """
        Read data from files which has knowledge in the form of
        semantic notion/values, and add one Case per line to *obj*.
        eg: key_x:value_x1 key_y:valuey_y1
            key_x:value_x2 key_y:value_y2
            key_x:value_x3 key_y:value_y3
            ....

        filename -- path to the data file
        obj      -- any object providing add_case(case)
                    (e.g. CaseBase or KBScript)
    """
    # 'with' guarantees the handle is closed; the original leaked it
    with open(filename, "r") as f:
        data = f.read().split("\n")
    for line in data:
        line = line.strip()
        # skip blank lines and comments
        if len(line) == 0 or line[0] == "#":
            continue
        case = Case()
        for feature in line.split():
            # split only on the first ':' so a value may itself contain
            # colons (the original silently truncated such values)
            feature_name, feature_value = feature.split(":", 1)
            case[feature_name] = feature_value
        obj.add_case(case)

def long_substr(data):
    """
        Return the longest common substring among a list of strings.
        Returns '' when the list has fewer than two entries or the
        first entry is empty.

        This function is borrowed from
            http://stackoverflow.com/questions/2892931/longest-common-substring-from-more-than-two-strings-python
    """
    best = ''
    if len(data) > 1 and len(data[0]) > 0:
        first = data[0]
        for start in range(len(first)):
            for length in range(len(first) - start + 1):
                if length <= len(best):
                    continue
                candidate = first[start:start + length]
                if all(candidate in other for other in data):
                    best = candidate
    return best

def compare_strings(word1, word2):
    """
        Fuzzy string similarity: the length of the longest common
        substring divided by the mean length of the two words.
    """
    common = long_substr([word1, word2])
    mean_length = (len(word1) + len(word2)) / 2.0
    return len(common) / mean_length

def read_script(filename):
    """
        Read a script file and return the populated KBScript object,
        named after the file's base name.
    """
    script = KBScript(basename(filename))
    read_data_from_file(filename, script)
    return script

def read_casebase(filename):
    """
        Read a case base file and return the populated CaseBase object.
    """
    casebase = CaseBase()
    read_data_from_file(filename, casebase)
    return casebase

class Feature:
    """
        A single semantic notion (feature_id) together with its value.
    """
    def __init__(self, feature_id, feature_value):
        # name of the semantic notion
        self.feature_id = feature_id
        # value assigned to that notion
        self.feature_value = feature_value

    def __repr__(self):
        # aid debugging: show both the notion and its value
        return "Feature(%r, %r)" % (self.feature_id, self.feature_value)

class Case(dict):
    """
        A collection of features grouped together which forms some
        semantic meaning.  Acts as a mapping of feature name -> value.
        May be built from a list of Feature objects or from a dict.
    """
    def __init__(self, features=None):
        if features is not None:
            if isinstance(features, list):
                for feat in features:
                    self[feat.feature_id] = feat.feature_value
            if isinstance(features, dict):
                self.update(features)

    def add_feature(self, feature_name, feature_value):
        """Store a single feature name/value pair on this case."""
        self[feature_name] = feature_value
        
class CaseBase(list):
    """
        A container object to keep track of cases.
    """
    def add_case(self, caseObj):
        """
            Append a case.  Plain lists/dicts (including Case itself,
            a dict subclass, which gets copied) are wrapped in a Case.
        """
        if isinstance(caseObj, (list, dict)):
            self.append(Case(caseObj))
        elif isinstance(caseObj, Case):
            self.append(caseObj)
    
class CBREngine:
    """
        An inference engine which uses a case base to search
        similar notions.
    """
    def __init__(self, casebase=None):
        # NOTE: the original signature was casebase=CaseBase() — a mutable
        # default evaluated once, so every default-constructed engine
        # shared (and polluted) the same case base.  Passing None keeps
        # the call sites backward-compatible while fixing that.
        self.casebase = CaseBase() if casebase is None else casebase

    def add_case(self, caseObj):
        """Add a case to the underlying case base."""
        self.casebase.add_case(caseObj)

    def compare(self, word1, word2):
        """
            Compare the given 2 words and return the best match as
            (similarity, (synset1, word1), (synset2, word2)).
            Synsets are None when WordNet lookup was skipped.
        """
        word1_n = remove_formatting(word1)
        word2_n = remove_formatting(word2)
        word1_synsets = wn.synsets(word1_n)  #@UndefinedVariable
        word2_synsets = wn.synsets(word2_n)  #@UndefinedVariable
        best_w1 = None
        best_w2 = None
        best_compare = 0

        # if at least one of them is a name (not in WordNet) fall back
        # to fuzzy string comparison
        if len(word1_synsets) == 0 or len(word2_synsets) == 0:
            best_compare = compare_strings(word1_n, word2_n)
        else:
            # keep the synset pair with the highest Wu-Palmer similarity
            for word1_synset in word1_synsets:
                for word2_synset in word2_synsets:
                    value = word1_synset.wup_similarity(word2_synset)
                    if value is not None and best_compare < value:
                        best_compare = value
                        best_w1 = word1_synset
                        best_w2 = word2_synset
        return best_compare, (best_w1, word1), (best_w2, word2)

    def split_comparison(self, words1, words2):
        """
            When words contain comma separated lists, compare each of
            them individually and return
            (average best similarity, per-word best-match tuples).
        """
        # get_word_list already strips each word; the original stripped
        # again redundantly
        wlist1 = get_word_list(words1)
        wlist2 = get_word_list(words2)
        matches = []
        max_list_similarity = 0.0
        for word1 in wlist1:
            max_similarity = 0.0
            best_match = None
            for word2 in wlist2:
                similarity = self.compare(word1, word2)
                if similarity[0] > max_similarity:
                    max_similarity = similarity[0]
                    best_match = similarity
            matches.append(best_match)
            max_list_similarity += max_similarity
        # average over the words of the first list (split() always
        # yields at least one element, so no division by zero)
        max_list_similarity = max_list_similarity / len(wlist1)
        return max_list_similarity, matches

    def search_case(self, feature_values, n=None):
        """
            Given a mapping of feature name -> value, search the case
            base and return the n best (case, similarity) pairs,
            best first (all of them when n is None).
        """
        results = []
        for case in self.casebase:
            total_similarity = 0.0
            count = 0
            for feature_name, feature_value in feature_values.items():
                if feature_name in case:
                    count += 1
                    similarity = self.split_comparison(feature_value,
                                                       case[feature_name])
                    total_similarity += similarity[0]
            if count != 0:
                total_similarity = total_similarity / count  # average
                # weight by the fraction of query features present in the case
                total_similarity = total_similarity * (count / (len(feature_values) * 1.0))
            results.append((case, total_similarity))
        # stable descending sort (ties keep case-base order)
        results.sort(key=lambda result: result[1], reverse=True)
        return results[:n]
        
class KBScript(CaseBase):
    """
        Represents a script: a collection of sentences expressed in
        semantic notion/value format, with its own CBR engine for
        searching them.
    """
    def __init__(self, name=None):
        self.name = name
        # the engine searches this very script as its case base
        self.engine = CBREngine(self)

    def search_matches(self, feature_values, n=None):
        """
            Search for the n sentences of this script best matching the
            given set of features (all of them when n is None).
        """
        return self.engine.search_case(feature_values, n)

class KScriptBase(list):
    """
        A container to maintain a set of scripts.
    """
    def add_script(self, scriptObj):
        """Register a script with this script base."""
        self.append(scriptObj)
            
class KBScriptEngine:
    """
        An inference engine that is capable of searching for
        matching scripts from a script base & merging them.
    """
    def __init__(self, scriptbase=None):
        # NOTE: the original signature was scriptbase=KScriptBase() — a
        # mutable default evaluated once, so every default-constructed
        # engine shared the same script base.  None keeps call sites
        # backward-compatible while fixing that.
        self.scriptbase = KScriptBase() if scriptbase is None else scriptbase

    def match_with_script(self, script, sentences, threashold):
        """
            Match a script with a set of sentences.

            Returns (script_match, match_list): script_match is the
            product of the per-sentence best similarities, scaled by the
            fraction of sentences that matched above *threashold*;
            match_list holds (sentence, best_case, similarity) tuples.
        """
        # guard: with no sentences there is nothing to match (the
        # original raised ZeroDivisionError on this input)
        if len(sentences) == 0:
            return 0.0, []
        script_match = 1.0
        match_list = []
        count = 0
        for feature_set in sentences:
            match = script.search_matches(feature_set)
            if match[0][1] > threashold:
                match_list.append((feature_set, match[0][0], match[0][1]))
                script_match = script_match * match[0][1]
                count += 1

        # scale by the fraction of sentences that matched
        script_match = script_match * count / (len(sentences) * 1.0)
        return script_match, match_list

    def search_script(self, sentences, n=None, threashold=0.0):
        """
            Search for the n scripts which best match the given
            sentences (all when n is None); returns
            (script, match_list, similarity) tuples, best first.
        """
        results = []
        for script in self.scriptbase:
            script_match, match_list = self.match_with_script(script, sentences, threashold)
            results.append((script, match_list, script_match))
        # stable descending sort (ties keep script-base order)
        results.sort(key=lambda result: result[2], reverse=True)
        return results[:n]

    def merge_script(self, script, sentences, threashold=0):
        """
            Merge the given script and the sentences.

            Returns (new_script, unused_sents): a new KBScript where
            matched script words are replaced by their best counterpart
            from the sentences, plus the sentences that matched nothing.
        """
        script_match, match_list = self.match_with_script(script, sentences, threashold)
        unused_sents = list(sentences)
        # script-side word -> set of (similarity, sentence-side word)
        replacements = {}
        for sentence, case, match in match_list:
            unused_sents.remove(sentence)
            for feature_name, feature_value in sentence.items():
                if feature_name in case.keys():
                    comparison = script.engine.split_comparison(feature_value, case[feature_name])
                    matches = comparison[1]
                    for match_data in matches:
                        # match_data is (similarity, (syn1, sentence_word),
                        # (syn2, script_word)): key on the script-side word
                        replacements.setdefault(match_data[2][1], set()).add((match_data[0], match_data[1][1]))
        result = KBScript()
        # rank each word's candidate replacements best-first
        for s1, s2 in replacements.items():
            replacements[s1] = list(reversed(sorted(s2, key=lambda result: result[0])))

        for case in script:
            result_case = Case()
            for feature_name, feature_value in case.items():
                results_to_add = []
                wlist = get_word_list(feature_value)
                for w in wlist:
                    if w in replacements.keys():
                        # take the top-ranked replacement word
                        results_to_add.append(replacements[w][0][1])

                if len(results_to_add) > 0:
                    result_case[feature_name] = ",".join(results_to_add)
                else:
                    # no replacement found: keep the original value
                    result_case[feature_name] = feature_value
            result.add_case(result_case)
        return result, unused_sents
        