"""
   Translate each description_id into its concept_id, and record the number of
   times each concept occurs in the text.
"""

import MySQLdb 
#import TokenMatcher
import sys
from DatabaseConnector import connection

_DEBUG_ = True  # when True, dictionary loading reports progress on stderr
# Module-level description-ID -> concept-ID map, kept alongside the per-instance
# Translator.dictionary.  NOTE(review): nothing visible here ever populates it —
# confirm which of the two maps is authoritative.
dictionary = {}

class Translator:
    """Singleton that translates SNOMED description IDs into concept IDs.

    On construction the full description-ID -> concept-ID mapping is loaded
    from the database into ``self.dictionary``; ``translator`` then resolves
    description IDs and maintains running per-concept occurrence counts.
    """

    # Lazily-created singleton instance.
    # BUG FIX: this attribute was never declared, so the very first read of
    # ``cls.instance`` in __new__ raised AttributeError.
    instance = None

    def __new__(cls, *args, **kargs):
        if cls.instance is None:
            # BUG FIX: object.__new__ must not receive the constructor
            # arguments (TypeError on modern Pythons).
            cls.instance = object.__new__(cls)
        return cls.instance

    def __init__(self):
        # description ID (str) -> concept ID, loaded once from the database.
        # Note: __init__ still runs on every Translator() call even though
        # __new__ returns the shared instance, matching the original design.
        self.dictionary = {}
        self.loaddic()

    def translator(self, desc, historyls):
        """Resolve description IDs to concept IDs and update occurrence counts.

        desc      -- iterable of description IDs found in one span of text
        historyls -- dict mapping concept ID -> occurrence count so far;
                     updated in place and returned

        Description IDs with no known concept are reported on stdout and
        skipped.  (FIX: removed the unused database cursor and the try/except
        for MySQLdb.OperationalError — no database call is made here, only
        dictionary lookups.)
        """
        # Concepts seen in this call; dict keys give de-duplication.
        conc = {}
        for word in desc:
            key = str(word)
            if key not in self.dictionary:
                print("There is no corresponding concept!!...")
            else:
                conc[self.dictionary[key]] = 1
        # Fold this call's concepts into the running counts.
        for concept in conc:
            if concept in historyls:
                historyls[concept] = historyls[concept] + 1
            else:
                historyls[concept] = 1
        return historyls

    def loaddic(self):
        """Load the description-ID -> concept-ID mapping from SNOMED.

        BUG FIXES: the progress counter starts at 1 so the reported entry
        count is exact (previously off by one), an empty result set no longer
        raises NameError on the final report, and the cursor is closed.
        """
        cursor = connection.cursor()
        if _DEBUG_:
            sys.stderr.write("Loading dictionary..")
        cursor.execute("select `Concept ID`, `Description ID` from snomed.sct_descriptions_20070731")
        count = 0
        for count, row in enumerate(cursor.fetchall(), 1):
            if _DEBUG_ and count % 10000 == 0:
                sys.stderr.write(".")
            # row = (concept_id, description_id); map description -> concept.
            self.dictionary[row[1]] = row[0]
        cursor.close()
        if _DEBUG_:
            sys.stderr.write(" %s entries\n" % count)

    def transform(self):
        """Fill in the ConceptIDs column for lexicon rows that lack one.

        BUG FIXES: calls ``self.loaddic()`` instead of the bare ``loaddic()``
        (which raised NameError) and reads ``self.dictionary`` instead of the
        never-populated module-level ``dictionary``.  SECURITY FIX: the UPDATE
        is parameterized instead of built by string concatenation, which was
        injectable through Term values.
        """
        self.loaddic()
        cursor = connection.cursor()
        cursor.execute("select * from snomed.sct_lexicon_20070731 where `ConceptIDs` = ''")
        for row in cursor.fetchall():
            term = row[0]
            # Translate each description ID, keeping first-seen order and
            # dropping duplicates and IDs with no known concept.
            conceptids = []
            for desc_single in row[1].split(":"):
                conc_single = self.dictionary.get(desc_single)
                if conc_single is not None and conc_single not in conceptids:
                    conceptids.append(conc_single)
            conceptids_str = ":".join(conceptids)
            print(term)
            print(conceptids)
            cursor.execute(
                "update snomed.sct_lexicon_20070731 set `ConceptIDs` = %s where `Term` = %s",
                (conceptids_str, term))
        cursor.close()
        connection.commit()

        print("terminated")




#if __name__ == "__main__":
##    transform()
#    conc_list = {}
#    cursor = connection.cursor()
#    cursor.execute("SELECT * FROM hosrep.resulttext_table r where r.`RequestID` = 1")
#    ts = Translator()
#    for tuple in cursor.fetchall():
##    reqid = str(reqid).strip('L')
#        text = str(tuple[1]).strip('"(),')
#        reqid = tuple[0]
#        print reqid, text
#        for ((s, e), ids) in TokenMatcher.match_sentence(text.split()):
#            print s, e, ids
#            conc_list = ts.translator(ids, conc_list)
#            print s, e, conc_list 

#
#if __name__ == "__main__":
#    conc_list = {}
#    for line in file("sample.txt","r"):
#        for ((s, e), ids) in TokenMatcher.match_sentence(line.split()):
#            print s, e, list(ids)
#            conc_list = translator(ids, conc_list)
#            print "\n\n"
#            print conc_list
#            print "\n"
#           print s, e, conc_list.keys()
