#coding: utf-8

from Tokenizer import Tokenizer
from PyMorphyInterface import *
from Chunker import *
from EntitiesExtractor import *
from RelationshipsExtractor import *
import nltk
from collections import namedtuple
import logging

# Lightweight immutable records passed between the pipeline stages.

# A terminal node of the immutable syntactic tree: the surface token,
# its part-of-speech tag and its sequential position in the sentence.
SimpleLeaf = namedtuple('SimpleLeaf', 'token pos index')

# A token together with its grammatical info and the semantic entities
# loaded for it from the lexical-semantic database.
TaggedTokenWithSemanticEntity = namedtuple(
    'TaggedTokenWithSemanticEntity', 'token graminfo sementity')

# A token paired with its grammatical (morphological) information only.
TaggedToken = namedtuple('TaggedToken', 'token graminfo')



class SemanticProcessor:
    def __init__(self, ldb):
        """
              Конструктор класса:

          """
        self._ldb = ldb
        self.last_conditions = []



    def process(self, text):
        """
            processes given NL text
        """

        self.leafindex = -1


        # Step 1. Tokenize text
        tokenizer = Tokenizer()
        tokenized_text = tokenizer.tokenize(text)

        #Step 0 Replace not + something for antonym
        for token_index in range(len(tokenized_text)):
            try:
                if tokenized_text[token_index] == LOGICAL_OPERATORS.NOT.lower():
                    if ANTONYMS_DICTIONARY.has_key(tokenized_text[token_index + 1]):
                        tokenized_text[token_index + 1] = ANTONYMS_DICTIONARY[tokenized_text[token_index + 1]]
                        tokenized_text.pop(token_index)
            except:
                pass



        # Step 2. Tag tokens
        tagged_tokens = self.tag_tokens(tokenized_text)

        # Step 3. Build syntactic tree
        chunked_sentence = self.build_syntactic_tree(tagged_tokens)
        #nltk.download()
        #print nltk.ne_chunk(chunked_sentence, binary=True)

        #now we bind an entry from LSD to each token
        entities_extractor = EntitiesExtractor(self._ldb)

        tagged_tokens_with_semantic_entities = [

            TaggedTokenWithSemanticEntity(
                tagged_token.token,
                tagged_token.graminfo,
                entities_extractor.load_entities(tagged_token.token))
            for tagged_token in tagged_tokens]

        #t = self.create_tree(chunked_sentence, tagged_tokens, tagged_tokens_with_semantic_entities)
        #tre.draw()
        tree = self.create_immutabletree(chunked_sentence, tagged_tokens)
        tree.draw()



        entities_extractor.extract_entities1(tree, tagged_tokens_with_semantic_entities)
        #print entities_extractor.entities

        relationshipsextractor = RelationshipsExtractor(self._ldb, entities_extractor.entities, entities_extractor.prepositions)
        default_entity = None
        #check for elliptical phrase
        non_elliptic_subtrees = [t for t in tree.subtrees(filter=lambda x: x.node in ['NPP','VPP'])]

        if not non_elliptic_subtrees:# and self.last_tree:
            default_entity = NamedEntity(u'тур',[u'поездка'])
            #for elliptic replacement tests
            #last_conditions = [('country','doo','tar'),('shmantry','foo','bar')]
            #print default_entity

        relationshipsextractor.extract_relationships(tree, tagged_tokens_with_semantic_entities, default_entity)



        skimmed_representations = skim_semantic_representations(tree, relationshipsextractor.entities)

        conditions = self.extract_sql_parameters(skimmed_representations, self.last_conditions)

        self.last_conditions = conditions

        query = self.compose_query(conditions)
        print query
        results = self._ldb.dbcursor.execute(query).fetchall()
        print '№ \t Страна \t Дата \t Длительность \t'
        for i in results:
            print '%s \t %s \t %s \t %s \t' % (i[0],i[2],i[3],i[4])



#    #function to implement later
#    def extract_information(self, entity, column_flags_values_dict, operator_flags_values_dict, value_flags_functions_dict, concept_separator='*', modifier_separator=','):
#        values_dict = {}
#        #go through each modifier
#        for modifier in entity.modifiers:
#            #check whether functional symbol is in column_flags
#            if modifier[0] in column_flags_values_dict.keys():
#                column_name = modifier[0] #column name
#                operator = operator_flags_values_dict[modifier[0]] #operator
#                #split modifier value
#                for item in modifier[1].split(concept_separator):
#                    splitted_modifier = item.split(modifier_separator)
#                    for value_flag in value_flags_functions_dict.keys():
#                        if value_flag in splitted_modifier[0]:
#                            values_dict[value_flag] = value_flags_functions_dict[value_flag](splitted_modifier[1]) # function performed to extract information from modifier value
#        return (column_name, operator, values_dict)



    def extract_sql_parameters(self, skimmed_representations, last_conditions = []):
        """
            Returns list of tuples (column_name, operator, conditions)
            which can be used to compose sql query
        """
        #date parameters
        datename = 'start_date'
        dateflags = [u'Дата']
        month = u'Месяц'
        monthsdict = {
            u'Январь':'01',
            u'Февраль':'02'
        }
        day = u'число'
        fetched_day = None
        fetched_month = None
        date_operator = '='

        #price parameters
        price_name = 'price'
        priceflags = [u'Цена',u'Дешевле']
        price = u'число'
        currency = u'Валюта'
        currencylist = [u'рубль',u'Доллар']
        fetched_price = None
        fetched_currency = None
        price_operator = '='

        #destination parameters
        destination_names = ['continent','country']
        notsign = '~'
        destinationflags = [u'Конечный пункт']
        name = u'Название'
        countrylist = [u'Турция',u'Египет',u'Австралия']
        continentlist = [u'Африка',u'Европа']
        fetched_country = None
        fetched_continent = None
        destination_operator = '='

        #duration parameters
        duration_name = 'duration'
        durationflags = [u'Длительность']
        duration = u'число'
        fetched_duration = ''
        duration_operator = '='
#       test of general method for features' extraction
#        for entity in skimmed_representations:
#            info = self.extract_information(entity,
#                {u'Цена':'price', u'Валюта':'currency'},
#                {u'Цена':'=', '~': '<>',u'Валюта':'='},
#                {u'число': str2int ,u'Валюта': lambda x: get_elements_that_are_matched_in_string([u'рубль',u'Доллар'],x)}
#            )
#            print info
#            for i in info[2]:
#                print i, info[2][i]
#
#            info = self.extract_information(entity,
#                    {u'Конечный пункт':'continent'},
#                    {u'Конечный пункт':'=', '~': '<>'},
#                    {u'Название': lambda x: get_elements_that_are_matched_in_string([u'Африка',u'Европа'],x)}
#            )
#            print info
#            for i in info[2]:
#                print i, info[2][i]




        for entity in skimmed_representations:
            for modifier in entity.modifiers:
                if modifier[0] in dateflags:
                    for item in modifier[1].split('*'):
                        splitted_item = item.split(',')
                        if day in splitted_item[0]:
                            fetched_day = str2int(splitted_item[1])
                        elif month in splitted_item[0]:
                            for m in monthsdict.keys():
                                if m in splitted_item[1]:
                                    fetched_month = monthsdict[m]
                elif modifier[0] in priceflags:
                    if priceflags.index(modifier[0]) == 1:
                        price_operator = '<='
                    for item in modifier[1].split('*'):
                        splitted_item = item.split(',')
                        if currency in splitted_item[0]:
                            for c in currencylist:
                                if c in splitted_item[1]:
                                    fetched_currency = currencylist.index(c) + 1
                        elif price in splitted_item[0]:
                            fetched_price = str2int(splitted_item[1])
                elif modifier[0] in destinationflags:
                    if notsign in modifier[1]:
                        destination_operator = '<>'
                    for item in modifier[1].split('*'):
                        splitted_item = item.split(',')
                        if name in splitted_item[0]:
                            for c in countrylist:
                                #print c, splitted_item[1], c in splitted_item[1]
                                if c in splitted_item[1]:

                                    fetched_country = c
                            for c in continentlist:
                                if c in splitted_item[1]:
                                    fetched_continent = c
                elif modifier[0] in durationflags:
                    for item in modifier[1].split('*'):
                        splitted_item = item.split(',')
                        if duration in splitted_item[0]:
                            fetched_duration = int(remove_non_alphanumeric(splitted_item[1]))



        print fetched_day, fetched_month
        print fetched_price, fetched_currency
        print fetched_continent, fetched_country
        print fetched_duration

        #print last_conditions
        conditions = filter(lambda x: not(
        (x[0] == price_name and fetched_price)
        or (x[0] == datename and (fetched_day or fetched_month))
        or (x[0] in destination_names and (fetched_continent or fetched_country))
        or (x[0] == duration_name and fetched_duration))
        , last_conditions)
        #print conditions

        if fetched_price:
            conditions.append((price_name, price_operator, str(fetched_price)))

        datevalue = ''
        if fetched_day and fetched_month:
            datename = "strftime('%d.%m', start_date)"
            datevalue = br(str(fetched_day) + '.' + fetched_month)
        elif fetched_day:
            datename = "strftime('%d', start_date)"
            datevalue = br(str(fetched_day))
        elif fetched_month:
            datename = "strftime('%m', start_date)"
            datevalue = br(fetched_month)

        if fetched_day or fetched_month:
            conditions.append((datename, date_operator, datevalue))

        destination_name = ''
        destination_value = ''
        if fetched_continent:
            destination_name = destination_names[0]
            destination_value = br(fetched_continent)
        elif fetched_country:
            destination_name = destination_names[1]
            destination_value = br(fetched_country)

        if fetched_continent or fetched_country:
            conditions.append((destination_name, destination_operator, destination_value))

        if fetched_duration:
            conditions.append((duration_name, duration_operator, str(fetched_duration)))

        print conditions
        return conditions

    #def _clear_conditions(self, conditions, filter_value):
    #    return filter(lambda x: , conditions)
    def compose_query(self, conditions):
        query = "select * from tours"
        if conditions:
            query += ' where '
            query += ' and '.join(map(lambda x: x[0] + x[1] + x[2], conditions))

        return query


    def create_immutabletree(self, tree, tagged_tokens):
        """
            creates a tree that can be used as a key in dict
        """
        try:
            #t.node contains node label ( S, NP , VP, etc...)
            tree.node
        except AttributeError:
            #that exception means 't' is tuple representing a leaf
            self.leafindex += 1
            return SimpleLeaf(tagged_tokens[tree[0]][0],
                    tagged_tokens[tree[0]][1].pos,
                self.leafindex)
        else:
            # Now we know that t.node is defined
            return nltk.ImmutableTree(tree.node, [self.create_immutabletree(child, tagged_tokens) for child in tree])


    def create_tree(self, tree, tagged_tokens, tagged_tokens_with_semantic_entities):

        try:
            #t.node contains node label ( S, NP , VP, etc...)
            tree.node
        except AttributeError:
            #that exception means 't' is tuple representing a leaf
            return (tagged_tokens[tree[0]][0],
                    tagged_tokens[tree[0]][1],
                    tagged_tokens_with_semantic_entities[tree[0]])
        else:
            # Now we know that t.node is defined
            return nltk.Tree(tree.node, [self.create_tree(child, tagged_tokens, tagged_tokens_with_semantic_entities) for child in tree])

    def tag_tokens(self, tokens):
        tagged_tokens = []

        pymorphyi = PyMorphyInterface()

        for token in tokens:
            # если разделитель
            if token in '.,:;][}{()<>*&^%$#@!\|/?':
                tagged_tokens.append(TaggedToken(token,PropertiesContainer(pos=POS_TAGS.DELIMETER)))
                continue
                #            elif token[0] == '@':
            #                rm.append(MorphologicalRepresentationEntry(token[1:], [{'part': 12}]))
            #                continue

            tagged_token = pymorphyi.tag_token(token)

            if not len(tagged_token.base):
                tagged_tokens.append(TaggedToken(token,None))
            else:
                tagged_tokens.append(TaggedToken(tagged_token.base,tagged_token.graminfo))

        #log tagged_tokens
        logging.debug("======POS tagging result======:")
        for token in tagged_tokens:
            logging.debug("%s:%s" % (token.token, token.graminfo))
        #end of log block

        return tagged_tokens




    def build_syntactic_tree(self, tagged_tokens):


        sentence = []

        for token_index in range(len(tagged_tokens)):
            sentence.append( (token_index, tagged_tokens[token_index].graminfo.pos))

        #grammar = "NP: {<ADJ>*<N>}"

        #NP: Chunk sequences of JJ, NN
        #PP: Chunk prepositions followed by NP
        #VP: Chunk verbs and their arguments
        #CLAUSE: Chunk NP, VP

        grammar = r"""
            NP: {<NOT>?<ADJ|NB>*<N|CNJ>+}
            PP: {<NOT>?<P><NP>}
            NPP: {<NP><PP|NP>+}
            VP: {<ADV>*<V><ADV>*}
            VPP: {<VP><NP|PP|CLAUSE>+}
            CLAUSE: {<NP|NPP><VPP>}
            """


        chunker = Chunker(grammar)

        chunked_sentence = chunker.chunk_sentence(sentence)


        #        test_sentence1 = [('tabletka','N'),('NA','P'),('TARELKE','N')]
        #        chunker.chunk_sentence(test_sentence1)
        #
        #        test_sentence2 = [('tabletka','N'),('LEZHIT','V'),('NA','P'),('TARELKE','N')]
        #        chunker.chunk_sentence(test_sentence2)


        return chunked_sentence
