# -*- coding: utf-8 -*-
"""
Created on Mon Mar  4 16:49:52 2019

@author: william

Email: hua_yan_tsn@163.com
"""
import numpy as np

import BiGRU
import HierarchicalAttention
from loadData import *
from PositionEmbeddings import getPositionEmbeddings
def softmax_possibility(sentence_representation, W):
    """
    Compute the softmax class distribution for one sentence.

    :param sentence_representation: 1-D final representation of the sentence
    :param W: the weight matrix; only its first ``len(sentence_representation)``
            columns are used. Row semantics (per the original author):
            row 0: class -1 (negative), row 1: class 1 (positive),
            row 2: class 0 (neutral), row 3: class 3 (conflicts)
    :return: 1-D array of class probabilities summing to 1
    """
    # Use only as many weight columns as the representation has entries.
    computed_W = W[:, :sentence_representation.shape[0]]
    logits = np.matmul(computed_W, sentence_representation)
    # Subtract the max logit before exponentiating: softmax is shift-invariant,
    # and this prevents np.exp overflowing to inf (which would yield NaNs).
    stabilized = np.exp(logits - np.max(logits))
    return stabilized / np.sum(stabilized)

def lossFunction(softmax_possibilities, Y):
    """
    Negative log-likelihood loss summed over a batch.

    :param softmax_possibilities: per-sample class distributions computed by
            softmax_possibility; dimension: batch_size * 4
    :param Y: per-sample gold class index used to pick the predicted
            probability; dimension: batch_size
    :return loss_value: sum over the batch of -log(p[correct class])
    """
    loss_value = 0.0
    for probs, y in zip(softmax_possibilities, Y):
        # BUG FIX: the original assigned (=) instead of accumulating (+=),
        # so only the LAST sample's loss was returned.
        loss_value += -np.log(probs[y])
    return loss_value

def RMSprop(weight,learning_rate = 0.001, beta = 0.5):
    """
    Placeholder for an RMSprop parameter update — NOT IMPLEMENTED.

    :param weight: the init weight matrix
    :param learning_rate: the learning rate of project
    :param beta: the decay rate of learning rate | the drop out rate
    """
    # TODO: implement the RMSprop update; currently a no-op that returns None.
    # NOTE(review): callers receive None — nothing in this file invokes it yet.
    pass


def initParams():
    W = {}
    forward_W = {}
    forward_W.setdefault('update', np.random.rand(1,351))
    forward_W.setdefault('reset', np.random.rand(1,351))
    forward_W.setdefault('hidden', np.random.rand(1, 351))
    forward_W.setdefault('output', np.random.randn(1)[0])
    backward_W = {}
    backward_W.setdefault('update', np.random.rand(1, 351))
    backward_W.setdefault('reset', np.random.rand(1, 351))
    backward_W.setdefault('hidden', np.random.rand(1, 351))
    backward_W.setdefault('output', np.random.randn(1)[0])
    W.setdefault('forward', forward_W)
    W.setdefault('backward', backward_W)
    W.setdefault('score', np.random.rand(82,300))
    W.setdefault('fuse', np.random.rand(464, 300))
    W.setdefault('s2c', np.random.rand(464,))
    W.setdefault('final', np.random.rand(4,164))
    batch_size = 64
    return W, batch_size
def train():
    """Placeholder for the training entry point — NOT IMPLEMENTED."""
    # TODO: the actual training currently lives in the __main__ block below;
    # move it here once batching/optimisation are finished.
    pass

if __name__ == "__main__":
    # =========== Data loading and parameter initialisation =======================#
    resource = 'laptop'
    data = loadTrainData(resource)
    label = loadLabel(resource)
    aspects = loadAspectToken(resource)
    initPositionEmbeddings = loadInitPositionEmbeddings(max_len=82, vector_dimension=50)
    print(' ==== sentence number %d =====' % len(data))
    W, batch_size = initParams()
    word_index = loadWordIndex(resource)
    words_ids = loadWordsIds(max_len=82)
    sample_num = 0            # samples accumulated in the current batch
    sample_label = []         # gold labels of the current batch
    sample_distribution = []  # predicted distributions of the current batch
    batch_num = 0
    # ============== Training =========================#
    for index in range(len(data)):
        print('=======  sentence %d  ======' % index)
        # Map each aspect token to its word index, rebuilding the space-joined string.
        aspects[index] = aspects[index].strip('\n').strip()
        temp = []
        for i in aspects[index].split():
            temp.append(word_index[i])
        aspects[index] = ' '.join(temp)
        # Collect sentence tokens up to the first '0' (padding marker, presumably
        # — TODO confirm against loadTrainData's output format).
        sentence = []
        data_index = data[index].split()
        for i in data_index:
            if (i == '0'):
                break
            sentence.append(i)
        for i in range(len(sentence)):
            sentence[i] = word_index[sentence[i]]
        sentence = ' '.join(sentence)
        position_embeddings = getPositionEmbeddings(sentence, aspects[index], words_ids, initPositionEmbeddings, vector_dimension=50)
        # NOTE(review): if getPositionEmbeddings can return a numpy array,
        # comparing it to [] with == is fragile (element-wise comparison);
        # this only behaves as intended if an empty Python list is returned — verify.
        if (position_embeddings == []):
            print('=====   sentence %d has no correct aspect label, discards =====' % index)
            continue
        print("====get position embeddings success====", position_embeddings.shape)
        word_embeddings = BiGRU.getWordEmbeddings(sentence)
        print("====get word embeddings success====")
        hidden_state = BiGRU.BiDirectionalGRU(word_embeddings, position_embeddings, W)
        print('====hidden state success====', hidden_state.shape)
        source2Asp = HierarchicalAttention.source2AspectAtt(word_embeddings, W['score'])
        print('====source 2 aspects success====', source2Asp.shape)
        r_hat = HierarchicalAttention.fuseInformation(hidden_state, source2Asp, W['fuse'])
        print('====information fuse success====', r_hat.shape)
        source2Cont = HierarchicalAttention.source2ContextAtt(hidden_state, r_hat, W['s2c'])
        print('====source 2 context success====', source2Cont.shape)
        answer = softmax_possibility(source2Cont, W['final'])
        # NOTE(review): this break stops after the FIRST sentence, making all of
        # the batching/loss code below unreachable — looks like a debugging
        # leftover; confirm and remove to actually train.
        break
        print('==== predict polarities distribution is ', answer, '======')
        sample_num += 1
        print('====== batch num %d;    sample num %d. ======' % (batch_num, sample_num))
        sample_label.append(label[index])
        sample_distribution.append(answer)
        # NOTE(review): second unreachable break (dead code even if the first
        # break is removed it would still stop before the loss computation).
        break
        if (sample_num == batch_size):
            batch_num += 1
            sample_num = 0
            lf = lossFunction(sample_distribution, sample_label)
            print('======  loss function result: %f  =====' % lf)
            sample_distribution = []
            sample_label = []