from utils.prepare_data import *
import time
from utils.model_helper import *
import os
import tensorflow as tf
import numpy as np
from sklearn import metrics
from sklearn.metrics import classification_report
import pandas as pd
import tensorflow as tf


# Load the validation split once at import time. `load_data` comes from the
# project-local utils.prepare_data (star-imported above).
# NOTE(review): the same CSV is read twice (labels as ids, then one-hot) --
# confirm load_data is cheap enough for the double read to be acceptable.
x_test, y_test = load_data("../dbpedia_data/data/test_set_valid.csv", one_hot=False)
y_test = list(y_test)
# One-hot copy of the same labels; used later as the attention trainer's target.
_, y_test_oh = load_data("../dbpedia_data/data/test_set_valid.csv", one_hot=True)
print(np.array(y_test_oh).shape)

def caculate_acc(soft_max_ensemble, y_test=y_test):
    """Return top-1 accuracy of the given softmax rows against `y_test`.

    Args:
        soft_max_ensemble: iterable of per-class score rows, one per sample.
        y_test: gold label ids; defaults to the module-level test labels
            (default is bound at import time, matching the original code).

    Returns:
        float accuracy in [0, 1]; 0.0 for empty `y_test` instead of
        raising ZeroDivisionError.
    """
    if not y_test:
        return 0.0
    y_pred = get_y_pred(soft_max_ensemble)
    # Count positions where the predicted argmax equals the gold label.
    correct = sum(1 for gold, pred in zip(y_test, y_pred) if gold == pred)
    return correct / len(y_test)

def get_y_pred(soft_max_ensemble):
    """Map each per-class score row to the index of its largest entry."""
    return [np.argmax(scores) for scores in soft_max_ensemble]

def get_softmax(file_name):
    """Parse a TSV of softmax scores: one sample per line, tab-separated
    floats per class.  Returns a list of rows (list of floats).

    float() tolerates the trailing newline on each line, so no stripping
    is needed.
    """
    with open(file=file_name, mode="r", encoding="utf-8") as f:
        return [[float(value) for value in line.split("\t")] for line in f]

def att_data_prepare(soft_max_cnn, soft_max_adversarial_abblstm, soft_max_attn_bi_lstm, softmax_bert):
    """Stack the four models' score rows sample-wise.

    result[i] == [cnn[i], adversarial_abblstm[i], attn_bi_lstm[i], bert[i]],
    i.e. shape [num_samples, 4 models, num_classes] as nested lists.
    The iteration length follows `soft_max_cnn`, as in the original.
    """
    return [
        [soft_max_cnn[i],
         soft_max_adversarial_abblstm[i],
         soft_max_attn_bi_lstm[i],
         softmax_bert[i]]
        for i in range(len(soft_max_cnn))
    ]
    

def attention(inputs, attention_size, hidden_size=11):
    """Additive (Bahdanau-style) attention over the model axis (TF1 graph mode).

    Args:
        inputs: float tensor of shape [batch_size, model_num, label_num].
        attention_size: width of the attention projection.
        hidden_size: size of the last input axis (label_num). Defaults to
            11, the label count hard-coded by the original script; pass a
            different value to reuse this layer with another label count.

    Returns:
        (output, alphas):
        output -- attention-weighted sum over the model axis, shape
            [batch_size, label_num], cast to float32.
        alphas -- per-model attention weights, shape [batch_size, model_num].
    """
    # Trainable parameters, initialized with small random normals.
    w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
    b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
    u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

    with tf.name_scope('v'):
        # One fully-connected layer applied to every (batch, model) row:
        # (B, M, L) x (L, A) -> v of shape [batch_size, model_num, attention_size]
        v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)

    # (B, M, A) x (A,) -> unnormalized scores of shape (batch_size, model_num)
    vu = tf.tensordot(v, u_omega, axes=1, name='vu')
    alphas = tf.nn.softmax(vu, name='alphas')  # (batch_size, model_num)

    # Weighted sum over the model axis -> (batch_size, label_num)
    output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)
    output = tf.cast(output, tf.float32)
    return output, alphas


def get_ensemble(softmax_bert, soft_max_cnn, soft_max_adversarial_abblstm,
                 soft_max_attn_bi_lstm, weights=(3.0, 1.5, 1.0, 1.0)):
    """Element-wise weighted sum of the four models' softmax scores.

    Args:
        softmax_bert, soft_max_cnn, soft_max_adversarial_abblstm,
        soft_max_attn_bi_lstm: per-sample score rows, all the same shape.
        weights: per-model weights applied in the order
            (bert, cnn, adversarial_abblstm, attn_bi_lstm). The default
            reproduces the original hard-coded 3 / 1.5 / 1 / 1 blend.

    Returns:
        List of rows; row[j] = w_bert*bert[j] + w_cnn*cnn[j] + w_adv*adv[j]
        + w_attn*attn[j].
    """
    w_bert, w_cnn, w_adv, w_attn = weights
    ensemble = []
    for bert_row, cnn_row, adv_row, attn_row in zip(
            softmax_bert, soft_max_cnn,
            soft_max_adversarial_abblstm, soft_max_attn_bi_lstm):
        ensemble.append([
            w_bert * b + w_cnn * c + w_adv * a + w_attn * t
            for b, c, a, t in zip(bert_row, cnn_row, adv_row, attn_row)
        ])
    return ensemble

def output(y_pred):
    """Write predictions to result.csv (columns: index, label_predict, no header).

    Labels are shifted from 0-based to 1-based to match the submission
    format.  Fixes two defects in the original: it mutated the caller's
    `y_pred` list in place, and it sized the index column from the
    module-global `y_test`, which breaks when len(y_test) != len(y_pred).

    Args:
        y_pred: list of 0-based predicted label ids.
    """
    # 1-based running index, one entry per prediction.
    index_list = range(1, len(y_pred) + 1)
    # Shift to 1-based labels without mutating the caller's list.
    labels = [y + 1 for y in y_pred]
    data = pd.DataFrame({"index": index_list, "label_predict": labels})
    print(data)
    data.to_csv("result.csv", header=False, index=False)

if __name__ == '__main__':
    # Pipeline: (1) score each base model from its saved softmax TSV,
    # (2) score a fixed hand-weighted ensemble, (3) train a small attention
    # layer that learns per-model weights over the stacked softmaxes.
    # Each *_valid.tsv holds one tab-separated softmax row per test sample.

    #model1-cnn
    soft_max_cnn = get_softmax('text_cnn_title_desc_checkpoint/test_results_valid.tsv')
    cnn_acc = caculate_acc(soft_max_cnn)
    print("cnn_acc:",cnn_acc)
    y_pred = get_y_pred(soft_max_cnn)
    print(classification_report(y_test, y_pred))

    #adversarial_abblstm
    soft_max_adversarial_abblstm = get_softmax('adversarial_abblstm_checkpoint/test_results_valid.tsv')
    adversarial_abblstm_acc = caculate_acc(soft_max_adversarial_abblstm)
    print("adversarial_abblstm_acc:",adversarial_abblstm_acc)
    y_pred = get_y_pred(soft_max_adversarial_abblstm)
    print(classification_report(y_test, y_pred))

    #attn_bi_lstm
    soft_max_attn_bi_lstm = get_softmax('attn_bi_lstm_checkpoint/test_results_valid.tsv')
    attn_bi_lstm_acc = caculate_acc(soft_max_attn_bi_lstm)
    print("attn_bi_lstm_acc:",attn_bi_lstm_acc)
    y_pred = get_y_pred(soft_max_attn_bi_lstm)
    print(classification_report(y_test, y_pred))

    #bert
    softmax_bert = get_softmax('bert/test_results_valid.tsv')
    bert_acc = caculate_acc(softmax_bert)
    print("bert_acc:",bert_acc)
    y_pred = get_y_pred(softmax_bert)
    print(classification_report(y_test, y_pred))

    #ensemble -- fixed hand-tuned blend (3*bert + 1.5*cnn + abblstm + bilstm)
    soft_max_ensemble = get_ensemble(softmax_bert,soft_max_cnn,soft_max_adversarial_abblstm,soft_max_attn_bi_lstm) 

    y_pred = get_y_pred(soft_max_ensemble)

    #caculate acc
    acc = caculate_acc(soft_max_ensemble,y_test=y_test)
    print("ensemble_acc:",acc)


    # NOTE(review): metrics and classification_report are already imported at
    # the top of the file; these local re-imports are redundant but harmless.
    from sklearn import metrics
    print("Confusion Matrix...")
    cm = metrics.confusion_matrix(y_test, y_pred)
    from sklearn.metrics import classification_report
    print(classification_report(y_test, y_pred))

    #attention weight
    # batch_ph: stacked softmaxes, [batch, 4 models, 11 classes];
    # target_ph: one-hot gold labels for the same samples.
    batch_ph = tf.placeholder(tf.float32,[None,4,11], name='batch_ph')
    target_ph = tf.placeholder(tf.float32, [None,None],name='target_ph')

    att_ensemble_input = att_data_prepare(soft_max_cnn,soft_max_adversarial_abblstm,soft_max_attn_bi_lstm,softmax_bert)
    y_test_oh=y_test_oh  # no-op self-assignment, kept as-is
    attention_output, alphas = attention(batch_ph,4)
    print(attention_output.dtype)
    # Sigmoid cross-entropy against the one-hot targets.
    # NOTE(review): the attention layer is trained and evaluated on the same
    # test set, so the accuracy printed below is optimistic.
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=attention_output, labels=target_ph))
    optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(loss)
    # Element-wise match of rounded sigmoid vs. the one-hot target
    # (multi-label style accuracy, not argmax accuracy).
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(tf.sigmoid(attention_output)), target_ph), tf.float32))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print("Start learning...")
        # Full-batch training: the entire test set is fed every epoch.
        for epoch in range(100):
            loss_train = 0
            accuracy_train = 0
            print("epoch: {}\t".format(epoch), end="")
            loss_tr, acc, _ ,alpha=sess.run([loss, accuracy, optimizer,alphas],feed_dict={batch_ph:att_ensemble_input,target_ph:y_test_oh})
            print("acc:",acc)
            print(alpha)  # learned per-model attention weights



    #output
    #output(y_pred)









