#encoding:utf-8
import os
import pandas as pd
import json
from config2 import Config
import tensorflow as tf
import numpy as np
from tools.utils import nextBatchTest
# Pin inference to a single GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '2'


# Instantiate the configuration object and load the test set.
config = Config()
test_df = pd.read_csv(config.testFileSource, encoding="utf-8")


def _load_json(path):
    """Load a UTF-8 JSON file and return the parsed object."""
    with open(path, "r", encoding="utf-8") as fh:
        return json.load(fh)


# NOTE: these two vocabularies must match the ones used when training the
# checkpoint that is restored below.
word2idx = _load_json(config.word2idxSource)
label2idx = _load_json(config.label2idxSource)
idx2label = {idx: label for label, idx in label2idx.items()}

# Encode every test text into a fixed-length id sequence: look up each
# whitespace-separated token (UNK for out-of-vocabulary), then truncate
# long sequences / right-pad short ones to config.sequenceLength.
sentences = []
unk_id = word2idx["UNK"]
pad_id = word2idx["PAD"]
for text in test_df.text:
    ids = [word2idx.get(tok, unk_id) for tok in text.split(" ")]
    fixed = ids[:config.sequenceLength] + [pad_id] * max(0, config.sequenceLength - len(ids))
    sentences.append(fixed)
    
predList = []
scoreValList = []
num = 1  # debug progress counter, printed at the checkpoints below
config.batchSize = 100

graph = tf.Graph()
with graph.as_default():
    # Cap per-process GPU memory so other jobs can share the card;
    # use tf.compat.v1 consistently (the file already relies on it below).
    gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
    session_conf = tf.compat.v1.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        gpu_options=gpu_options,
    )
    sess = tf.compat.v1.Session(config=session_conf)

    with sess.as_default():
        def testStep(batchX, predictions, score):
            """Run inference on one batch.

            Args:
                batchX: list of id sequences (each config.sequenceLength long).
                predictions: prediction tensor fetched from the restored graph.
                score: per-class score tensor fetched from the restored graph.

            Returns:
                (pred, scoreVal): numpy outputs of the two fetched tensors.
            """
            if config.layerType == "Transformer":
                from layers.Transformer import Transformer, fixedPositionEmbedding
                # BUG FIX: use the actual batch length, not config.batchSize —
                # the final batch yielded by nextBatchTest may be smaller, and
                # the position embedding must match the fed batch.
                embeddedPosition1 = fixedPositionEmbedding(len(batchX), config.sequenceLength)
                feed_dict = {
                    inputX: batchX,
                    dropoutKeepProb: 1.0,  # no dropout at inference time
                    embeddedPosition: embeddedPosition1,
                }
            else:
                feed_dict = {
                    inputX: batchX,
                    dropoutKeepProb: 1.0,
                }
            pred, scoreVal = sess.run([predictions, score], feed_dict=feed_dict)
            return pred, scoreVal

        # Restore the latest checkpoint for the configured layer type.
        checkpoint_file = tf.train.latest_checkpoint("model/" + config.layerType + "/model/")
        print("checkpoint_file:" + checkpoint_file)
        saver = tf.compat.v1.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)
        print(num)
        num = num + 1

        # Input placeholders the restored graph expects; the outputs depend
        # on these fed values.
        inputX = graph.get_operation_by_name("inputX").outputs[0]
        dropoutKeepProb = graph.get_operation_by_name("dropoutKeepProb").outputs[0]
        if config.layerType == "Transformer":
            embeddedPosition = graph.get_operation_by_name("embeddedPosition").outputs[0]
        print(num)
        num = num + 1

        # Output tensors: predicted class ids and per-class scores.
        predictions = graph.get_tensor_by_name("output/predictions:0")
        score = graph.get_tensor_by_name("output/score:0")
        if config.status == "linux" or config.status == "local":
            print(num)
            num = 0
            for batchTest in nextBatchTest(sentences, config.batchSize):
                pred, scoreVal = testStep(batchTest, predictions, score)
                # Keep only the maximum class score per sample (row-wise max).
                scoreValList = scoreValList + list(np.max(scoreVal, axis=1))
                predList = predList + list(pred)
            num = 0
            print(num)
            num = num + 1
            test_df = pd.DataFrame()
            print(num)
            num = num + 1
            test_df['label'] = predList
            test_df['score'] = scoreValList
            test_df.to_csv(config.testFileSourceOutput, encoding="utf-8", index=None)
        else:
            # Small-data path: feed every sentence in a single run.
            pred_list, score_val = sess.run(
                [predictions, score],
                feed_dict={inputX: sentences, dropoutKeepProb: 1.0},
            )
            # BUG FIX: the original concatenated numpy arrays onto str with "+",
            # which raises TypeError at runtime; format via f-strings instead.
            print(f"pred_list:{pred_list}")
            print(f"score:{score_val}")


# print(len(sentences))    
# print(len(pred_list))       
