# coding=utf-8
"""
Author  : Jane
Contact : xijian@ict.ac.cn
Time    : 2021/3/17 9:57
Desc:
"""
import tensorflow as tf
from transformers import XLNetTokenizer
import pkg_resources

import pandas as pd
import numpy as np
import time
import logging
import os
from collections import Counter

import sys
sys.path.append('/home/xijian/pycharm_projects/JSNews/')
from src.junshi.classify.han.han_master.han_model1 import MyHAN
from src.junshi.classify.metrics.compute_metrics import get_metrics
from src.junshi.classify.han.han_master.predict_online import create_hanstyle_inputdata_batch_online, _init
from src.junshi.classify.han.han_master import config as han_cfg
from src.junshi.classify.ensemble.config import *
from src.junshi.classify.textcnn.tcnn_master.tcnn_model import MyTextCNN
from src.junshi.classify.xlnet.xlnet_master.xlnet_train import MyXLNet
from src.junshi.classify.textcnn.tcnn_master.predict_online import create_tcnnstyle_inputdata_batch_online
from src.junshi.classify.xlnet.xlnet_master.predict_online import create_xlnetstyle_inputdata_batch_online
from src.junshi.classify.xlnet.xlnet_master.config import *


logging.basicConfig(level=logging.INFO)
"""
os.environ['CUDA_VISIBLE_DEVICES']='1'
# 设置按需使用GPUs
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices(device_type='GPU')
        print('************************** ', len(gpus), 'Physical GPUs, ', len(logical_gpus), 'Logical GPUs')
    except RuntimeError as e:
        print(e)
"""

STOPWORDS_SET = set()  # 停用词集合
STOPWORDS_FILENAME = 'zh_data/stopwords.txt'  # 停用词文件名
USERDICT_FILENAME = 'zh_data/all_js_keywords.txt'


vocab_processor = _init(TOKENIZER_PATH)
xlnet_tokenizer = XLNetTokenizer.from_pretrained(xlnet_model_dir+'spiece.model')


def load_all_models(model_name):
    """Load every ensemble member named in *model_name*.

    Args:
        model_name: iterable of checkpoint-directory names; a substring in
            each name ('han', 'textcnn', 'xlnet', ...) selects the model
            class to instantiate.

    Returns:
        List of loaded models, aligned index-for-index with *model_name*.
        Entries whose type is not wired up yet ('fast', 'bert', unknown)
        are None.
    """
    ensemble_models = []
    for modelname in model_name:
        model_file = pkg_resources.resource_filename(__name__, modelname)
        print(model_file)
        # Reset per iteration. The original code left `model` unbound here,
        # so a 'fast'/'bert'/unknown entry raised UnboundLocalError on the
        # first iteration or silently re-appended the previous model.
        model = None
        if 'fast' in modelname:
            # model = fasttext.load_model(model_file)
            pass  # fasttext support not wired up yet
        elif 'bert' in modelname:
            pass  # BERT support not wired up yet
        elif 'han' in modelname:
            model = MyHAN(hidden_size=han_cfg.hidden_size, num_classes=num_classes)
            model.load_weights(tf.train.latest_checkpoint(model_file))
        elif 'textcnn' in modelname:
            model = MyTextCNN(num_classes=num_classes)
            model.load_weights(tf.train.latest_checkpoint(model_file))
        elif 'xlnet' in modelname:
            model = MyXLNet(xlnet_model_dir, num_classes)
            model.load_weights(tf.train.latest_checkpoint(model_file))
        else:
            pass
            # model = joblib.load(model_file)
        ensemble_models.append(model)
    return ensemble_models


def predict_by_all_models(model_name, ensemble_models, x_data):
    """Run every supported ensemble member over *x_data*.

    Args:
        model_name: checkpoint names, aligned index-for-index with
            *ensemble_models*; a substring in each name selects the
            preprocessing + predict path.
        ensemble_models: models as returned by load_all_models().
        x_data: batch of raw text samples.

    Returns:
        (predicted_labels, predicted_probs): one entry per supported model —
        a list of int class ids, and the matching probability matrix.
        Unsupported model types ('fast', 'bert', unknown) are skipped with
        a warning instead of crashing.
    """
    predicted_labels = []
    predicted_probs = []
    print('************************ 共有%d个分类模型同时预测 *************************' % len(ensemble_models))
    for i, modelname in enumerate(model_name):
        # Reset per model; staying None marks an unsupported type. The
        # original code left these unbound, so a 'fast'/'bert'/unknown entry
        # raised UnboundLocalError at the int-cast below (or reused the
        # previous model's predictions).
        pred, pred_prob = None, None
        if 'fast' in modelname:
            pass  # fasttext support not wired up yet
        elif 'bert' in modelname:
            pass  # BERT support not wired up yet
        elif 'han' in modelname:
            encoded_texts = create_hanstyle_inputdata_batch_online(x_data, vocab_processor)
            pred_prob = ensemble_models[i].predict(encoded_texts)
            pred = tf.argmax(pred_prob, axis=-1).numpy()
        elif 'textcnn' in modelname:
            encoded_texts = create_tcnnstyle_inputdata_batch_online(x_data, vocab_processor)
            pred_prob = ensemble_models[i].predict(encoded_texts)
            pred = tf.argmax(pred_prob, axis=-1).numpy()
        elif 'xlnet' in modelname:
            encoded_texts = create_xlnetstyle_inputdata_batch_online(x_data, xlnet_tokenizer)
            # XLNet is invoked directly (not via .predict) with a feature dict.
            pred_prob = ensemble_models[i](dict(encoded_texts), training=False)
            pred = tf.argmax(pred_prob, axis=-1).numpy()
        if pred is None:
            logging.warning('no prediction path for model %s; skipped', modelname)
            continue
        pred = list(map(int, pred))  # normalize labels ('0'..'3' / np ints) -> plain ints
        predicted_labels.append(pred)
        predicted_probs.append(pred_prob)
    return predicted_labels, predicted_probs

def get_label_by_maxprobsum(labels, probs):
    """Return the label whose summed probability across classifiers is largest.

    Args:
        labels: candidate labels, indexed by class position.
        probs: per-classifier probability vectors — one row per classifier,
            one column per class, in the same order as *labels*.

    Returns:
        The element of *labels* at the argmax column of the column-wise
        sums; ties resolve to the earliest class. Falls back to labels[0]
        when no column sums to a positive value (the original raised
        UnboundLocalError there because `label` was never assigned).
    """
    best_label = labels[0]  # safe default; fixes the unbound-variable bug
    best_sum = 0
    for idx, column in enumerate(zip(*probs)):
        column_sum = sum(column)
        # Strict '>' keeps the earliest class on ties, as before.
        if column_sum > best_sum:
            best_sum = column_sum
            best_label = labels[idx]
    return best_label


def ensemble_predict_by_vote(labels, predicted_labels, predicted_probs, classifier_num=5):
    """Majority-vote the per-classifier predictions into one label per sample.

    Args:
        labels: candidate labels, indexed by class position (used for the
            probability tie-break).
        predicted_labels: per classifier, a list of predicted class ids.
        predicted_probs: per classifier, a matrix of class probabilities
            aligned with *predicted_labels*.
        classifier_num: number of classifiers taking part in the vote.

    Returns:
        List of ensembled labels, one per sample: the plurality vote, or —
        when the top two vote counts tie — the label with the largest
        summed probability across classifiers.
    """
    y_pred = []
    # Each row bundles one sample: first the votes, then the prob vectors.
    for row in zip(*predicted_labels, *predicted_probs):
        votes = row[:classifier_num]
        prob_rows = row[classifier_num:]
        top_two = Counter(votes).most_common(2)  # only the two leading counts matter
        if len(top_two) == 1:
            # Unanimous vote.
            y_pred.append(top_two[0][0])
        elif top_two[0][1] > top_two[1][1]:
            # Clear plurality winner.
            y_pred.append(top_two[0][0])
        elif top_two[0][1] == top_two[1][1]:
            # Tied counts: fall back to the summed-probability argmax.
            y_pred.append(get_label_by_maxprobsum(labels, prob_rows))
    return y_pred

if __name__=='__main__':

    # Load the labelled test set; columns 1 and 2 hold the cleaned text and label.
    filepath = DATA_PATH + 'js_news/labeled_data/20201217/class_for_4_and_1/han/js_pd_tagged_test.txt'
    df_data = pd.read_csv(filepath, encoding='UTF-8', sep='\t', header=0, index_col=False, usecols=[1, 2])
    df_data = df_data.dropna()
    print(df_data.head())

    x_data, y_data = df_data['clear_content'], df_data['label']

    # Checkpoint directories: the substring in each name ('han', 'textcnn', ...)
    # selects the model type inside load_all_models/predict_by_all_models.
    model_name = [
        '../han/han_master/checkpoint_w2v/20210311',
        '../textcnn/tcnn_master/checkpoint_w2v/20210311'
    ]
    starttime = time.time()
    ensemble_models = load_all_models(model_name)
    endtime = time.time()
    logging.info(f'ensemble 加载模型耗时：{endtime - starttime:.2f}s')

    # Predict with every model, then combine by majority vote.
    starttime = time.time()
    predicted_labels, predicted_probs = predict_by_all_models(model_name, ensemble_models, x_data)
    print(len(predicted_labels), np.array(predicted_probs).shape)
    endtime = time.time()
    logging.info(f'ensemble测试预测耗时：{endtime - starttime:.2f}s')

    labels = list(range(num_classes))
    y_pred = ensemble_predict_by_vote(labels, predicted_labels, predicted_probs, classifier_num=len(model_name))
    print(len(y_pred), y_pred[:10])

    # Score against the gold labels and write the report file.
    eval_result_file = pkg_resources.resource_filename(__name__, './eval/20210317/eval_results.txt')
    get_metrics(y_data, y_pred, eval_result_file)