# -*- coding: utf-8 -*-
# @Time    : 2018/3/31 21:30
# @Author  : Tianchiyue
# @File    : main.py
# @Software: PyCharm Community Edition
from models import configs
from models.rnn import Rnn
from models import cnn, ata, tan
import numpy as np
import pickle
from keras.backend.tensorflow_backend import set_session
import os
import random as rn
import tensorflow as tf
import keras.backend as K
import logging
import sys
import argparse
from sklearn.model_selection import StratifiedKFold
from collections import Counter


def init_env(gpu_id):
    """Pin the process to one GPU and enable on-demand GPU memory growth.

    :param gpu_id: GPU index as a string, e.g. '0'
    """
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
    gpu_conf = tf.ConfigProto()
    # Grow GPU memory as needed instead of grabbing it all upfront.
    gpu_conf.gpu_options.allow_growth = True
    set_session(tf.Session(config=gpu_conf))
    print('GPU%s ready!' % gpu_id)


def rand_set():
    """Re-seed every RNG source for reproducibility.

    Must be invoked again after each K.clear_session(), otherwise repeated
    runs diverge.
    """
    os.environ['PYTHONHASHSEED'] = '7'
    np.random.seed(7)
    rn.seed(7)
    tf.set_random_seed(7)
    # Single-threaded ops remove one source of non-determinism.
    conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                          inter_op_parallelism_threads=1)
    K.set_session(tf.Session(graph=tf.get_default_graph(), config=conf))
    print('========Init Over=========')


def load_data(data_path):
    """Deserialize and return a pickled dataset from *data_path*.

    NOTE(review): pickle.load executes arbitrary code on malicious files;
    only load trusted local data files here.
    """
    with open(data_path, 'rb') as handle:
        loaded = pickle.load(handle)
    print('========Data Loaded=======')
    return loaded


def load_config(model_name):
    """Return the module-level config dict registered for *model_name*.

    The dict is returned by reference (not copied): callers that mutate it
    also mutate the shared object in ``configs``.
    Ref:http://www.runoob.com/w3cnote/python-understanding-dict-copy-shallow-or-deep.html
        https://www.cnblogs.com/loleina/p/5276918.html

    :param model_name: one of 'cnn', 'rnn', 'ata', 'tan'
    :return: the matching config dict, or None for an unknown name
    """
    if model_name not in ('cnn', 'rnn', 'ata', 'tan'):
        return None
    # Configs follow the '<model>_config' naming convention in the module.
    config = getattr(configs, model_name + '_config')
    print('===={}配置文件加载完毕===='.format(model_name))
    return config


def load_model(model_name, model_config, embedding_matrix):
    """Instantiate and compile the model registered under *model_name*.

    :param model_name: one of 'cnn', 'rnn', 'ata', 'tan'
    :param model_config: config dict for the model
    :param embedding_matrix: pre-trained word-embedding matrix
    :return: a compiled model instance, or None for an unknown name
    """
    constructors = {
        'cnn': cnn.Cnn,
        'rnn': Rnn,
        'ata': ata.ATA,
        'tan': tan.TAN,
    }
    if model_name not in constructors:
        return None
    model = constructors[model_name](model_config, embedding_matrix)
    model.compile()
    print('===={}模型加载完毕===='.format(model_name))
    return model


def cross_validation(sentence, target, y, embedding_matrix, model_name='cnn', config=None, folds=5):
    """Stratified k-fold cross-validation for datasets without a test split
    (used for the Xinjiang corpus).

    :param sentence: sentence matrix
    :param target: topic/target matrix
    :param y: one-hot gold labels
    :param embedding_matrix: pre-trained word-embedding matrix
    :param model_name: one of 'cnn', 'rnn', 'ata', 'tan'
    :param config: model config dict; defaults to configs.cnn_config.
        (Bug fix: the shared module-level dict used to be the literal default
        argument — a mutable default; it is now resolved lazily.)
    :param folds: number of folds
    :return: (mean accuracy, mean f-score) over the folds
    """
    if config is None:
        config = configs.cnn_config
    logging.info('\t开始使用五折交叉训练模型{}'.format(model_name))
    kfold = StratifiedKFold(n_splits=folds, shuffle=True)
    avg_acc, avg_score = 0.0, 0.0
    for train_index, valid_index in kfold.split(np.zeros(y.shape[0]), np.argmax(y, axis=1)):
        ytc = load_model(model_name, config, embedding_matrix)
        if model_name in ['ata', 'tan']:
            # Attention models take a two-input list: [sentence, target].
            train_x_t, valid_x_t = [sentence[train_index], target[train_index]], \
                                   [sentence[valid_index], target[valid_index]]
        else:
            train_x_t, valid_x_t = sentence[train_index], sentence[valid_index]
        train_y_t, valid_y_t = y[train_index], y[valid_index]

        acc, score = ytc.fit(train_x_t, train_y_t, valid_x_t, valid_y_t)
        print('模型{}:\tacc:{}\tscore:{}'.format(model_name, acc, score))
        avg_acc += acc
        avg_score += score
        del ytc
        if K.backend() == 'tensorflow':
            # Free the TF graph between folds and re-seed for reproducibility.
            K.clear_session()
            rand_set()
    return avg_acc / folds, avg_score / folds


def xinjiang(model_name):
    """Run 5-fold cross-validation on the Xinjiang dataset.

    :param model_name: a single model name, or a list of names evaluated in turn
    """
    train_x, train_target, labels, embedding_matrix = load_data('data/xinjiang_processed_data.pkl')
    # Normalize to a list so single-model and multi-model calls share one path.
    model_names = model_name if isinstance(model_name, list) else [model_name]
    for single_model in model_names:
        logging.info('===={}===='.format(single_model))
        config = load_config(single_model)
        config['max_length'], config['target_nums'], config['embedding_dims'] = \
            train_x.shape[1], train_target.shape[1], embedding_matrix.shape[1]
        # Bug fix: the single-model branch used to pass configs.cnn_config
        # here, silently discarding the freshly loaded config for rnn/ata/tan.
        result = cross_validation(train_x, train_target, labels, embedding_matrix,
                                  model_name=single_model, config=config, folds=5)
        logging.info('****{}****'.format(result))


def nlpcc_search_para(model_name, times=1):
    """Grid-search hyper-parameters for the ATA model on the NLPCC dataset.

    Mutates the shared config dict in place, then re-runs nlpcc() for every
    (learning rate, dropout, batch size, hidden size) combination.
    (Cleanup: removed the unused `opt_dict` local and inlined the
    single-use `dropout`/`batch_size` lists.)

    :param model_name: model whose config is being searched
    :param times: repeated runs per combination, forwarded to nlpcc()
    """
    config = load_config(model_name)
    for lr in [0.001, 0.0005]:
        for dropout_rate in [0.2, 0.5]:
            for bsz in [50, 100]:
                for hsz in [100, 300]:
                    print('参数{}，{}，{},{},'.format(lr, dropout_rate, bsz, hsz))
                    config['lr'], config['dropout_rate'], config['batch_size'], config['hidden_dims'] = \
                        lr, dropout_rate, bsz, hsz
                    nlpcc(model_name, times)


def nlpcc(model_name, times=1):
    """Train and evaluate *model_name* on each NLPCC target, *times* runs each.

    The first 500 training rows are used for fitting, the remainder for
    validation; per-run results are printed and averaged at the end.

    :param model_name: one of 'cnn', 'rnn', 'ata', 'tan'
    :param times: number of repeated runs per target
    """
    config = load_config(model_name)
    config['epochs'] = 8
    target_list = ['chunjie', 'kaifang', 'shenzhen', 'russia', 'phone']
    for target in target_list:
        train_data, test_data, embedding_matrix = load_data('data/{}.pkl'.format(target))
        config['max_length'] = train_data[0].shape[1]
        config['embedding_dims'] = embedding_matrix.shape[1]
        config['target_nums'] = train_data[1].shape[1]
        print(target)
        results = []
        for _ in range(times):
            ytc = load_model(model_name, config, embedding_matrix)
            result = ytc.fit_nlpcc([train_data[0][:500], train_data[1][:500]], train_data[2][:500],
                                   [train_data[0][500:], train_data[1][500:]], train_data[2][500:],
                                   test_data[:2], test_data[2])
            del ytc
            if K.backend() == 'tensorflow':
                K.clear_session()
            # Re-seed so each repeat starts from the same RNG state; without
            # this, repeated runs produce different results.
            # (Bug fix: rand_set() used to be invoked twice in a row on the
            # tensorflow backend — once inside the if, once after it.)
            rand_set()
            print(result)
            results.append(result)
        print('平均得分{}'.format(np.average(results, axis=0)))


def semeval_ensemble(data_set='nlpcc', model_name='ata', folds=5, soft_vote=True):
    """K-fold cross-validated ensemble on the SemEval (or NLPCC) dataset.

    ref: Connecting Targets to Tweets: Semantic Attention-Based Model for
         Target-Specific Stance Detection, https://github.com/zhouyiwei/tsd.
    :param data_set: 'semeval' or anything else for the NLPCC target list
    :param model_name: model to train per fold
    :param folds: number of CV folds
    :param soft_vote: documented preference only — soft voting (summed
        predictions) works better on NLPCC, hard voting (majority class)
        better on SemEval; both scores are computed and printed regardless.
    """
    config = load_config(model_name)
    config['epochs'] = 8
    if data_set == 'semeval':
        target_list = ['hillary', 'abortion', 'feminist', 'climate']
    else:
        target_list = ['chunjie', 'kaifang', 'shenzhen', 'russia', 'phone']
    for target in target_list:
        print(target)
        train_data, test_data, embedding_matrix = load_data('data/{}.pkl'.format(target))
        config['max_length'] = train_data[0].shape[1]
        config['embedding_dims'] = embedding_matrix.shape[1]
        config['target_nums'] = train_data[1].shape[1]
        kfold = StratifiedKFold(n_splits=folds, shuffle=True)
        avg_acc, avg_score = 0.0, 0.0
        # Bug fix: the unpacked topic matrix used to rebind the loop variable
        # `target` (the dataset name) — renamed to keep the two distinct.
        sentence, target_mat, y = train_data
        y_preds_soft = []
        y_preds_hard = []
        for train_index, valid_index in kfold.split(np.zeros(y.shape[0]), np.argmax(y, axis=1)):
            ytc = load_model(model_name, config, embedding_matrix)
            if model_name in ['ata', 'tan']:
                # Attention models take a two-input list: [sentence, target].
                train_x_t, valid_x_t = [sentence[train_index], target_mat[train_index]], \
                                       [sentence[valid_index], target_mat[valid_index]]
            else:
                train_x_t, valid_x_t = sentence[train_index], sentence[valid_index]
            train_y_t, valid_y_t = y[train_index], y[valid_index]
            pred, acc, score = ytc.fit_nlpcc(train_x_t, train_y_t, valid_x_t, valid_y_t,
                                             test_data[:2], test_data[2],
                                             use_class_weight=True, predicted=True)
            print('模型{}:\tacc:{}\tscore:{}'.format(model_name, acc, score))
            y_preds_soft.append(pred)
            y_preds_hard.append(np.argmax(pred, axis=1))
            avg_acc += acc
            avg_score += score
            del ytc
            if K.backend() == 'tensorflow':
                K.clear_session()
                rand_set()
        # Soft vote: sum fold probabilities, then argmax.
        en_soft_score = ata.ATA.f1_score([i.argmax() for i in test_data[2]],
                                         [i.argmax() for i in np.sum(y_preds_soft, axis=0)])
        # Hard vote: per-sample majority of predicted classes across folds.
        en_hard_score = ata.ATA.f1_score([i.argmax() for i in test_data[2]],
                                         [Counter(i).most_common(1)[0][0] for i in np.vstack(y_preds_hard).transpose()])
        print('模型{}平均分:\tacc:{}\tscore:{}'.format(model_name, avg_acc/folds, avg_score/folds))
        print('ensemble模型平均分\thard_score:{}\tsoft_score{}'.format(en_hard_score, en_soft_score))
        print('!!!!!!!!!')

def main(argv):
    """CLI entry point: select dataset/mode and run the matching experiment.

    In PyCharm set flags via run -> edit configuration -> script parameters;
    all argparse values arrive as strings.
    """
    parser = argparse.ArgumentParser()
    # Bug fix: help texts below were copy-paste leftovers that described
    # unrelated options.
    parser.add_argument('--dataset', default='nlpcc',
                        help='dataset to run: nlpcc, xinjiang or semeval')
    parser.add_argument('--gpu', default='2',
                        help='GPU id; check nvidia-smi before running')
    parser.add_argument('--mode', default='searchpara',
                        help='nlpcc: searchpara or a model name; xinjiang: all or a model name')
    args = parser.parse_args()
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s%(message)s',
                        filename='files/{}_{}.log'.format(args.dataset, args.mode),
                        filemode='w')
    # init_env(str(args.gpu))
    rand_set()
    if args.dataset == 'nlpcc':
        if args.mode == 'searchpara':
            nlpcc_search_para('ata')
        else:
            nlpcc(args.mode, times=1)
    elif args.dataset == 'xinjiang':
        if args.mode == 'all':
            xinjiang(['cnn', 'rnn', 'tan', 'ata'])
        else:
            xinjiang(args.mode)
    elif args.dataset == 'semeval':
        # Bug fix: this used to call semeval_ensemble() with the default
        # data_set='nlpcc', silently running the NLPCC target list instead.
        semeval_ensemble(data_set='semeval')
    else:
        return None


if __name__ == '__main__':
    # Script guard; main() ignores argv and re-parses flags via argparse.
    main(sys.argv)
