# -*- coding: utf-8 -*-
# @Time    : 2018/3/31 19:12
# @Author  : Tianchiyue
# @File    : model.py
# @Software: PyCharm Community Edition
import numpy as np
from keras.layers import Input, Embedding, Dense, Dropout
from keras.models import Model
from keras import backend as K
from keras import optimizers, regularizers
from sklearn.metrics import precision_score, recall_score
import logging


class BaseModel:
    """Base class for Keras sentence-classification models.

    Owns the shared input/embedding layers and the train/evaluate loop.
    Subclasses override :meth:`build` to return the sentence
    representation tensor that is fed into the classifier head.
    """

    def __init__(self, config, embedding_matrix):
        """
        :param config: dict of hyper-parameters (max_length, target_nums,
            embed_trainable, num_classes, lr, epochs, batch_size, ...).
        :param embedding_matrix: numpy array (vocab_size, embed_dim) of
            pretrained word vectors; row 0 is treated as padding
            (mask_zero=True below).
        """
        self.config = config
        self.model = None
        # Word-id sequence of the sentence, padded/truncated to max_length.
        self.sentence_input = Input(shape=(self.config['max_length'],),
                                    dtype='int32',
                                    name='sentence_input')
        # Word-id sequence of the target/aspect phrase.
        self.target_input = Input(shape=(self.config['target_nums'],),
                                  dtype='int32',
                                  name='target_input')
        # One shared embedding layer so sentence and target live in the
        # same vector space; mask_zero=True propagates a padding mask.
        self.embedding_layer = Embedding(embedding_matrix.shape[0],
                                         embedding_matrix.shape[1],
                                         trainable=self.config['embed_trainable'],
                                         weights=[embedding_matrix],
                                         mask_zero=True
                                         )
        self.sentence = self.embedding_layer(self.sentence_input)
        self.target = self.embedding_layer(self.target_input)

    def build(self):
        """Return the sentence representation tensor. Implemented by subclasses."""
        pass

    def compile(self):
        """Assemble the classifier head on top of build() and compile the model."""
        # Text representation produced by the subclass.
        rep = self.build()
        if self.config['use_mlp']:
            # Optional hidden projection with dropout before the softmax.
            rep = Dropout(self.config['dropout_rate'])(rep)
            rep = Dense(self.config['hidden_dims'], activation=self.config['activation'])(rep)
        if self.config['use_l2']:
            predictions = Dense(self.config['num_classes'],
                                kernel_regularizer=regularizers.l2(self.config['l2']),
                                activation='softmax')(rep)
        else:
            predictions = Dense(self.config['num_classes'],
                                activation='softmax')(rep)
        # Only wire in the target input when the model actually uses it;
        # otherwise Keras would complain about a disconnected input.
        if self.config.get('use_target', None):
            self.model = Model(inputs=[self.sentence_input, self.target_input], outputs=predictions)
        else:
            self.model = Model(inputs=[self.sentence_input], outputs=predictions)
        opt = optimizers.get(self.config['optimizer'])
        # Override the optimizer's default learning rate with the configured one.
        K.set_value(opt.lr, self.config['lr'])
        self.model.compile(optimizer=opt,
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

    def predict(self, test_x):
        """Return class-probability rows (softmax output) for test_x."""
        return self.model.predict(test_x)

    def evaluate(self, valid_x, valid_y):
        """Evaluate on a labelled set.

        :return: (loss, accuracy, task_f1_score) — the score part is
            task-specific (see :meth:`f1_score`) and may change per task.
        """
        v_pred = [i.argmax() for i in self.predict(valid_x)]
        v_true = [i.argmax() for i in valid_y]
        valid_score = BaseModel.f1_score(v_true, v_pred)
        evaluate_list = self.model.evaluate(valid_x, valid_y, verbose=0)
        return evaluate_list[0], evaluate_list[1], valid_score

    def fit(self, train_x, train_y, valid_x, valid_y, best_type='best_score', filename='files/best.model'):
        """Train with early stopping, checkpointing the best weights.

        :param best_type: 'best_loss' or 'best_score' — which validation
            metric selects the checkpoint.
        :param filename: path the best weights are saved to.
        :return: (best_acc, best_score) on the validation set.
        """
        logging.info('----------------------')
        early_stop = 0
        best_loss = 1000
        best_acc = 0
        best_score = 0
        n_stop = self.config['n_stop']
        best_epoch = 0
        # For imbalanced data, per-class weights could be enabled here:
        # class_weights = dict(enumerate(train_y.shape[0] / np.bincount(np.argmax(train_y, -1))))
        # BUGFIX: was range(1, epochs), which trained only epochs-1 rounds.
        for i in range(1, self.config['epochs'] + 1):
            # Learning-rate decay kicks in at lr_decay_epoch (fine-tuning).
            if i == self.config['lr_decay_epoch']:
                K.set_value(self.model.optimizer.lr, self.config['lr_decay_rate'] * self.config['lr'])
            early_stop += 1
            # One epoch at a time so we can evaluate/checkpoint between epochs.
            his = self.model.fit(train_x, train_y,
                                 batch_size=self.config['batch_size'],
                                 verbose=0)
            logging.debug('\ttrain\tepoch{}:\tloss:{}, acc:{}'.format(i, his.history['loss'][0], his.history['acc'][0]))
            val = self.evaluate(valid_x, valid_y)
            if (val[0] < best_loss and best_type == 'best_loss') or \
                    (val[2] > best_score and best_type == 'best_score'):
                best_loss = val[0]
                best_epoch = i
                best_acc = val[1]
                best_score = val[2]
                self.model.save_weights(filename)
                early_stop = 0
            logging.debug('\ttest\tepoch{}:\tloss:{}, acc:{},score:{}'.format(i, val[0], val[1], val[2]))
            if early_stop > n_stop:
                logging.info('\tearly stop')
                break
        logging.info('\tbest\tepoch{}:\tloss:{}, acc:{},score:{}'.format(best_epoch, best_loss, best_acc, best_score))
        return best_acc, best_score

    def fit_nlpcc(self, train_x, train_y, valid_x, valid_y, test_x, test_y,
                  use_class_weight=False, predicted=False, best_type='best_score', filename='files/best.model'):
        """Train for the NLPCC task, logging train/valid/test results each epoch.

        :param use_class_weight: weight classes inversely to frequency
            (for imbalanced data).
        :param predicted: if True, reload the best checkpoint and also
            return its test-set predictions.
        :return: predicted=False -> [test_acc, test_score] at the best
            validation epoch; predicted=True -> (test_probs, test_acc,
            test_score).
        """
        early_stop = 0
        best_loss = 1000
        best_acc = 0
        best_score = 0
        n_stop = self.config['n_stop']
        best_epoch = 0
        # Inverse-frequency class weights for imbalanced data.
        class_weights = dict(enumerate(train_y.shape[0] / np.bincount(np.argmax(train_y, -1))))
        test_result = []
        # BUGFIX: was range(1, epochs), which trained only epochs-1 rounds.
        for i in range(1, self.config['epochs'] + 1):
            # Learning-rate decay kicks in at lr_decay_epoch (fine-tuning).
            if i == self.config['lr_decay_epoch']:
                K.set_value(self.model.optimizer.lr, self.config['lr_decay_rate'] * self.config['lr'])
            early_stop += 1
            if use_class_weight:
                his = self.model.fit(train_x, train_y,
                                     batch_size=self.config['batch_size'],
                                     class_weight=class_weights,
                                     verbose=0)
            else:
                his = self.model.fit(train_x, train_y,
                                     batch_size=self.config['batch_size'],
                                     verbose=0)
            logging.debug('\ttrain\tepoch{}:\tloss:{}, acc:{}'.format(i, his.history['loss'][0], his.history['acc'][0]))
            val = self.evaluate(valid_x, valid_y)
            test = self.evaluate(test_x, test_y)
            if (val[0] < best_loss and best_type == 'best_loss') or \
                    (val[2] > best_score and best_type == 'best_score'):
                best_loss = val[0]
                best_epoch = i
                best_acc = val[1]
                best_score = val[2]
                self.model.save_weights(filename)
                early_stop = 0
            logging.debug('\tvalid\tepoch{}:\tloss:{}, acc:{},score:{}'.format(i, val[0], val[1], val[2]))
            logging.debug('\ttest\tepoch{}:\tloss:{}, acc:{},score:{}'.format(i, test[0], test[1], test[2]))
            test_result.append([test[1], test[2]])
            if early_stop > n_stop:
                logging.info('\tearly stop')
                break
        logging.info('\tbest\tepoch{}:\tloss:{}, acc:{},score:{}'.format(best_epoch, best_loss, best_acc, best_score))
        # NOTE(review): if no epoch ever improved, best_epoch stays 0 and the
        # index below wraps to the last epoch's result — confirm intended.
        if predicted:
            self.model.load_weights(filename)
            return self.predict(test_x), test_result[best_epoch - 1][0], test_result[best_epoch - 1][1]
        else:
            return test_result[best_epoch - 1]

    @staticmethod
    def f1_score(y_true, y_pred):
        """Average F1 of classes 1 and 2, ignoring class 0.

        Assumes exactly three label values so that `[1:]` leaves two
        per-class scores — presumably (neutral, favor, against) stance
        labels; TODO confirm against the dataset's label encoding.
        """
        pf, pa = precision_score(y_true, y_pred, average=None)[1:]
        rf, ra = recall_score(y_true, y_pred, average=None)[1:]
        # Guard 0/0 (class never predicted and never true): report 0
        # instead of NaN so the training loop's comparisons stay valid.
        ff = 2 * pf * rf / (pf + rf) if (pf + rf) else 0.0
        fa = 2 * pa * ra / (pa + ra) if (pa + ra) else 0.0
        return (ff + fa) / 2