import numpy as np


class LRClassifier(object):
    """Multinomial logistic-regression classifier (softmax + cross-entropy)
    trained with mini-batch gradient descent."""

    def __init__(self):
        self.weight = None    # (class_num, fea_num) weight matrix, set by train()
        self.bias = None      # (class_num,) bias vector, set by train()
        self.class_num = 0    # number of classes, set by train()
        self.fea_num = 0      # number of input features, set by train()

    @staticmethod
    def softmax(batch_vec):
        """Row-wise numerically-stable softmax.

        Args:
            batch_vec: (batch, class_num) array of logits.

        Returns:
            (batch, class_num) probabilities, clipped to [1e-6, 1 - 1e-6] so
            downstream log/gradient computations never see exact 0 or 1.
        """
        epsilon = 1e-6
        # Subtract the per-row max so exp() cannot overflow.
        shifted = batch_vec - batch_vec.max(axis=1, keepdims=True)
        exp_val = np.clip(np.exp(shifted), 1e-20, 1)
        output = exp_val / exp_val.sum(axis=1, keepdims=True)
        return np.clip(output, epsilon, 1 - epsilon)

    def gradientDecent(self, batch_train, batch_labels, learning_rate=0.01):
        """One gradient-descent step on a mini-batch.

        Args:
            batch_train: (batch, fea_num) input features.
            batch_labels: (batch, class_num) one-hot targets.
            learning_rate: step size.
        """
        dense_output = np.dot(batch_train, self.weight.T) + self.bias
        prob_output = self.softmax(dense_output)
        # Gradient of softmax cross-entropy w.r.t. the logits: p - y.
        grad_y = prob_output - 1.0 * batch_labels
        d_w = np.dot(grad_y.T, batch_train)
        d_b = grad_y.sum(0)

        self.weight -= d_w * learning_rate
        self.bias -= d_b * learning_rate

    @staticmethod
    def batchIndex(total_size, batch_size, shuffle=False):
        """Split indices [0, total_size) into batches of exactly batch_size.

        The last batch wraps around to the front of the (possibly shuffled)
        index list, so a few samples may appear twice within one epoch.
        """
        if shuffle:
            shf_idx = np.random.permutation(total_size)
        else:
            shf_idx = np.arange(total_size)

        batch_index = [[int(shf_idx[(i + j) % total_size]) for i in range(batch_size)]
                       for j in range(0, total_size, batch_size)]
        return batch_index

    def train(self, data_set, labels, epoch=100, batch_size=256):
        """Fit the classifier.

        Args:
            data_set: (total_size, fea_num) samples.
            labels: (total_size,) non-negative integer class ids.
            epoch: number of passes over the data.
            batch_size: mini-batch size.
        """
        data_set = np.asarray(data_set).astype(np.float32)
        labels = np.asarray(labels)
        assert len(data_set.shape) == 2, '数据维数必须为2'
        assert len(labels.shape) == 1, '标记的维数必须是1'
        assert data_set.shape[0] == labels.shape[0], '标记长度与样本数量不一致'

        total_size = data_set.shape[0]
        self.fea_num = data_set.shape[1]
        self.class_num = int(labels.max() + 1)

        # BUG FIX: actually one-hot encode the labels. The original left this
        # matrix all-zero, so the gradient (p - y) pushed every probability
        # toward zero and the model learned nothing.
        onehot_labels = np.zeros(shape=(total_size, self.class_num), dtype=np.float32)
        onehot_labels[np.arange(total_size), labels.astype(np.int64)] = 1.0

        # He-style initialization. BUG FIX: the weight must be shaped
        # (class_num, fea_num) to match np.dot(batch, weight.T); the original
        # used (total_size, total_size), which breaks for any real dataset.
        stddev = np.sqrt(2 / self.fea_num)
        self.weight = np.random.normal(
            0, stddev, size=(self.class_num, self.fea_num)).astype(np.float32)
        self.bias = np.zeros(self.class_num, dtype=np.float32)

        for e in range(epoch):
            # Shuffle each epoch so gradient descent doesn't see a fixed order.
            index = self.batchIndex(total_size, batch_size, shuffle=True)
            # Step-decay learning rate: shrinks as more samples are processed.
            lr = 0.01 / (e * batch_size // 10000 + 1)
            for idx in index:
                self.gradientDecent(data_set[idx], onehot_labels[idx], lr)

    def predict(self, input_batch):
        """Return the predicted class id (argmax probability) per input row."""
        dense_output = np.dot(input_batch, self.weight.T) + self.bias
        prob_output = self.softmax(dense_output)

        ret = [int(x.argmax()) for x in prob_output]
        return ret
