import numpy as np
import sys
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_dir, ".."))
from data_process import prepare_for_training as pr
from torch import sigmoid

def sigmoid_gradient(z):
    """Derivative of the logistic sigmoid evaluated element-wise at ``z``.

    The previous implementation called ``torch.sigmoid`` on numpy arrays,
    which raises ``TypeError`` (torch.sigmoid accepts only Tensors).  This
    version is pure numpy and evaluates the sigmoid once instead of twice.

    Args:
        z: scalar or numpy array of pre-activation values.

    Returns:
        ``s * (1 - s)`` where ``s = sigmoid(z)``, same shape as ``z``.
    """
    z = np.asarray(z, dtype=float)
    # Clip to keep np.exp from overflowing for very negative inputs;
    # the gradient is numerically 0 far outside this range anyway.
    s = 1.0 / (1.0 + np.exp(-np.clip(z, -500.0, 500.0)))
    return s * (1.0 - s)

class Layer:
    """A single network layer's per-sample state.

    Holds two ``(sample_num, neuron_num)`` buffers, both zero-initialised:
    ``input`` for pre-activation values and ``activations`` for the values
    filled in during forward propagation.
    """

    def __init__(self, neuron_num, sample_num):
        buffer_shape = (sample_num, neuron_num)
        self.neuron_cnt = neuron_num
        # Pre-activation buffer (written before activations each pass).
        self.input = np.zeros(buffer_shape)
        self.activations = np.zeros(buffer_shape)

class MultilayerPerceptron:
    """Fully-connected feed-forward network with sigmoid activations.

    Trained by full-batch gradient descent on the binary cross-entropy cost.
    ``neuron_nums`` lists layer widths, input layer first, output layer last.

    Fixes relative to the previous revision:
      * ``W_init0`` (the gradient accumulator) now returns zero matrices;
        it previously returned small random matrices, which added a random
        offset to every gradient step.
      * Sigmoid is computed with a private numpy helper; the module-level
        ``sigmoid`` imported from torch raises TypeError on numpy arrays.
      * ``cost_func`` clips predictions before taking logs so a saturated
        sigmoid (0 or 1) no longer produces inf/nan cost.
    """

    def __init__(self, data, labels, neuron_nums):
        # Normalise the raw data.  prepare_for_training appears to prepend a
        # bias column of ones (forward_propagation drops column 0 for the
        # input layer) -- TODO confirm against data_process.
        (data_processed, data_mean, data_std) = pr.prepare_for_training(data)
        self.data = data_processed
        self.labels = labels
        self.data_mean = data_mean
        self.data_std = data_std
        self.sample_num = self.data.shape[0]
        self.feature_num = self.data.shape[1]
        self.unique_labels = np.unique(self.labels)
        self.label_num = self.unique_labels.shape[0]

        self.layer_num = len(neuron_nums)
        self.layers = self.Layer_init(neuron_nums, self.sample_num)
        self.W = self.W_init()

    @staticmethod
    def _sigmoid(z):
        """Numerically-stable logistic sigmoid for numpy arrays.

        torch.sigmoid (imported at module level) accepts only Tensors, so
        the network uses this numpy implementation instead.  Clipping keeps
        np.exp from overflowing for large-magnitude inputs.
        """
        return 1.0 / (1.0 + np.exp(-np.clip(z, -500.0, 500.0)))

    def Layer_init(self, neuron_nums, sample_num):
        """Build one Layer object per entry of ``neuron_nums``."""
        layers = []
        for neuron_num in neuron_nums:
            layer = Layer(neuron_num, sample_num)
            layers.append(layer)
        return layers

    def W_init(self):
        """Randomly initialise one ``(fan_in + 1, fan_out)`` weight matrix
        per layer connection; the extra row holds the bias weights."""
        W = []
        for i in range(self.layer_num - 1):
            input_cnt = self.layers[i].neuron_cnt
            output_cnt = self.layers[i + 1].neuron_cnt
            # Small positive random weights in [0, 0.05).
            w_matrix = np.random.rand(input_cnt + 1, output_cnt) * 0.05
            W.append(w_matrix)
        return W

    def W_init0(self):
        """Zero matrices shaped like the weight matrices.

        Used as the gradient accumulator in back-propagation, so these MUST
        start at zero (a random start would bias every gradient step).
        """
        W = []
        for i in range(self.layer_num - 1):
            input_cnt = self.layers[i].neuron_cnt
            output_cnt = self.layers[i + 1].neuron_cnt
            W.append(np.zeros((input_cnt + 1, output_cnt)))
        return W

    def train(self, max_iter, alpha):
        """Run ``max_iter`` gradient-descent steps with learning rate
        ``alpha``; returns the per-iteration cost history."""
        cost_history = self.gradient_descent(max_iter, alpha)
        print("training finish!")
        return cost_history

    def gradient_descent(self, max_iter, alpha):
        """Full-batch gradient descent over the training data."""
        cost_history = []
        for _ in range(max_iter):
            print('iter_num:', _)
            optimized_W = self.W
            cost, predictions = self.cost_func(optimized_W)
            cost_history.append(cost)
            deltas = self.gradient_step(predictions, optimized_W)
            for i, w in enumerate(optimized_W):
                self.W[i] = w - alpha * deltas[i]
        return cost_history

    def cost_func(self, W):
        """Cross-entropy cost and predictions for the weights ``W``.

        Returns:
            (cost, predictions) where predictions has shape
            (sample_num, label_num).
        """
        predictions = self.forward_propagation(W)
        # One-hot encode the labels; assumes label values are already class
        # indices in [0, label_num) -- TODO confirm with the caller.
        bitwise_labels = np.zeros((self.sample_num, self.label_num))
        for i in range(self.sample_num):
            bitwise_labels[i][self.labels[i][0]] = 1
        # Clip so a saturated sigmoid (exactly 0 or 1) cannot yield log(0).
        clipped = np.clip(predictions, 1e-12, 1 - 1e-12)
        positive_predictions = clipped[bitwise_labels == 1]
        negtive_predictions = clipped[bitwise_labels == 0]
        bit_positive_cost = np.sum(np.log(positive_predictions))
        bit_negtive_cost = np.sum(np.log(1 - negtive_predictions))
        cost = -(1 / self.sample_num) * (bit_positive_cost + bit_negtive_cost)
        print("cost:", cost)
        return cost, predictions

    def forward_propagation(self, W):
        """Propagate the whole batch through the network.

        Stores per-layer inputs/activations on ``self.layers`` and returns
        the output-layer activations, shape (sample_num, label_num).
        """
        # Input-layer activations exclude the bias column of self.data.
        self.layers[0].activations = self.data[:, 1:]
        input_data = self.data
        for i in range(1, self.layer_num):
            self.layers[i].input = np.dot(input_data, W[i - 1])
            self.layers[i].activations = self._sigmoid(self.layers[i].input)
            # Prepend the bias column for the next layer's input.
            input_data = np.hstack((np.ones((self.sample_num, 1)),
                                    self.layers[i].activations))
        predictions = self.layers[self.layer_num - 1].activations
        return predictions

    def gradient_step(self, predictions, W):
        """Return the cost gradient w.r.t. each weight matrix."""
        return self.back_propagation(predictions, W)

    def back_propagation(self, predictions, W):
        """Average the per-sample cost gradients over the batch.

        Args:
            predictions: (sample_num, label_num) output activations.
            W: current list of weight matrices.

        Returns:
            List of gradient matrices, one per entry of ``W``.
        """
        # Zero accumulators with the same shapes as the weights (bug fix:
        # this used to start from random values).
        W_delta = self.W_init0()
        for sample_idx in range(self.sample_num):
            single_sample_nuerons = []
            for layer_idx in range(self.layer_num):
                single_sample_nuerons.append(
                    self.layers[layer_idx].activations[sample_idx])

            bitwise_label = np.zeros((self.label_num, 1))
            bitwise_label[self.labels[sample_idx][0]] = 1

            part_diff = [0] * self.layer_num
            prediction = predictions[sample_idx].reshape((self.label_num, 1))
            # Output-layer error: prediction minus one-hot target.
            part_diff[self.layer_num - 1] = prediction - bitwise_label
            for layer_idx in range(self.layer_num - 2, 0, -1):
                # Pre-activations with a bias slot prepended, as a column.
                z = np.hstack((np.array(1),
                               self.layers[layer_idx].input[sample_idx]))
                z = z.reshape((z.shape[0], 1))
                s = self._sigmoid(z)
                layer_gradient = s * (1 - s)
                part_diff[layer_idx] = np.dot(
                    W[layer_idx], part_diff[layer_idx + 1]) * layer_gradient
                # Drop the bias row; it does not propagate further back.
                part_diff[layer_idx] = part_diff[layer_idx][1:, :]

            # Accumulate outer products activation x error per weight matrix.
            for w_idx in range(len(W)):
                tmp = np.hstack((np.array(1), single_sample_nuerons[w_idx]))
                tmp = tmp.reshape((tmp.shape[0], 1))
                W_delta[w_idx] += np.dot(tmp, part_diff[w_idx + 1].T)

        # Average over the batch.
        for i in range(len(self.W)):
            W_delta[i] = 1 / self.sample_num * W_delta[i]

        return W_delta





