import numpy as np
import sklearn
from sklearn import preprocessing
import struct
from array import array
import os
from os.path import join
class MnistDataloader(object):
    """Loader for the MNIST dataset stored in the original IDX binary format."""

    def __init__(self, train_images_path,
                 train_labels_path,
                 test_images_path,
                 test_labels_path):
        # Paths to the four raw IDX files (images + labels, train and test).
        self.train_images_path = train_images_path
        self.train_labels_path = train_labels_path
        self.test_images_path = test_images_path
        self.test_labels_path = test_labels_path

    def read_images_labels(self, images_path, labels_path):
        '''
        Read one (images, labels) pair of IDX files.

        input: images_path, labels_path
        output:
            images: np.array [size, rows*cols], one flattened image per row
            labels: np.array [size,]
        raises: ValueError if either file's magic number is wrong.
        '''
        with open(labels_path, 'rb') as file:
            # IDX label header: magic number (2049) and item count, big-endian.
            magic, size = struct.unpack(">II", file.read(8))
            if magic != 2049:
                raise ValueError('Magic number mismatch, expected 2049, got {}'.format(magic))
            labels = np.frombuffer(file.read(), dtype=np.uint8)
        with open(images_path, 'rb') as file:
            # IDX image header: magic number (2051), count, rows, cols.
            magic, size, rows, cols = struct.unpack(">IIII", file.read(16))
            if magic != 2051:
                raise ValueError('Magic number mismatch, expected 2051, got {}'.format(magic))
            # One frombuffer + reshape replaces the original per-image Python
            # loop (which also built a throwaway zero-filled list twice).
            images = np.frombuffer(file.read(), dtype=np.uint8).reshape(size, rows * cols)
        return images, labels

    def load_data(self):
        """Return (x_train, y_train, x_test, y_test) as writable numpy arrays."""
        x_train, y_train = self.read_images_labels(self.train_images_path, self.train_labels_path)
        x_test, y_test = self.read_images_labels(self.test_images_path, self.test_labels_path)
        return np.array(x_train), np.array(y_train), np.array(x_test), np.array(y_test)

'''
Original BLS implementation (broad learning system) without enhancement-node incremental updates
'''
class nodes_generator(object):
    """Random node generator for the broad learning system.

    Draws random (weight, bias) pairs, optionally orthonormalizes the
    weights (enhancement nodes), and maps input data through a chosen
    nonlinearity to produce node activations.
    """

    def __init__(self, is_enhance=False):
        # Weight matrices / scalar biases for each node group,
        # filled in by generate_nodes().
        self.wlist = []
        self.blist = []
        # When True, each random weight matrix is column-orthonormalized.
        self.is_enhance = is_enhance
        # Activation function, selected by name in generate_nodes().
        self.nonlinear = 0

    def sigmoid(self, data):
        """Logistic sigmoid."""
        return 1.0 / (1 + np.exp(-data))

    def linear(self, data):
        """Identity activation."""
        return data

    def tanh(self, data):
        """Hyperbolic tangent.

        Uses np.tanh, which is numerically stable; the original explicit
        (e^x - e^-x)/(e^x + e^-x) overflows for large |data|.
        """
        return np.tanh(data)

    def relu(self, data):
        """Rectified linear unit."""
        return np.maximum(data, 0)

    def orth(self, W):
        """Orthonormalize the columns of W in place (classical Gram-Schmidt).

        Rewritten with plain ndarrays; the original used the deprecated
        np.mat. NOTE(review): like the original, this divides by zero if
        the columns are linearly dependent.
        """
        for i in range(W.shape[1]):
            w = W[:, i].copy()
            # Subtract projections onto the already-orthonormalized columns.
            proj = np.zeros_like(w)
            for j in range(i):
                wj = W[:, j]
                proj += w.dot(wj) * wj
            w -= proj
            W[:, i] = w / np.sqrt(w.dot(w))
        return W

    def generator(self, shape, times):
        '''
        Yield `times` random (weight, bias) pairs.

        Parameters:
            shape: shape of each weight matrix; entries drawn uniformly
                   from [-1, 1), orthonormalized when is_enhance is True
            times: number of pairs to generate
        '''
        for _ in range(times):
            w = 2 * np.random.random(size=shape) - 1
            if self.is_enhance:
                w = self.orth(w)
            b = 2 * np.random.random() - 1
            yield w, b

    def generate_nodes(self, data, times, batchsize, nonlinear):
        '''
        Build the node matrix for `data`: `times` groups of `batchsize`
        nodes each, concatenated column-wise.

        Parameters:
            data: (n_samples, n_features) input
            times: number of node groups
            batchsize: nodes per group
            nonlinear: one of 'linear', 'sigmoid', 'tanh', 'relu'
        Returns:
            (n_samples, times * batchsize) activation matrix.
        '''
        # Single pass through the generator so each weight keeps its paired
        # bias; the original ran the generator twice (once for weights, once
        # for biases), doing all the random work twice.
        pairs = list(self.generator((data.shape[1], batchsize), times))
        self.wlist = [w for w, _ in pairs]
        self.blist = [b for _, b in pairs]

        self.nonlinear = {'linear': self.linear,
                          'sigmoid': self.sigmoid,
                          'tanh': self.tanh,
                          'relu': self.relu}[nonlinear]
        print(f"data{data.shape}")
        print(f"self.wlist{self.wlist[0].shape}")
        # Each group: nonlinear(data @ w + b); the scalar bias broadcasts.
        blocks = [self.nonlinear(data.dot(w) + b)
                  for w, b in zip(self.wlist, self.blist)]
        return np.column_stack(blocks)

    def transform(self, testdata):
        """Map new data through the nodes produced by generate_nodes()."""
        print(f"testdata{testdata.shape}")
        print(f"self.wlist{self.wlist[0].shape}")
        blocks = [self.nonlinear(testdata.dot(w) + b)
                  for w, b in zip(self.wlist, self.blist)]
        return np.column_stack(blocks)

    def update(self, otherW, otherb):
        """Append extra weight/bias groups.

        Bug fix: the original wrote `self.Wlist`, an attribute that never
        existed (the constructor defines `self.wlist`), so any call raised
        AttributeError.
        """
        self.wlist += otherW
        self.blist += otherb
        
class scaler:
    """Per-column standardizer.

    Learns mean/std on the training data, then maps any data to
    (x - mean) / (std + 0.001); the small constant guards against
    zero-variance columns.
    """

    def __init__(self):
        # Statistics learned by fit_transform().
        self._mean = 0
        self._std = 0

    # Used on training data.
    def fit_transform(self, traindata):
        """Learn column-wise statistics from traindata, return it standardized."""
        self._mean = np.mean(traindata, axis=0)
        self._std = np.std(traindata, axis=0)
        # Delegate the actual scaling so the formula lives in one place.
        return self.transform(traindata)

    # Used on test data.
    def transform(self, testdata):
        """Standardize testdata with the statistics learned during training."""
        return (testdata - self._mean) / (self._std + 0.001)

class broadNet(object):
    '''
    Broad learning network (BLS): input -> feature mapping nodes ->
    enhancement nodes; output weights are solved in closed form via a
    ridge-regularized pseudoinverse.
    '''
    def __init__(self,
                map_num=10,
                enchance_num=10,
                map_function='linear',
                enchan_function='linear',
                batchsize='auto',
                reg=0.001):
        # map_num / enchance_num: number of mapping / enhancement groups.
        # batchsize: nodes per group; 'auto' uses the input feature count.
        # reg: ridge regularization strength used in pinv().
        self.map_num = map_num
        self.enchance_num = enchance_num
        self.map_function = map_function
        self.enchance_function = enchan_function
        self.batchsize = batchsize
        self.reg = reg

        self.W = 0
        self.pseudoinverse = 0
        self.normalscaler = scaler()
        # sklearn renamed `sparse` to `sparse_output` in 1.2 and removed the
        # old keyword in 1.4; fall back so both old and new versions work.
        try:
            self.onehotencoder = preprocessing.OneHotEncoder(sparse_output=False)
        except TypeError:
            self.onehotencoder = preprocessing.OneHotEncoder(sparse=False)
        self.mapping_generator = nodes_generator()
        self.enchance_generator = nodes_generator(is_enhance=True)

    def fit(self, data, label):
        """Fit output weights W on (data, label); label is a 1-D class vector."""
        if self.batchsize == 'auto':
            self.batchsize = data.shape[1]
        data = self.normalscaler.fit_transform(data)

        # OneHotEncoder wants one sample per row; reshape replaces np.mat(...).T.
        label = self.onehotencoder.fit_transform(np.asarray(label).reshape(-1, 1))

        mappingdata = self.mapping_generator.generate_nodes(data, self.map_num, self.batchsize, self.map_function)

        enhancedata = self.enchance_generator.generate_nodes(mappingdata, self.enchance_num, self.batchsize, self.enchance_function)

        print('number of mapping nodes {0}, number of enhence nodes {1}'.format(mappingdata.shape[1], enhancedata.shape[1]))
        print('mapping nodes maxvalue {0} minvalue {1} '.format(round(np.max(mappingdata), 5), round(np.min(mappingdata), 5)))
        print('enhence nodes maxvalue {0} minvalue {1} '.format(round(np.max(enhancedata), 5), round(np.min(enhancedata), 5)))

        # Final design matrix is [mapping nodes | enhancement nodes].
        inputdata = np.column_stack((mappingdata, enhancedata))
        print('input shape ', inputdata.shape)
        pseudoinverse = self.pinv(inputdata, self.reg)
        print('pseudoinverse shape:', pseudoinverse.shape)
        # Closed-form ridge solution for the output weights.
        self.W = pseudoinverse.dot(label)

    def pinv(self, A, reg):
        """Ridge-regularized pseudoinverse: (reg*I + A^T A)^-1 A^T.

        np.linalg.solve replaces the deprecated np.mat(...).I and is more
        numerically stable than forming the explicit inverse.
        """
        return np.linalg.solve(reg * np.eye(A.shape[1]) + A.T.dot(A), A.T)

    def decode(self, y_onehot):
        """Map one-hot / score rows back to integer labels (argmax, first tie wins)."""
        Y = []
        for i in range(y_onehot.shape[0]):
            row = np.ravel(y_onehot[i]).tolist()
            Y.append(row.index(max(row)))
        return Y

    def accuracy(self, predictlabel, label):
        """Fraction of matching entries, rounded to 5 decimals."""
        label = np.ravel(label).tolist()
        count = 0
        for i in range(len(label)):
            if label[i] == predictlabel[i]:
                count += 1
        return (round(count / len(label), 5))

    def predict(self, testdata):
        """Predict integer class labels for testdata using the fitted W."""
        testdata = self.normalscaler.transform(testdata)
        test_mappingdata = self.mapping_generator.transform(testdata)
        test_enhencedata = self.enchance_generator.transform(test_mappingdata)

        test_inputdata = np.column_stack((test_mappingdata, test_enhencedata))
        return self.decode(test_inputdata.dot(self.W))

def show_accuracy(predictLabel, Label):
    """Return the fraction of predictions matching the true labels,
    rounded to 5 decimal places."""
    truth = np.ravel(Label).tolist()
    # Index-based comparison (not zip) so a too-short prediction list still
    # raises IndexError, exactly like the original loop.
    hits = sum(1 for i in range(len(truth)) if truth[i] == predictLabel[i])
    return round(hits / len(truth), 5)


if __name__ == '__main__':
    # Locate the four raw MNIST IDX files under the dataset directory.
    input_path = 'dataset/Minist'
    training_images_filepath = join(input_path, 'train-images-idx3-ubyte/train-images-idx3-ubyte')
    training_labels_filepath = join(input_path, 'train-labels-idx1-ubyte/train-labels-idx1-ubyte')
    test_images_filepath = join(input_path, 't10k-images-idx3-ubyte/t10k-images-idx3-ubyte')
    test_labels_filepath = join(input_path, 't10k-labels-idx1-ubyte/t10k-labels-idx1-ubyte')

    dataloader = MnistDataloader(train_images_path=training_images_filepath,
                                 train_labels_path=training_labels_filepath,
                                 test_images_path=test_images_filepath,
                                 test_labels_path=test_labels_filepath)
    x_train, y_train, x_test, y_test = dataloader.load_data()

    # Train the broad learning network and report test accuracy.
    bls = broadNet(enchance_num=20)
    bls.fit(x_train, y_train)
    predicted_labels = bls.predict(x_test)
    print(show_accuracy(predicted_labels, y_test))