import numpy as np
from simulation.lr_utils import *
import matplotlib.pyplot as plt
import scipy.io as scio

def initialize_parameters(layers_dims):
    """Initialize weights (scaled normal draws) and zero biases for each layer.

    Args:
        layers_dims: sequence of layer sizes; layers_dims[0] is the input size.

    Returns:
        Dict mapping "Wl" to arrays of shape (layers_dims[l], layers_dims[l-1])
        and "bl" to zero columns of shape (layers_dims[l], 1).
    """
    # Fixed seed so every run starts from the same weights.
    np.random.seed(3)
    parameters = {}
    for layer in range(1, len(layers_dims)):
        fan_in = layers_dims[layer - 1]
        fan_out = layers_dims[layer]
        # Scale by 1/sqrt(fan_in) to keep activations from blowing up.
        parameters["W" + str(layer)] = np.random.randn(fan_out, fan_in) / np.sqrt(fan_in)
        parameters["b" + str(layer)] = np.zeros((fan_out, 1))
    return parameters

def linear_forward(A_pre, W, b):
    """Affine step Z = W @ A_pre + b; also return the inputs for backprop."""
    Z = W.dot(A_pre) + b
    return Z, (A_pre, W, b)

def relu(Z):
    """Element-wise ReLU. Returns (A, cache) where cache is the input Z."""
    return np.maximum(Z, 0), Z

def sigmoid(Z):
    """Element-wise logistic sigmoid. Returns (A, cache) where cache is Z."""
    A = 1.0 / (1.0 + np.exp(-Z))
    return A, Z

def activation_forward(A_pre, W, b, activation="relu"):
    """One forward layer: affine step followed by the chosen non-linearity.

    Args:
        A_pre: activations from the previous layer.
        W, b: this layer's parameters.
        activation: "relu" or "sigmoid".

    Returns:
        (A, cache) where cache = (linear_cache, activation_cache) for backprop.
    """
    Z, linear_cache = linear_forward(A_pre, W, b)
    if activation == "relu":
        A, activation_cache = relu(Z)
    elif activation == "sigmoid":
        A, activation_cache = sigmoid(Z)
    return A, (linear_cache, activation_cache)

def calculate_cost(AL, Y):
    """Compute the mean cross-entropy cost over the batch.

    Args:
        AL: output-layer activations, shape (n_y, m), values in (0, 1).
        Y: ground-truth labels, same shape as AL.

    Returns:
        Scalar cost (0-d numpy value).
    """
    m = Y.shape[1]
    # Cross-entropy: -(1/m) * sum(Y*log(AL) + (1-Y)*log(1-AL)).
    # NOTE(review): AL exactly 0 or 1 makes np.log diverge; callers feed
    # sigmoid outputs, which stay strictly inside (0, 1).
    cost = -np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) / m
    cost = np.squeeze(cost)
    assert (cost.shape == ())
    return cost

def relu_backward(dA, activation_cache):
    """Backprop through ReLU.

    Gradients pass through unchanged where the forward input Z was positive
    and are zeroed where Z <= 0. The incoming dA is not modified.
    """
    Z = activation_cache
    dZ = np.array(dA, copy=True)
    inactive = Z <= 0
    dZ[inactive] = 0
    return dZ

def linear_backward(dZ, linear_cache):
    """Backprop through the affine step Z = W @ A_pre + b.

    Args:
        dZ: gradient of the cost w.r.t. Z, shape (n_l, m).
        linear_cache: (A_pre, W, b) saved by the forward pass.

    Returns:
        (dA_pre, dW, db): gradients w.r.t. the previous layer's activations
        and this layer's parameters, averaged over the m examples.
    """
    A_pre, W, _b = linear_cache
    batch = A_pre.shape[1]
    # Parameter gradients are averaged over the batch; dA_pre is not
    # (the shapes follow from Z = W @ A_pre + b).
    dW = dZ.dot(A_pre.T) / batch
    db = dZ.sum(axis=1, keepdims=True) / batch
    dA_pre = W.T.dot(dZ)
    return dA_pre, dW, db

def sigmoid_backward(dA, activation_cache):
    """Backprop through the sigmoid: dZ = dA * s(Z) * (1 - s(Z))."""
    Z = activation_cache
    s = 1.0 / (1.0 + np.exp(-Z))
    return dA * s * (1.0 - s)

def activation_backward(dA, cache, activation="relu"):
    """One backward layer: undo the non-linearity, then the affine step.

    Args:
        dA: gradient w.r.t. this layer's activations.
        cache: (linear_cache, activation_cache) from activation_forward.
        activation: "relu" or "sigmoid".

    Returns:
        (dA_pre, dW, db): gradients for the previous layer's activations
        and this layer's parameters.
    """
    linear_cache, activation_cache = cache
    if activation == "relu":
        dZ = relu_backward(dA, activation_cache)
    elif activation == "sigmoid":
        dZ = sigmoid_backward(dA, activation_cache)
    return linear_backward(dZ, linear_cache)

def update_parameters(parameters, gradients, alpha):
    """One gradient-descent step: p <- p - alpha * dp for every W and b.

    Updates the entries of `parameters` (rebinding each array to a fresh
    one) and returns the same dict.
    """
    num_layers = len(parameters) // 2
    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        parameters[w_key] = parameters[w_key] - alpha * gradients["d" + w_key]
        parameters[b_key] = parameters[b_key] - alpha * gradients["d" + b_key]
    return parameters

def two_layer_nn(X,Y,layer_dims,alpha=0.0075,iterations=2500,print_cost=False,isPlot=False):
    """Train a two-layer network (LINEAR->RELU->LINEAR->SIGMOID) by gradient descent.

    Args:
        X: input data, shape (n_x, m).
        Y: labels, shape (n_y, m).
        layer_dims: (n_x, n_h, n_y) layer sizes.
        alpha: learning rate.
        iterations: number of gradient-descent steps.
        print_cost: if True, print the cost every 100 iterations.
        isPlot: if True, plot the recorded cost curve after training.
            (Fix: this flag was previously accepted but never used.)

    Returns:
        Trained parameters dict with keys W1, b1, W2, b2.
    """
    np.random.seed(1)
    gradients = {}
    costs = []
    parameters = initialize_parameters(layer_dims)
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    for i in range(0, iterations):
        # Forward pass: ReLU hidden layer, then sigmoid output layer.
        A1, cache1 = activation_forward(X, W1, b1, "relu")
        A2, cache2 = activation_forward(A1, W2, b2, "sigmoid")
        cost = calculate_cost(A2, Y)
        # Derivative of the cross-entropy cost w.r.t. the output activations.
        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))

        dA1, dW2, db2 = activation_backward(dA2, cache2, "sigmoid")
        dA0, dW1, db1 = activation_backward(dA1, cache1, "relu")

        gradients["dW1"] = dW1
        gradients["db1"] = db1
        gradients["dW2"] = dW2
        gradients["db2"] = db2

        parameters = update_parameters(parameters, gradients, alpha)
        W1 = parameters["W1"]
        W2 = parameters["W2"]
        b1 = parameters["b1"]
        b2 = parameters["b2"]
        if i % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("第",i,"次迭代，成本为：",np.squeeze(cost))
    if isPlot:
        # Same cost-curve plot that L_layer_nn produces.
        plt.plot(np.squeeze(costs))
        plt.ylabel("cost")
        plt.xlabel("iterations")
        plt.title("alpha = " + str(alpha))
        plt.show()
    return parameters

def L_nn_forward(X,parameters):
    """Full forward pass: (L-1) ReLU layers followed by a sigmoid output layer.

    Args:
        X: input data, shape (n_x, m).
        parameters: dict of "Wl"/"bl" arrays from initialize_parameters.

    Returns:
        (AL, caches): output activations and the per-layer caches
        (caches[0] belongs to layer 1).
    """
    caches = []
    A = X
    num_layers = len(parameters) // 2
    # Hidden layers 1 .. L-1 use ReLU.
    for layer in range(1, num_layers):
        W = parameters["W" + str(layer)]
        b = parameters["b" + str(layer)]
        A, cache = activation_forward(A, W, b, "relu")
        caches.append(cache)
    # Output layer L uses sigmoid.
    W_out = parameters["W" + str(num_layers)]
    b_out = parameters["b" + str(num_layers)]
    AL, cache = activation_forward(A, W_out, b_out, "sigmoid")
    caches.append(cache)
    return AL, caches

def L_nn_backward(AL,Y,caches):
    """Full backward pass for the L-layer network.

    Args:
        AL: output activations from L_nn_forward, shape (n_y, m).
        Y: labels with the same number of elements as AL.
        caches: per-layer caches from L_nn_forward (caches[0] = layer 1).

    Returns:
        Dict of gradients "dAl", "dWl", "dbl" for each layer l (1-indexed).
    """
    gradients = {}
    L = len(caches)
    # Fix: reshape Y to AL's full shape. The previous code used
    # Y.reshape(AL.shape[1]), i.e. a 1-D vector of length m, which raises
    # ValueError whenever the output layer has more than one unit
    # (n_y = 2 in this file's main script).
    Y = Y.reshape(AL.shape)
    # Derivative of the cross-entropy cost w.r.t. the output activations.
    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))
    # caches are 0-indexed while gradient keys are 1-indexed by layer.
    current_cache = caches[L-1]
    gradients["dA" + str(L)], gradients["dW" + str(L)], gradients["db" + str(L)] = activation_backward(dAL,current_cache,"sigmoid")
    for l in reversed(range(L-1)):
        current_cache = caches[l]
        # dA(l+2) was produced by the deeper layer's backward step;
        # the cache at index l belongs to layer l+1.
        dA,dW,db = activation_backward(gradients["dA"+str(l+2)],current_cache,"relu")
        gradients["dA" + str(l+1)] = dA
        gradients["dW" + str(l+1)] = dW
        gradients["db" + str(l+1)] = db
    return gradients

def L_layer_nn(X,Y,layers_dims,alpha=0.0075,iteration=2500,print_cost=True,is_plot=False):
    """Train an L-layer network (ReLU hidden layers, sigmoid output).

    Args:
        X: input data, shape (n_x, m).
        Y: labels, shape (n_y, m).
        layers_dims: sequence of layer sizes, input first.
        alpha: learning rate.
        iteration: number of gradient-descent steps.
        print_cost: if True, print the cost every 100 iterations.
        is_plot: if True, plot the recorded cost curve after training.

    Returns:
        Trained parameters dict.
    """
    np.random.seed(1)
    costs = []
    parameters = initialize_parameters(layers_dims)
    for step in range(iteration):
        # Forward, cost, backward, update — one full gradient-descent step.
        AL, layer_caches = L_nn_forward(X, parameters)
        cost = calculate_cost(AL, Y)
        gradients = L_nn_backward(AL, Y, layer_caches)
        parameters = update_parameters(parameters, gradients, alpha)
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print("第",step,"次迭代,代价为:",np.squeeze(cost))
    if is_plot:
        plt.plot(np.squeeze(costs))
        plt.ylabel("cost")
        plt.xlabel("iterations")
        plt.title("alpha = " + str(alpha))
        plt.show()
    return parameters

def predict(X,Y,parameters):
    """Run a forward pass and return the raw network outputs.

    Args:
        X: inputs, shape (n_x, m).
        Y: labels; currently unused (the original thresholding/accuracy code
           was dead), kept so existing call sites keep working.
        parameters: trained "Wl"/"bl" dict.

    Returns:
        AL: output activations, shape (n_y, m).
    """
    # Removed: unused locals (m, n, p) and the commented-out binary-accuracy
    # block, which only made sense for a single sigmoid output unit.
    AL, _caches = L_nn_forward(X, parameters)
    return AL

if __name__ == '__main__':
    # (dead code kept for reference) earlier cat-image classification experiment:
    # train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
    # train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T
    # test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T
    # print(train_x_flatten)
    # train_x = train_x_flatten / 255
    # train_y = train_set_y
    # test_x = test_x_flatten / 255
    # test_y = test_set_y
    # print('测试集')
    # print(test_y)
    # print(train_x)
    # print(train_x.shape)
    # print(train_y.shape)

    # Load the offline fingerprint data (collected off-line) and online traces.
    offline_data = scio.loadmat('sim_data/offline_data_random.mat')
    online_data = scio.loadmat('sim_data/online_data.mat')

    # First 1000 offline samples for training; first 100 online samples for testing.
    offline_location, offline_rss \
        = offline_data['offline_location'][:1000], offline_data['offline_rss'][:1000]
    trace, rss = online_data['trace'][0:100, :], online_data['rss'][0:100, :]
    print(offline_location.shape)
    print(offline_rss.shape)
    print(trace.shape)
    print(rss.shape)
    # Transpose to (features, samples) — the layout the network expects.
    offline_location , offline_rss \
        = offline_location.reshape(offline_location.shape[0],-1).T ,offline_rss.reshape(offline_rss.shape[0],-1).T
    # offline_rss = offline_rss.reshape(offline_rss.shape[0],-1).T
    # Two-layer network configuration: n_x inputs -> n_h hidden units -> n_y outputs.
    # n_x = 6
    # n_h = 7
    # n_y = 1
    n_x = 6
    n_h = 9
    n_y = 2
    #
    layers_dims = (n_x, n_h, n_y)
    # NOTE(review): the commented-out call below uses keyword names
    # `iterations` / `isPlot`, but L_layer_nn's signature is
    # `iteration` / `is_plot` — it would raise TypeError if re-enabled as-is.
    # parameters = L_layer_nn(offline_rss, offline_location,
    #                           layers_dims, iterations=2500,
    #                              print_cost=True, isPlot=True)
    # layers_dims = [12288, 20, 7, 5, 1]

    parameters = two_layer_nn(offline_rss, offline_location, layers_dims,  iterations=2500, print_cost=True,
                                isPlot=False)
    predictions_train = predict(offline_rss, offline_location, parameters)  # training set
    predictions_test = predict(rss, trace, parameters)  # test set

    # AL = predict(rss, trace, parameters)
    # AL = predict(test_x, test_y, parameters)
    # print(AL)
    # enum_dims = [[12288, 20, 7, 5, 1],[12288, 30, 20, 7, 5, 1],[12288, 50,40,30, 20, 7, 5, 1]]
    # print(len(enum_dims))
    # # for i in range(len(enum_dims)):
    # #     print(enum_dims[i])
    # # deeper/wider layers with too few iterations still underfit
    # layers_dims = [12288, 30, 20, 7, 5, 1]
    # for i in range(len(enum_dims)):
    #     parameters = L_layer_nn(train_x, train_y, enum_dims[i], alpha=0.0075, iteration=2500, print_cost=True,
    #                             is_plot=False)
    #     print("the ",i)
    #     predictions_train = predict(train_x, train_y, parameters)  # training set
    #     predictions_test = predict(test_x, test_y, parameters)  # test set