#!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
#* load the dataset (train/test images, labels, and class-name bytes).
#  NOTE(review): load_data (and sigmoid, used further down) are not defined
#  or imported anywhere in this file — presumably supplied by a course
#  helper module whose import is missing; confirm before running.
train_X_orig, train_Y, test_X_orig, test_Y, classes = load_data()

#* sanity-check one training example: render it and print its label/class
sample_idx = 10
plt.imshow(train_X_orig[sample_idx])
# plt.show()
sample_label = train_Y[0, sample_idx]
print(sample_label)
print(classes[sample_label].decode('utf8'))

#* dataset dimensions
n_train = train_X_orig.shape[0]
n_px = train_X_orig.shape[1]
n_test = test_X_orig.shape[0]

#* preprocessing: flatten each image into a column vector, scale to [0, 1]
train_X = train_X_orig.reshape(n_train, -1).T / 255.
test_X = test_X_orig.reshape(n_test, -1).T / 255.

#* initialization
def initialize_parameters(n_x, n_h, n_y):
  """Seeded small-random init for a 2-layer net.

  Returns a dict with keys 'W1', 'b1', 'W2', 'b2'; weights are
  N(0, 1)*0.01, biases zero. RNG order matches the layer order.
  """
  np.random.seed(1)
  params = {}
  shapes = ((n_h, n_x), (n_y, n_h))
  for layer, (rows, cols) in enumerate(shapes, start=1):
    params['W%d' % layer] = 0.01*np.random.randn(rows, cols)
    params['b%d' % layer] = np.zeros((rows, 1))
  return params
initialize_parameters(2, 1, 2)

#* initialization for an arbitrary-depth network
def initialize_parameters_deep(layer_dims):
  """Return {'W1','b1',...,'WL','bL'} with weights scaled by 1/sqrt(fan_in)."""
  np.random.seed(1)
  params = {}
  for l, (n_prev, n_curr) in enumerate(zip(layer_dims, layer_dims[1:]), start=1):
    params['W%d' % l] = np.random.randn(n_curr, n_prev)/np.sqrt(n_prev)
    params['b%d' % l] = np.zeros((n_curr, 1))
  return params
initialize_parameters_deep([5, 4, 3])

#* forward propagation
def linear_forward(A, W, b):
  """Affine step Z = W·A + b; the cache (A, W, b) feeds backprop."""
  Z = np.dot(W, A) + b
  return Z, (A, W, b)

def relu(z):
  """Elementwise rectifier: z where z > 0, else 0."""
  return np.where(z > 0, z, 0)

def linear_activation_forward(A_prev, W, b, activation):
  """One forward layer: affine step followed by the named activation.

  activation is 'sigmoid' or 'relu'. Returns (A, cache) where
  cache = (linear_cache, Z) is what the backward pass expects.
  The sigmoid is computed inline (same formula as sigmoid_backward)
  because no `sigmoid` function is defined or imported in this file.
  Raises ValueError for an unknown activation instead of failing later
  with an UnboundLocalError.
  """
  z, linear_cache = linear_forward(A_prev, W, b)
  if activation == 'sigmoid':
    A = 1/(1 + np.exp(-z))
  elif activation == 'relu':
    A = relu(z)
  else:
    raise ValueError(f'unknown activation: {activation!r}')
  return A, (linear_cache, z)

def L_model_forward(X, params):
  """Full forward pass: (L-1) relu layers, then a sigmoid output layer.

  Returns (AL, caches) where caches holds one per-layer cache in order.
  """
  n_layers = len(params)//2
  caches = []
  A = X
  # hidden layers: relu
  for l in range(1, n_layers):
    A, cache = linear_activation_forward(
        A, params['W%d' % l], params['b%d' % l], 'relu')
    caches.append(cache)
  # output layer: sigmoid
  AL, cache = linear_activation_forward(
      A, params['W%d' % n_layers], params['b%d' % n_layers], 'sigmoid')
  caches.append(cache)
  return AL, caches

#* cost
def compute_cost(AL, Y):
  """Mean binary cross-entropy between predictions AL and labels Y (0-d array)."""
  losses = Y*np.log(AL) + (1 - Y)*np.log(1 - AL)
  return np.squeeze(-losses.mean(axis=1))

#* backward propagation
def linear_backward(dZ, cache):
  """Gradients of the affine step: returns (dA_prev, dW, db).

  cache is the (A_prev, W, b) tuple stored by linear_forward.
  """
  A_prev, W, _ = cache
  m = A_prev.shape[1]
  dW = np.dot(dZ, A_prev.T)/m
  db = dZ.mean(axis=1, keepdims=True)
  dA_prev = np.dot(W.T, dZ)
  return dA_prev, dW, db

def relu_backward(dA, cache):
  """dZ = dA where the pre-activation Z was positive, else 0."""
  Z = cache
  return np.where(Z > 0, dA, 0.0)

def sigmoid_backward(dA, cache):
  """dZ = dA * s * (1 - s) where s = sigmoid(Z); cache is Z."""
  Z = cache
  sig = 1.0/(1.0 + np.exp(-Z))
  return dA*sig*(1.0 - sig)

def linear_activation_backward(dA, caches, activation):
  """Backprop through one layer: activation gradient, then the affine step.

  caches is (linear_cache, Z) as produced by linear_activation_forward.
  Returns (dA_prev, dW, db). Raises ValueError for an unknown activation
  (the original fell through and crashed on an unbound dZ).
  """
  linear_cache, activation_cache = caches
  if activation == 'relu':
    dZ = relu_backward(dA, activation_cache)
  elif activation == 'sigmoid':
    dZ = sigmoid_backward(dA, activation_cache)
  else:
    raise ValueError(f'unknown activation: {activation!r}')
  return linear_backward(dZ, linear_cache)

def L_model_backward(AL, Y, caches):
  """Backprop through the whole net; returns grads keyed 'dA#', 'dW#', 'db#'.

  Layer L uses the sigmoid derivative, all earlier layers relu; the
  cross-entropy gradient w.r.t. AL seeds the chain.
  """
  grads = {}
  L = len(caches)
  Y = Y.reshape(AL.shape)

  # derivative of binary cross-entropy w.r.t. the output activations
  dA = -(Y/AL - (1 - Y)/(1 - AL))
  grads['dA' + str(L)] = dA

  for layer in range(L, 0, -1):
    act = 'sigmoid' if layer == L else 'relu'
    dA, dW, db = linear_activation_backward(dA, caches[layer - 1], act)
    grads['dA' + str(layer - 1)] = dA
    grads['dW' + str(layer)] = dW
    grads['db' + str(layer)] = db
  return grads

#* update parameters
def update_parameters(params, grads, learning_rate):
  """One gradient-descent step, in place: p ← p − lr·dp for each parameter."""
  for name, value in params.items():
    value -= learning_rate*grads['d' + name]
  return params

#* define and train model
#* two layer model
def two_layer_model(X, Y, layer_dims, learning_rate=0.0075, n_iters=3000, print_cost=False):
  """Train a 2-layer net (LINEAR→RELU→LINEAR→SIGMOID) by batch gradient descent.

  X: flattened inputs (features × examples), Y: labels (1 × examples),
  layer_dims: (n_x, n_h, n_y). Returns the learned parameter dict and,
  when print_cost is set, logs the cost every 100 iterations and plots it.
  """
  np.random.seed(1)
  costs = []
  n_x, n_h, n_y = layer_dims

  # initialization
  params = initialize_parameters(n_x, n_h, n_y)

  for i in range(n_iters):
    # forward pass through both layers
    A1, cache1 = linear_activation_forward(X, params['W1'], params['b1'], 'relu')
    A2, cache2 = linear_activation_forward(A1, params['W2'], params['b2'], 'sigmoid')
    # cost
    cost = compute_cost(A2, Y)
    # backward pass: cross-entropy gradient, then the two layers
    dA2 = -(Y/A2 - (1 - Y)/(1 - A2))
    dA1, dW2, db2 = linear_activation_backward(dA2, cache2, 'sigmoid')
    _, dW1, db1 = linear_activation_backward(dA1, cache1, 'relu')
    grads = {'dW1': dW1, 'db1': db1, 'dW2': dW2, 'db2': db2}
    # update (mutates params in place and returns it)
    params = update_parameters(params, grads, learning_rate)

    if print_cost and i % 100 == 0:
      print(f'Iteration {i}: Cost {cost}')
      costs.append(cost)

  plt.plot(np.squeeze(costs))
  plt.xlabel('iterations')
  plt.ylabel('cost')
  plt.title(f'learning rate = {learning_rate}')
  # plt.show()

  return params

n_x = n_px*n_px*3  # flattened RGB image size (px * px * 3 channels)
n_h = 7            # hidden-layer width
n_y = 1            # single sigmoid output (binary classification)
layers_dims = (n_x, n_h, n_y)
# NOTE(review): n_iters=1 looks like a smoke run — restore the default
# (3000) for a real training pass.
params = two_layer_model(train_X, train_Y, layers_dims, n_iters=1, print_cost=True)

#* L layer model
def L_layer_model(X, Y, layer_dims, learning_rate=0.0075, n_iters=3000, print_cost=False):
  """Train an L-layer net ([LINEAR→RELU]×(L−1) → LINEAR→SIGMOID).

  X: flattened inputs (features × examples), Y: labels (1 × examples),
  layer_dims: list of layer sizes starting with the input size.
  Returns the learned parameter dict — the original's `return params` was
  commented out, so callers below received None and predict() crashed.
  Also drops the per-iteration debug print of AL and every cache.
  """
  np.random.seed(1)
  costs = []

  # initialization
  params = initialize_parameters_deep(layer_dims)

  for i in range(n_iters):
    # forward
    AL, caches = L_model_forward(X, params)
    # cost
    cost = compute_cost(AL, Y)
    # backward
    grads = L_model_backward(AL, Y, caches)
    # update
    params = update_parameters(params, grads, learning_rate)

    if print_cost and i % 100 == 0:
      print(f'Iteration {i}: Cost {cost}')
      costs.append(cost)

  plt.plot(np.squeeze(costs))
  plt.xlabel('iterations')
  plt.ylabel('cost')
  plt.title(f'learning rate = {learning_rate}')
  plt.show()

  return params

layer_dims = [12288, 20, 7, 5, 1]  # 12288 = 64*64*3 input features, 1 sigmoid output
# NOTE(review): n_iters=1 is a smoke run — restore the default (3000) for
# a real training pass. `params` is reassigned here and used by predict below.
params = L_layer_model(train_X, train_Y, layer_dims, n_iters=1, print_cost=True)
#* predict
def predict(X, y, params):
  """Binary predictions (threshold 0.5) for X; prints accuracy against y.

  Returns a (1, m) float array of 0.0/1.0 predictions.
  """
  probas, _ = L_model_forward(X, params)
  p = (probas > 0.5).astype(float)
  print(f'Accuracy: {np.mean(p==y)}')
  return p
# evaluate on both splits (predict prints the accuracy as a side effect)
train_yhat = predict(train_X, train_Y, params)
test_yhat = predict(test_X, test_Y, params)