from builtins import range
from builtins import object
import numpy as np

from layers import *
from layer_utils import *


class TwoLayerNet(object):
    """A two-layer fully connected network with a softmax loss.

    Architecture: affine - affine - softmax.  The ReLU between the two
    affine layers is deliberately disabled (see ``loss``), so the network
    is currently linear end-to-end.

    Learnable parameters live in ``self.params``.  After a labeled call to
    ``loss()`` the most recent loss and gradients are cached in
    ``self.loss_value`` / ``self.grads`` for use by ``step()``.
    """

    def __init__(
        self,
        input_dim=28*28,
        hidden_dim=100,
        num_classes=10,
        weight_scale=1e-3,
        reg=0.0,
    ):
        """Initialize weights with small Gaussian noise and zero biases.

        Args:
            input_dim: Size of each flattened input vector.
            hidden_dim: Number of hidden units.
            num_classes: Number of output classes.
            weight_scale: Std-dev of the Gaussian weight initialization.
            reg: L2 regularization strength.
        """
        self.params = {}
        self.reg = reg

        self.params['W1'] = (np.random.randn(input_dim, hidden_dim) * weight_scale).astype(np.float32)
        self.params['W2'] = (np.random.randn(hidden_dim, num_classes) * weight_scale).astype(np.float32)
        # Biases are (1, dim) row vectors so they broadcast over the batch.
        self.params['b1'] = np.zeros((1, hidden_dim), dtype=np.float32)
        self.params['b2'] = np.zeros((1, num_classes), dtype=np.float32)

        # Cached by loss() for step().
        self.loss_value = None
        self.grads = {}

    def loss(self, X, y=None):
        """Compute class scores and, when ``y`` is given, loss and gradients.

        Args:
            X: Input batch of shape (N, input_dim).
            y: Optional integer labels of shape (N,).

        Returns:
            scores of shape (N, num_classes) if ``y`` is None; otherwise a
            tuple (loss, grads) where ``grads`` has the same keys as
            ``self.params``.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        N = X.shape[0]

        # Forward pass.  NOTE: the ReLU non-linearity is intentionally
        # skipped, so the hidden layer is a plain affine transform.
        h1, _ = affine_forward(X, W1, b1)
        scores, _ = affine_forward(h1, W2, b2)
        if y is None:
            return scores

        # Numerically stable softmax: shift by the row max before exp so
        # large scores cannot overflow (the shift cancels in the ratio).
        # Previously the loss used an unshifted log-sum-exp, which could
        # overflow even though the gradient path was already shifted; the
        # stable form computes the same mathematical value.
        shifted = scores - np.max(scores, axis=1, keepdims=True)
        exp_scores = np.exp(shifted)
        probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)

        # Cross-entropy data loss + L2 regularization on the weights.
        data_loss = -np.mean(np.log(probs[np.arange(N), y]))
        loss = data_loss + 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2))

        # Backward pass.
        dscores = probs.copy()                          # (N, C)
        dscores[np.arange(N), y] -= 1
        dscores /= N

        dW2 = np.dot(h1.T, dscores) + self.reg * W2     # (H, C)
        db2 = np.sum(dscores, axis=0, keepdims=True)    # (1, C)

        dh1 = np.dot(dscores, W2.T)                     # (N, H)
        # No ReLU mask here because the ReLU is disabled in the forward pass.

        dW1 = np.dot(X.T, dh1) + self.reg * W1          # (D, H)
        db1 = np.sum(dh1, axis=0, keepdims=True)        # (1, H)

        grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2}

        self.loss_value = loss
        self.grads = grads

        return loss, grads

    def step(self):
        """Apply one vanilla SGD update using the cached gradients.

        Bug fix: the update previously scaled the gradient by the cached
        loss value (param -= lr * loss * grad), which is not SGD and made
        the effective step size drift with the loss magnitude.
        """
        learning_rate = 1e-4
        for name in ('W1', 'b1', 'W2', 'b2'):
            self.params[name] -= learning_rate * self.grads[name]

    def totype_int(self):
        """Cast every parameter to int32.

        Bug fix: ``np.astype(arr, dtype)`` only exists in NumPy >= 2.0 and
        raised AttributeError on 1.x; the ``ndarray.astype`` method works
        on every NumPy version.
        """
        for name in self.params:
            self.params[name] = self.params[name].astype(np.int32)

    def totype_float(self):
        """Cast every parameter to float32 (inverse of ``totype_int``)."""
        for name in self.params:
            self.params[name] = self.params[name].astype(np.float32)