import numpy as np

# Inverted dropout: training is slowed down somewhat, but the test phase needs no change:
class Dropout():
    """Inverted-dropout layer.

    During training, each input element is zeroed with probability
    ``drop_rate`` and the survivors are scaled by ``1 / (1 - drop_rate)``
    so the expected activation is unchanged ("expectation correction").
    At test time both ``forward`` and ``backward`` are the identity — the
    whole point of the inverted formulation.

    Fixes vs. the original implementation:
      * ``backward`` now applies the same ``1 / fix_value`` scaling as
        ``forward``; since y = x * mask / fix_value, dy/dx = mask / fix_value.
        The original returned ``eta * mask`` only, shrinking gradients by
        a factor of (1 - drop_rate).
      * The mask is sampled element-wise (``x.shape``) instead of once per
        feature column, which is standard dropout and also lets inputs be
        any rank, not just 2-D.
      * ``drop_rate == 1`` is rejected up front — it would divide by zero.
    """

    def __init__(self, drop_rate=0.5, is_train=True):
        # drop_rate must leave a nonzero keep-probability, otherwise
        # fix_value == 0 and forward/backward would divide by zero.
        if not 0.0 <= drop_rate < 1.0:
            raise ValueError("drop_rate must be in [0, 1), got %r" % (drop_rate,))
        self.drop_rate = drop_rate
        self.is_train = is_train
        self.fix_value = 1 - drop_rate   # keep-probability, used to correct the expectation

    def forward(self, x):
        """Apply dropout to ``x``.

        Training mode: samples a fresh Boolean keep-mask of ``x.shape``
        (True = neuron kept), stores it for ``backward``, and returns the
        masked, expectation-corrected activations.
        Test mode: returns ``x`` unchanged.
        """
        if not self.is_train:          # test phase: identity
            return x
        # Element-wise mask; True marks the neurons that are kept.
        self.save_mask = np.random.uniform(0, 1, x.shape) > self.drop_rate
        return (x * self.save_mask) / self.fix_value

    def backward(self, eta):
        """Backpropagate the upstream gradient ``eta`` through the mask.

        Uses the mask saved by the preceding ``forward`` call and the same
        1/fix_value scaling, so the gradient matches the forward transform.
        Test mode: returns ``eta`` unchanged.
        """
        if not self.is_train:
            return eta
        return (eta * self.save_mask) / self.fix_value
