def conv_forward(x, w, b, conv_param):
    """Naive forward pass for a 2D convolution layer.

    Args:
        x: Input tensor of shape (N, C, H, W).
        w: Filter weights of shape (F, C, HH, WW).
        b: Biases of shape (F,).
        conv_param: Dict with optional keys:
            - 'stride': step between receptive fields (default 1).
            - 'pad': zero-padding applied to each spatial border (default 0).

    Returns:
        Tuple (out, cache):
            out: Output tensor of shape (N, F, H', W') where
                 H' = 1 + (H + 2*pad - HH) // stride and similarly for W'.
            cache: (x, w, b, conv_param) saved for the backward pass.
    """
    N, _, H, W = x.shape
    F, _, HH, WW = w.shape
    stride = conv_param.get('stride', 1)
    pad = conv_param.get('pad', 0)

    # Check for parameter sanity
    assert (H + 2 * pad -
            HH) % stride == 0, 'Sanity Check Status: Conv Layer Failed in Height.'
    assert (W + 2 * pad -
            WW) % stride == 0, 'Sanity Check Status: Conv Layer Failed in Width.'
    H_prime = 1 + (H + 2 * pad - HH) // stride
    W_prime = 1 + (W + 2 * pad - WW) // stride

    # F.pad takes no device/dtype kwargs (it preserves both from x);
    # passing them raised a TypeError in the original.
    x_pad = torch.nn.functional.pad(x, (pad, pad, pad, pad))
    out = torch.zeros(N, F, H_prime, W_prime, device=x.device, dtype=x.dtype)

    for n in range(N):
        for f in range(F):
            for j in range(H_prime):
                for i in range(W_prime):
                    # Inner product of filter f with the receptive field,
                    # plus the bias. The original omitted the multiply by
                    # w[f], computing a plain window sum instead of a
                    # convolution.
                    out[n, f, j, i] = (
                        x_pad[n, :, j*stride:j*stride+HH, i*stride:i*stride+WW]
                        * w[f]).sum() + b[f]

    cache = (x, w, b, conv_param)

    return out, cache


def conv_backward(dout, cache):
    """Naive backward pass for a 2D convolution layer.

    Args:
        dout: Upstream gradients of shape (N, F, H', W').
        cache: Tuple (x, w, b, conv_param) as produced by conv_forward.

    Returns:
        Tuple (dx, dw, db):
            dx: Gradient w.r.t. x, shape (N, C, H, W).
            dw: Gradient w.r.t. w, shape (F, C, kH, kW).
            db: Gradient w.r.t. b, shape (F,).
    """
    x, w, b, conv_param = cache
    N, C, H, W = x.shape
    F, _, kH, kW = w.shape
    # Original read the misspelled key 'strid', silently falling back to
    # stride=1 and disagreeing with the forward pass.
    stride = conv_param.get('stride', 1)
    pad = conv_param.get('pad', 0)

    # F.pad preserves x's device and dtype; the original's `.to(dtype)`
    # referenced an undefined name and raised a NameError.
    x_pad = torch.nn.functional.pad(x, (pad, pad, pad, pad))

    H_prime = 1 + (H + 2 * pad - kH) // stride
    W_prime = 1 + (W + 2 * pad - kW) // stride

    dx_pad = torch.zeros_like(x_pad)
    dw = torch.zeros_like(w)
    db = torch.zeros_like(b)

    for n in range(N):
        for f in range(F):
            # Bias receives the sum of upstream gradients over all
            # spatial positions (accumulated across the batch).
            db[f] += torch.sum(dout[n, f])
            for j in range(H_prime):
                for i in range(W_prime):
                    # Each output element distributes its gradient to the
                    # filter (scaled by its receptive field) and to the
                    # padded input (scaled by the filter). The original
                    # called dout(n, f, j, i) instead of indexing it.
                    g = dout[n, f, j, i]
                    dw[f] += x_pad[n, :, j*stride:j*stride+kH,
                                   i*stride:i*stride+kW] * g
                    dx_pad[n, :, j*stride:j*stride+kH,
                           i*stride:i*stride+kW] += w[f] * g

    # Strip the padding. The original's width index was the scalar
    # expression `pad+pad+W` instead of the slice `pad:pad+W`.
    dx = dx_pad[:, :, pad:pad+H, pad:pad+W]

    return dx, dw, db