import keras
# import keras.backend as K
# import tensorflow as tf
# import numpy as np
#
#
# sparse = np.array([[0, 1, 0], [1, 0, 0]]).reshape(-1, 3)
# probability = np.array([[0.3, 0.5, 0.2], [0.2, 0.1, 0.7]])
# label = K.constant(np.array([0, 2]), dtype=tf.int32)
# logit = K.constant(np.array([[11, 12, 1], [10, 1, 1]]))
# sparse = K.constant(sparse)
# probability = K.constant(probability)
# ce = K.binary_crossentropy(sparse, probability)
# sparse_ce = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logit)
# with tf.Session() as sess:
#     print (sess.run(ce))
#     print (sess.run(sparse_ce))
#     print (sess.run(tf.nn.softmax(logits=logit)))
#     print (sess.run(tf.one_hot([0, 2, 3], depth=10)))

import sys


def get_input(read_line=input):
    """Read the problem input: a header line ``n r`` then ``n`` matrix rows.

    Every line is whitespace-separated integers.  The original code mapped
    ``int`` over the raw line character-by-character, which crashes on any
    space (or multi-digit number); splitting the line first fixes that.

    Args:
        read_line: callable used to fetch one input line; defaults to the
            builtin ``input`` so interactive use is unchanged.  Injectable
            for testing.

    Returns:
        A pair ``(n_r, f_list)`` where ``n_r`` is the list ``[n, r]`` and
        ``f_list`` is the list of ``n`` integer rows.

    Raises:
        ValueError: if a line contains a non-integer token.
    """
    n_r = list(map(int, read_line('input ints').split()))
    # First header value n gives the number of matrix rows to read.
    f_list = [list(map(int, read_line('input ints').split()))
              for _ in range(n_r[0])]
    return n_r, f_list


def output_matrix(n_r, f_list):
    """Print (and return) the n x n matrix of diamond-neighbourhood sums.

    Output entry (i, j) is the sum of ``f_list[p][q]`` over every in-bounds
    cell (p, q) whose Manhattan distance ``|p - i| + |q - j|`` is at most r.

    Args:
        n_r: pair ``(n, r)`` — matrix size and neighbourhood radius.
        f_list: n x n matrix of integers.

    Returns:
        The computed n x n matrix as a list of lists.  It is also printed,
        preserving the original script's output behaviour.
    """
    n, r = n_r
    out_matrix = []
    for i in range(n):
        row = []
        for j in range(n):
            total = 0
            # Clamp the row window to the matrix bounds instead of testing
            # every candidate index for validity.
            for p in range(max(0, i - r), min(n, i + r + 1)):
                # Manhattan budget left for the column offset at this row;
                # restricting q to it makes the distance check redundant.
                rem = r - abs(p - i)
                for q in range(max(0, j - rem), min(n, j + rem + 1)):
                    total += f_list[p][q]
            row.append(total)
        out_matrix.append(row)
    print(out_matrix)
    return out_matrix


if __name__ == '__main__':
    # Guard the driver so importing this module does not block on input().
    n_r, f_list = get_input()
    output_matrix(n_r, f_list)