# import torch
# import torch.nn as nn
# a = torch.rand(1,8,7,7)
# b = torch.rand(1,8,7,32)
# c = torch.matmul(a,b)
# print(c.shape)
# def mlp(input_dim, mlp_dims, last_relu=False):
#     layers = []
#     mlp_dims = [input_dim] + mlp_dims
#     for i in range(len(mlp_dims) - 1):
#         layers.append(nn.Linear(mlp_dims[i], mlp_dims[i + 1]))
#         # nn.init.orthogonal(layers[-1].weight)
#         if i != len(mlp_dims) - 2 or last_relu:
#             #layers.append(nn.ReLU())
#             layers.append(nn.LeakyReLU(negative_slope=-0.2))
#     net = nn.Sequential(*layers)
#     return net
#
# input = torch.rand(1,4,7,32).cuda()
# emd = nn.Sequential(
#             nn.Linear(32, 16),
#             nn.ReLU(),
#             nn.Linear(16, 5)
#         ).cuda()
# print(emd(input).shape)

# Write two placeholder rows of five zero floats to ./data.txt.
# Fixes: the original opened the file without ever closing it (leaked handle,
# relied on interpreter teardown to flush), built the path via a pointless
# '.' + '/data.txt' concatenation, and used read/write mode 'w+' for a
# write-only file. A context manager guarantees flush + close.
with open('./data.txt', 'w') as fw:
    for _ in range(2):  # two identical zero rows, as in the original
        print("%f %f %f %f %f" % (0, 0, 0, 0, 0), file=fw)
