import torch
import torch.nn.functional as F
import numpy as np

# define the network
# Minimal two-layer fully-connected network: Linear -> ReLU -> Linear.
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        """Build the net.

        n_feature: number of input features
        n_hidden:  width of the single hidden layer
        n_output:  number of output units
        """
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # input -> hidden
        self.out    = torch.nn.Linear(n_hidden, n_output)   # hidden -> output

    def forward(self, x):
        """Forward pass: ReLU activation on the hidden layer, linear output."""
        hidden_act = F.relu(self.hidden(x))
        return self.out(hidden_act)

# Load the trained net and export its parameters as C float arrays.
# map_location='cpu' lets a model saved on a GPU machine load on a CPU-only one.
# NOTE(review): loading a full pickled module requires the Net class above to be
# in scope; PyTorch >= 2.6 additionally defaults to weights_only=True, which
# rejects full-module pickles — confirm against the torch version in use.
net = torch.load('my_nn_net.net', map_location='cpu')

with open('nn_params.c', 'w') as f_nn:
    f_nn.write('#include <stdio.h>\n\n\n')
    # Emit one C array per state_dict entry (weights and biases).
    for k, v in net.state_dict().items():
        print(k) # show tensor key name
        print(v) # show tensor weights and biases
        # Original shape as a C comment, e.g. torch.Size([3, 2]) -> /* [3][2] */
        f_nn.write(str(v.size()).replace('torch.Size(', '/* ').replace(')', ' */').replace(', ', '][') + '\n' )
        # Flatten to 1-D; detach().cpu() guards against grad-tracking or CUDA
        # tensors, either of which makes .numpy() raise.
        v_array = torch.reshape(v.detach().cpu(), [-1]).numpy()
        f_nn.write('float nn_param_' + k.replace('.', '_') + '[' + str(len(v_array)) + '] =')
        f_nn.write(' {' + ', '.join(str(val) for val in v_array ) + '};\n\n')

    f_nn.write('/* EOF. */\n\n')

print('done.')

# Disabled demo code: builds a toy two-class Gaussian dataset and runs the
# loaded net on it. Kept inside a triple-quoted string so it never executes;
# remove the quotes to re-enable.
'''
# make fake data
n_data = torch.ones(100, 2)
x0 = torch.normal(2*n_data, 1)      # class0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class0 y data (tensor), shape=(100, 1)
x1 = torch.normal(-2*n_data, 1)     # class1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class1 y data (tensor), shape=(100, 1)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2) FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # shape (200,) LongTensor = 64-bit integer

out = net(x)

print(out)
'''


