import torch
import torch.nn as nn
from torch.autograd import Variable
#from carbon_net import CarbonNet
from net import *
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt
import cmds


class Averager(object):
    """Accumulates a running sum and element count to report an average.

    Used here to average per-batch losses over an epoch. Accepts autograd
    Variables, plain tensors, or ordinary Python numbers.
    """

    def __init__(self):
        self.reset()

    def add(self, v):
        """Fold a value into the running average.

        A Variable/Tensor contributes ``numel`` elements and its summed value;
        a plain number contributes a single element.
        """
        if isinstance(v, Variable):
            count = v.data.numel()
            v = v.data.sum()
        elif isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()
        else:
            # Bug fix: a plain Python number previously left `count` unbound
            # and raised UnboundLocalError on the line below.
            count = 1
        self.n_count += count
        self.sum += v

    def reset(self):
        # Clear accumulated state so the instance can be reused per epoch.
        self.n_count = 0
        self.sum = 0

    def val(self):
        """Return the current average, or 0 if nothing has been added."""
        res = 0
        if self.n_count != 0:
            res = self.sum / float(self.n_count)
        return res
        
        
def load_data(path='C60Ih_ZE.npz', train_frac=0.8, car_path='../C60.car'):
    """Load the Z/E dataset, shuffle it, split train/test, and build adjacency.

    Args:
        path: .npz archive with arrays 'Z' (atom types) and 'E' (energies).
        train_frac: fraction of samples assigned to the training split.
        car_path: structure file from which the shared adjacency matrix is read.

    Returns:
        (z_train, e_train, z_test, e_test, adj) where adj is a float tensor
        on the module-level `device`.
    """
    data = np.load(path)
    e = data['E'].astype('float32')
    z = data['Z']
    # Shuffle before splitting so train/test draws are random.
    idx = np.random.permutation(e.shape[0])
    e = e[idx]
    z = z[idx]
    spl = int(e.shape[0] * train_frac)
    z_train, e_train = z[:spl], e[:spl]
    z_test, e_test = z[spl:], e[spl:]
    # All samples share one molecular topology; its adjacency matrix is the
    # fixed graph input to the GCN.
    adj = cmds.read(car_path).adjacent()
    adj = torch.tensor(adj).to(device).float()
    return z_train, e_train, z_test, e_test, adj


# --- Training hyperparameters -------------------------------------------
num_epochs = 5000
batch_size = 16
learning_rate = 0.001

# Use the first CUDA GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
torch.set_default_dtype(torch.float32)
# GCN16 comes from `net` (star-imported at the top of the file).
model = GCN16()
# Alternative: resume from a checkpoint instead of training from scratch.
#model = torch.load('BN28\\GCN6_28_spl80.pth', map_location=lambda storage, loc: storage)
#model.load_state_dict(model.state_dict())
#model.eval()
model.to(device)
# Mean-squared-error loss averaged over batch elements.
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    
    
def test(adj, z_test, prop):
    """Evaluate the global model on a held-out split.

    Args:
        adj: shared adjacency tensor for every sample.
        z_test: integer atom-type array, one row per structure.
        prop: target property values aligned with z_test (energies here).

    Returns:
        Mean criterion loss over all complete batches (trailing partial
        batch is dropped, matching the training loop).
    """
    model.eval()
    loss_avg = Averager()
    # Bug fix: without no_grad() every evaluation batch builds an autograd
    # graph that is kept alive by the accumulated loss tensors.
    with torch.no_grad():
        for i in range(z_test.shape[0] // batch_size):
            z = torch.LongTensor(z_test[i*batch_size:(i+1)*batch_size]).to(device)
            p = torch.tensor(prop[i*batch_size:(i+1)*batch_size]).to(device)
            outputs = model(adj, z)
            loss = criterion(outputs, p)
            loss_avg.add(loss)
    return loss_avg.val()


def train():
    """Train the global model, reporting RMSE*1000 every 100 epochs and
    checkpointing whenever the epoch-average training loss improves."""
    z_train, e_train, z_test, e_test, adj = load_data()

    loss_avg = Averager()
    min_loss = float('inf')
    for epoch in range(num_epochs):
        model.train()
        for i in range(e_train.shape[0]//batch_size):
            z = torch.LongTensor(z_train[i*batch_size:(i+1)*batch_size]).to(device)
            e = torch.tensor(e_train[i*batch_size:(i+1)*batch_size]).to(device)
            outputs = model(adj, z)
            loss = criterion(outputs, e)
            loss_avg.add(loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # Bug fix: checkpointing previously compared the *last batch's* loss,
        # not the epoch average that is actually reported. Use the average.
        epoch_loss = loss_avg.val()
        if epoch % 100 == 0:
            # Losses are MSE; report sqrt(MSE)*1000 = RMSE in milli-units.
            print('    epoch {}, train_loss={:.6f}, test_loss={:.6f}'.format(
                epoch, sqrt(epoch_loss)*1000, sqrt(test(adj, z_test, e_test))*1000))
        if epoch_loss < min_loss:
            min_loss = epoch_loss
            torch.save(model, 'gcn15_C60Ih_ZE.pth')
        loss_avg.reset()


def valid():
    """Run a saved checkpoint on the held-out split and print predictions.

    Prints the overall criterion loss followed by one `(target, prediction)`
    pair per test sample.
    """
    z_train, e_train, z_test, e_test, adj = load_data()
    z = torch.LongTensor(z_test).to(device)
    e = torch.tensor(e_test).to(device)

    model = torch.load('gcn14.pth', map_location=lambda storage, loc: storage)
    # Bug fix: map_location forces the model onto the CPU, but z/e/adj live
    # on `device` — move the model across so inference works on CUDA hosts.
    model.to(device)
    model.eval()
    with torch.no_grad():
        outputs = model(adj, z)
        loss = criterion(outputs, e)
    print(loss.item())
    for a, b in zip(e, outputs):
        print(a.item(), b.item())
    #np.savez('C98_valid.npz', e, outputs.detach())


def predict():
    """Predict energies for the structures named in C53N7_forbidden.txt and
    write one prediction per line to predict_result.txt."""
    import sys
    sys.path.append('..')
    from data import BCN_name2Z
    # Bug fix: both files were previously opened without being closed;
    # `with` guarantees the input handle is released and the output flushed.
    with open('C53N7_forbidden.txt') as f:
        Z = [BCN_name2Z(line) for line in f]
    Z = torch.LongTensor(Z)
    adj = cmds.read('../C60.car').adjacent()
    adj = torch.tensor(adj).float()
    model = torch.load('gcn18_C60Ih_ZFE_82.pth', map_location=lambda storage, loc: storage)
    # Inference only: disable dropout/batch-norm updates and autograd graphs.
    model.eval()
    with torch.no_grad():
        e_pred = model(adj, Z)
    with open('predict_result.txt', 'w') as g:
        for e in e_pred:
            g.write(str(e.item()) + '\n')


if __name__ == '__main__':
    # Entry point: only predict() is active; toggle the comments to run
    # training or validation instead.
    #train()
    predict()
    #valid()