from gcn import *
from math import sqrt
import numpy as np
import matplotlib.pyplot as plt


class Averager(object):
    """Running average of losses/values added batch by batch."""

    def __init__(self):
        self.reset()

    def add(self, v):
        """Accumulate `v` into the running mean.

        `v` may be a Variable or Tensor (summed over all of its elements,
        each element counted once) or a plain Python number (counted as a
        single sample).
        """
        if isinstance(v, Variable):
            count = v.data.numel()
            v = v.data.sum()
        elif isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()
        else:
            # BUG FIX: previously `count` was left unbound for plain numbers,
            # so add(3.5) raised NameError; treat it as one sample.
            count = 1
        self.n_count += count
        self.sum += v

    def reset(self):
        """Clear the accumulated sum and sample count."""
        self.n_count = 0
        self.sum = 0

    def val(self):
        """Return the mean of everything added since the last reset (0 if empty)."""
        res = 0
        if self.n_count != 0:
            res = self.sum / float(self.n_count)
        return res


# --- training hyper-parameters and shared model/optimizer state ---
num_epochs = 1500
batch_size = 32
learning_rate = 0.001

# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = GCN11()  # GCN11 comes from the `gcn` star-import at the top of the file
#model = torch.load('BN28\\GCN6_28_spl80.pth', map_location=lambda storage, loc: storage)
#model.load_state_dict(model.state_dict())
#model.eval()
model.to(device)
criterion = nn.MSELoss(reduction='mean')
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    
    
def test(adj, in_, prop):
    """Evaluate the global `model` on the given arrays; return the mean loss.

    adj  : adjacency matrices, first axis is the sample axis
    in_  : integer node-index/feature array aligned with `adj`
    prop : target property values

    Note: only full batches are evaluated, so up to batch_size-1 trailing
    samples are skipped (unchanged from the original behaviour).
    """
    model.eval()
    loss_avg = Averager()
    # no_grad: evaluation needs no autograd graph, which saves memory and time
    with torch.no_grad():
        for i in range(adj.shape[0] // batch_size):
            v = torch.LongTensor(in_[i*batch_size:(i+1)*batch_size]).to(device)
            a = torch.FloatTensor(adj[i*batch_size:(i+1)*batch_size]).to(device)
            p = torch.tensor(prop[i*batch_size:(i+1)*batch_size]).to(device)
            outputs = model(v, a)
            loss = criterion(outputs, p)
            loss_avg.add(loss)
    return loss_avg.val()


def train():
    """Train the global `model` on the BN28 energy dataset.

    Loads 'BN28\\BN28_aeh3.npz', splits 90/10 into train/test, trains for
    `num_epochs`, prints RMSE every 5 epochs, and checkpoints the model
    whenever the epoch-average training loss improves.
    """
    data = np.load('BN28\\BN28_aeh3.npz')
    ids = data['H']            # renamed from `id` to avoid shadowing the builtin
    e = data['E'] / 56         # energies divided by 56 -- presumably atoms per cluster; TODO confirm
    a = data['A']
    spl = int(e.shape[0] * 0.9)  # 90/10 train/test split
    id_train, id_test = ids[:spl], ids[spl:]
    a_train, a_test = a[:spl], a[spl:]
    e_train, e_test = e[:spl], e[spl:]

    loss_avg = Averager()
    min_loss = 10000
    for epoch in range(num_epochs):
        model.train()
        for i in range(e_train.shape[0] // batch_size):
            v = torch.LongTensor(id_train[i*batch_size:(i+1)*batch_size]).to(device)
            adj = torch.FloatTensor(a_train[i*batch_size:(i+1)*batch_size]).to(device)
            target = torch.tensor(e_train[i*batch_size:(i+1)*batch_size]).to(device)
            outputs = model(v, adj)
            loss = criterion(outputs, target)
            loss_avg.add(loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        epoch_loss = loss_avg.val()  # average training loss over the whole epoch
        if epoch % 5 == 0:
            print('    epoch {}, train_loss={:.6f}, test_loss={:.6f}'.format(
                epoch, sqrt(epoch_loss), sqrt(test(a_test, id_test, e_test))))
        # BUG FIX: checkpoint on the epoch-average loss rather than on the loss
        # of the single last mini-batch, which was noisy and raised NameError
        # when the training set held fewer samples than one batch.
        if epoch_loss < min_loss:
            min_loss = epoch_loss
            torch.save(model, 'GCN11_28_spl8.pth')
        loss_avg.reset()

        
def valid():
    """Validate a saved checkpoint on the BN23 dataset and dump predictions.

    Prints the overall MSE, then one (target, prediction) pair per sample,
    and saves both arrays to 'BN28_valid23.npz'.
    """
    data = np.load('BN23_aeghiv.npz')
    v = torch.LongTensor(data['V']).to(device)
    a = torch.FloatTensor(data['A']).to(device)
    e = torch.tensor(data['E'] / 46).to(device)  # /46 normalization -- TODO confirm against training

    model = torch.load('GCN11_28_spl9.pth', map_location=lambda storage, loc: storage)
    # map_location loads the weights onto the CPU: move the model to the active
    # device and switch to eval mode (the original skipped both, which crashes
    # on CUDA inputs and leaves dropout/batch-norm in training mode).
    model.to(device)
    model.eval()
    with torch.no_grad():
        outputs = model(v, a)
        loss = criterion(outputs, e)
    print(loss.item())
    for target, pred in zip(e, outputs):
        print(target.item(), pred.item())
    # tensors must be on the CPU before numpy can serialize them
    np.savez('BN28_valid23.npz', e.cpu(), outputs.detach().cpu())


def prdict():
    """Predict properties for the tri26 dataset and write them to a text file.

    Writes one '<index>\t<value>' line per sample to 'tri26_predict.txt'.
    (Name kept as-is for compatibility with the __main__ entry point.)
    """
    data = np.load('data/tri26_ah3.npz')
    ids = torch.LongTensor(data['H']).to(device)
    a = torch.FloatTensor(data['A']).to(device)

    # The original also constructed an unused GCN10() instance here; the
    # loaded checkpoint fully replaced it, so that dead code is removed.
    model = torch.load('BN28\\GCN10_28_spl82_best.pth', map_location=lambda storage, loc: storage)
    model.to(device)
    model.eval()
    with torch.no_grad():
        # .cpu() before .numpy(): required when `device` is CUDA
        out = model(ids, a).cpu().numpy()

    print(out)
    # `with` guarantees the file is closed (the original leaked the handle)
    with open('tri26_predict.txt', 'w') as f:
        for i in range(out.shape[0]):
            f.write('%d\t%f\n' % (i + 1, out[i]))
    
    
def error():
    '''Compute and save test-set prediction errors for a saved checkpoint.'''
    data = np.load('BN28\\BN28_aeh3.npz')
    ids = data['H']
    e = data['E'] / 56
    a = data['A']
    spl = int(e.shape[0] * 0.8)  # 80/20 split, matching the '_spl82' checkpoint name
    id_test = ids[spl:]
    a_test = a[spl:]
    e_test = e[spl:]

    model = torch.load('BN28\\GCN10_28_spl82_best.pth', map_location=lambda storage, loc: storage)
    # weights were mapped to CPU: move to the active device and use eval mode
    model.to(device)
    model.eval()
    v = torch.LongTensor(id_test).to(device)
    adj = torch.FloatTensor(a_test).to(device)
    with torch.no_grad():
        predict = model(v, adj).cpu().numpy()
    # NOTE(review): the /2 rescaling of the prediction is kept from the
    # original -- presumably this checkpoint was trained on doubled targets;
    # confirm before relying on the saved errors.
    err = predict / 2 - e_test
    np.save('GCN10_28_spl82_error.npy', err)
    
def error_show():
    """Histogram the saved prediction errors and print summary statistics."""
    errs = np.load('GCN10_28_spl82_error.npy')
    n, bins, patches = plt.hist(errs, 20, density=True)
    print(errs.min(), errs.max())
    print(errs.mean(), errs.var())
    # fraction of samples whose error exceeds -0.04
    print(np.sum(errs > -0.04) / len(errs))
    
def property_train():
    """Train the global `model` to predict the 'G' (gap) property of BN28.

    Loads 'BN28\\BN28_aeghiv.npz', splits 90/10, trains for `num_epochs`,
    prints train/test losses every 5 epochs, and checkpoints whenever the
    epoch-average training loss improves.
    """
    data = np.load('BN28\\BN28_aeghiv.npz')
    a = data['A']
    spl = int(a.shape[0] * 0.9)  # 90/10 train/test split
    a_train, a_test = a[:spl], a[spl:]
    v = data['V']
    v_train, v_test = v[:spl], v[spl:]
    g = data['G']
    g_train, g_test = g[:spl], g[spl:]
    h = data['H']
    h_train = h[:spl]
    # BUG FIX: the original assigned h_train twice (copy-paste error); the
    # second assignment was clearly meant to be the held-out slice.
    h_test = h[spl:]
    print(g.mean(), h.mean())

    loss_avg = Averager()
    min_loss = 10000
    for epoch in range(num_epochs):
        model.train()
        for i in range(a_train.shape[0] // batch_size):
            vb = torch.LongTensor(v_train[i*batch_size:(i+1)*batch_size]).to(device)
            ab = torch.FloatTensor(a_train[i*batch_size:(i+1)*batch_size]).to(device)
            gb = torch.tensor(g_train[i*batch_size:(i+1)*batch_size]).to(device)
            outputs = model(vb, ab)
            loss = criterion(outputs, gb)
            loss_avg.add(loss)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        epoch_loss = loss_avg.val()  # average training loss over the epoch
        if epoch % 5 == 0:
            print('    epoch {}, train_loss={:.6f}, test_loss={:.6f}'.format(
                epoch, epoch_loss, test(a_test, v_test, g_test)))
        # BUG FIX: checkpoint on the epoch-average loss rather than on the
        # noisy loss of the single last mini-batch (same fix as in train()).
        if epoch_loss < min_loss:
            min_loss = epoch_loss
            torch.save(model, 'GCN12_gap_28_spl8.pth')
        loss_avg.reset()


if __name__ == '__main__':
    # Entry point: uncomment the single pipeline step you want to run.
    #error_show()
    #tri2ai(r'E:\360disk\Cluster\topology\BN\BN_Files\Documents\25\tri27_iso.txt', 412, 27)
    #hash2id()
    #prepare_data(r'D:\cluster\BN28')
    #out = model(torch.LongTensor(id[:2]), torch.tensor(a[:2]))
    #print(out.shape)
    #train()
    #valid()
    prdict()