import torch
import numpy as np
from torch import nn 
from torch.autograd import Variable
import torch.nn.functional as F
from datetime import datetime

def Conv(in_channel, out_channel, kernal, stride = 1, padding = 0):
    """Build a Conv2d -> BatchNorm2d -> ReLU(inplace) stage.

    NOTE(review): the ``0.001`` is BatchNorm2d's second positional argument,
    i.e. ``eps`` (the Inception-style value), not ``momentum`` — confirm that
    was the intent.
    """
    return nn.Sequential(
        nn.Conv2d(in_channel, out_channel, kernal, stride, padding),
        nn.BatchNorm2d(out_channel, 0.001),
        nn.ReLU(True),
    )


class stem(nn.Module):
    """Input stem: two 3x3 convs, a stride-2 max-pool, then 1x1 and 3x3 convs.

    Maps (N, in_channel, H, W) -> (N, out_channel, H/2, W/2); only the
    max-pool changes the spatial size (all convs are padded to preserve it).
    """

    def __init__(self, in_channel, out_channel):
        super(stem, self).__init__()
        self.block1 = Conv(in_channel, 32, 3, padding=1)
        self.block2 = Conv(32, 64, 3, padding=1)
        self.block3 = nn.MaxPool2d(3, 2, 1)  # halves H and W
        self.block4 = Conv(64, 80, 1)
        self.block5 = Conv(80, out_channel, 3, padding=1)

    def forward(self, x):
        out = x
        # Run the five stages strictly in order.
        for stage in (self.block1, self.block2, self.block3,
                      self.block4, self.block5):
            out = stage(out)
        return out


class Inception1(nn.Module):
    """Residual Inception block (narrow variant, 32/48-wide inner convs).

    Three parallel branches (1x1, 1x1->3x3, 1x1->3x3->3x3) are concatenated
    on the channel axis, projected back to ``in_channel`` by a 1x1 conv,
    added to the input, and passed through ReLU — so the output tensor has
    exactly the input's shape.
    """

    def __init__(self, in_channel, out1, out2, out3):
        super(Inception1, self).__init__()
        self.branch1 = Conv(in_channel, out1, 1)
        self.branch2 = nn.Sequential(
            Conv(in_channel, 32, 1),
            Conv(32, out2, 3, padding=1),
        )
        self.branch3 = nn.Sequential(
            Conv(in_channel, 32, 1),
            Conv(32, 48, 3, padding=1),
            Conv(48, out3, 3, padding=1),
        )
        # 1x1 projection back to the residual width.
        self.output = Conv(out1 + out2 + out3, in_channel, 1)

    def forward(self, x):
        branches = [self.branch1(x), self.branch2(x), self.branch3(x)]
        projected = self.output(torch.cat(branches, dim=1))
        return F.relu(projected + x)  # residual connection


class Inception2(nn.Module):
    """Residual Inception block (wide variant, 128/256-wide inner convs).

    Same topology as :class:`Inception1` — three parallel branches whose
    concatenation is projected back to ``in_channel`` and added residually —
    but with wider intermediate channels. Output shape equals input shape.
    """

    def __init__(self, in_channel, out1, out2, out3):
        super(Inception2,self).__init__()
        self.branch1 = Conv(in_channel, out1, 1)
        self.branch2 = nn.Sequential(
            Conv(in_channel, 128, 1),
            Conv(128, out2, 3, padding=1),
        )
        self.branch3 = nn.Sequential(
            Conv(in_channel, 128, 1),
            Conv(128, 256, 3, padding=1),
            Conv(256, out3, 3, padding=1),
        )
        # 1x1 projection back to the residual width.
        self.output = Conv(out1 + out2 + out3, in_channel, 1)

    def forward(self, x):
        branches = [self.branch1(x), self.branch2(x), self.branch3(x)]
        projected = self.output(torch.cat(branches, dim=1))
        return F.relu(projected + x)  # residual connection

class reduction1(nn.Module):
    """Downsampling block: four stride-2 branches concatenated on channels.

    Halves the spatial size; output channels = in_channel + out1 + out2 + out3
    (the max-pool branch keeps the input's channel count).
    """

    def __init__(self, in_channel, out1, out2, out3):
        super(reduction1, self).__init__()
        self.branch1 = nn.MaxPool2d(3, 2, 1)  # passes in_channel through
        self.branch2 = nn.Sequential(
            Conv(in_channel, 128, 1),
            Conv(128, out1, 3, 2, 1),
        )
        self.branch3 = nn.Sequential(
            Conv(in_channel, 128, 1),
            Conv(128, out2, 3, 2, 1),
        )
        self.branch4 = nn.Sequential(
            Conv(in_channel, 128, 1),
            Conv(128, 128, 3, padding=1),
            Conv(128, out3, 3, 2, 1),
        )

    def forward(self, x):
        feats = [branch(x) for branch in
                 (self.branch1, self.branch2, self.branch3, self.branch4)]
        return torch.cat(feats, dim=1)
    
class reduction2(nn.Module):
    """Downsampling block like :class:`reduction1`, with wider inner convs.

    Halves the spatial size; output channels = in_channel + out1 + out2 + out3
    (the max-pool branch keeps the input's channel count).
    """

    def __init__(self, in_channel, out1, out2, out3):
        super(reduction2, self).__init__()
        self.branch1 = nn.MaxPool2d(3, 2, 1)  # passes in_channel through
        self.branch2 = nn.Sequential(
            Conv(in_channel, 256, 1),
            Conv(256, out1, 3, 2, 1),
        )
        self.branch3 = nn.Sequential(
            Conv(in_channel, 256, 1),
            Conv(256, out2, 3, 2, 1),
        )
        self.branch4 = nn.Sequential(
            Conv(in_channel, 256, 1),
            Conv(256, 288, 3, padding=1),
            Conv(288, out3, 3, 2, 1),
        )

    def forward(self, x):
        feats = [branch(x) for branch in
                 (self.branch1, self.branch2, self.branch3, self.branch4)]
        return torch.cat(feats, dim=1)
    

class weight_predict(nn.Module):
    """Inception-style regressor: (N, in_channel, 32, 32) -> (N, 1).

    Feature path (shapes assume 32x32 input, as produced by ``tf_resnet``):
    stem -> Inception1 -> reduction1 -> Inception2 -> reduction2 ->
    4x4 average pool -> flatten to 1760 features, then a four-layer MLP
    head with dropout emits one scalar per sample.
    """

    def __init__(self, in_channel):
        super(weight_predict, self).__init__()
        self.block1 = stem(in_channel, 128)           # -> (128, 16, 16)
        self.block2 = Inception1(128, 32, 32, 64)     # -> (128, 16, 16)
        self.block3 = reduction1(128, 128, 256, 256)  # -> (768, 8, 8)
        self.block4 = Inception2(768, 128, 128, 256)  # -> (768, 8, 8)
        self.block5 = reduction2(768, 384, 288, 320)  # -> (1760, 4, 4)
        self.block6 = nn.AvgPool2d(4, 1)              # -> (1760, 1, 1)
        self.block7 = nn.Flatten(1)                   # -> 1760 features
        self.dropout = nn.Dropout(0.2)

        self.classifier1 = nn.Linear(1760, 1024)
        self.classifier2 = nn.Linear(1024, 512)
        self.classifier3 = nn.Linear(512, 128)
        self.classifier4 = nn.Linear(128, 1)

    def forward(self, x):
        feats = x
        # Convolutional trunk, in order.
        for block in (self.block1, self.block2, self.block3, self.block4,
                      self.block5, self.block6, self.block7):
            feats = block(feats)
        # MLP head: dropout on the pooled features, ReLU between layers.
        out = self.classifier1(self.dropout(feats))
        out = self.classifier2(F.relu(out))
        out = self.classifier3(F.relu(out))
        out = self.classifier4(F.relu(out))
        return out

def tf_resnet(x):
    """Turn flat pixel rows into a (N, 1, 32, 32) float32 tensor in [-1, 1].

    Values are divided by 4 and then normalized with mean 0.5 / std 0.5
    (so raw 0 maps to -1 and raw 4 maps to +1).
    """
    imgs = np.array(x.reshape(-1, 1, 32, 32), dtype='float32') / 4
    imgs = (imgs - 0.5) / 0.5
    return torch.from_numpy(imgs)

def tf_label(x):
    """Turn a flat label vector into a (N, 1) float32 tensor, scaled by 1/1000."""
    labels = np.array(x.reshape(-1, 1), dtype='float32') / 1000
    return torch.from_numpy(labels)

def train(net, train_data, test_data , num_epochs, optimizer, criterion):
    """Train ``net`` for ``num_epochs`` epochs, printing per-epoch loss and time.

    Each batch ``data1`` is a 2-D array whose last column is the label and
    whose other columns are flattened pixels (fed through ``tf_resnet`` /
    ``tf_label``) — presumably shaped (batch, 1025); verify against the caller.
    The printed acc fields are never computed and always show 0, kept only to
    preserve the original message format.

    Fixes vs. the original:
    - removed the deprecated ``Variable(..., volatile=True)`` wrappers:
      ``volatile`` disabled gradient tracking on the *training* inputs (so
      backprop could not reach the network), and the kwarg is rejected by
      modern PyTorch;
    - accumulate ``loss.item()`` instead of the loss tensor, which retained
      the whole autograd graph across the epoch (memory leak);
    - the evaluation loop now runs under ``torch.no_grad()``.
    """
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        net = net.cuda()
    prev_time = datetime.now()
    for epoch in range(num_epochs):
        train_loss = 0.0
        train_acc = 0
        net = net.train()
        for data1 in train_data:
            data_train = tf_resnet(data1[:, 0:-1])
            label_train = tf_label(data1[:, -1])
            if use_cuda:
                data_train = data_train.cuda()
                label_train = label_train.cuda()
            #forward
            output = net(data_train)
            loss = criterion(output, label_train)
            #backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()  # detach: keeps the graph from accumulating
        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        if test_data is not None:
            test_loss = 0.0
            test_acc = 0
            net = net.eval()
            with torch.no_grad():  # no gradients needed for evaluation
                for data1 in test_data:
                    data_test = tf_resnet(data1[:, 0:-1])
                    label_test = tf_label(data1[:, -1])
                    if use_cuda:
                        data_test = data_test.cuda()
                        label_test = label_test.cuda()
                    #forward
                    output = net(data_test)
                    test_loss += criterion(output, label_test).item()
            epoch_str = (
                "epoch:%d. Train Loss:%f, Train acc:%f, Test Loss:%f, Test acc:%f"%(epoch, train_loss/len(train_data), train_acc/len(train_data), test_loss/len(test_data), test_acc/len(test_data))
            )
        else:
            epoch_str = (
                "epoch:%d. Train Loss:%f, Train acc:%f,"%(epoch, train_loss/len(train_data), train_acc/len(train_data))
            )
        prev_time = cur_time
        print(time_str + epoch_str)
# test_net = weight_predict(1)
# test_x = Variable(torch.zeros(1,1,32,32))
# test_y = test_net(test_x)



