import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable

class spec_cnn(nn.Module):
    """Per-dataset 1-D spectral CNN producing per-class log-probabilities.

    Parameters
    ----------
    flag : str
        Selects the convolutional branch: 'indian', 'pavia' or 'ksc'.
    spec_band : int
        Number of spectral bands. Currently unused; kept for interface
        compatibility. NOTE(review): the hard-coded flattened sizes below
        imply 200 / 103 / 176 input bands respectively — confirm against
        the data pipeline.
    num_classes : int
        Output size of the final classification layer.
    init_weights : bool
        If True, apply Xavier-normal initialization to conv/linear layers.
    """

    def __init__(self, flag, spec_band, num_classes, init_weights=True):
        super(spec_cnn, self).__init__()

        # Which dataset branch forward() runs.
        self.id = flag

        # Indian Pines branch: 4 conv+pool stages plus one final conv.
        # (.float() casts removed: float32 is already the default dtype.)
        self.conv_i = nn.Sequential(
            nn.Conv1d(1, 6, 5, stride=1, padding=0),
            nn.Conv1d(6, 12, 5, stride=1, padding=0),
            nn.Conv1d(12, 24, 4, stride=1, padding=0),
            nn.Conv1d(24, 48, 5, stride=1, padding=0),
            nn.Conv1d(48, 96, 4, stride=1, padding=0),
        )
        self.fc_i = nn.Linear(6 * 96, 256)

        # Pavia branch: 3 conv+pool stages.
        self.conv_p = nn.Sequential(
            nn.Conv1d(1, 6, 8, stride=1, padding=0),
            nn.Conv1d(6, 12, 7, stride=1, padding=0),
            nn.Conv1d(12, 24, 8, stride=1, padding=0),
        )
        self.fc_p = nn.Linear(168, 256)

        # KSC branch: 3 conv+pool stages plus one final conv.
        self.conv_k = nn.Sequential(
            nn.Conv1d(1, 6, 9, stride=1, padding=0),
            nn.Conv1d(6, 12, 9, stride=1, padding=0),
            nn.Conv1d(12, 24, 9, stride=1, padding=0),
            nn.Conv1d(24, 48, 10, stride=1, padding=0),
        )
        self.fc_k = nn.Linear(288, 256)

        # Shared halving max-pool, applied after every conv except the
        # final conv of the 'indian' and 'ksc' branches.
        self.pool = nn.MaxPool1d(2, stride=2)

        # Shared classification head (all branches map to 256 features).
        self.fc1 = nn.Linear(256, num_classes)

        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        """Return log-probabilities of shape (batch, num_classes).

        ``x`` may be (batch, bands) or any shape ending in the band axis;
        it is reshaped to (batch, 1, bands) for the Conv1d stack.

        Raises
        ------
        NotImplementedError
            If ``flag`` given at construction is not one of the three
            supported dataset ids.
        """
        x = x.reshape(-1, 1, x.shape[-1])

        if self.id == 'indian':
            for i in range(4):
                x = self.pool(self.conv_i[i](x))
            x = self.conv_i[-1](x)  # final conv is not pooled
            x = x.view(x.size(0), -1)
            x = self.fc_i(x)
        elif self.id == 'pavia':
            for i in range(3):
                x = self.pool(self.conv_p[i](x))
            x = x.view(x.size(0), -1)
            x = self.fc_p(x)
        elif self.id == 'ksc':
            for i in range(3):
                x = self.pool(self.conv_k[i](x))
            x = self.conv_k[-1](x)  # final conv is not pooled
            x = x.view(x.size(0), -1)
            x = self.fc_k(x)
        else:
            raise NotImplementedError

        # Log-softmax scores pair with nn.NLLLoss at training time.
        score = F.log_softmax(self.fc1(x), dim=1)
        return score

    # adapted from https://pytorch.org/docs/stable/_modules/torchvision/models/vgg.html#vgg11
    def _initialize_weights(self):
        """Xavier-normal init for conv/linear weights, zero biases.

        Bug fix: the original tested ``isinstance(m, nn.Conv2d)``, but this
        model only contains ``nn.Conv1d`` layers, so conv weights were
        never initialized despite ``init_weights=True``.
        """
        for m in self.modules():
            if isinstance(m, (nn.Conv1d, nn.Conv2d)):
                nn.init.xavier_normal_(m.weight.data, gain=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data, gain=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0)
class operate():
    """Training and evaluation loops for a classification network.

    Batches are moved to whatever device the model's parameters live on
    (the original hard-coded ``.cuda()``, which crashed on CPU-only hosts
    and is unnecessary when the caller has already placed the net on GPU).
    The deprecated ``torch.autograd.Variable`` wrappers (no-op since
    torch 0.4) were also removed.
    """

    def train(self, epoch, loss_trn, net, optimizer, scheduler, trn_loader, criterion):
        """Run one training epoch over ``trn_loader``.

        Appends the epoch's average per-batch loss to ``loss_trn`` and
        returns the list. ``scheduler.step()`` is called once per epoch,
        after the optimizer steps, matching the recommended ordering.
        """
        net.train()  # enable training-mode behavior (dropout, BN stats)
        device = next(net.parameters()).device
        epoch_loss = 0.0
        correct = 0
        total = 0
        for idx, (X_spat, y_target) in enumerate(trn_loader):
            X_spat = X_spat.float().to(device)
            y_target = y_target.long().to(device)

            y_pred = net(X_spat)
            loss = criterion(y_pred, y_target)

            epoch_loss += loss.item()
            predicted = torch.argmax(y_pred.detach(), dim=1)
            correct += (predicted == y_target).sum().item()
            total += y_target.shape[0]

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        scheduler.step()
        print('train epoch:{},train loss:{},correct/total:{:.4f}%'.format(
            epoch, epoch_loss / (idx + 1), 100 * correct / total))
        loss_trn.append(epoch_loss / (idx + 1))
        return loss_trn

    def inference(self, net, data_loader, criterion, FLAG='VAL'):
        """Evaluate ``net`` over ``data_loader``.

        Returns a 1-D numpy array of predicted class labels for the whole
        dataset, in loader order. Labels are 1-based (argmax + 1) because
        downstream consumers index classes from 1. ``FLAG`` only selects
        the summary text ('VAL' or 'TEST').

        Bug fix: the original raised ``NameError`` (``inf_result``
        unassigned) when the loader was empty; now an empty array is
        returned instead.
        """
        net.eval()  # evaluation mode
        device = next(net.parameters()).device
        inf_loss = 0.0
        correct = 0
        total = 0
        batch_preds = []
        for idx, (X_spat, y_target) in enumerate(data_loader):
            with torch.no_grad():
                X_spat = X_spat.float().to(device)
                y_target = y_target.long().to(device)
                y_score = net(X_spat)
                loss = criterion(y_score, y_target)
            inf_loss += loss.item()

            predicted = torch.argmax(y_score, dim=1)
            correct += (predicted == y_target).sum().item()
            total += y_target.shape[0]

            # +1: predictions are reported as 1-based class labels.
            batch_preds.append(predicted.cpu().numpy() + 1)

            if idx % 20 == 0 and idx > 0:
                print('test loss:{},{}/{}({:.2f}%),correct/total:{:.4f}%'.format(
                    loss.item(), idx * X_spat.shape[0], len(data_loader.dataset),
                    100 * idx * X_spat.shape[0] / len(data_loader.dataset),
                    100 * correct / total))
        avg_inf_loss = inf_loss / len(data_loader.dataset)
        if FLAG == 'VAL':
            print('Over all validation loss:', inf_loss, 'Average loss:', avg_inf_loss,
                  'correct/total:{:.4f}%'.format(100 * correct / total))
        if FLAG == 'TEST':
            print('Over all testing loss:', inf_loss, 'Average loss:', avg_inf_loss,
                  'correct/total:{:.4f}%'.format(100 * correct / total))
        return np.hstack(batch_preds) if batch_preds else np.array([], dtype=np.int64)