import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable

class lss(nn.Module):
    """Spectral-spatial classifier fusing CNN branches with an RNN scan.

    Three spatial feature streams are produced and fused:
      * DII: 1x1 convs over the 3x3 / 1x1 center crops of the patch.
      * DOI: 1x1 features of a mid-size crop concatenated with 7x7+5x5
        features of the full patch, then two 5x5 convs.
      * LSS: an RNN pass over a pixel sequence whose outputs are scattered
        back onto a 3x3 spatial grid and mixed with a 1x1 conv.
    The spectral branch (fc0) is computed but deliberately excluded from the
    final concatenation (spatial-only ablation; see forward()).

    Args:
        spec_band: number of spectral bands (input size of fc0).
        spat_band: number of channels of the spatial input patch.
        num_classes: number of output classes.
        signal: sequence-model type; only 'rnn' is implemented.
        init_weights: apply Xavier initialization when True.
    """

    def __init__(self, spec_band, spat_band, num_classes, signal='rnn', init_weights=True):
        super(lss, self).__init__()

        self.signal = signal
        self.spat_band = spat_band
        self.spec_band = spec_band

        # NOTE: dropout on nn.RNN is ignored (with a warning) when
        # num_layers=1; kept for backward compatibility of the config.
        self.rnn1 = nn.RNN(input_size=spat_band, hidden_size=256, num_layers=1,
                           nonlinearity='relu', bias=True, batch_first=True,
                           dropout=0.2, bidirectional=False)

        # DII branch: pointwise conv on the 3x3 center crop (then averaged) ...
        self.conv0 = nn.Sequential(
            nn.Conv2d(spat_band, 256, (1, 1), stride=1, padding=0, bias=True).float(),
        )

        self.pool = nn.AvgPool2d((3, 3), stride=1, padding=0)

        # ... plus a pointwise conv on the center pixel.
        self.conv1 = nn.Sequential(
            nn.Conv2d(spat_band, 256, (1, 1), stride=1, padding=0, bias=True).float(),
        )

        # DOI branch: pointwise features of the mid crop ...
        self.conv2 = nn.Sequential(
            nn.Conv2d(spat_band, 64, (1, 1), stride=1, padding=0, bias=True).float(),
        )

        # ... concatenated with 7x7 -> 5x5 features of the full patch.
        self.conv3 = nn.Sequential(
            nn.Conv2d(spat_band, 48, (7, 7), stride=1, padding=0, bias=True).float(),
            nn.BatchNorm2d(48, track_running_stats=True).float(),
            nn.LeakyReLU(),
            nn.Conv2d(48, 64, (5, 5), stride=1, padding=0, bias=True).float(),
        )

        self.conv4 = nn.Sequential(
            nn.Conv2d(128, 192, (5, 5), stride=1, padding=0, bias=True).float(),
            nn.BatchNorm2d(192, track_running_stats=True).float(),
            nn.LeakyReLU(),
            nn.Conv2d(192, 256, (5, 5), stride=1, padding=0, bias=True).float(),
        )

        # LSS branch: mix RNN features re-arranged as a (256, 3, 3) map.
        self.conv5 = nn.Conv2d(256, 256, (1, 1), stride=1, padding=0, bias=True).float()

        # Fusion of LSS (256 ch) and DOI (256 ch) feature maps.
        self.conv6 = nn.Conv2d(512, 512, (1, 1), stride=1, padding=0, bias=True).float()

        self.fc0 = nn.Linear(spec_band, 768, bias=True).float()
        self.fc1 = nn.Linear(256 * 3, num_classes, bias=True).float()

        self.drop2d = nn.Dropout2d(p=0.2, inplace=True)
        self.drop0 = nn.Dropout(p=0.5, inplace=True)
        self.drop1 = nn.Dropout(p=0.5, inplace=True)

        if init_weights:
            self._initialize_weights()

    def forward(self, x_spec, x_spat, x_spat_rnn, x_spat_idx):
        """Compute class log-probabilities.

        Args:
            x_spec: (N, spec_band) spectral vectors.
            x_spat: (N, spat_band, S, S) spatial patches; the fixed crop
                offsets below imply S == 21 so the final feature maps line
                up at 1x1 — TODO confirm against the data pipeline.
            x_spat_rnn: (N, t, spat_band) pixel sequence for the RNN; t must
                be 9 so the output reshapes to a 3x3 map.
            x_spat_idx: (N, t) integer positions used to scatter RNN outputs
                back into spatial order.

        Returns:
            (N, num_classes) log-softmax scores.

        Raises:
            ValueError: if ``signal`` is not 'rnn'.
        """
        # Spectral branch (computed but not used in the final concat below).
        spec = self.fc0(x_spec)
        spec = self.drop0(spec)

        # Center crops of the spatial patch: 11x11, 3x3, 1x1 (for S == 21).
        x_spat_mp = x_spat[:, :, 5:-5, 5:-5]
        x_spat33 = x_spat[:, :, 9:-9, 9:-9]
        x_spat11 = x_spat[:, :, 10:-10, 10:-10]

        DII = self.pool(self.conv0(x_spat33)) + self.conv1(x_spat11)
        DII = self.drop2d(DII)

        DOI = torch.cat((self.conv2(x_spat_mp), self.conv3(x_spat)), 1)
        DOI = self.conv4(DOI)
        DOI = self.drop2d(DOI)

        if self.signal == 'rnn':
            out, _ = self.rnn1(x_spat_rnn)  # left->right scan, (N, t, C)
        else:
            # BUGFIX: the original fell through to an opaque NameError on
            # `out`; fail loudly with a clear message instead.
            raise ValueError(
                "unsupported signal type {!r}: only 'rnn' is implemented".format(self.signal))

        # Scatter each sequence step back to its spatial position.
        out_new = torch.zeros_like(out)
        x_spat_idx = x_spat_idx.long()
        for i in range(out.shape[0]):
            out_new[i][x_spat_idx[i]] = out[i]

        # (N, t, C) -> (N, C, 3, 3); requires t == 9.
        LSS = out_new.reshape(-1, 3, 3, out.shape[-1]).permute(0, 3, 1, 2)
        LSS = self.conv5(LSS)
        LSS = self.drop2d(LSS)

        CI = self.conv6(torch.cat((LSS, DOI), 1))

        spat_I = torch.cat((DII, self.pool(CI)), 1)
        y = spat_I.view(spat_I.size(0), -1)

        # Spatial-only ablation: the spectral branch is deliberately left out.
        # x = torch.cat((spec, y), 1)
        x = y
        x = self.drop1(x)

        score = F.log_softmax(self.fc1(x), dim=1)
        return score

    # fork from https://pytorch.org/docs/stable/_modules/torchvision/models/vgg.html#vgg11
    def _initialize_weights(self):
        """Xavier-init conv/linear weights; BatchNorm affine params to (1, 0)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data, gain=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias.data, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight.data, 1)
                nn.init.constant_(m.bias.data, 0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data, gain=1)
                nn.init.constant_(m.bias.data, 0)
class operate():
    """Training / evaluation driver for the lss network (requires CUDA)."""

    def train(self, epoch, loss_trn, net, optimizer, scheduler, trn_loader, criterion):
        """Run one training epoch and record its average loss.

        Args:
            epoch: current epoch index (logging only).
            loss_trn: list of per-epoch average losses (mutated and returned).
            net: model taking (X_spec, X_spat, X_spat_rnn, X_spat_rnn2).
            optimizer: torch optimizer, stepped once per batch.
            scheduler: LR scheduler, stepped once per epoch.
            trn_loader: yields (X_spec, X_spat, X_spat_rnn, X_spat_rnn2, y_target).
            criterion: loss taking (scores, targets).

        Returns:
            loss_trn with this epoch's average loss appended.
        """
        net.train()  # train mode: enable dropout / batchnorm updates
        epochavg_loss = 0
        correct = 0
        total = 0
        for idx, (X_spec, X_spat, X_spat_rnn, X_spat_rnn2, y_target) in enumerate(trn_loader):
            # Variable() has been a deprecated no-op since PyTorch 0.4;
            # move tensors to the GPU directly.
            X_spec = X_spec.float().cuda()
            X_spat = X_spat.float().cuda()
            X_spat_rnn = X_spat_rnn.float().cuda()
            X_spat_rnn2 = X_spat_rnn2.float().cuda()
            y_target = y_target.long().cuda()

            # Call the module (not .forward()) so registered hooks run.
            y_pred = net(X_spec, X_spat, X_spat_rnn, X_spat_rnn2)
            loss = criterion(y_pred, y_target)

            epochavg_loss += loss.item()
            _, predicted = torch.max(y_pred.data, 1)
            correct += torch.sum(predicted == y_target)
            total += y_target.shape[0]

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Free GPU references eagerly to lower peak memory.
            del X_spec, X_spat, X_spat_rnn, X_spat_rnn2, y_target
            del y_pred
        scheduler.step()
        print('train epoch:{},train loss:{},correct/total:{:.4f}%'.format(
            epoch, epochavg_loss / (idx + 1), 100 * correct.item() / total))
        loss_trn.append(epochavg_loss / (idx + 1))
        return loss_trn

    def inference(self, net, data_loader, criterion, FLAG='VAL'):
        """Evaluate the network and return 1-based predicted labels.

        Args:
            net: trained model.
            data_loader: yields (X_spec, X_spat, X_spat_rnn, X_spat_rnn2, y_target).
            criterion: loss taking (scores, targets); used for logging only.
            FLAG: 'VAL' or 'TEST' — selects the summary print line.

        Returns:
            1-D numpy array of predicted labels (argmax + 1) over the whole
            loader, empty if the loader yields no batches.
        """
        net.eval()  # evaluation mode: freeze dropout / batchnorm
        inf_loss = 0.0
        correct = 0
        total = 0
        batch_preds = []  # per-batch predictions; stacked once at the end
        for idx, (X_spec, X_spat, X_spat_rnn, X_spat_rnn2, y_target) in enumerate(data_loader):
            with torch.no_grad():
                X_spec = X_spec.float().cuda()
                X_spat = X_spat.float().cuda()
                X_spat_rnn = X_spat_rnn.float().cuda()
                X_spat_rnn2 = X_spat_rnn2.float().cuda()
                y_target = y_target.long().cuda()
                y_score = net(X_spec, X_spat, X_spat_rnn, X_spat_rnn2)
                # BUGFIX: loss was computed outside no_grad; keep it inside
                # so no autograd graph is ever built during evaluation.
                loss = criterion(y_score, y_target)
            inf_loss += loss.item()

            _, predicted = torch.max(y_score.data, 1)
            correct += torch.sum(predicted == y_target)
            total += y_target.shape[0]

            # Downstream label maps are 1-based, hence the +1.
            batch_preds.append(np.argmax(y_score.detach().cpu().numpy(), axis=1) + 1)
            if idx % 20 == 0 and idx > 0:
                print('test loss:{},{}/{}({:.2f}%),correct/total:{:.4f}%'.format(
                    loss.item(), idx * X_spec.shape[0], len(data_loader.dataset),
                    100 * idx * X_spec.shape[0] / len(data_loader.dataset),
                    100 * correct.item() / total))
            del X_spec, X_spat, X_spat_rnn, X_spat_rnn2, y_target
            del loss
            del y_score
        # BUGFIX: the original grew inf_result via repeated np.hstack (O(n^2))
        # and crashed with UnboundLocalError on an empty loader.
        inf_result = np.hstack(batch_preds) if batch_preds else np.array([], dtype=np.int64)
        avg_inf_loss = inf_loss / len(data_loader.dataset)
        if FLAG == 'VAL':
            print('Over all validation loss:', inf_loss, 'Average loss:', avg_inf_loss,
                  'correct/total:{:.4f}%'.format(100 * correct.item() / total))
        if FLAG == 'TEST':
            print('Over all testing loss:', inf_loss, 'Average loss:', avg_inf_loss,
                  'correct/total:{:.4f}%'.format(100 * correct.item() / total))
        return inf_result