import copy
import csv
import pandas as pd
import numpy as np
import torch
from numpy import mat
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
from torchvision.models._utils import _make_divisible
from xr6 import LeakyRelu6
from torch.autograd import Variable


class xRelu(torch.autograd.Function):
    """Custom autograd leaky-ReLU6 variant.

    Values inside the band [0, 6] pass through unchanged; values outside
    it are scaled by ``alpha`` (a small leak slope).
    """

    @staticmethod
    def forward(ctx, inp, alpha):
        # Stash the leak slope and the input for the backward pass.
        ctx.constant = alpha
        ctx.save_for_backward(inp)
        # Bug fix: Python `or` on tensors raises "Boolean value of Tensor
        # is ambiguous"; the element-wise mask union needs bitwise `|`.
        outside = (inp < 0.) | (inp > 6.)
        return torch.where(outside, alpha * inp, inp)

    @staticmethod
    def backward(ctx, grad_output):
        # Bug fix: saved_tensors is a tuple -- unpack the single tensor.
        inp, = ctx.saved_tensors
        ones_like_inp = torch.ones_like(inp)
        outside = (inp < 0.) | (inp > 6.)
        # Local derivative is alpha outside the band and 1 inside. The
        # chain rule requires multiplying by grad_output; the original
        # returned the local derivative alone.
        grad_inp = grad_output * torch.where(outside,
                                             ones_like_inp * ctx.constant,
                                             ones_like_inp)
        # Second slot corresponds to the non-tensor `alpha` argument.
        return grad_inp, None


class LeakyRelu6(nn.Module):
    """nn.Module wrapper around the custom ``xRelu`` autograd function.

    The leak slope applied outside the [0, 6] band is fixed at
    construction time via ``alpha``.
    """

    def __init__(self, alpha=0.002):
        super().__init__()
        self.alpha = alpha

    def forward(self, x):
        return xRelu.apply(x, self.alpha)


# Per-sample preprocessing pipeline; currently just tensor conversion.
# NOTE(review): `transform` is never passed to Data() below — confirm
# whether it is meant to be used.
_steps = [transforms.ToTensor()]
transform = transforms.Compose(_steps)

# Module-level activation instance (not wired into the model here).
lr6 = LeakyRelu6()

class Data(Dataset):
    """Sign-MNIST CSV dataset.

    Each CSV row holds the class label in column 0 followed by 784 pixel
    values (a flattened 28x28 grayscale image). Every sample is decoded
    eagerly and held in memory at construction time.
    """

    def __init__(self, datadir, transform=None):
        self.transform = transform
        self.datadir = datadir
        frame = pd.read_csv(self.datadir)
        rows = frame.to_numpy().astype(np.float32)
        self.labels = []
        self.datas = []
        for row in rows:
            label, pixels = row[0], row[1:]
            img = torch.tensor(pixels).view(1, 28, 28)
            if self.transform is not None:
                img = self.transform(img)
            # Sign-MNIST has no class 9 (letter J requires motion), so
            # higher labels are shifted down to keep class ids contiguous.
            if label >= 10:
                label -= 1
            self.labels.append(label)
            self.datas.append(img)

    def __getitem__(self, idx):
        image = self.datas[idx]
        # Labels are exposed as float32 tensors; the training loop casts
        # them to long for CrossEntropyLoss.
        target = torch.tensor(int(self.labels[idx]), dtype=torch.float32)
        return image, target

    def __len__(self):
        return len(self.datas)


# Paths to the Kaggle Sign-MNIST CSV splits (relative to the working dir).
datadir = 'sign_mnist_train/sign_mnist_train.csv'
datadir_test = 'sign_mnist_test/sign_mnist_test.csv'

ar_dataset = Data(datadir)
ar_dataset_test = Data(datadir_test)

# Both loaders shuffle and drop the final partial batch of 30 samples.
train_loader = DataLoader(ar_dataset, batch_size=30, shuffle=True, drop_last=True)
test_loader = DataLoader(ar_dataset_test, batch_size=30, shuffle=True, drop_last=True)


class ConBNReLU(nn.Sequential):
    """Conv2d (bias-free) -> BatchNorm2d -> ReLU6.

    Padding is derived from the kernel size so that odd kernels preserve
    the spatial dimensions at stride 1.
    """

    def __init__(self, ch_in, ch_out, kernol_size=3, stride=1, groups=1):
        pad = (kernol_size - 1) // 2
        conv = nn.Conv2d(ch_in, ch_out, kernol_size, stride, pad,
                         groups=groups, bias=False)
        super().__init__(conv, nn.BatchNorm2d(ch_out), nn.ReLU6())


def CBR3(ch_in, ch_out, stride):
    """3x3 conv (bias-free, unpadded) -> BatchNorm -> ReLU6 stem block.

    Note: unlike ConBNReLU this applies no padding, so each application
    shrinks spatial dims by 2 at stride 1.
    """
    layers = [
        nn.Conv2d(ch_in, ch_out, 3, stride, bias=False),
        nn.BatchNorm2d(ch_out),
        nn.ReLU6(),
    ]
    return nn.Sequential(*layers)


def CBR1(ch_in, ch_out):
    """1x1 pointwise conv (bias-free) -> BatchNorm -> in-place ReLU6.

    Changes only the channel count; spatial dims are unchanged.
    """
    return nn.Sequential(
        nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, bias=False),
        nn.BatchNorm2d(ch_out),
        nn.ReLU6(inplace=True),
    )


class InvertedRes(nn.Module):
    """MobileNetV2 inverted residual block.

    1x1 pointwise expansion -> 3x3 depthwise conv -> 1x1 linear
    projection, with an identity shortcut when stride is 1 and the
    channel count is preserved.

    Args:
        ch_in: input channel count.
        ch_out: output channel count.
        stride: stride of the depthwise 3x3 convolution.
        alpha: expansion ratio t (hidden width = ch_in * alpha).
    """

    def __init__(self, ch_in, ch_out, stride, alpha):
        super().__init__()
        # Guard: a float expansion ratio would otherwise produce a float
        # channel count, which nn.Conv2d rejects (no-op for int ratios).
        ch_hidden = int(round(ch_in * alpha))
        # Residual shortcut only when the block preserves shape end-to-end.
        self.use_shot = stride == 1 and ch_in == ch_out
        layers = []
        if alpha != 1:
            # Pointwise expansion, skipped when t == 1 (as in the paper).
            layers.append(ConBNReLU(ch_in, ch_hidden, kernol_size=1))
        layers.extend([
            # Depthwise 3x3: groups == channels.
            ConBNReLU(ch_hidden, ch_hidden, kernol_size=3, stride=stride,
                      groups=ch_hidden),
            # Linear (activation-free) pointwise projection.
            nn.Conv2d(ch_hidden, ch_out, 1, 1, 0, bias=False),
            nn.BatchNorm2d(ch_out)
        ])

        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_shot:
            return x + self.conv(x)
        return self.conv(x)


class MobileNet(nn.Module):
    """Compact MobileNetV2-style classifier for 1-channel 28x28 inputs.

    Args:
        num_class: number of output classes (24 sign-language letters;
            J and Z are excluded as they require motion).
        alpha: width multiplier applied to every channel count.
        round_num: channel counts are rounded to multiples of this value.
    """

    def __init__(self, num_class=24, alpha=1, round_num=8):
        super().__init__()
        block = InvertedRes
        # The width multiplier is applied exactly once, inside
        # _make_divisible. (Bug fix: the original multiplied by alpha a
        # second time afterwards, double-scaling widths for alpha != 1;
        # identical result at the default alpha = 1.)
        ch_in = _make_divisible(8 * alpha, round_num)
        self.ch_last = _make_divisible(960 * alpha, round_num)
        # Each row: expansion t, output channels c, repeats n, first stride s.
        inverted_res_seting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 96, 3, 1],
            [6, 320, 1, 1]
        ]
        features = [CBR3(1, ch_in, 1)]
        for t, c, n, s in inverted_res_seting:
            ch_out = _make_divisible(c * alpha, round_num)
            for i in range(n):
                # Only the first block of each stage may downsample.
                stride = s if i == 0 else 1
                features.append(block(ch_in, ch_out, stride, alpha=t))
                ch_in = ch_out
        features.append(CBR1(ch_in, self.ch_last))
        self.features = nn.Sequential(*features)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        # Bug fix: the classifier must consume self.ch_last -- the width
        # the feature extractor actually emits. The original used the raw
        # ch_last, which mismatched for alpha > 1.
        self.class_f = nn.Linear(self.ch_last, num_class)

        # Standard MobileNet init: kaiming for convs, unit-scale BN,
        # small-normal linear.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.class_f(x)
        return x


if __name__ == '__main__':

    # Fall back to CPU so the script still runs on machines without a GPU
    # (the original hard-coded 'cuda' for the model and the train loop).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    mobile = MobileNet()
    mobile.to(device)
    loss_f = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(mobile.parameters(), lr=0.001)

    def fit(epoch, model, train_d, test_d):
        """Run one training epoch followed by a full evaluation pass.

        Returns (train_loss, train_acc, test_loss, test_acc); the losses
        are averaged per batch.
        """
        correct = 0
        total = 0
        running_loss = 0
        model.train()
        for x, y in train_d:
            x, y = x.to(device), y.to(device)
            y_pred = model(x)
            # Labels arrive as float tensors; CrossEntropyLoss requires
            # integer class indices.
            loss = loss_f(y_pred, y.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            with torch.no_grad():
                y_pred = torch.argmax(y_pred, dim=1)
                correct += (y_pred == y).sum().item()
                total += y.size(0)
                running_loss += loss.item()

        # loss.item() is already a per-sample mean within its batch, so
        # average over batches. (The original divided the sum of batch
        # means by the dataset size, deflating the reported loss.)
        epoch_loss = running_loss / len(train_d)
        epoch_acc = correct / total

        test_correct = 0
        test_total = 0
        test_running_loss = 0

        model.eval()
        with torch.no_grad():
            for x, y in test_d:
                x, y = x.to(device), y.to(device)
                y_pred = model(x)
                loss = loss_f(y_pred, y.long())
                y_pred = torch.argmax(y_pred, dim=1)
                test_correct += (y_pred == y).sum().item()
                test_total += y.size(0)
                test_running_loss += loss.item()

        epoch_test_loss = test_running_loss / len(test_d)
        epoch_test_acc = test_correct / test_total

        print("epoch:", epoch,
              "loss:", round(epoch_loss, 3),
              "accuracy:", round(epoch_acc, 3),
              "test_loss:", round(epoch_test_loss, 3),
              "test_accuracy:", round(epoch_test_acc, 3)
              )
        return epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc

    epochs = 80
    best_acc = 0.0
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []
    path = './best.pt'
    best_model_w = copy.deepcopy(mobile.state_dict())

    for epoch in range(epochs):
        epoch_loss, epoch_acc, epoch_test_loss, epoch_test_acc = fit(
            epoch, mobile, train_loader, test_loader)
        train_loss.append(epoch_loss)
        train_acc.append(epoch_acc)
        test_loss.append(epoch_test_loss)
        test_acc.append(epoch_test_acc)

        if epoch_test_acc > best_acc:
            # Bug fixes: track the *test* accuracy (the original recorded
            # the train accuracy here) and store the weights in the same
            # variable that is reloaded below (the original assigned to a
            # differently spelled name, so the best checkpoint was lost
            # and the initial weights were reloaded). The file on disk is
            # also refreshed whenever a new best is found, instead of
            # being written once with untrained weights.
            best_acc = epoch_test_acc
            best_model_w = copy.deepcopy(mobile.state_dict())
            torch.save(best_model_w, path)

    mobile.load_state_dict(best_model_w)
    mobile.eval()


  
        




















    # for i in range(epoch):
    #     for data in train_loader:
    #         imgs, labels = data
    #         img = imgs.to(device)
    #         label = labels.long()
    #         labell = label.to(device)
    #         output = mobile(img)
    #         loss = loss_f(output, labell)
    #         optimizer.zero_grad()
    #         loss.backward()
    #         optimizer.step()
    #     if i % 2 == 0:
    #         print(i, loss.item())
    #         lossa = 0
    #         with torch.no_grad():
    #             for data in test_loader:
    #                 imgts, labelts = test_loader
    #                 imgt = imgts.to(device)
    #                 labelt = labelts.to(device)
    #                 op = mobile(imgt)
    #                 loss = loss_f(op, labelt)
    #                 lossa += loss
    #                 print(f'text:{lossa / len(test_loader)}')
