import numpy as np
import torch
from torch.nn import Module
import torch.nn.functional as F
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

class BaseModel(Module):
    """Common scaffolding for image-classification models.

    Subclasses must implement `forward` and `get_optimizer`; this base class
    provides dataset/loader construction (via torchvision), a training loop,
    evaluation, and state-dict utilities.
    """

    def __init__(self, model_name, training_params, dataset_name=None):
        """
        Args:
            model_name: name used for checkpoint files under data/raw_models/.
            training_params: opaque training configuration kept for subclasses.
            dataset_name: torchvision dataset class name (e.g. 'MNIST');
                defaults to `model_name` when not given.
        """
        super().__init__()
        if dataset_name is None:  # `is None`, not `== None` (PEP 8)
            dataset_name = model_name
        self.model_name = model_name
        self.training_params = training_params
        self.dataset_name = dataset_name
        self.dataset_dir = 'original_data/' + dataset_name + '/'
        self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    def forward(self, x):
        """Subclasses must override; computes the network output for `x`."""
        raise NotImplementedError('Method `forward` must be implemented')

    def get_optimizer(self):
        """Subclasses must override; returns the optimizer for training."""
        raise NotImplementedError('Method `get_optimizer` must be implemented')

    # Override this method to adjust optimizer parameters after each epoch.
    def adjust_optimizer(self, opt, epoch):
        pass

    def to_device(self):
        """Move the model to the auto-detected device and return it."""
        return self.to(self.device)

    # Used by get_train_valid_loader when transform_train is true to transform
    # the training set. Override this method to add data augmentation.
    def get_train_loader_transform(self):
        return transforms.ToTensor()

    def get_train_valid_loader(self, batch_size, valid_size, transform_train, shuffle):
        """Build train/validation loaders by splitting the training set.

        Args:
            batch_size: batch size for both loaders.
            valid_size: fraction (< 1) of the training set used for validation.
            transform_train: when true, apply `get_train_loader_transform` to
                the training split (the validation split always uses ToTensor,
                so augmentation never leaks into validation).
            shuffle: shuffle indices before splitting.

        Returns:
            (train_loader, valid_loader) tuple.
        """
        set_cls = getattr(datasets, self.dataset_name)

        if transform_train:
            train_transform = self.get_train_loader_transform()
        else:
            train_transform = transforms.ToTensor()

        # Two dataset views over the same files: the validation view always
        # uses the plain ToTensor transform.
        train_set = set_cls(root = self.dataset_dir, train = True,
                                        transform = train_transform, download = True)
        valid_set = set_cls(root = self.dataset_dir, train = True,
                                        transform = transforms.ToTensor(), download = True)

        set_len = len(train_set)
        inds = list(range(set_len))
        cind = int(np.floor(valid_size * set_len))   # valid_size < 1

        if shuffle:
            np.random.shuffle(inds)

        # Split indices: first `cind` for validation, the rest for training.
        train_sampler = SubsetRandomSampler(inds[cind:])
        valid_sampler = SubsetRandomSampler(inds[:cind])

        train_loader = DataLoader(dataset = train_set, batch_size = batch_size, sampler = train_sampler)
        valid_loader = DataLoader(dataset = valid_set, batch_size = batch_size, sampler = valid_sampler)

        return train_loader, valid_loader

    def get_test_loader(self, batch_size, shuffle):
        """Build a DataLoader over the dataset's test split."""
        set_cls = getattr(datasets, self.dataset_name)
        test_set = set_cls(root = self.dataset_dir, train = False,
                                        transform = transforms.ToTensor(), download = True)
        test_loader = DataLoader(dataset = test_set, batch_size = batch_size, shuffle = shuffle)
        return test_loader

    # Train for a single epoch.
    def train_once(self, loader, opt, epoch):
        """Run one training epoch over `loader` using optimizer `opt`.

        Prints per-batch progress; `epoch` is only used for the progress line.
        """
        self.train()  # switch the network to training mode

        # enumerate from 1 so the progress counter is 1-based.
        for ind, (images, labels) in enumerate(loader, start=1):
            images = images.to(self.device)
            labels = labels.to(self.device)

            output = self(images)                   # forward pass
            loss = F.cross_entropy(output, labels)  # compute the loss

            opt.zero_grad()  # clear accumulated gradients
            loss.backward()  # backpropagate
            opt.step()       # update parameters from gradients

            print('\rEpoch {}: [batch: {}/{} ({:.0f}%)] \tLoss: {:.4f}'.format(
                epoch, ind, len(loader),
                100. * ind / len(loader), loss), end = ' ')

        print()

    # Evaluate on a loader.
    def test(self, loader):
        """Evaluate classification accuracy over `loader`; returns the ratio."""
        self.eval()  # switch the network to evaluation mode

        total = 0
        corr = 0
        with torch.no_grad():
            for images, labels in loader:
                images = images.to(self.device)
                labels = labels.to(self.device)

                output = self(images)  # forward pass

                pred = torch.argmax(output, dim = 1)  # most likely class
                total += labels.size(0)
                corr += (pred == labels).sum().item()

        ratio = corr / total  # fraction classified correctly

        print('Test: Accuracy: {}/{} ({:.0f}%)'.format(
            corr, total, ratio * 100))

        return ratio

    # Get a deep copy of the network parameters.
    def state_dict_clone(self):
        """Return a state dict whose tensors are clones (no shared storage)."""
        state = self.state_dict()
        # Preserve the original mapping type (OrderedDict) for compatibility.
        return type(state)((k, v.clone()) for k, v in state.items())

    # Load the originally trained network parameters.
    def load_raw_state(self):
        self.load_state_dict(torch.load('data/raw_models/' + self.model_name + '.pt'))

    # Feed images through the network; input and output are numpy.ndarray.
    def predict(self, images):
        self.eval()
        with torch.no_grad():  # inference only — skip autograd bookkeeping
            return self(torch.from_numpy(images).to(self.device)).cpu().numpy()
