import os, time, random, copy
from collections import defaultdict

from torchvision import models, transforms, datasets
import torch.optim as optim
import torch.nn as nn
import torch
import torch.nn.functional as F

from .Loss import ProxyStaticLoss
from .Reader import ImageReader
from .color_lib import RGBmean, RGBstdv
from .model import ResNet18Head, ResNet18Tail
from .bninception import bninception, BNInceptionHead, BNInceptionTail
from .selfattention import SimpleSelfAttention2
from . import getLogger

class Config:
    """Bare namespace object; logger options are attached as attributes."""


cfg = Config()
cfg.mode, cfg.debug = 'train', True
log = getLogger(cfg)

class learn():
    """Train an ensemble of meta-class classifiers and export embeddings.

    For each column ``l`` of ``ID`` the original classes are merged into
    meta-classes, a backbone classifier is trained on them with
    ProxyStaticLoss, and the best snapshot plus L2-normalized test-set
    embeddings are saved under ``dst``.
    """

    def __init__(self, args, Data, ID, dst, data_dict, num_epochs=10,
                 batch_size=128):
        """Store configuration, build shared modules, and fix class order.

        Args:
            args: namespace providing ``backbone`` ('resnet18' or
                'bninception'), ``head_tail``, ``attention`` and
                ``ensemble_size``.
            Data: dataset name; key into the RGBmean/RGBstdv tables.
            ID: LongTensor of shape (num_classes, ensemble_size); column
                ``l`` maps each original class to its meta-class id for
                ensemble member ``l``.
            dst: output directory for checkpoints and records.
            data_dict: dict with 'tra' and 'test' sub-dicts of
                {class: [image paths]}.
            num_epochs: training epochs per ensemble member.
            batch_size: mini-batch size.

        Raises:
            ValueError: if ``args.backbone`` is not a supported backbone.
        """
        self.args = args
        self.Data = Data
        self.ID = ID
        self.dst = dst

        self.data_dict_tra = data_dict['tra']
        self.data_dict_val = data_dict['test']

        self.batch_size = batch_size
        log.info('batch size: {}'.format(self.batch_size))
        self.num_workers = 8
        log.info('num workers: {}'.format(self.num_workers))

        self.init_lr = 0.01
        log.info('init_lr: {}'.format(self.init_lr))
        self.decay_rate = 0.01
        self.num_epochs = num_epochs

        if self.args.backbone == 'resnet18':
            self.imgsize = 256
        elif self.args.backbone == 'bninception':
            self.imgsize = 224
        else:
            # Fail fast: the original fell through and crashed later with
            # an AttributeError on self.imgsize.
            raise ValueError('unsupported backbone: {}'.format(self.args.backbone))
        log.info('image size: {}'.format(self.imgsize))
        self.RGBmean = RGBmean[Data]
        self.RGBstdv = RGBstdv[Data]
        if self.args.head_tail:
            # The head (and optional attention) is built once and shared
            # across ensemble members; the tail is rebuilt in setModel().
            if self.args.backbone == 'resnet18':
                self.head = ResNet18Head()
                if self.args.attention:
                    self.attention = SimpleSelfAttention2(128)
            elif self.args.backbone == 'bninception':
                self.head = BNInceptionHead()

        # Sort classes and fix the class order so that row i of ID always
        # refers to the same original class.
        all_class = sorted(self.data_dict_tra)
        self.idx_to_ori_class = dict(enumerate(all_class))
        if not self.setsys():
            log.info('system error')
            return

    def run(self):
        """Train and evaluate every ensemble member in turn."""
        for i in range(self.ID.size(1)):
            log.info('='*40)
            log.info('Training ensemble #{}/{}'.format(i, self.args.ensemble_size))
            self.l = i  # index of the ensemble member being trained
            self.meta_id = self.ID[:, i].tolist()
            self.decay_time = [False, False]  # reset LR-decay flags per member
            self.loadData()
            self.setModel()
            # proxy-based classification loss over the meta-classes
            self.criterion = ProxyStaticLoss(self.classSize, self.classSize)
            best_model = self.opt(self.num_epochs)
            self.eva(best_model)
        return

    ##################################################
    # step 0: System check
    ##################################################
    def setsys(self):
        """Return True iff a CUDA device is available (training needs a GPU)."""
        if not torch.cuda.is_available():
            log.info('No GPU detected')
            return False
        return True

    ##################################################
    # step 1: Loading Data
    ##################################################
    def loadData(self):
        """Group training images by meta-class and build the transforms.

        Each original class is capped at TH images so that very large
        classes do not dominate their meta-class.
        """
        TH = 300  # per-original-class sample cap

        # Pool the images of all original classes mapped to the same
        # meta-class id for the current ensemble member.
        self.data_dict_meta = defaultdict(list)
        for i, c in self.idx_to_ori_class.items():
            meta_class_id = self.meta_id[i]
            tra_imgs = self.data_dict_tra[c]
            if len(tra_imgs) > TH:
                tra_imgs = random.sample(tra_imgs, TH)
            self.data_dict_meta[meta_class_id] += tra_imgs

        # training augmentation: upscale ~10%, then random crop + flip
        self.data_transforms_tra = transforms.Compose([
            transforms.Resize(int(self.imgsize * 1.1)),
            transforms.RandomCrop(self.imgsize),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(self.RGBmean, self.RGBstdv)
        ])

        # deterministic eval pipeline: resize + center crop
        self.data_transforms_val = transforms.Compose([
            transforms.Resize(self.imgsize),
            transforms.CenterCrop(self.imgsize),
            transforms.ToTensor(),
            transforms.Normalize(self.RGBmean, self.RGBstdv)
        ])

        self.classSize = len(self.data_dict_meta)
        log.info('output size: {}'.format(self.classSize))

        return

    ##################################################
    # step 2: Set Model
    ##################################################
    def setModel(self):
        """Build self.model for the current member and its SGD optimizer.

        head_tail mode reuses the shared head from __init__ and rebuilds a
        fresh tail whose classifier matches self.classSize; otherwise a
        full (pretrained) backbone is fine-tuned. The model is wrapped in
        DataParallel and moved to GPU.
        """
        log.info('Setting model')
        if self.args.head_tail:
            if self.args.backbone == 'resnet18':
                self.tail = ResNet18Tail()
                # resize classifier to the number of meta-classes
                self.tail.fc = nn.Linear(self.tail.fc.in_features, self.classSize)
                self.tail.avgpool = nn.AdaptiveAvgPool2d(1)
                if self.args.attention:
                    self.model = nn.Sequential(self.head, self.attention, self.tail)
                    log.info('resnet18: head-attention-tail')
                else:
                    self.model = nn.Sequential(self.head, self.tail)
                    log.info('resnet18: head-tail')
            elif self.args.backbone == 'bninception':
                self.tail = BNInceptionTail()
                self.tail.last_linear = nn.Linear(self.tail.last_linear.in_features, self.classSize)
                self.model = nn.Sequential(self.head, self.tail)
                log.info('bninception: head-tail')
        else:
            if self.args.backbone == 'resnet18':
                self.model = models.resnet18(pretrained=True)
                num_ftrs = self.model.fc.in_features
                self.model.fc = nn.Linear(num_ftrs, self.classSize)
                self.model.avgpool = nn.AdaptiveAvgPool2d(1)
                log.info('resnet18: independent')
            elif self.args.backbone == 'bninception':
                self.model = bninception()
                num_ftrs = self.model.last_linear.in_features
                self.model.last_linear = nn.Linear(num_ftrs, self.classSize)
                log.info('bninception: independent')
        self.model = torch.nn.DataParallel(self.model)
        self.model = self.model.cuda()
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.init_lr,
                                   momentum=0.9)
        return

    def lr_scheduler(self, epoch):
        """Two-step LR decay: x decay_rate at 50% and again at 80% of training.

        The decay_time flags make each step fire at most once per member.
        """
        if epoch >= 0.5 * self.num_epochs and not self.decay_time[0]:
            self.decay_time[0] = True
            lr = self.init_lr * self.decay_rate
            log.info('LR is set to {}'.format(lr))
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr
        if epoch >= 0.8 * self.num_epochs and not self.decay_time[1]:
            self.decay_time[1] = True
            lr = self.init_lr * self.decay_rate * self.decay_rate
            log.info('LR is set to {}'.format(lr))
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = lr
        return

    ##################################################
    # step 3: Learning
    ##################################################
    def opt(self, num_epochs):
        """Train for num_epochs and return the best snapshot (by train acc).

        The best snapshot is saved to dst as it improves. A deep copy of
        the freshly built model serves as a fallback so a bound model is
        always returned (the original raised UnboundLocalError when no
        epoch after the first improved the accuracy, e.g. num_epochs <= 1).
        """
        # recording time and epoch acc and best result
        since = time.time()
        best_epoch = 0
        best_acc = 0
        # BUGFIX: bind a fallback snapshot before the loop so the final
        # `return best_model` is always defined.
        best_model = copy.deepcopy(self.model)
        record = []
        dsets = ImageReader(self.data_dict_meta, self.data_transforms_tra)
        dataLoader = torch.utils.data.DataLoader(dsets,
                                                 batch_size=self.batch_size,
                                                 shuffle=True,
                                                 num_workers=self.num_workers,
                                                 pin_memory=True)
        for epoch in range(num_epochs):
            self.lr_scheduler(epoch)

            tra_loss, tra_acc = self.tra(dataLoader)

            record.append((epoch, tra_loss, tra_acc))
            log.info('Epoch:{}/{} Loss:{:.4f} Acc:{:.4f}'.format(epoch, num_epochs-1, tra_loss, tra_acc))

            # deep copy the model (epoch 0 is deliberately skipped as warm-up)
            if epoch >= 1 and tra_acc > best_acc:
                best_acc = tra_acc
                best_epoch = epoch
                best_model = copy.deepcopy(self.model)
                torch.save(best_model,
                           os.path.join(self.dst, 'model_{:02}.pth'.format(self.l)))

        torch.save(torch.Tensor(record),
                   os.path.join(self.dst, 'record_{:02}.pth'.format(self.l)))
        time_elapsed = time.time() - since
        log.info('Training complete in {:.0f}m {:.0f}s'.format(
            time_elapsed // 60, time_elapsed % 60))
        log.info('Best tra acc {:.2f} in epoch {}'.format(best_acc, best_epoch))
        return best_model

    def tra(self, dataLoader):
        """Run one training epoch.

        Returns:
            (loss, acc): per-batch losses summed then divided by the
            number of samples, and the fraction of correct predictions.
        """
        # Set model to training mode
        self.model.train()

        L_data, T_data, N_data = 0.0, 0, 0

        # iterate batch
        for data in dataLoader:
            self.optimizer.zero_grad()
            with torch.set_grad_enabled(True):
                inputs_bt, labels_bt = data  # <FloatTensor> <LongTensor>
                fvec = self.model(inputs_bt.cuda())
                loss = self.criterion(fvec, labels_bt)
                loss.backward()
                self.optimizer.step()

            # argmax over class scores, on CPU where the labels live
            _, preds_bt = torch.max(fvec.cpu(), 1)

            L_data += loss.item()
            T_data += torch.sum(preds_bt == labels_bt).item()
            N_data += len(labels_bt)

        if N_data == 0:
            # empty loader: avoid ZeroDivisionError
            return 0.0, 0.0
        return L_data / N_data, T_data / N_data

    def eva(self, best_model):
        """Embed the test set with *best_model*; save dataset and vectors.

        Embeddings are L2-normalized so downstream retrieval can use
        cosine similarity via dot products.
        """
        best_model.eval()
        dsets = ImageReader(self.data_dict_val, self.data_transforms_val)
        dataLoader = torch.utils.data.DataLoader(dsets,
                                                 self.batch_size,
                                                 shuffle=False,
                                                 num_workers=self.num_workers)

        Fvecs = []
        with torch.no_grad():  # inference only; skip autograd bookkeeping
            for data in dataLoader:
                inputs_bt, labels_bt = data  # <FloatTensor> <LongTensor>
                fvec = F.normalize(best_model(inputs_bt.cuda()), p=2, dim=1)
                Fvecs.append(fvec.cpu())

        Fvecs_all = torch.cat(Fvecs, 0)
        torch.save(dsets, os.path.join(self.dst, 'testdsets.pth'))
        torch.save(Fvecs_all, os.path.join(self.dst, str(self.l) + 'testFvecs.pth'))
        return
