import os
import sys
import copy
import numpy as np
import torch
import torch.optim as optim

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from data import CUB
from googlenet import GoogLeNet


class GoogLeNetTrainer:
    """Train and evaluate a GoogLeNet classifier on a subset of the CUB dataset."""

    def __init__(self):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'

        # Load dataset (restricted to the first 10 classes).
        data_train = CUB(train=True, max_classes=10)
        data_valid = CUB(train=False, max_classes=10)
        num_classes = len(set(data_train.y))
        self.train_loader = torch.utils.data.DataLoader(data_train, batch_size=32, shuffle=True)
        self.valid_loader = torch.utils.data.DataLoader(data_valid, batch_size=32, shuffle=False)

        # Define model
        self.model = GoogLeNet(num_classes=num_classes).to(self.device)

        # Optimizer and LR schedule: decay LR by 10x every 57 epochs.
        self.optimizer = optim.Adam(self.model.parameters(), lr=0.0003, weight_decay=1e-4)
        self.scheduler = optim.lr_scheduler.StepLR(self.optimizer, step_size=57, gamma=0.1)
        self.epochs = 150

    def train(self, saving_path=None):
        """Run the full training loop, tracking the best validation accuracy.

        Args:
            saving_path: optional file path; when given, the best model is
                serialized there with ``torch.save``.

        Returns:
            The model with the best-on-validation weights loaded.
        """
        best_valid_acc = 0.
        # deepcopy is required: state_dict() returns live references to the
        # parameter tensors, so an uncopied snapshot would silently track
        # ongoing training updates.
        best_model_wts = copy.deepcopy(self.model.state_dict())
        for epoch in range(self.epochs):
            self.model.train()
            train_loss, correct, num_samples = 0, 0, 0
            for batch, target in self.train_loader:
                batch = batch.to(self.device)
                target = target.to(self.device)
                # The model computes its own loss when a target is supplied;
                # ret is a dict with at least 'loss' and 'output'.
                ret = self.model(batch, target)
                self.optimizer.zero_grad()
                ret['loss'].mean().backward()
                self.optimizer.step()

                # Weight the running loss by batch size so the epoch average
                # is exact even with a ragged final batch.
                train_loss += ret['loss'].mean().item() * len(target)
                correct += torch.sum(ret['output'].argmax(dim=-1).eq(target)).item()
                num_samples += len(target)
            self.scheduler.step()
            train_loss = train_loss / num_samples
            train_acc = correct / num_samples
            valid_acc = self.validate()
            if best_valid_acc < valid_acc:
                best_valid_acc = valid_acc
                best_model_wts = copy.deepcopy(self.model.state_dict())  # save the best model
            print(f'Epoch {epoch:2d}; train loss {train_loss:.4f}, train acc {train_acc:.4f}; val acc: {valid_acc:.4f}')

        self.model.load_state_dict(best_model_wts)
        print('Validation Accuracy:', self.validate())
        if saving_path is not None:
            # Only create parent directories when the path actually has one;
            # os.makedirs('') raises FileNotFoundError for bare filenames.
            parent = os.path.dirname(saving_path)
            if parent:
                os.makedirs(parent, exist_ok=True)
            torch.save(self.model, saving_path)
        return self.model

    def validate(self, loader=None):
        """Return classification accuracy of the current model on `loader`
        (defaults to the held-out validation loader)."""
        if loader is None:
            loader = self.valid_loader
        self.model.eval()
        with torch.no_grad():
            correct, num_samples = 0, 0
            for batch, target in loader:
                batch = batch.to(self.device)
                ret = self.model(batch)
                # Compare on CPU so targets from the loader need no transfer.
                correct += torch.sum(ret['output'].cpu().argmax(dim=-1).eq(target)).item()
                num_samples += len(target)
        acc = correct / num_samples
        return acc

    def extract_feature(self, loader=None):
        """Return the model outputs for every sample in `loader` stacked into
        a single numpy array of shape (num_samples, output_dim)."""
        if loader is None:
            loader = self.valid_loader
        self.model.eval()
        feature = []
        with torch.no_grad():
            for batch, _ in loader:  # targets are not needed for extraction
                batch = batch.to(self.device)
                ret = self.model(batch)
                feature.extend(ret['output'].cpu().numpy())
        feature = np.array(feature)
        return feature


if __name__ == '__main__':
    gt = GoogLeNetTrainer()
    gt.train()
    # Extract visual features for the full dataset.
    # NOTE(review): shuffle=True means the returned feature rows are NOT in
    # dataset order — confirm downstream consumers do not rely on alignment
    # between `feature` rows and `dataset` indices.
    dataset = CUB(train_rate=1)
    feature = gt.extract_feature(torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True))
    print('Feature shape', feature.shape)
