import sys
import os
import logging
import time
import random
import torch
import cv2
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torch.nn import BCEWithLogitsLoss, DataParallel
import numpy as np
from torch import nn
import yaml
from typing import Any

from tensorboardX import SummaryWriter

sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import env
from env import Configuration
from image_producer import ImageListDataset
from torchvision.transforms import transforms, functional
from torchvision import models
import hydra, omegaconf
from hydra.core.config_store import ConfigStore
from hydra.core.hydra_config import HydraConfig
# noinspection PyProtectedMember
from omegaconf._utils import get_omega_conf_dumper, _ensure_container
from dataclasses import dataclass

class Stopwatch:
    """Wall-clock timer that reports seconds elapsed between successive splits."""

    def __init__(self):
        # Both markers start at the same instant; split() measures from the
        # most recent split (or from construction, for the first call).
        now = time.perf_counter()
        self._start_time = now
        self._split_time = now

    def split(self) -> float:
        """Return seconds since the previous split and restart the split timer."""
        now = time.perf_counter()
        elapsed = now - self._split_time
        self._split_time = now
        return elapsed


@dataclass
class Summary:
    """Running training/validation counters, persisted next to each checkpoint.

    `epoch` and `step` are global progress counters and survive `reset()`;
    the remaining fields are per-window accumulators / derived metrics.
    """
    epoch: int = 0
    step: int = 0
    loss: float = 0.0
    acc: float = 0.0
    TP: float = 0.0
    FP: float = 0.0
    FN: float = 0.0
    precision: float = 0.0
    recall: float = 0.0
    F1_score: float = 0.0

    def reset(self):
        """Zero the accumulators; derived metrics become NaN until recomputed."""
        for accumulator in ('loss', 'acc', 'TP', 'FP', 'FN'):
            setattr(self, accumulator, 0.0)
        nan = float('NaN')
        self.precision = nan
        self.recall = nan
        self.F1_score = nan


def save_model(model:DataParallel, summary:Summary, path:str) -> None:
    """Persist a checkpoint pair: ``<path>.yaml`` (summary) and ``<path>.ckpt`` (weights).

    The state dict is taken from ``model.module`` so the saved weights are
    free of the DataParallel wrapper and can be loaded into a bare model.
    """
    omegaconf.OmegaConf.save(summary, f'{path}.yaml')
    torch.save(model.module.state_dict(), f'{path}.ckpt')


def load_model(model:DataParallel, base_model:str) -> Summary:
    r"""Load a previous run's checkpoint into *model* and return its Summary.

    Args:
        model: DataParallel wrapper whose state dict is loaded in place
            (``strict=False``, so partially matching checkpoints are accepted).
        base_model: the name of the base model like ``run_id[.name]``. When
            ``name`` is omitted, ``best`` then ``train`` are tried in order.

    Returns:
        The :class:`Summary` stored alongside the checkpoint.

    Raises:
        Exception: if the run directory is missing/ambiguous or no
            ``.ckpt``/``.yaml`` pair is found.
    """
    run_id, name = base_model, None
    if base_model.find('.') > 0:
        # maxsplit=1: a checkpoint name that itself contains dots stays intact.
        # (The old maxsplit=2 made the 2-tuple unpack fail on "run.a.b".)
        run_id, name = base_model.split('.', 1)

    run_dirs = [f for f in os.listdir('..') if f.startswith(f'{run_id}-')]
    # Explicit raise instead of assert: validation must survive `python -O`.
    if len(run_dirs) != 1:
        raise Exception(f'expected exactly one run directory matching {run_id!r}-*, found: {run_dirs}')
    run_dir = os.path.join('..', run_dirs[0])

    # env.use_gpu is a function: it must be *called*. The bare reference was
    # always truthy and silently forced map_location='cuda' even on CPU hosts.
    device = torch.device('cuda' if env.use_gpu() else 'cpu')

    for f in [name] if name else ['best', 'train']:
        base_model = os.path.join(run_dir, f)
        # A checkpoint is only usable when both halves of the pair exist.
        if os.path.isfile(base_model + '.ckpt') and os.path.isfile(base_model + '.yaml'):
            summary = omegaconf.OmegaConf.load(base_model + '.yaml')
            summary = Summary(**summary)
            ckpt = torch.load(base_model + '.ckpt', map_location=device)
            model.load_state_dict(ckpt, strict=False)
            return summary

    raise Exception(f'no model file: {run_dir}/{name}')


def chose_model(model):
    """Instantiate the torchvision ResNet named by *model*.

    Supported values: ``resnet18``, ``resnet34``, ``resnet50``.
    Raises ``Exception`` for any other name.
    """
    supported = ('resnet18', 'resnet34', 'resnet50')
    if model not in supported:
        raise Exception(f"unsupported model type: {model}")
    # Resolve the constructor by name; each call builds a fresh network.
    return getattr(models, model)()


def train_epoch(summary:Summary, summary_writer:SummaryWriter, args:Configuration, model, loss_fn, optimizer, data_loader) -> None:
    """Run one training epoch over `data_loader`, mutating `summary` in place.

    Accumulates loss/accuracy into `summary`, writes window-averaged
    'train/loss' and 'train/acc' scalars every `args.log_every` steps, and
    increments `summary.epoch` when the epoch finishes.

    Args:
        summary: running counters; `step` advances per batch, `epoch` at the end.
        summary_writer: TensorBoard writer receiving the averaged scalars.
        args: configuration; this function reads `fp16` and `log_every`.
        model: network producing one raw logit per sample (squeezed below).
        loss_fn: expected to be BCEWithLogitsLoss (applied to raw logits).
        optimizer: optimizer stepping `model`'s parameters.
        data_loader: yields (inputs, labels) batches.
    """
    model.train()

    steps = len(data_loader)
    batch_size = data_loader.batch_size
    iter_data_loader = iter(data_loader)

    # NOTE(review): `scalar` is a torch.cuda.amp GradScaler ("scaler") used only
    # on the fp16 path; it stays None otherwise.
    scalar = torch.cuda.amp.grad_scaler.GradScaler() if args.fp16 else None
    summary.reset()
    sw = Stopwatch()

    for step in range(steps):
        inputs, labels = next(iter_data_loader)
        # `Variable` is a legacy no-op wrapper (PyTorch >= 0.4); kept as-is.
        # fp16 moves tensors to GPU and halves them; plain GPU keeps float32.
        if args.fp16:
            inputs = Variable(inputs.float().cuda(non_blocking=True).half())
            labels = Variable(labels.float().cuda(non_blocking=True).half())
        elif env.use_gpu():
            inputs = Variable(inputs.float().cuda(non_blocking=True))
            labels = Variable(labels.float().cuda(non_blocking=True))
        else:
            inputs = Variable(inputs.float())
            labels = Variable(labels.float())

        if args.fp16:
            # Mixed-precision path: forward under autocast, loss-scaled backward.
            with torch.cuda.amp.autocast_mode.autocast():
                outputs = model(inputs)
                outputs = torch.squeeze(outputs)  # noqa
                loss = loss_fn(outputs, labels)
            optimizer.zero_grad()
            scalar.scale(loss).backward()
            scalar.step(optimizer)
            scalar.update()
        else:
            outputs = model(inputs)
            outputs = torch.squeeze(outputs)  # noqa
            loss = loss_fn(outputs, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # The model emits raw logits (BCEWithLogitsLoss); sigmoid -> probability.
        probs = outputs.sigmoid()

        # Threshold at 0.5 and count correct predictions in this batch.
        if env.use_gpu():
            predicts = (probs >= 0.5).type(torch.cuda.FloatTensor)
            acc_data = (predicts == labels).type(torch.cuda.FloatTensor).sum().data.item()
        else:
            predicts = (probs >= 0.5).type(torch.FloatTensor)
            acc_data = (predicts == labels).type(torch.FloatTensor).sum().data.item()

        loss_data = loss.data.item()

        logging.info('Training Epoch: {}, Step: {}/{}, Loss: {:.5f}, Acc: {:.3f}, Run: {:.2f}'
                     .format(summary.epoch + 1, step + 1, steps, loss_data, acc_data / batch_size, sw.split()))

        summary.loss += loss_data
        summary.acc += acc_data
        summary.step += 1

        # Every `log_every` global steps: emit window averages, then restart the
        # accumulation window (reset also zeroes TP/FP/FN and NaNs P/R/F1).
        if summary.step % args.log_every == 0:
            summary_writer.add_scalar('train/loss', summary.loss / args.log_every, summary.step)
            summary_writer.add_scalar('train/acc', summary.acc / args.log_every / batch_size, summary.step)
            summary.reset()

    summary.epoch += 1


def valid_epoch(summary:Summary, args:Configuration, model, loss_fn, data_loader) -> None:
    """Evaluate one full pass over `data_loader`, filling `summary` with metrics.

    On return `summary` holds: mean loss over steps, accuracy over all
    samples, TP/FP/FN normalized by the positive count, and the derived
    precision / recall / F1 (the shared normalization cancels out in the
    precision/recall ratios).

    Args:
        summary: counters to populate; reset at the start of the pass.
        args: configuration; only `fp16` is read here.
        model: network producing one raw logit per sample.
        loss_fn: expected to be BCEWithLogitsLoss.
        data_loader: yields (inputs, labels) batches; labels in {0, 1}.
    """
    model.eval()

    steps = len(data_loader)
    iter_data_loader = iter(data_loader)

    # count_total = samples seen; t_total = positive samples seen.
    count_total = t_total = 0
    summary.reset()

    for step in range(steps):
        inputs, labels = next(iter_data_loader)
        with torch.no_grad():
            # Same device/precision staging as train_epoch; `Variable` is a
            # legacy no-op wrapper.
            if args.fp16:
                inputs = Variable(inputs.float().cuda(non_blocking=True).half())
                labels = Variable(labels.float().cuda(non_blocking=True).half())
            elif env.use_gpu():
                inputs = Variable(inputs.float().cuda(non_blocking=True))
                labels = Variable(labels.float().cuda(non_blocking=True))
            else:
                inputs = Variable(inputs.float())
                labels = Variable(labels.float())

            if args.fp16:
                with torch.cuda.amp.autocast_mode.autocast():
                    output = model(inputs)
                    output = torch.squeeze(output)  # important
                    loss = loss_fn(output, labels)
            else:
                output = model(inputs)
                output = torch.squeeze(output)  # important
                loss = loss_fn(output, labels)

        # Raw logits -> probabilities, thresholded at 0.5.
        probs = output.sigmoid()

        if env.use_gpu():
            predicts = (probs >= 0.5).type(torch.cuda.FloatTensor)
            acc_data = (predicts == labels).type(torch.cuda.FloatTensor).sum().data.item()
        else:
            predicts = (probs >= 0.5).type(torch.FloatTensor)
            acc_data = (predicts == labels).type(torch.FloatTensor).sum().data.item()

        # Confusion-matrix accumulation via elementwise products on {0,1} values.
        summary.TP += (predicts * labels).sum().data.item()
        summary.FP += (predicts * (1.0 - labels)).sum().data.item()
        summary.FN += ((1.0 - predicts) * labels).sum().data.item()

        summary.loss += loss.data.item()
        summary.acc += acc_data
        count_total += labels.size(0)
        t_total += labels.sum().data.item()

    # NOTE(review): if the validation set is empty or contains no positive
    # labels, steps/count_total/t_total can be 0 and these divisions raise
    # ZeroDivisionError — confirm the loaders guarantee non-empty positives.
    summary.loss /= steps
    summary.acc /= count_total
    summary.TP /= t_total
    summary.FP /= t_total
    summary.FN /= t_total
    # `if summary.TP else 0.0` guards 0/0; when TP is 0 the ratio is 0 anyway.
    summary.precision = summary.TP / (summary.TP + summary.FP) if summary.TP else 0.0
    summary.recall = summary.TP / (summary.TP + summary.FN) if summary.TP else 0.0
    summary.F1_score = 2 * summary.precision * summary.recall / \
                        (summary.precision + summary.recall) \
                        if summary.precision or summary.recall else 0.0


class MyRotateTransform:
    """Augmentation: rotate the image by a uniformly random multiple of 90 degrees."""

    _ANGLES = (0, 90, 180, 270)

    def __call__(self, img):
        # Exactly one draw from `random`, matching the original sampling.
        angle = random.choice(self._ANGLES)
        if angle == 0:
            # Skip the rotate call entirely for the identity angle.
            return img
        return functional.rotate(img, angle)


class RGBToHSVTransform:
    """Convert an RGB image (PIL or array-like) to an HSV ndarray via OpenCV."""

    def __call__(self, img):
        rgb = np.array(img)
        return cv2.cvtColor(rgb, cv2.COLOR_RGB2HSV)


class NonTransform:
    """Identity transform: hands back its input untouched (no-op placeholder)."""

    def __call__(self, img):
        return img


def get_normalize_transform(args:Configuration):
    """Build the per-channel normalization step from the config.

    Returns an identity `NonTransform` when ``args.normalize`` is ``None``;
    otherwise a torchvision ``Normalize`` using ``normalize[0]`` as the means
    and ``normalize[1]`` as the stds (applied in place).
    """
    if args.normalize is None:
        return NonTransform()

    mean, std = args.normalize[0], args.normalize[1]
    return transforms.Normalize(mean=mean, std=std, inplace=True)


@hydra.main(config_path='configs', config_name='config', version_base=None)
def run(args:Configuration):
    """Build model, data and optimizer from the hydra config and train.

    Per epoch: train, validate, mirror validation metrics to TensorBoard,
    save a 'train' (or numbered 'epochN') checkpoint, and keep the best
    validation-accuracy checkpoint under 'best'. All artifacts are written
    to the current (hydra-managed) working directory.

    NOTE: `run` carries its own @hydra.main decorator yet is called directly
    from `main(cfg)`; that relies on hydra's cfg-passthrough behavior when a
    decorated task function is invoked with an explicit config.

    Raises:
        Exception: fp16 requested without a GPU, or unsupported optimizer type.
    """
    # Snapshot the resolved configuration for reproducibility.
    omegaconf.OmegaConf.save(args, 'args.yaml')

    if args.fp16 and not env.use_gpu():
        raise Exception('float16 can only work on GPU mode')

    model = chose_model(args.model)
    # Replace the ImageNet 1000-way head with a single binary logit.
    fc_features = model.fc.in_features
    model.fc = nn.Linear(in_features=fc_features, out_features=1)

    # Resume counters/weights from a previous run when a base model is given.
    summary = load_model(model, args.base_model) if args.base_model else Summary()

    model = DataParallel(model, device_ids=None)
    loss_fn = BCEWithLogitsLoss()

    if env.use_gpu():
        model = model.cuda()
        loss_fn = loss_fn.cuda()

    if args.optimizer.type == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.optimizer.args.momentum)
    elif args.optimizer.type == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    else:
        raise Exception(f'unsupported optimizer {args.optimizer}')

    # Augmentation is train-only; validation shares just the color-space and
    # normalization steps so metrics are computed on undistorted images.
    transforms_train = transforms.Compose([
                        transforms.RandomHorizontalFlip(),
                        MyRotateTransform(),
                        transforms.ColorJitter(brightness=args.color_jitter.brightness,
                                               contrast=args.color_jitter.contrast,
                                               saturation=args.color_jitter.saturation,
                                               hue=args.color_jitter.hue),
                        RGBToHSVTransform() if args.hsv else NonTransform(),
                        transforms.ToTensor(),
                        get_normalize_transform(args)])
    transform_valid = transforms.Compose([
                        RGBToHSVTransform() if args.hsv else NonTransform(),
                        transforms.ToTensor(),
                        get_normalize_transform(args)])
    dataset_train = ImageListDataset(list_files=args.input.train_list,
                                     img_dir=args.input.patch_dir,
                                     transform=transforms_train)
    dataset_valid = ImageListDataset(list_files=args.input.valid_list,
                                     img_dir=args.input.patch_dir,
                                     transform=transform_valid)
    dataloader_train = DataLoader(dataset_train,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  pin_memory=args.pin_memory,
                                  shuffle=True,
                                  drop_last=True)
    dataloader_valid = DataLoader(dataset_valid,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  pin_memory=args.pin_memory,
                                  shuffle=False,
                                  drop_last=True)

    # len() instead of the dunder-call __len__() (idiomatic, same value).
    logging.info(f'total epoch train: {len(dataset_train)}, valid:{len(dataset_valid)}, batch_size:{args.batch_size}')

    with SummaryWriter('.') as summary_writer:
        best_valid = None
        for epoch in range(args.epoch):
            train_epoch(summary, summary_writer, args, model, loss_fn, optimizer, dataloader_train)

            sw = Stopwatch()
            valid_epoch(summary, args, model, loss_fn, dataloader_valid)

            logging.info('Validation Epoch: {}, step: {}, Loss: {:.5f}, ACC: {:.3f}, Run: {:.2f}'
                        .format(summary.epoch, summary.step, summary.loss, summary.acc, sw.split()))

            # Mirror every validation metric (except progress counters).
            for k, v in vars(summary).items():
                if k != 'epoch' and k != 'step':
                    summary_writer.add_scalar('valid/' + k, v, summary.step)

            # Periodic numbered snapshot; otherwise roll the 'train' checkpoint.
            if (epoch + 1) % args.save_every == 0:
                save_model(model, summary, 'epoch' + str(epoch + 1))
            else:
                save_model(model, summary, 'train')

            # Track the best validation accuracy seen so far.
            if best_valid is None or summary.acc > best_valid:
                best_valid = summary.acc
                save_model(model, summary, 'best')


@hydra.main(config_path='configs', config_name='config', version_base=None)
def main(cfg: Configuration):
    """Hydra entry point: sanity-check the environment and config, then train."""
    # (Removed dead commented-out debug statements that predated this version.)
    env.check_torch_env()
    env.check_config(cfg)
    run(cfg)
    

if __name__ == '__main__':
    # Refuse to start outside a prepared working directory: run artifacts are
    # written relative to CWD and require ./outputs to exist.
    if not os.path.isdir('outputs'):
        print("""
**********************************************************************
* ERROR - NOT FOUND required subdirectory './outputs'
* 
* Cause and/or solution
*     - mkdir outputs, to create required subdirectory
*     - change to the directory contains required outputs subdirectory
**********************************************************************
""")
        exit(1)

    # Register the structured-config schema so hydra validates loaded YAML
    # against the Configuration dataclass.
    ConfigStore.instance().store(name='config_schema', node=Configuration)
    # Custom OmegaConf resolver: ${if:cond,value} -> value when cond is truthy,
    # otherwise the empty string.
    omegaconf.OmegaConf.register_new_resolver('if', lambda x, v: v if x else '')

    main()