from torch import nn
from typing import Dict, List, Optional, Tuple, Callable
from .logger import LogSystem
from tensordict import TensorDict
import torch
from jaxtyping import Float32
from torch import Tensor
from .logger import logger
from dataclasses import dataclass
import os
import torch
import tensorrt as trt
import sys
from .colorful import Color

__all__ = [
    "Module",
]

# Configure the process-wide TensorRT logger verbosity from the TRT_DEBUG
# environment variable.
# NOTE(review): os.environ.get returns the raw string whenever the variable is
# set, so ANY non-empty value — including "0" or "false" — selects VERBOSE.
# Confirm this "set to anything to enable" semantics is intended.
if os.environ.get("TRT_DEBUG", False):
    TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
else:
    TRT_LOGGER = trt.Logger(trt.Logger.ERROR)

# Bitmask flag for builder.create_network(): networks are created with an
# explicit batch dimension (required by modern TensorRT ONNX parsing).
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)

def GiB(val):
    """Convert *val* gibibytes to a byte count.

    The constant is parenthesized explicitly: the original ``val * 1 << 30``
    parsed as ``(val * 1) << 30`` due to operator precedence, which is
    identical for integers but raises TypeError for fractional sizes such as
    ``GiB(0.5)``. ``val * (1 << 30)`` keeps integer behavior and additionally
    supports floats.

    Args:
        val: size in GiB (int or float).

    Returns:
        ``val`` scaled by 2**30.
    """
    return val * (1 << 30)

def conver_engine(onnx_file_path='_test_model.onnx', engine_file_path="_test_model.trt", max_batch_size=1):
    """Build a TensorRT engine from an ONNX file and serialize it to disk.

    Args:
        onnx_file_path: path of the ONNX model to parse.
        engine_file_path: destination path for the serialized engine.
        max_batch_size: unused; retained for backward compatibility with
            existing callers (the deprecated builder attributes it fed are
            commented out in history).

    Returns:
        ``engine_file_path`` on success, ``None`` on any failure (missing
        ONNX file, parse error, or engine build failure).
    """
    with trt.Builder(TRT_LOGGER) as builder, \
         builder.create_network(EXPLICIT_BATCH) as network, \
         trt.OnnxParser(network, TRT_LOGGER) as parser:
        if not os.path.exists(onnx_file_path):
            # The original called exit(0) here, which terminated the whole
            # process while reporting a *success* exit status; fail soft and
            # let the caller decide instead.
            logger.info('ONNX file {} not found.'.format(onnx_file_path))
            return None
        logger.info('Loading ONNX file from path {}...'.format(onnx_file_path))
        with open(onnx_file_path, 'rb') as model:
            logger.info(f'Beginning ONNX file {Color.Green}parsing {Color.End}')
            if not parser.parse(model.read()):
                # Route parser diagnostics through the module logger instead
                # of bare print(), consistent with the rest of this function.
                logger.info('ERROR: Failed to parse the ONNX file.')
                for error in range(parser.num_errors):
                    logger.info(str(parser.get_error(error)))
                return None
        logger.info(f'{Color.Green}Successfully parsed {Color.End}ONNX file')
        logger.info('Building an engine from file {}; this may take a while...'.format(onnx_file_path))

        # The builder config exposes TensorRT optimization knobs (workspace
        # size, precision flags, ...); defaults are used here.
        config = builder.create_builder_config()
        serialized_engine = builder.build_serialized_network(network, config)
        if serialized_engine is None:
            # build_serialized_network returns None on failure; writing None
            # to the file would raise a confusing TypeError downstream.
            logger.info('ERROR: Failed to build the serialized engine.')
            return None
        with open(engine_file_path, "wb") as f:
            f.write(serialized_engine)
        logger.info(f'{Color.Green}Successfully created {Color.End}engine')
        return engine_file_path
        
@dataclass
class TrainingLoss:
    """Container for a primary training loss plus optional auxiliary losses.

    ``losses`` is built in ``__post_init__`` as ``[main_loss, *aux_loss]``
    and is what downstream logging/backward code iterates over.
    """
    # Defaults are None so callers can construct by keyword; main_loss is
    # validated as mandatory below.
    main_loss: Optional[torch.Tensor] = None
    aux_loss: Optional[Tuple[torch.Tensor, ...]] = None

    def __post_init__(self):
        # Use real exceptions rather than `assert` so the validation is not
        # stripped when running under `python -O`.
        if self.main_loss is None:
            raise ValueError("main_loss must be provided")
        self.losses = [self.main_loss]
        if self.aux_loss is not None:
            if not isinstance(self.aux_loss, (tuple, list)):
                raise TypeError("aux_loss must be a tuple or list")
            self.losses.extend(self.aux_loss)

    def detach(self):
        """Detach every held loss from the autograd graph, in place.

        Returns:
            self, to allow chaining (e.g. ``loss.detach().losses``).
        """
        self.main_loss = self.main_loss.detach()
        if self.aux_loss is not None:
            self.aux_loss = [loss.detach() for loss in self.aux_loss]
        # Bug fix: rebuild the aggregate list — previously it kept references
        # to the original, still-attached tensors after detaching.
        self.losses = [self.main_loss]
        if self.aux_loss is not None:
            self.losses.extend(self.aux_loss)
        return self
            
class Module(nn.Module):
    """Framework base module: an ``nn.Module`` augmented with Lightning-style
    lifecycle hooks (training/validation steps, epoch callbacks) and a
    TensorRT exportability check.

    Subclasses must implement ``forward``, ``training_step``,
    ``validation_step``, ``configure_optimizers`` and ``log_step``; the
    remaining hooks are optional no-ops.
    """

    def __init__(self, example_input_tensor: torch.Tensor):
        super().__init__()
        # Populated externally by the training driver — TODO confirm which
        # component assigns device/config.
        self.device = None
        self.config = None
        # Sample input used to trace the model during ONNX export; may be
        # None, in which case check_exportable() is a no-op.
        self.example_input_tensor = example_input_tensor

    def forward(self, x):
        """Forward pass; must be implemented by subclasses."""
        raise NotImplementedError

    def training_step(self, batch: TensorDict, batch_idx: int) -> TrainingLoss:
        """Compute and return the losses for one training batch."""
        raise NotImplementedError

    def configure_optimizers(self) -> Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LRScheduler]:
        """Return the optimizer and LR scheduler driving training."""
        raise NotImplementedError

    def on_train_epoch_start(self, epoch: int):
        """Optional hook invoked at the start of each training epoch."""
        pass

    def on_train_epoch_end(self, lr_step_func: Callable):
        """Hook invoked at the end of each training epoch; steps the LR
        scheduler by default."""
        lr_step_func()

    def check_exportable(self, save_path: Optional[str] = None):
        """Verify the model can be exported to ONNX and built into a
        TensorRT engine.

        Args:
            save_path: if given, the built engine is kept at this path and
                only the temporary ONNX file is removed; otherwise both the
                temporary ONNX file and the temporary engine are removed.
        """
        self.eval()

        if self.example_input_tensor is None:
            # Bug fix: previously conver_engine() was still invoked on a
            # never-created ONNX file when no example input was available.
            return

        onnx_path = '_test_model.onnx'
        with torch.no_grad():
            torch.onnx.export(self, self.example_input_tensor, onnx_path)

        if save_path is not None:
            conver_engine(onnx_path, engine_file_path=save_path)
            os.remove(onnx_path)
        else:
            conver_engine(onnx_path, engine_file_path='_test_model.engine')
            os.remove(onnx_path)
            os.remove('_test_model.engine')

    def on_train_start(self):
        """Optional hook invoked once before training begins."""
        pass

    def on_train_end(self):
        """Optional hook invoked once after training finishes."""
        pass

    def validation_step(self, batch: TensorDict, batch_idx: int) -> None:
        """Evaluate one validation batch; must be implemented by subclasses."""
        raise NotImplementedError

    def on_validation_start(self):
        """Optional hook invoked before each validation pass."""
        pass

    def on_validation_end(self):
        """Optional hook invoked after each validation pass."""
        pass

    def bind_log(self, logger: LogSystem) -> None:
        """Attach the experiment's logging system to this module."""
        self.logger = logger

    def log_step(self, training_loss: TrainingLoss):
        """Log one step's losses; must be implemented by subclasses.

        Bug fix: the original body evaluated ``NotImplementedError`` as a
        bare expression without ``raise``, silently doing nothing.
        """
        raise NotImplementedError