# src/tzer/context.py

from typing import Dict, List
from dataclasses import dataclass, field
import tvm
from tvm import relax, tir
import pickle
import random
import numpy as np
from copy import deepcopy

from .tvmpass import PassDependenceGraph, PassNode

def safe_make_shape_expr(shape_list):
    """Build a relax.ShapeExpr from a heterogeneous list of shape elements.

    Accepts `tir.PrimExpr` entries as-is and wraps plain ints as int32
    IntImm.  `None` entries are silently dropped; anything else is skipped
    with a warning.  If nothing valid remains, falls back to a single
    dimension of 1 so the result is always a non-empty, well-formed shape.
    """
    prim_exprs = []
    for x in shape_list:
        if x is None:
            continue
        elif isinstance(x, tir.PrimExpr):
            prim_exprs.append(x)
        elif isinstance(x, int):
            prim_exprs.append(tir.IntImm("int32", x))
        else:
            print(f"[safe_make_shape_expr] Warning: skipping invalid shape element: {x} (type {type(x)})")

    if not prim_exprs:
        # Degenerate fallback: never return an empty shape.
        prim_exprs = [tir.IntImm("int32", 1)]

    # BUG FIX: the original passed span=tvm.span.Span(), but tvm has no
    # `span` module attribute -- Span is tvm.ir.Span and its constructor
    # requires (source_name, line, end_line, column, end_column), so the
    # old call raised before constructing anything.  ShapeExpr's span
    # parameter defaults to None; simply omit it.
    return relax.ShapeExpr(prim_exprs)

# Pool of relax function-level transform passes from which random pass
# pipelines are sampled (see CompileConfig.mutate).
_RELAX_FUNCTION_HARD_PASSES_ = [
    relax.transform.FoldConstant,
    relax.transform.FuseOps,
    relax.transform.DeadCodeElimination,
    
]

# Transition matrix for the random walk over the pass pool: entry [i][j] is
# nonzero iff pass j is allowed to follow pass i.  Starts fully connected.
_RANDOM_WALK_MAP_ = np.ones((len(_RELAX_FUNCTION_HARD_PASSES_), len(_RELAX_FUNCTION_HARD_PASSES_)))
# Forbid FuseOps (index 1) directly after FoldConstant (index 0).
# NOTE(review): rationale for this single forbidden edge is not visible
# here -- presumably a known bad pass ordering; confirm with pass authors.
_RANDOM_WALK_MAP_[0][1] = 0  

# TIR pass dependence graph, built once at import time for an LLVM target.
# `graph` is shared by CompileConfig.mutate and Context.dump/load below.
graph = PassDependenceGraph(tvm.target.Target('llvm'))
_ALL_DIR_PASS_NODES_ = list(graph.tir_pass_nodes.values())

@dataclass
class CompileConfig:
    """Compilation-side fuzzing configuration: target plus pass pipelines.

    All fields start as None and are populated by mutate(); check()
    validates that population happened before the config is used.
    """
    target: tvm.target.Target = None
    relax_pass_types: List[tvm.transform.Pass] = None
    tir_pass_nodes: List[PassNode] = None

    def mutate(self):
        """Randomize the target and both pass pipelines.

        The relax pipeline is built by a random walk over
        _RANDOM_WALK_MAP_: starting from a uniformly random pass, each next
        pass is drawn uniformly from the indices whose transition weight
        from the current pass is nonzero.
        """
        self.target = random.choice(self._target_space())
        # NOTE(review): randint(1, n-1) can never select all n passes;
        # kept as-is (assumed intentional) -- use randint(1, n) if
        # full-length pipelines are desired.
        n_pass = random.randint(1, len(_RELAX_FUNCTION_HARD_PASSES_) - 1)
        self.relax_pass_types = []
        pidx = random.randint(0, len(_RELAX_FUNCTION_HARD_PASSES_) - 1)
        for _ in range(n_pass):
            self.relax_pass_types.append(_RELAX_FUNCTION_HARD_PASSES_[pidx])
            candidates_idx = _RANDOM_WALK_MAP_[pidx].nonzero()[0]
            if len(candidates_idx) == 0:
                # Dead end in the transition graph: stop the walk early.
                break
            pidx = int(random.choice(candidates_idx))
        self.tir_pass_nodes = graph.random_tir_passes(n_pass)

    def check(self):
        """Assert that mutate() has populated every field.

        BUG FIX: Context.check() calls `self.compile.check()`, but this
        method did not exist, so every Context.check() invocation raised
        AttributeError.
        """
        assert self.target is not None
        assert self.relax_pass_types is not None and len(self.relax_pass_types) > 0
        assert self.tir_pass_nodes is not None

    def get_device(self):
        """Return the tvm runtime device matching the target's kind."""
        kind = self.target.export()['kind']
        if kind == 'cuda':
            return tvm.cuda()
        elif kind == 'rocm':
            return tvm.rocm()
        else:
            return tvm.cpu()

    @staticmethod
    def _target_space():
        # Candidate targets to sample from; currently CPU (llvm) only.
        return [tvm.target.Target('llvm')]

@dataclass
class ExecutionConfig:
    """Runtime-side fuzzing configuration: the module under test, its
    parameters, and the randomly generated input batches."""
    module: tvm.IRModule
    params: Dict
    # Number of leading arguments of `main` that receive fuzzed inputs
    # (the remaining arguments are bound parameters).
    n_inp_node: int
    # Executor mode, one of exe_mode_space(); set by mutate().
    exe_mode: str = None
    # One inner list of tensors per generated sample.
    inputs: List[List[tvm.nd.NDArray]] = field(default_factory=list)
    # Reference outputs (one list per input sample), if recorded.
    oracle: List[List[tvm.nd.NDArray]] = None
    oracle_name: str = "NOT_SET"

    def from_keras(self, model, shape=None, layout="NCHW"):
        """Populate `module`/`params` from a Keras model via the relay
        frontend.

        BUG FIX: `relay` was referenced but never imported anywhere in this
        file (only `relax` and `tir` are imported from tvm), so this method
        unconditionally raised NameError.  Import it locally.
        """
        from tvm import relay
        relay_mod, relay_params = relay.frontend.from_keras(model, shape, layout)
        self.module = relay_mod
        self.params = relay_params

    @staticmethod
    def exe_mode_space(dynamic_shape=False):
        """Available executor modes; 'graph' cannot handle dynamic shapes."""
        return ['vm', 'debug'] if dynamic_shape else ['vm', 'graph', 'debug']

    def check(self):
        """Assert the config is fully populated and ready to execute."""
        assert isinstance(self.module, tvm.IRModule)
        assert self.params is not None
        assert self.n_inp_node > 0
        assert self.exe_mode is not None
        assert self.inputs is not None and len(self.inputs) > 0

    def mutate(self):
        """Regenerate random input batches and pick an executor mode.

        Dynamic ("any") non-batch dimensions are concretized to a random
        size near 224 (clamped to [16, 512]); dynamic batch dimensions fall
        back to `dy_batch_size_list` entries or 1.
        """
        # NOTE(review): `checked_type`/`arg_types` is relay-style typing --
        # confirm it holds for the relax modules this fuzzer now targets.
        input_shapes = self.module['main'].checked_type.arg_types[:self.n_inp_node]
        dy_batch_size_list = []

        def _concretize_non_batch_dim(shape: relax.TensorType):
            # Replace every dynamic ("any") dimension except the batch dim
            # (index 0) with a concrete random size.
            concrete_shape = []
            for idx, x in enumerate(shape.shape):
                if isinstance(x, tvm.tir.Var) and x.name == "any":
                    if idx == 0:
                        # Keep the batch dimension symbolic for now.
                        concrete_shape.append(tvm.tir.Var("any", "int64"))
                    else:
                        dim = int(np.random.normal(224, 56))  
                        dim = max(min(dim, 512), 16)
                        concrete_shape.append(dim)
                else:
                    concrete_shape.append(int(x))
            return relax.TensorType(safe_make_shape_expr(concrete_shape), shape.dtype)

        self.inputs = []
        # Chi-square(3) keeps the sample count small but occasionally larger.
        n_sample = max(1, int(np.random.chisquare(3)))
        n_sample = min(n_sample, 8)  

        for i in range(n_sample):
            this_input = []
            for shape in input_shapes:
                shape_type = _concretize_non_batch_dim(shape)
                # Flatten the (possibly symbolic) shape into plain ints.
                shape_ = []
                for dim in shape_type.shape:
                    if isinstance(dim, tvm.tir.Var) and dim.name == "any":
                        # Remaining symbolic batch dim: use recorded batch
                        # size if available, else 1.
                        val = dy_batch_size_list[i] if i < len(dy_batch_size_list) else 1
                        shape_.append(val)
                    elif isinstance(dim, tvm.tir.IntImm):
                        shape_.append(int(dim))
                    elif isinstance(dim, int):
                        shape_.append(dim)
                    elif isinstance(dim, tir.PrimExpr):
                        # Best effort: bound the expression; fall back to 1.
                        try:
                            shape_.append(int(tvm.arith.Analyzer().const_int_bound(dim).max_value))
                        except Exception:
                            shape_.append(1)
                    else:
                        print(f"[mutate] Unexpected shape element: {dim}, type: {type(dim)} - fallback to 1")
                        shape_.append(1)

                dtype_ = shape_type.dtype
                data = np.zeros(shape=shape_, dtype=dtype_)
                this_input.append(tvm.nd.array(data))
            self.inputs.append(this_input)

        # Static shapes only here, so the full mode space is available.
        self.exe_mode = random.choice(self.exe_mode_space(False))



@dataclass
class Context:
    """A (runtime, compile) configuration pair that can be pickled to disk
    and restored later for reproduction."""
    runtime: ExecutionConfig
    compile: CompileConfig

    def dump(self, path):
        """Pickle this context to `path`.

        All tvm.nd.NDArray payloads are converted to numpy so the pickle is
        portable and device-independent.  BUG FIX: `oracle` was previously
        dumped as raw NDArray objects (inconsistent with `params`/`inputs`
        and not reliably picklable); convert it the same way, None-safely.
        """
        to_store_params = {k: v.numpy() for k, v in self.runtime.params.items()}
        oracle = self.runtime.oracle
        if oracle is not None:
            oracle = [[x.numpy() for x in out] for out in oracle]
        with open(path, 'wb') as f:
            runtime_conf = {
                'module': self.runtime.module.astext(),
                'params': to_store_params,
                'n_inp_node': self.runtime.n_inp_node,
                'exe_mode': self.runtime.exe_mode,
                'inputs': [[x.numpy() for x in inp] for inp in self.runtime.inputs],
                'oracle': oracle,
                'oracle_name': self.runtime.oracle_name
            }
            compile_conf = {
                'target': self.compile.target,
                'relax_pass_types': self.compile.relax_pass_types,
                # Pass nodes are not picklable; store their names and
                # recover them through the module-level `graph`.
                'tir_pass_nodes': graph.export_name(self.compile.tir_pass_nodes)
            }
            pickle.dump({'runtime': runtime_conf, 'compile': compile_conf}, f, protocol=pickle.HIGHEST_PROTOCOL)

    def load(self, path):
        """Restore a context previously written by dump().

        NOTE(review): pickle.load is unsafe on untrusted files -- only load
        dumps produced by this fuzzer.
        """
        with open(path, 'rb') as f:
            data = pickle.load(f)
        self.compile.target = data['compile']['target']
        self.compile.relax_pass_types = data['compile']['relax_pass_types']
        self.compile.tir_pass_nodes = graph.recover(data['compile']['tir_pass_nodes'])
        for k, v in data['runtime'].items():
            if k == 'module':
                self.runtime.module = tvm.parser.fromtext(v)
            elif k == 'params':
                self.runtime.params = {k_: tvm.nd.array(v_) for k_, v_ in v.items()}
            elif k == 'inputs':
                self.runtime.inputs = [[tvm.nd.array(x) for x in inp] for inp in v]
            elif k == 'oracle' and v is not None:
                # Inverse of dump(): re-wrap numpy arrays as NDArrays.
                self.runtime.oracle = [[tvm.nd.array(x) for x in out] for out in v]
            else:
                setattr(self.runtime, k, v)

    def mutate(self):
        """Randomize both the runtime and compile configurations."""
        self.runtime.mutate()
        self.compile.mutate()

    def check(self):
        """Validate both configurations before execution."""
        self.runtime.check()
        self.compile.check()
