import os
import natsort
import mindspore
import mindspore as ms
import numpy as np
from mindspore.ops import operations as P
from common.utils import Const, dtype_map
from collections import defaultdict


class UTFactory:
    """Replays dumped operator data as single-operator unit tests.

    Dump files found under ``save_path`` are grouped into (input, output)
    sets.  Each group is re-executed on CPU — and, when inputs are randomly
    generated, also on Ascend — and the results are handed to ``comparator``
    for accuracy checking.
    """

    def __init__(self, name, args, kwargs, real_data=False, save_path="", stack=None, comparator=None):
        """
        Args:
            name (str): operator identifier of the form "<OpsName>_<suffix>";
                run_cpu/run_npu split it on "_" to resolve the class in
                ``mindspore.ops.operations``.
            args (tuple): positional arguments used to instantiate the operator.
            kwargs (dict): keyword arguments for the operator; list values are
                converted back to tuples (they degrade to lists during
                serialization).
            real_data (bool): if True, compare CPU output against the dumped
                NPU output; otherwise generate random inputs and run both
                devices.
            save_path (str): directory containing the dumped ``.npy`` files.
            stack: call-stack information carried along (not used here).
            comparator: object providing ``compare(cpu, npu, tag)``.
        """
        self.name = name
        self.args = args
        self.kwargs = self.convert_list_to_tuple(kwargs)
        self.real_data = real_data
        self.save_path = save_path
        self.stack = stack
        self.comparator = comparator
        # None when save_path does not exist; compare() treats that as a no-op.
        self.groups = self.process_file()

    @staticmethod
    def convert_list_to_tuple(data):
        """Convert every list value of ``data`` to a tuple, in place, and return it."""
        for key, value in data.items():
            if not isinstance(value, list):
                continue
            data[key] = tuple(value)
        return data

    def compare(self):
        """Run the comparison over all file groups (no-op when none exist)."""
        if not self.groups:
            return
        if self.real_data:
            self.compare_real()
        else:
            self.compare_random()

    def insert_into_dict(self, dic, keys, value):
        """Insert ``value`` into the nested dict ``dic`` along the path ``keys``.

        Consumes ``keys`` from the front.  Leaf entries are lists so repeated
        paths accumulate values.
        """
        key = keys.pop(0)
        if not keys:
            if key in dic:
                dic[key].append(value)
            else:
                dic[key] = [value]
        else:
            if key not in dic:
                dic[key] = defaultdict(list)
            self.insert_into_dict(dic[key], keys, value)

    def defaultdict_to_regular(self, dic):
        """Recursively convert defaultdicts to plain dicts, unwrapping 1-item lists."""
        if isinstance(dic, defaultdict):
            dic = {key: self.defaultdict_to_regular(value) for key, value in dic.items()}
        if isinstance(dic, dict):
            for key, value in dic.items():
                if isinstance(value, list) and len(value) == 1:
                    dic[key] = value[0]
        return dic

    def parse_input_file(self, input_files):
        """Build a nested index -> file-name dict from the dotted index path
        embedded in each file name (e.g. the ``input.0.1`` segment)."""
        file_dict = defaultdict(list)
        for file in input_files:
            # The fourth "_" token carries the dotted index path; drop the
            # leading label ("input") and keep the numeric indices.
            parts = file.replace(".npy", "").split("_")[3].split(".")[1:]
            keys = list(map(int, parts))
            self.insert_into_dict(file_dict, keys, file)
        return self.defaultdict_to_regular(file_dict)

    def parse_nested_dict(self, dic):
        """Flatten a nested index dict into (nested) lists ordered by index."""
        result = []
        for key in sorted(dic.keys()):
            if isinstance(dic[key], str):
                result.append(dic[key])
            elif isinstance(dic[key], dict):
                result.append(self.parse_nested_dict(dic[key]))
        return result

    def group_files(self, files):
        """Split an ordered file list into {input: [...], output: [...]} groups.

        A new group starts whenever an input file appears after output files.
        """
        groups = []
        current_inputs = []
        current_outputs = []
        for file in files:
            if Const.INPUT in file:
                if current_outputs:
                    groups.append({Const.INPUT: current_inputs, Const.OUTPUT: current_outputs})
                    current_inputs = [file]
                    current_outputs = []
                else:
                    current_inputs.append(file)
            elif Const.OUTPUT in file:
                current_outputs.append(file)
        if current_inputs or current_outputs:
            groups.append({Const.INPUT: current_inputs, Const.OUTPUT: current_outputs})
        return groups

    def process_file(self):
        """Scan ``save_path`` and return the parsed file groups, or None."""
        if not os.path.exists(self.save_path):
            return None
        file_names = []  # guard: os.walk may yield nothing (e.g. save_path is a file)
        for _, _, filenames in os.walk(self.save_path):
            # natsort orders "x_2" before "x_10" so groups stay in dump order.
            # NOTE(review): only the last directory walked is kept; assumes
            # the dump files live in a single flat directory — confirm.
            file_names = natsort.natsorted(filenames)
        groups = self.group_files(file_names)
        for group in groups:
            group[Const.INPUT] = self.parse_nested_dict(self.parse_input_file(group[Const.INPUT]))
        return groups

    def parse_shape_dtype(self, file_name):
        """Extract (dtype, shape) from the trailing "_<dtype>_<shape>.npy" tokens.

        Returns:
            (dtype, None)  for non-tensor scalars (shape "()" and an unmapped
                           dtype) — caller must load the value from disk;
            (np_dtype, 1)  sentinel for a 0-d tensor (shape "()");
            (np_dtype, tuple) for ordinary tensors.
        """
        _, dtype, input_shape = file_name.replace(".npy", "").rsplit("_", 2)
        if input_shape == "()" and dtype not in dtype_map:
            return dtype, None
        if input_shape == "()" and dtype in dtype_map:
            dtype = dtype_map[dtype]
            return dtype, 1
        dtype = dtype_map[dtype]
        input_shape = tuple(int(num.strip(",")) for num in input_shape.strip("()").split(", "))
        return dtype, input_shape

    def load_input(self, input_file):
        """Load real dumped inputs; nested lists become tuples of tensors."""
        result = []
        for item in input_file:
            if isinstance(item, str):
                _, shape = self.parse_shape_dtype(item)
                input_path = os.path.join(self.save_path, item)
                if shape:
                    input_data = ms.Tensor(np.load(input_path))
                else:
                    # Non-tensor scalar (e.g. an axis argument): keep it plain.
                    input_data = np.load(input_path).item()
                result.append(input_data)
            elif isinstance(item, list):
                result.append(tuple(self.load_input(item)))
        return result

    def generate_input(self, input_file):
        """Create random tensors matching each file's recorded dtype/shape.

        Non-tensor scalars are still loaded from disk, since their exact
        value may matter (e.g. an axis argument).
        """
        result = []
        for item in input_file:
            if isinstance(item, str):
                dtype, shape = self.parse_shape_dtype(item)
                if shape == 1:
                    # Recorded shape was "()": generate a 0-d tensor to match
                    # the dumped data (fixes the previous (1,)-shape mismatch
                    # produced by np.random.randn(1)).
                    input_data = ms.Tensor(np.asarray(np.random.randn(), dtype=dtype))
                elif shape:
                    input_data = ms.Tensor(np.random.randn(*shape).astype(dtype))
                else:
                    input_data = np.load(os.path.join(self.save_path, item)).item()
                result.append(input_data)
            elif isinstance(item, list):
                # Fix: recurse with generate_input (was load_input, which
                # silently substituted dumped real data for nested inputs).
                result.append(tuple(self.generate_input(item)))
        return result

    def compare_real(self):
        """Compare CPU recomputation against the dumped NPU outputs."""
        for index, group in enumerate(self.groups):
            output = group[Const.OUTPUT]
            inputs = self.load_input(group[Const.INPUT])
            output_cpu = self.run_cpu(*inputs)
            if isinstance(output_cpu, ms.Tensor):
                output_npu = np.load(os.path.join(self.save_path, output[0]))
                output_cpu = output_cpu.asnumpy()
                self.comparator.compare(output_cpu, output_npu, self.name + "." + str(index) + "_" + Const.OUTPUT)
            else:
                if len(output_cpu) != len(output):
                    raise ValueError("The number of outputs in the dump file does not match the calculated result")
                # Fix: enumerate was missing — the old zip unpacked the CPU
                # tensor as the index and destructured the file name.
                for index_output, (output_c, output_n) in enumerate(zip(output_cpu, output)):
                    output_n = np.load(os.path.join(self.save_path, output_n))
                    output_c = output_c.asnumpy()
                    self.comparator.compare(output_c, output_n, self.name + "." + str(index) + "_" + Const.OUTPUT + "." + str(index_output))

    def compare_random(self):
        """Run the op with random inputs on CPU and NPU and compare results."""
        for index, group in enumerate(self.groups):
            inputs = self.generate_input(group[Const.INPUT])
            output_cpu = self.run_cpu(*inputs)
            output_npu = self.run_npu(*inputs)
            if isinstance(output_cpu, ms.Tensor):
                output_npu = output_npu.asnumpy()
                output_cpu = output_cpu.asnumpy()
                self.comparator.compare(output_cpu, output_npu, self.name + "." + str(index) + "_" + Const.OUTPUT)
            else:
                # Fix: enumerate was missing (same unpacking bug as compare_real).
                for index_output, (output_c, output_n) in enumerate(zip(output_cpu, output_npu)):
                    output_n = output_n.asnumpy()
                    output_c = output_c.asnumpy()
                    self.comparator.compare(output_c, output_n, self.name + "." + str(index) + "_" + Const.OUTPUT + "." + str(index_output))

    def _run_on_device(self, device_target, args):
        """Instantiate the operator and execute it on ``device_target`` in graph mode."""
        mindspore.set_context(device_target=device_target, mode=mindspore.GRAPH_MODE, save_graphs=False, jit_syntax_level=mindspore.STRICT)
        # NOTE(review): assumes self.name contains exactly one "_" — confirm
        # no operator names themselves contain underscores.
        ops_name, _ = self.name.split("_")
        return getattr(P, ops_name)(*self.args, **self.kwargs)(*args)

    def run_npu(self, *args):
        """Execute the operator on Ascend in graph mode."""
        return self._run_on_device("Ascend", args)

    def run_cpu(self, *args):
        """Execute the operator on CPU in graph mode."""
        return self._run_on_device("CPU", args)