from typing import Optional

from get_model.parse_model_config import parse_yaml_modules
from solver.intra_solver_by_ILP.hloLayer import *

# Absolute path to the Qwen3-32B model config YAML used as the default
# input of ModelGeneratorByLayer.
# NOTE(review): machine-specific hard-coded path — consider moving this to
# an environment variable or CLI argument so the module is portable.
model_config_path = "/home/lionrock-g4/wangjiaqian/codes/code_test/sample_test/llm_simulator_by_alpa_layer/config/model_config_qwen3_32b.yaml"

class ModelGeneratorByLayer:
    """Build an HLO-layer computation graph from a YAML model config.

    The config is parsed once in ``__init__``; the resulting module list is
    translated into ``Hlo*`` layer instances chained by strictly sequential
    dependencies (each layer depends on the previous one).
    """

    def __init__(self, config_path: Optional[str] = None):
        """Parse the model config and cache training hyper-parameters.

        :param config_path: path to the model config YAML; defaults to the
            module-level ``model_config_path`` so existing callers keep
            working unchanged.
        """
        self.model_config_path = config_path if config_path is not None else model_config_path
        self.training_param, self.module_list, self.module_length = parse_yaml_modules(self.model_config_path)

        # Cache frequently used hyper-parameters as attributes.
        self.batch_size = self.training_param["batch_size"]
        self.seq_len = self.training_param["seq_len"]
        self.embedding_dim = self.training_param["embedding_dim"]
        # NOTE(review): "vocal_size" looks like a typo for "vocab_size", but
        # the key must match the YAML config and the attribute may be read by
        # callers — do not rename here alone.
        self.vocal_size = self.training_param["vocal_size"]
        self.hidden_dim = self.training_param["hidden_dim"]
        self.num_heads = self.training_param["num_heads"]
        self.intermediate_size = self.training_param["intermediate_size"]

    def print_graph(self):
        """Print every configured module name.

        The layers built by process_module are discarded; this only
        validates that each module name is known and echoes it.
        """
        for module_name in self.module_list:
            self.process_module(module_name)
            print(module_name)

    def computation_graph(self, cur_module_length):
        """Build the graph for the first ``cur_module_length`` modules.

        Equivalent to ``computation_stage_graph(0, cur_module_length)``;
        kept as a thin wrapper for backward compatibility.

        :param cur_module_length: number of leading modules to include.
        :return: HloLayerComputation instance.
        """
        return self.computation_stage_graph(0, cur_module_length)

    def computation_stage_graph(self, start: int = 0, end: Optional[int] = None):
        """Build a sub-graph covering ``module_list[start:end]``.

        :param start: first module index (inclusive).
        :param end: last module index (exclusive); ``None`` means all
            remaining modules.
        :return: HloLayerComputation instance whose layers are chained by
            sequential (prev -> next) dependencies.
        :raises ValueError: if the ``[start, end)`` range is invalid.
        """
        if end is None:
            end = self.module_length
        # Explicit exception rather than `assert`, which is stripped under
        # `python -O` and would silence the range check.
        if not (0 <= start <= end <= self.module_length):
            raise ValueError(f"Invalid range: start={start}, end={end}")

        graph = HloLayerComputation()
        prev_layer = None  # most recently appended layer; dependency anchor

        with graph:
            for i in range(start, end):
                module_name = self.module_list[i]
                # Each module expands to one or more layers; chain them all.
                for layer in self.process_module(module_name):
                    if prev_layer is None:
                        graph.append_layer(layer)
                    else:
                        graph.append_layer(layer, dependencies=[prev_layer])
                    prev_layer = layer
        return graph

    def process_module(self, module_name):
        """Map a config module name to a list of HloLayer instances.

        :param module_name: module identifier from the YAML module list.
        :return: list of layer objects (a decoder expands to four layers).
        :raises ValueError: for unknown module names.
        """
        if module_name == 'Embedding':
            return [self.add_Embedding()]
        elif module_name == 'Qwen2DecoderLayer':
            return self.add_Decoder()
        elif module_name == 'Qwen2RMSNorm':
            return [self.add_RMSNorm()]
        elif module_name == 'Qwen2RotaryEmbeddingLayer':
            return [self.add_RotaryEmbedding()]
        else:
            raise ValueError(f"Unknown module: {module_name}")

    def add_Embedding(self):
        """Token embedding: (batch, seq) -> (batch, seq, embedding_dim)."""
        return HloEmbeddingLayer((self.batch_size, self.seq_len), self.vocal_size, self.embedding_dim)

    def add_Decoder(self):
        """One decoder block: RMSNorm -> SDPA attention -> RMSNorm -> MLP.

        Mirrors Qwen2DecoderLayer (Qwen2SdpaAttention / Qwen2MLP / two
        Qwen2RMSNorm), with each layer's input shape taken from the
        previous layer's output_shape.
        """
        rms_norm_1 = HloRMSNorm((self.batch_size, self.seq_len, self.hidden_dim))
        attention = HloSdqaAttentionLayer(rms_norm_1.output_shape, self.num_heads)
        rms_norm_2 = HloRMSNorm(attention.output_shape)
        mlp = HloMLP(rms_norm_2.output_shape, self.intermediate_size)
        return [rms_norm_1, attention, rms_norm_2, mlp]

    def add_RMSNorm(self):
        """RMSNorm over a (batch, seq, hidden_dim) input tensor."""
        return HloRMSNorm((self.batch_size, self.seq_len, self.hidden_dim))

    def add_RotaryEmbedding(self):
        """Rotary-embedding layer.

        NOTE(review): this builds the exact same HloEmbeddingLayer as
        add_Embedding — presumably a placeholder; confirm whether a
        dedicated rotary-embedding layer type is intended.
        """
        return HloEmbeddingLayer((self.batch_size, self.seq_len), self.vocal_size, self.embedding_dim)


if __name__ == '__main__':
    # Smoke test: parse the default config and print the configured
    # module names.
    generator = ModelGeneratorByLayer()
    generator.print_graph()