from typing import Literal
import math
import queue

class TransformerCaculator:
    def __init__(self, vocab_size: int, context_length: int, num_layers: int, 
                 d_model: int, num_heads: int, d_ff: int):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.num_layers = num_layers
        self.d_model = d_model
        self.num_heads = num_heads
        self.d_ff = d_ff

        self.param_precious: Literal["bf16", "fp16", "fp32", "int8", "int4"] = 'bf16'
        self.grad_precious: Literal["bf16", "fp16", "fp32", "int8", "int4"] = 'bf16'

        self.batch_size = 1
    
    def set_batch_size(self, batch_size: int):
        self.batch_size = batch_size
        return self

    def _get_total(self, param: int | dict) -> int:
        if isinstance(param, int):
            return param
        if isinstance(param, dict):
            total = 0
            q = queue.Queue()
            q.put(param)
            while not q.empty():
                value = q.get()
                if isinstance(value, int):
                    total += value
                elif isinstance(value, dict):
                    for v in value.values():
                        q.put(v)
                elif isinstance(value, float):
                    total += value
            return total

    def _get_embedding_param(self):
        return self.vocab_size * self.d_model
    
    def _get_lm_head_param(self):
        return self.vocab_size * self.d_model
    
    def _get_rms_param(self):
        return self.d_model
    
    def _get_mha_param(self):
        return 4 * self.d_model * self.d_model
    
    def _get_ffn_param(self):
        return 3 * self.d_model * self.d_ff
    
    def _get_block_param(self):
        param =  {
            'rms1': self._get_rms_param(),
            'rms2': self._get_rms_param(),
            'mha': self._get_mha_param(),
            'ffn': self._get_ffn_param(),
        }
        return param, self._get_total(param)
    
    def _get_blocks_param(self):
        param = {f'layer{i}': self._get_block_param()[0] for i in range(self.num_layers)}
        return param, self._get_total(param)

    def get_param(self):
        param =  {
            'embedding': self._get_embedding_param(),
            'blocks': self._get_blocks_param()[0],
            'rms': self._get_rms_param(),
            'lm_head': self._get_lm_head_param(),
        }

        return param, self._get_total(param)
        

    def get_grad(self):
        return self.get_param()
    
    def _get_size(self, precious: Literal["bf16", "fp16", "fp32", "int8", "int4"]):
        factor = 2.0
        match precious:
            case "bf16" | "fp16":
                factor = 2.0
            case "fp32":
                factor = 4.0
            case "int8":
                factor = 1.0
            case "int4":
                factor = 0.5
        param = self.get_param()[1]
        unit = "GB"
        if param > 1e12:
            unit = "TB"
            param /= 1e12
        elif param > 1e9:
            unit = "GB"
            param /= 1e9
        elif param > 1e6:
            unit = "MB"
            param /= 1e6
        else:
            unit = "KB"
            param /= 1e3

        return f"{round(param * factor, 3)}{unit}"

    def get_grad_size(self):
        return self._get_size(self.grad_precious)
    
    def get_param_size(self):
        return self._get_size(self.param_precious)
    
    def _get_embedding_flops(self):
        # mat: [b l d] * [d vocab_size]
        return self.batch_size * self.context_length * self.vocab_size * self.d_model * 2
    
    def _get_lm_head_flops(self):
        # mat: [b l d] * [d vocab_size]
        return self.batch_size * self.context_length * self.d_model * self.vocab_size * 2
    
    def _get_rms_flops(self):
        # element wise: pow, sum, +eps, rsqrt
        # mat: [bld*d->bld]
        return self.batch_size * self.context_length * self.d_model * 6
    
    def _get_mha_flops(self):
        # mat: [b l d] * [d d] (qkv_proj)
        # mat: [h b l d/h] * [h b l d/h] (calculate attn)
        # softmax: [h b l l] 
        # mat: [h b l l] * [h b l d/h] (calculate v weight)
        # mat: [b l d] * [d d] (o_proj)
        return (self.batch_size * self.context_length * self.d_model * self.d_model * 8 + 
                self.num_heads * self.batch_size * self.context_length * self.d_model / self.num_heads * self.context_length * self.batch_size * self.num_heads +
                self.num_heads * self.batch_size * self.context_length * self.context_length * 4 +
                self.num_heads * self.batch_size * self.context_length * self.context_length * self.d_model / self.num_heads * self.batch_size * self.num_heads
                )
    
    def _get_ffn_flops(self):
        # mat [b l d] * [d d_ff] (w1,w3)
        # silu: exp, +1, 1/, dot_production
        # element_wise: [b l d_ff]
        # mat: [b l d_ff] * [d_ff d] (w2)
        return (self.batch_size * self.context_length * self.d_model * self.d_ff * 3 * 2 + 
                self.batch_size * self.context_length * self.d_ff * 5 )
    
    def _get_block_flops(self):
        flops =  {
            'rms1': self._get_rms_flops(),
            'rms2': self._get_rms_flops(),
            'mha': self._get_mha_flops(),
            'ffn': self._get_ffn_flops(),
        }
        return flops, self._get_total(flops)
    
    def _get_blocks_flops(self):
        flops = {f'layer{i}': self._get_block_flops()[0] for i in range(self.num_layers)}
        return flops, self._get_total(flops)

    def get_flops(self):
        flops =  {
            'embedding': self._get_embedding_flops(),
            'blocks': self._get_blocks_flops()[0],
            'rms': self._get_rms_flops(),
            'lm_head': self._get_lm_head_flops(),
        }

        return flops, self._get_total(flops)

    def get_flops_analysis(self):
        flops_detail, toal_flops = self.get_flops()
        res = {
            'embedding': 0,
            'rms': 0,
            'lm_head': 0,
        }
        blocks = {
            'rms1': 0,
            'rms2': 0,
            'mha': 0,
            'ffn': 0
        }
        for k, v in flops_detail.items():
            if k in res:
                res[k] += v
            elif k == 'blocks':
                for block in v.values():
                    for bk, bv in block.items():
                        blocks[bk] += bv
        for k, v in blocks.items():
            res[f"blocks.{k}"] = v
        precent_res = {k:f"{round(v * 100 / toal_flops, 2)}%" for k,v in res.items()}

        return {
            "toal_flops": toal_flops,
            "contribution": precent_res
        }
    
        
    def get_analysis(self):
        param_detail, toal_param = self.get_param()
        res = {
            'embedding': 0,
            'rms': 0,
            'lm_head': 0,
        }
        blocks = {
            'rms1': 0,
            'rms2': 0,
            'mha': 0,
            'ffn': 0
        }
        for k, v in param_detail.items():
            if k in res:
                res[k] += v
            elif k == 'blocks':
                for block in v.values():
                    for bk, bv in block.items():
                        blocks[bk] += bv
        for k, v in blocks.items():
            res[f"blocks.{k}"] = v
        precent_res = {k:f"{round(v * 100 / toal_param, 2)}%" for k,v in res.items()}
        return {
            "param": toal_param,
            "param_size": self.get_param_size(),
            "grad": toal_param,
            "gard_size": self.get_grad_size(),
            "contribution": precent_res
        }
    
    def analysis(self):
        print("############### Parameter Info #####################")
        print(self.get_analysis())
        print("############### FLOPs Info #####################")
        print(self.get_flops_analysis())
                

if __name__ == '__main__':
    # GPT-2 XL configuration, kept around for quick experiments.
    gpt2_XL = TransformerCaculator(50257, 1024, 48, 1600, 25, 6400)

    # GPT-2 small at a few batch sizes (batch size only changes the FLOPs
    # report, not the parameter counts).
    small_config = (50257, 1024, 12, 768, 12, 6400)
    for batch in (1, 2, 4):
        TransformerCaculator(*small_config).set_batch_size(batch).analysis()