import numpy as np
import os
import sys
import json
from toolss import *
from typing_extensions import Union, Literal, Tuple

from dataclasses import dataclass, field
import math

# Dtype tags used to select numpy / emulated-bfloat16 handling when generating
# and serializing tensors.  NOTE(review): 'int4' has no numpy dtype — any path
# that reaches numpy with 'int4' will raise; confirm whether it is ever used.
DataType = Literal['float16', 'int8', 'float32', 'int4', 'bfloat16', 'int32']
# Fixed seed so the generated golden data is reproducible across runs.
np.random.seed(0)

@dataclass
class LightningAttentionConfig:
    """Configuration for lightning-attention golden data generation.

    Groups the tensor shapes, dtype tags, output file names, and algorithm
    parameters consumed by generate_golden().
    """

    # Tensor shapes.  The generation code indexes q/k/v as 4-D
    # (batch, heads, seq_len, dim) — TODO confirm against callers.
    query_shape:                Tuple[int, ...]
    key_shape:                  Tuple[int, ...]
    value_shape:                Tuple[int, ...]
    slope_rate_shape:           Tuple[int, ...]
    mask_shape:                 Tuple[int, ...]
    lambda_shape:               Tuple[int, ...]
    lambda_inverse_shape:       Tuple[int, ...]
    kv_cache_shape:             Tuple[int, ...]

    # Dtype tags (see DataType) for each tensor family.
    input_dtype:                DataType = 'bfloat16'
    slope_rate_dtype:           DataType = 'float32'
    mask_dtype:                 DataType = 'float32'
    kv_cache_dtype:             DataType = 'float32'
    attention_out_dtype:        DataType = 'bfloat16'

    # Where and under which names the binary dumps are written.
    output_dir:                 str = 'input'
    query_filename:             str = 'query.bin'
    key_filename:               str = 'key.bin'
    value_filename:             str = 'value.bin'
    slope_rate_filename:        str = 'slope_rate.bin'
    mask_filename:              str = 'mask.bin'
    lambda_filename:            str = 'lambda.bin'
    lambda_inverse_filename:    str = 'lambda_inverse.bin'
    kv_cache_filename:          str = 'kv_cache.bin'
    attention_out_filename:     str = 'attention_out.bin'
    kv_update_filename:         str = 'kv_update.bin'

    # Algorithm parameters: chunk length for the blocked prefill, and whether
    # to also emit the decay-mask / lambda tensors.
    # NOTE(review): `decay` is not referenced anywhere in this file — confirm
    # it is consumed elsewhere or dead.
    block_size :                int = 256
    decay:                      float = 1.0
    has_mask:                   bool = False

def truncate_to_bfloat16(arr: np.ndarray) -> np.ndarray:
    """Emulate bfloat16 precision while keeping float32 storage.

    Zeroes the low 16 bits of each float32 bit pattern — the value a
    truncating float32 -> bfloat16 -> float32 round trip would produce.
    """
    as_f32 = arr.astype(np.float32)
    high_bits = as_f32.view(np.uint32) & 0xFFFF0000
    return high_bits.view(np.float32)

def generate_matrix(shape: tuple, dtype: DataType, t='bias') -> np.ndarray:
    if dtype == 'bfloat16':
        if t == 'kv':
            return truncate_to_bfloat16(np.zeros(shape).astype(np.float32))
        arr = np.random.rand(*shape).astype(np.float32)
        return truncate_to_bfloat16(arr)
    else:
        if t == 'kv':
            return np.zeros(shape).astype(dtype)
        arr = np.random.rand(*shape)
        return arr.astype(dtype)

def build_slope(n_attention_heads: int):
    """Return the ALiBi-style per-head slope list for `n_attention_heads`.

    For a power-of-two head count the slopes form a geometric sequence whose
    first term and ratio are both 2**(-(2**-(log2(n) - 3))).  Otherwise the
    list is the slopes of the nearest lower power of two, padded with every
    other slope of the doubled size.
    """
    def _slopes_for(count):
        if math.log2(count).is_integer():
            first = 2 ** (-(2 ** -(math.log2(count) - 3)))
            return [first * first ** i for i in range(count)]
        lower = 2 ** math.floor(math.log2(count))
        extra = _slopes_for(2 * lower)[0::2][:count - lower]
        return _slopes_for(lower) + extra

    return _slopes_for(n_attention_heads)
        
def bfloat16_to_bytes(arr: np.ndarray) -> bytes:
    """Serialize a float32 array as packed 16-bit bfloat16 values.

    Keeps the high half of each 32-bit pattern (sign, exponent, top mantissa
    bits), which is exactly the bfloat16 encoding of the float32 value.
    """
    high_halves = (arr.view(np.uint32) >> 16).astype(np.uint16)
    return high_halves.tobytes()

def dump_data(filename: str, data: np.ndarray, dtype: DataType, output_dir: str):
    os.makedirs(output_dir, exist_ok=True)
    if dtype == 'bfloat16':
        with open(os.path.join(output_dir, filename), 'wb') as f:
            f.write(bfloat16_to_bytes(data))
        return
    data.tofile(os.path.join(output_dir, filename))


def generate_minimax_prefill(block_size, q, k, v, slope_rate):
    """Reference (golden) lightning-attention prefill.

    Processes the sequence in chunks of `block_size`, combining a within-block
    causal attention term with a recurrent, exponentially decayed KV state
    carried across blocks.

    Args:
        q, k, v: attention inputs; the indexing below assumes shape
            (batch, heads, seq_len, dim), with v supplying the output dim `e`.
        slope_rate: per-head decay rates; assumed shaped so it broadcasts
            against (BLOCK, 1) position columns, e.g. (heads, 1, 1) — TODO
            confirm against callers.

    Returns:
        (output, kv): attention output of shape (b, h, n, e) in q's dtype, and
        the final float32 KV state of shape (b, h, d, e).
    """
    BLOCK = block_size
    slope_rate = slope_rate.astype(np.float32)

    b, h, n, d = q.shape
    e = v.shape[-1]
    NUM_BLOCK = (n + BLOCK - 1) // BLOCK  # ceil(n / BLOCK)

    # 1-based positions within a block.
    array = np.arange(BLOCK) + 1
    # Per-head decay columns: queries decay with their position, keys with
    # their distance from the end of the block.
    q_decay = np.exp(-slope_rate * array.reshape(-1, 1))
    k_decay = np.exp(-slope_rate * (BLOCK - array.reshape(-1, 1)))
    # Pairwise position offsets; future positions (index < 0) are set to -inf
    # so the exp() below zeroes them — a causal within-block decay mask.
    index = array[:, None] - array[None, :]
    s_index = slope_rate * index[None, None, :, :]
    s_index = np.where(index >= 0, -s_index, float("-inf"))
    diag_decay = np.exp(s_index)

    kv = np.zeros((b, h, d, e), dtype=np.float32)
    output = np.empty((b, h, n, e), dtype=q.dtype)

    for i in range(NUM_BLOCK):
        si = i * BLOCK
        ei = min(si + BLOCK, n)
        m = ei - si  # actual block length; may be short for the last block
        qi = q[:, :, si:ei, :]
        ki = k[:, :, si:ei, :]
        vi = v[:, :, si:ei, :]
        # Contribution of all earlier blocks, read through the carried state.
        qkv_none_diag = np.matmul(qi * q_decay[:, :m], kv).astype(np.float32)

        # Within-block causal attention, weighted by the positional decay mask.
        qk = np.matmul(qi, ki.transpose(0, 1, 3, 2)).astype(np.float32) * diag_decay[:, :, :m, :m]
        qkv_diag = np.matmul(qk, vi.astype(np.float32))
        block_decay = np.exp(-slope_rate * m)
        output[:, :, si:ei] = qkv_none_diag + qkv_diag
        # Decay the carried state by a full block, then fold in this block's
        # decayed keys (k_decay is right-aligned for a short final block).
        kv = block_decay * kv + np.matmul((ki * k_decay[:, -m:]).transpose(0, 1, 3, 2).astype(vi.dtype), vi)
    return output, kv

def generate_lambda(block_size, slope_rate, q):
    """Build the per-block decay tensors for the masked (has_mask) path.

    Returns float32 tensors:
        block_decay     exp(-slope * block_size), cross-block state decay
        mask            within-block causal decay mask (exp of masked offsets)
        lambda_         query-side decay broadcast to (heads, block, dim)
        lambda_inverse  key-side decay broadcast to (heads, block, dim)

    NOTE(review): assumes q is (batch, heads, seq, dim) and slope_rate
    broadcasts against (block_size, 1) columns — confirm with callers.
    """
    _, heads, _, dim = q.shape
    positions = (np.arange(block_size) + 1).reshape(-1, 1)  # 1-based, column
    query_decay = np.exp(-slope_rate * positions)
    key_decay = np.exp(-slope_rate * (block_size - positions))
    # Pairwise offsets; future positions (offset < 0) become -inf so that
    # exp() zeroes them out in the mask.
    offsets = positions - positions.T
    masked = np.where(offsets >= 0, -(slope_rate * offsets[None, None, :, :]), float("-inf"))
    decay_mask = np.exp(masked).astype(np.float32)
    carry_decay = np.exp(-slope_rate * block_size).astype(np.float32)
    broadcast = np.ones((heads, block_size, dim))
    lambda_ = (query_decay * broadcast).astype(np.float32)
    lambda_inverse = (key_decay * broadcast).astype(np.float32)
    return carry_decay, decay_mask, lambda_, lambda_inverse

def generate_golden(config: LightningAttentionConfig):
    """Generate inputs and golden outputs for one lightning-attention case,
    dump every tensor to disk per `config`, and return them.

    Returns (q, k, v, mask, lambda_, lambda_inverse, kv_cache, attention_out,
    kv_update).  The mask/lambda tensors are empty arrays when
    config.has_mask is False.
    """
    q = generate_matrix(config.query_shape, config.input_dtype, 'q')
    k = generate_matrix(config.key_shape, config.input_dtype, 'k')
    v = generate_matrix(config.value_shape, config.input_dtype, 'v')
    kv_cache = generate_matrix(config.kv_cache_shape, config.kv_cache_dtype, 'kv')

    slope_rate = np.array(
        build_slope(config.slope_rate_shape[0]),
        dtype=config.slope_rate_dtype,
    ).reshape(*config.slope_rate_shape)

    if config.has_mask:
        block_decay, mask, lambda_, lambda_inverse = generate_lambda(config.block_size, slope_rate, q)
    else:
        empty = np.array([]).astype(np.float32)
        block_decay = mask = lambda_ = lambda_inverse = empty

    attention_out, kv_update = generate_minimax_prefill(config.block_size, q, k, v, slope_rate)

    def _cast(data, dtype):
        # float32 results pass through; bfloat16 keeps float32 storage with
        # truncated mantissas so dump_data can pack it to 16 bits later.
        if dtype == 'float32':
            return data
        if dtype == 'bfloat16':
            return truncate_to_bfloat16(data)
        return data.astype(dtype)

    attention_out = _cast(attention_out, config.attention_out_dtype)
    kv_update = _cast(kv_update, config.kv_cache_dtype)

    out_dir = config.output_dir
    dump_data(config.query_filename, q, config.input_dtype, out_dir)
    dump_data(config.key_filename, k, config.input_dtype, out_dir)
    dump_data(config.value_filename, v, config.input_dtype, out_dir)
    dump_data(config.kv_cache_filename, kv_cache, config.kv_cache_dtype, out_dir)
    # NOTE(review): when has_mask is set, the slope-rate file receives
    # block_decay instead of slope_rate — preserved from the original code;
    # confirm this is what the kernel-side consumer expects.
    slope_payload = block_decay if config.has_mask else slope_rate
    dump_data(config.slope_rate_filename, slope_payload, config.slope_rate_dtype, out_dir)
    dump_data(config.mask_filename, mask, config.mask_dtype, out_dir)
    dump_data(config.lambda_filename, lambda_, config.mask_dtype, out_dir)
    dump_data(config.lambda_inverse_filename, lambda_inverse, config.mask_dtype, out_dir)
    dump_data(config.attention_out_filename, attention_out, config.attention_out_dtype, out_dir)
    dump_data(config.kv_update_filename, kv_update, config.kv_cache_dtype, out_dir)

    return q, k, v, mask, lambda_, lambda_inverse, kv_cache, attention_out, kv_update

if __name__ == '__main__':
    from argparse import ArgumentParser

    # Hyper-parameters arrive as one space-separated string:
    # "batch heads seq_len head_dim block_size".
    parser = ArgumentParser()
    parser.add_argument('--hp', type=lambda x: [int(item) for item in x.split()], default='1 1 256 128 256')
    batch_size, num_head, seq_len, head_dim, block_size = parser.parse_args().hp

    print(f'Generate Data with shape [{batch_size}, {num_head}, {seq_len}, {head_dim}, {block_size}]')

    qkv_shape = (batch_size, num_head, seq_len, head_dim)
    config = LightningAttentionConfig(
        query_shape=qkv_shape,
        key_shape=qkv_shape,
        value_shape=qkv_shape,
        slope_rate_shape=(num_head, 1, 1),
        kv_cache_shape=(batch_size, num_head, head_dim, head_dim),
        # No decay-mask tensors are generated in this configuration.
        mask_shape=(0,),
        lambda_shape=(0,),
        lambda_inverse_shape=(0,),
        has_mask=False,
        block_size=block_size,
        output_dir='input',
        query_filename='q.bin',
        key_filename='k.bin',
        value_filename='v.bin',
        slope_rate_filename='s.bin',
        kv_cache_filename='kv.bin',
        # Golden outputs land next to the kernel's outputs for comparison.
        kv_update_filename='../output/golden_kv.bin',
        attention_out_filename='../output/golden_o.bin',
    )
    generate_golden(config)
