import torch

from nanoflow.operations.rope.help_functions import apply_rope
from nanoflow.operations.operation_base import Operations, Operation_Layer
from nanoflow.core.IOWrapper import IOWrapper
from nanoflow.operations.impl_base import OperationImpl
from nanoflow.utils.util_functions import tensor_offset_to_req_idx


def _apply_rotary_emb_torch(
    x: torch.Tensor,
    cos: torch.Tensor,
    sin: torch.Tensor,
) -> torch.Tensor:
    cos = cos.unsqueeze(-2).to(x.dtype)
    sin = sin.unsqueeze(-2).to(x.dtype)
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    o1 = x1 * cos - x2 * sin
    o2 = x2 * cos + x1 * sin
    return torch.stack((o1, o2), dim=-1).flatten(-2)

class RopeAppendTorchImpl(OperationImpl):
    """Pure-torch implementation: split fused kqv, apply RoPE to q/k per
    request, and append k/v to the external KVCache.

    All tensor work runs on ``self.stream`` (a CUDA stream supplied by the
    base class).
    """

    # Tag used as the profiling-DB table name (see init_profile_db on the op).
    category_tag = "torch"

    def __init__(self, op_base, stream, device):
        """Copy RoPE hyper-parameters and per-rank head geometry from the
        owning operation (``op_base``, a RopeAppendTorch)."""
        super().__init__(op_base, stream, device)
        self.rope_type = op_base.rope_type
        # NOTE(review): base/rotary_dim are assigned only for llama3 and are
        # never read in this class — presumably consumed elsewhere (or dead);
        # confirm before removing.
        if self.rope_type == "llama3":
            self.base = 500000.0
            self.rotary_dim = 128
        self.theta = op_base.theta
        self.original_max_position_embeddings = op_base.original_max_position_embeddings
        self.low_freq_factor = op_base.low_freq_factor
        self.high_freq_factor = op_base.high_freq_factor
        self.factor = op_base.factor
        # Head counts are per tensor-parallel rank.
        self.num_kv_heads = op_base.num_kv_heads // op_base.tp_size
        self.num_qo_heads = op_base.num_qo_heads // op_base.tp_size
        self.head_dim = op_base.head_dim
    
    def config(self, impl_tag, parameter_map):
        """Select the variant: with or without KV-cache position offsets.

        Raises:
            ValueError: if ``impl_tag`` is not one of the two known tags.
        """
        if impl_tag == "withKVCache":
            self.use_kv_cache = True
        elif impl_tag == "withoutKVCache":
            self.use_kv_cache = False
        else:
            raise ValueError(f"Unknown impl_tag: {impl_tag}")
    
    def run(self, layer, kqv, KVCache, output):
        """Apply RoPE to q/k of each request in ``kqv`` and append k/v to
        ``KVCache``; the rotated q is copied into ``output``.

        Args:
            layer: layer handle forwarded to the KVCache accessors.
            kqv: fused activation of shape [tokens, (2*kv + qo) * head_dim]
                 (per-rank head counts).
            KVCache: external cache providing get_indices()/put().
            output: destination tensor for the rotated q slice.
        """
        with torch.cuda.stream(self.stream):
            # Column widths of the three slices inside the fused kqv tensor.
            layout_strides = [
                self.num_kv_heads * self.head_dim,
                self.num_kv_heads * self.head_dim,
                self.num_qo_heads * self.head_dim,
            ]
            # Split kqv along dim 1. The code unpacks the slices as k, v, q
            # (the two kv-sized slices first, then the qo-sized one).
            # NOTE(review): an earlier comment claimed the order was k, q, v —
            # verify the producer's layout matches this unpacking.
            k, v, q = torch.split(kqv, layout_strides, dim=1)
            k = k.contiguous()
            v = v.contiguous()
            q = q.contiguous()

            # Per-request row ranges within this batch, and the requests'
            # global ids (both prepared by RopeAppendTorch.update()).
            qo_indicies = self.op_base.qo_indicies
            input_req_idx = self.op_base.input_req_idx
            
            # Process each batch element.
            for i, global_index in enumerate(input_req_idx):
                start = qo_indicies[i]
                end = qo_indicies[i + 1]
                sub_q = q[start:end, :]
                sub_k = k[start:end, :]
                
                # Position offset: number of tokens already cached for this
                # request (0 when running without a KV cache).
                last_offset = 0
                if self.use_kv_cache:
                    last_offset = KVCache.get_indices(layer, global_index)

                # RoPE is applied in place (output aliases the input slice).
                apply_rope(
                    self.rope_type,
                    self.theta,
                    self.head_dim,
                    self.original_max_position_embeddings,
                    self.low_freq_factor,
                    self.high_freq_factor,
                    self.factor,
                    sub_q,
                    output=sub_q,
                    offset=last_offset
                )
                apply_rope(
                    self.rope_type,
                    self.theta,
                    self.head_dim,
                    self.original_max_position_embeddings,
                    self.low_freq_factor,
                    self.high_freq_factor,
                    self.factor,
                    sub_k,
                    output=sub_k,
                    offset=last_offset
                )

                # Write the updated values back.
                q[start:end, :] = sub_q
                k[start:end, :] = sub_k

                # Update the external KVCache with the new key and value.
                KVCache.put(layer, global_index, sub_k, v[start:end, :])
            output.copy_(q)
        
class RopeAppendTorch(Operations):
    """Operation that applies RoPE to q/k of a fused kqv activation and
    appends k/v to an externally supplied KVCache.

    Inputs:  "kqv" — fused activation [tokens, (qo + 2*kv) * head_dim / tp].
    Outputs: "q"   — rotated query slice [tokens, qo * head_dim / tp].
    Externals: "KVCache" — must be wired in before update()/run().
    """

    def __init__(
        self,
        name,
        device,
        theta,
        rope_type="llama3",
        factor=8.0,
        low_freq_factor=1.0,
        high_freq_factor=4.0,
        original_max_position_embeddings=8192,
        nano_idx=None
    ):
        """
        Args:
            name (str): The name of this operator.
            rope_type (str): The type of RoPE implementation to use. For llama3, pass "llama3".
            theta (float): The base used to compute the inverse frequency (typically set from config.rope_theta).
            factor (float): Scaling factor used in llama3.
            low_freq_factor (float): Lower bound frequency factor (llama3).
            high_freq_factor (float): Upper bound frequency factor (llama3).
            original_max_position_embeddings (int): The original maximum context length used in pretraining.
        """
        super().__init__(name, device, nano_idx)
        self.inputs = {"kqv": IOWrapper(self, "kqv", device).is_input()}
        self.outputs = {"q": IOWrapper(self, "q", device).is_output()}
        # The KV cache is attached externally before the op is used.
        self.externals = {"KVCache": None}

        # Save RoPE configuration.
        self.rope_type = rope_type
        self.theta = theta  # typically config.rope_theta
        self.factor = factor
        self.low_freq_factor = low_freq_factor
        self.high_freq_factor = high_freq_factor
        self.original_max_position_embeddings = original_max_position_embeddings

        self.impl_map = {}
        self.init_impl_map()
        self.op_layer = RopeAppendTorch_Layer

    def init_impl_map(self):
        """Register the available implementation(s) for this operation."""
        self.add_impl(RopeAppendTorchImpl)

    def setShape(self, num_kv_heads, num_qo_heads, head_dim, tp_size=1):
        """Record head geometry and initialize the I/O tensor shapes.

        Shapes are per tensor-parallel rank; the leading dimension (token
        count) is 0 here and is filled in later by the framework.
        Returns self so calls can be chained.
        """
        self.num_kv_heads = num_kv_heads
        self.num_qo_heads = num_qo_heads
        self.head_dim = head_dim
        self.tp_size = tp_size
        self.inputs["kqv"].init_shape((
            0,
            (self.num_qo_heads + 2 * self.num_kv_heads) * self.head_dim // self.tp_size,
        ))
        # The output "q" has shape [batch_size, num_qo_heads * head_dim]
        self.outputs["q"].init_shape((0, self.num_qo_heads * self.head_dim // self.tp_size))

        return self

    def update(self, qo_indicies, decode_batchsize):
        """Refresh per-batch request bookkeeping before a run.

        Computes which requests fall inside this op's slice of the batch,
        stores their row offsets (rebased to this slice) in
        ``self.qo_indicies`` and their global ids in ``self.input_req_idx``.
        When nano-split, the update is forwarded to every nano op instead.
        """
        if self.isNanoSplit:
            for nano_op in self.nano_ops:
                nano_op.update(qo_indicies, decode_batchsize)
        else:
            io = self.inputs["kqv"]
            start_req_idx = tensor_offset_to_req_idx(qo_indicies, io.tensor_offset)
            end_req_idx = tensor_offset_to_req_idx(qo_indicies, io.tensor_offset + io.batch_size)

            # Rebase the request offsets so they index into this op's slice.
            self.qo_indicies = torch.tensor(qo_indicies[start_req_idx:end_req_idx + 1]) - io.tensor_offset
            self.input_req_idx = self.externals["KVCache"].input_req_idx[start_req_idx:end_req_idx]

    def copy_nano(self, index):
        """Create a nano-split clone of this op sharing externals and layers.

        Fix: the previous version passed ``rope_type`` and ``theta``
        positionally in swapped order relative to ``__init__``; keyword
        arguments make every value land on the right parameter.
        """
        new_op = RopeAppendTorch(
            self.name,
            self.device,
            self.theta,
            rope_type=self.rope_type,
            factor=self.factor,
            low_freq_factor=self.low_freq_factor,
            high_freq_factor=self.high_freq_factor,
            original_max_position_embeddings=self.original_max_position_embeddings,
            nano_idx=index,
        )
        new_op.set_category(self.category)
        new_op.externals = self.externals
        new_op.expand_layer(self.layer_list)
        new_op.setShape(self.num_kv_heads, self.num_qo_heads, self.head_dim, self.tp_size)
        self.nano_ops.append(new_op)

        return new_op

    def init_profile_db(self):
        """Create one profiling table per registered implementation."""
        for _, impl in self.impl_map.items():
            self.cursor.execute(f'''
            CREATE TABLE IF NOT EXISTS "{impl.category_tag}" (
                id           INTEGER PRIMARY KEY AUTOINCREMENT, 
                batch_size   INTEGER,
                head_dim INTEGER,
                num_qo_heads INTEGER,
                num_kv_heads INTEGER,
                use_kv_cache Boolean,
                average_time_ms REAL
            );
            ''')
    
    def store_profile_db(self, category_tag, impl_tag, average_elapsed_ms):
        """Insert one profiling row for the given implementation/config.

        Fix: the INSERT lists six columns and binds six values, but the
        previous version supplied only four '?' placeholders, which raises
        sqlite3.ProgrammingError. The table name is now quoted to match
        init_profile_db.
        """
        print(f"Name: {self.name}, Category: {category_tag}, Batch Size: {self.batch_size}, Average Time: {average_elapsed_ms} ms")
        self.cursor.execute(f'''
            INSERT INTO "{category_tag}" (batch_size, head_dim, num_qo_heads, num_kv_heads, use_kv_cache, average_time_ms)
            VALUES (?, ?, ?, ?, ?, ?)
            ''', (self.batch_size, self.head_dim, self.num_qo_heads, self.num_kv_heads, impl_tag == "withKVCache", average_elapsed_ms))

    def run(self, layer):
        """Execute the selected implementation for one layer."""
        self.impl.run(layer, self.inputs["kqv"].tensor, self.externals["KVCache"], self.outputs["q"].tensor)

    def profile_run(self):
        """Run once on the first layer for profiling purposes."""
        self.run(self.layer_list[0])
        
class RopeAppendTorch_Layer(Operation_Layer):
    """Per-layer handle that delegates execution to its parent op."""

    def __init__(self, layer, base_op):
        super().__init__(layer, base_op)

    def run(self):
        # Forward to the owning RopeAppendTorch with this wrapper's layer.
        self.parent.run(self.layer)
        