# Copyright (c) Huawei Technologies Co., Ltd. 2025. All rights reserved

import copy
import time
import types

import torch
from torch import nn
import torch.distributed as dist
import kpex._C as kernel


class Fast_GatingAttention(nn.Module):
    """Gating attention module backed by the fused ``kpex`` kernel.

    Holds the (frozen) query/key/value/gating/output weights in the same
    layout as the reference AlphaFold GatingAttention so trained parameters
    can be loaded directly, but delegates the forward pass to
    ``kernel.alphafold.gating_attention``.
    """

    def __init__(self, config, global_config, a_dim, m_dim, output_dim):
        """Allocate (uninitialized, non-trainable) attention weights.

        Args:
            config: per-module config mapping; must contain ``'num_head'``
                and may override ``'key_dim'`` / ``'value_dim'``.
            global_config: stored as-is; not used by this module directly.
            a_dim: query-side feature dimension.
            m_dim: memory-side feature dimension.
            output_dim: output feature dimension.
        """
        super().__init__()
        self.config = config
        self.global_config = global_config
        self.output_dim = output_dim
        # Total k/v dims default to the input dims, then are split per head.
        self.key_dim = self.config.get('key_dim', int(a_dim))
        self.value_dim = self.config.get('value_dim', int(m_dim))
        self.num_head = self.config['num_head']
        assert self.key_dim % self.num_head == 0
        assert self.value_dim % self.num_head == 0
        self.key_dim = self.key_dim // self.num_head
        self.value_dim = self.value_dim // self.num_head
        # q, k, v, gating and output weights; frozen (inference only).
        self.query_w = nn.Parameter(torch.Tensor(a_dim, self.num_head, self.key_dim), requires_grad=False)
        self.key_w = nn.Parameter(torch.Tensor(m_dim, self.num_head, self.key_dim), requires_grad=False)
        self.value_w = nn.Parameter(torch.Tensor(m_dim, self.num_head, self.value_dim), requires_grad=False)
        self.gating_w = nn.Parameter(torch.Tensor(a_dim, self.num_head, self.value_dim), requires_grad=False)
        self.gating_b = nn.Parameter(torch.Tensor(self.num_head, self.value_dim), requires_grad=False)
        self.output_w = nn.Parameter(torch.Tensor(self.num_head, self.value_dim, self.output_dim), requires_grad=False)
        self.output_b = nn.Parameter(torch.Tensor(self.output_dim), requires_grad=False)
        # softmax & act fn (kept for interface parity with the reference module;
        # the fused kernel performs these internally).
        self.softmax = nn.Softmax(dim=-1)
        self.sigmoid = nn.Sigmoid()

    @torch.jit.ignore
    def read_time(self) -> float:
        """Wall-clock time in seconds; excluded from TorchScript tracing."""
        return time.time()

    def forward(self, q_data, m_data, bias, nonbatched_bias=None, block_size=None):
        """Run fused gating attention on ``q_data``.

        Args:
            q_data: query activations; cast to bfloat16 for the kernel.
            m_data: memory activations. NOTE(review): currently ignored —
                the kernel receives ``q_data`` for both inputs, presumably
                self-attention only. Confirm against callers.
            bias: attention bias, cast to bfloat16.
            nonbatched_bias: optional extra bias; an empty tensor (the
                previous default) is substituted when omitted.
            block_size: forwarded to the kernel.

        Returns:
            The kernel's attention output.
        """
        # Avoid a mutable (tensor) default argument shared across calls.
        if nonbatched_bias is None:
            nonbatched_bias = torch.Tensor()
        # Pack and cache the permuted weights once per module instance.
        if not hasattr(self, "kpex_weights"):
            self.kpex_weights = kernel.alphafold.GatingAttentionWeight(
                self.query_w.permute(1, 2, 0),
                self.key_w.permute(1, 2, 0),
                self.value_w.permute(1, 2, 0),
                self.gating_w.permute(1, 2, 0),
                self.gating_b,
                self.output_w.permute(2, 0, 1),
                self.output_b,
            )
        act = q_data.to(torch.bfloat16)
        out = kernel.alphafold.gating_attention(
            act,
            act,
            bias.to(torch.bfloat16),
            nonbatched_bias.to(torch.bfloat16),
            self.kpex_weights,
            block_size,
        )
        return out
    

def gating_attention_forward(self, q_data, m_data, bias, nonbatched_bias=None, block_size=None):
    """Kernel-backed replacement for a GatingAttention module's ``forward``.

    Intended to be bound onto an existing attention module via
    ``types.MethodType`` (see ``kpex_alphafold``) so the module's trained
    weights are reused by the fused kernel.

    Args:
        q_data: query activations; cast to bfloat16 for the kernel.
        m_data: memory activations. NOTE(review): currently ignored — the
            kernel receives ``q_data`` for both inputs, presumably
            self-attention only. Confirm against callers.
        bias: attention bias, cast to bfloat16.
        nonbatched_bias: optional extra bias; an empty tensor (the previous
            default) is substituted when omitted.
        block_size: forwarded to the kernel.

    Returns:
        The kernel's attention output.
    """
    # Avoid a mutable (tensor) default argument shared across calls.
    if nonbatched_bias is None:
        nonbatched_bias = torch.Tensor()
    # Pack and cache the permuted weights once per patched module instance.
    if not hasattr(self, "kpex_weights"):
        self.kpex_weights = kernel.alphafold.GatingAttentionWeight(
            self.query_w.permute(1, 2, 0),
            self.key_w.permute(1, 2, 0),
            self.value_w.permute(1, 2, 0),
            self.gating_w.permute(1, 2, 0),
            self.gating_b,
            self.output_w.permute(2, 0, 1),
            self.output_b,
        )
    act = q_data.to(torch.bfloat16)
    out = kernel.alphafold.gating_attention(
        act,
        act,
        bias.to(torch.bfloat16),
        nonbatched_bias.to(torch.bfloat16),
        self.kpex_weights,
        block_size,
    )
    return out



def kpex_alphafold(model, model_config, dtype=torch.float):
    """Return a deep copy of *model* with evoformer attention kernels patched.

    Every gating-attention submodule found in the extra-MSA stack, the
    evoformer iteration stack, and the template pair stack has its
    ``forward`` rebound to :func:`gating_attention_forward`, which routes
    the computation through the fused kpex kernel while reusing the
    module's existing weights.

    Args:
        model: model wrapper exposing ``model.model.impl.evoformer``.
        model_config: accepted for interface compatibility; currently unused.
        dtype: accepted for interface compatibility; currently unused.

    Returns:
        The patched deep copy; the input model is left untouched.
    """
    def _patch(attention_module):
        # Rebind forward so the kernel-backed implementation receives the
        # module itself as `self` and can read its trained weights.
        attention_module.forward = types.MethodType(
            gating_attention_forward, attention_module
        )

    new_model = copy.deepcopy(model)
    evoformer = new_model.model.impl.evoformer

    if hasattr(evoformer, "extra_msa_stack"):
        for block in evoformer.extra_msa_stack:
            _patch(block.msa_row_attention_with_pair_bias.attention)
            _patch(block.triangle_attention_starting_node.attention)
            _patch(block.triangle_attention_ending_node.attention)
    if hasattr(evoformer, "evoformer_iteration"):
        for block in evoformer.evoformer_iteration:
            _patch(block.msa_row_attention_with_pair_bias.attention)
            _patch(block.msa_column_attention.attention)
            _patch(block.triangle_attention_starting_node.attention)
            _patch(block.triangle_attention_ending_node.attention)
    if hasattr(evoformer, "template_embedding"):
        sub_stack = (
            evoformer.template_embedding.single_template_embedding
            .template_pair_stack.template_pair_sub_stack
        )
        for block in sub_stack:
            _patch(block.triangle_attention_starting_node.attention)
            _patch(block.triangle_attention_ending_node.attention)
    return new_model


