﻿using TorchSharp;
using TorchSharp.Modules;
using static TorchSharp.torch;
namespace Qwen3.Module;

/// <summary>
/// Causal multi-head self-attention for dense Qwen3 layers.
/// The per-head dimension is derived as hidden_size / num_attention_heads, and
/// Q/K are per-head RMS-normalized before rotary embedding (Qwen3 "QK-norm").
/// </summary>
public class Qwen3DenseAttention : nn.Module<torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor>
{
    private readonly int n_embed;
    private readonly int n_heads;
    private readonly int n_kv_heads;
    private readonly int n_embed_per_head;
    private readonly int n_kv_embed;
    private readonly Linear o_proj;
    private readonly Linear q_proj;
    private readonly Linear k_proj;
    private readonly Linear v_proj;
    private readonly Qwen3RMSNorm q_norm;
    private readonly Qwen3RMSNorm k_norm;

    public Qwen3DenseAttention(
        Qwen3Config config)
        : base(nameof(Qwen3DenseAttention))
    {
        this.n_heads = config.NumAttentionHeads;
        this.n_kv_heads = config.NumKeyValueHeads;
        this.n_embed = config.HiddenSize;
        this.n_embed_per_head = this.n_embed / this.n_heads;
        this.n_kv_embed = this.n_kv_heads * this.n_embed_per_head;
        this.q_proj = nn.Linear(this.n_embed, this.n_embed, hasBias: false);
        this.k_proj = nn.Linear(this.n_embed, this.n_kv_embed, hasBias: false);
        this.v_proj = nn.Linear(this.n_embed, this.n_kv_embed, hasBias: false);
        this.o_proj = nn.Linear(this.n_embed, this.n_embed, hasBias: false);
        // QK-norm is applied per head, hence normalized over n_embed_per_head.
        this.q_norm = new Qwen3RMSNorm(this.n_embed_per_head, config.RmsNormEps);
        this.k_norm = new Qwen3RMSNorm(this.n_embed_per_head, config.RmsNormEps);
        this.RegisterComponents();
    }

    /// <summary>Computes causal self-attention over the input sequence.</summary>
    /// <param name="x">Hidden states of shape [B, T, C].</param>
    /// <param name="cos">Rotary cosine table; 3-D [B, T, D] for text-only, 4-D for multi-modal M-RoPE (see <see cref="ProcessRotaryComponent"/>).</param>
    /// <param name="sin">Rotary sine table, same shape as <paramref name="cos"/>.</param>
    /// <returns>Attention output of shape [B, T, C].</returns>
    public override torch.Tensor forward(torch.Tensor x, torch.Tensor cos, torch.Tensor sin)
    {
        using var _ = NewDisposeScope();
        var B = x.shape[0];
        var T = x.shape[1];
        var C = x.shape[2];

        var q = this.q_proj.forward(x).view(B, T, this.n_heads, this.n_embed_per_head);
        var k = this.k_proj.forward(x).view(B, T, this.n_kv_heads, this.n_embed_per_head);
        var v = this.v_proj.forward(x).view(B, T, this.n_kv_heads, this.n_embed_per_head).transpose(1, 2);

        // RMSNorm acts only on the last (per-head) dimension, which the
        // head/sequence transpose does not touch — so normalize first and
        // transpose once, instead of transpose -> norm -> transpose-back.
        q = this.q_norm.forward(q).transpose(1, 2);   // [B, H, T, D]
        k = this.k_norm.forward(k).transpose(1, 2);   // [B, KVH, T, D]

        (q, k) = ApplyRotaryPosEmb(q, k, cos, sin);

        // Grouped-query attention: replicate each kv head so every query head
        // has a matching key/value head (q head i maps to kv head i / numRepeat).
        if (this.n_kv_heads < this.n_heads)
        {
            var numRepeat = this.n_heads / this.n_kv_heads;
            k = k.repeat_interleave(numRepeat, dim: 1);
            v = v.repeat_interleave(numRepeat, dim: 1);
        }

        // "is_casual" is TorchSharp's (misspelled) causal-mask flag; the standard
        // 1/sqrt(head_dim) score scaling is applied internally by SDPA.
        var y = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_casual: true);
        y = y.transpose(1, 2).contiguous().view(B, T, C);

        return this.o_proj.forward(y).MoveToOuterDisposeScope();
    }

    /// <summary>Rotates the halves of the last dimension: (x1, x2) -> (-x2, x1).</summary>
    private static Tensor RotateHalf(Tensor x)
    {
        var half = (int)(x.shape[^1] / 2);
        var x1 = x[TensorIndex.Ellipsis, ..half];
        var x2 = x[TensorIndex.Ellipsis, half..];
        return torch.cat([-x2, x1], dim: -1);
    }

    /// <summary>Applies rotary position embedding to q and k and returns the embedded pair.</summary>
    private static (Tensor, Tensor) ApplyRotaryPosEmb(Tensor q, Tensor k, Tensor cos, Tensor sin, Tensor? positionIds = null, int unsqueezeDim = 1)
    {
        if (cos.ndim == 4)
        {
            // 4-D tables carry one rotary table per multi-modal position axis;
            // collapse them into a single [B, 1, T, D] table.
            cos = ProcessRotaryComponent(cos);
            sin = ProcessRotaryComponent(sin);
        }
        else
        {
            // Text-only: [B, T, D] -> [B, 1, T, D] so it broadcasts over heads.
            cos = cos.unsqueeze(1);
            sin = sin.unsqueeze(1);
        }

        var qRotated = RotateHalf(q);
        var kRotated = RotateHalf(k);
        var qEmbed = (q * cos) + (qRotated * sin);
        var kEmbed = (k * cos) + (kRotated * sin);
        return (qEmbed, kEmbed);
    }

    /// <summary>
    /// Folds a multi-axis (M-RoPE) rotary table into a single per-position table by
    /// taking section i from position axis i % 3, mirroring HF's multimodal rotary embedding.
    /// NOTE(review): <c>m[index % 3]</c> indexes dim 0, which implies the input layout is
    /// [3, B, T, D] (axis-first, as in HF), not [B, 3, T, D] as a previous comment
    /// claimed — confirm against the caller. Section sizes are hard-coded for
    /// head_dim == 128 (mrope sections 16+24+24, doubled for the cos/sin halves).
    /// </summary>
    private static torch.Tensor ProcessRotaryComponent(torch.Tensor x)
    {
        var sections = x.split(new long[] { 16, 24, 24, 16, 24, 24 }, dim: -1);
        var processed = new List<torch.Tensor>();
        var index = 0;
        foreach (var m in sections)
        {
            processed.Add(m[index % 3]);
            index++;
        }
        return torch.cat(processed, dim: -1).unsqueeze(1);
    }
}

/// <summary>
/// Causal multi-head self-attention for Qwen3 MoE layers.
/// Unlike the dense variant, the per-head dimension comes from config.HeadDim
/// (it need not equal hidden_size / num_attention_heads), so the projections
/// map hidden_size &lt;-&gt; n_heads * head_dim explicitly.
/// </summary>
public class Qwen3MoeAttention : nn.Module<torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor>
{
    private readonly int n_embed;
    private readonly int n_heads;
    private readonly int n_kv_heads;
    private readonly int head_dim;
    private readonly Linear o_proj;
    private readonly Linear q_proj;
    private readonly Linear k_proj;
    private readonly Linear v_proj;
    private readonly Qwen3RMSNorm q_norm;
    private readonly Qwen3RMSNorm k_norm;

    public Qwen3MoeAttention(
        Qwen3Config config)
        : base(nameof(Qwen3MoeAttention))
    {
        this.n_heads = config.NumAttentionHeads;
        this.n_kv_heads = config.NumKeyValueHeads;
        this.n_embed = config.HiddenSize;
        this.head_dim = config.HeadDim;

        this.q_proj = nn.Linear(this.n_embed, this.n_heads * this.head_dim, hasBias: false);
        this.k_proj = nn.Linear(this.n_embed, this.n_kv_heads * this.head_dim, hasBias: false);
        this.v_proj = nn.Linear(this.n_embed, this.n_kv_heads * this.head_dim, hasBias: false);
        this.o_proj = nn.Linear(this.n_heads * this.head_dim, this.n_embed, hasBias: false);
        // QK-norm is applied per head, hence normalized over head_dim.
        this.q_norm = new Qwen3RMSNorm(this.head_dim, config.RmsNormEps);
        this.k_norm = new Qwen3RMSNorm(this.head_dim, config.RmsNormEps);
        this.RegisterComponents();
    }

    /// <summary>Computes causal self-attention over the input sequence.</summary>
    /// <param name="x">Hidden states of shape [B, T, C].</param>
    /// <param name="cos">Rotary cosine table; 3-D [B, T, D] for text-only, 4-D for multi-modal M-RoPE (see <see cref="ProcessRotaryComponent"/>).</param>
    /// <param name="sin">Rotary sine table, same shape as <paramref name="cos"/>.</param>
    /// <returns>Attention output of shape [B, T, C].</returns>
    public override torch.Tensor forward(torch.Tensor x, torch.Tensor cos, torch.Tensor sin)
    {
        using var _ = NewDisposeScope();
        var B = x.shape[0];
        var T = x.shape[1];

        var q = this.q_proj.forward(x).view(B, T, this.n_heads, this.head_dim);
        var k = this.k_proj.forward(x).view(B, T, this.n_kv_heads, this.head_dim);
        var v = this.v_proj.forward(x).view(B, T, this.n_kv_heads, this.head_dim).transpose(1, 2);

        // RMSNorm acts only on the last (per-head) dimension, which the
        // head/sequence transpose does not touch — so normalize first and
        // transpose once, instead of transpose -> norm -> transpose-back.
        q = this.q_norm.forward(q).transpose(1, 2);   // [B, H, T, D]
        k = this.k_norm.forward(k).transpose(1, 2);   // [B, KVH, T, D]

        (q, k) = ApplyRotaryPosEmb(q, k, cos, sin);

        // Grouped-query attention: replicate each kv head so every query head
        // has a matching key/value head.
        if (this.n_kv_heads < this.n_heads)
        {
            var numRepeat = this.n_heads / this.n_kv_heads;
            k = RepeatKV(k, numRepeat);
            v = RepeatKV(v, numRepeat);
        }

        // BUG FIX: head_dim^-0.5 was previously passed as `p:`, but in TorchSharp's
        // scaled_dot_product_attention `p` is the *dropout probability*, not the
        // attention scale — it silently enabled ~head_dim^-0.5 dropout. SDPA already
        // scales scores by 1/sqrt(head_dim) internally, so neither argument is needed.
        var y = torch.nn.functional.scaled_dot_product_attention(q, k, v, is_casual: true);
        y = y.transpose(1, 2).contiguous();

        y = y.view(B, T, this.n_heads * this.head_dim);
        return this.o_proj.forward(y).MoveToOuterDisposeScope();
    }

    /// <summary>
    /// Replicates each kv head <paramref name="nRep"/> times along the head axis:
    /// [B, KVH, T, D] -> [B, KVH * nRep, T, D] (equivalent to repeat_interleave on dim 1,
    /// mirroring HF's repeat_kv).
    /// </summary>
    private static Tensor RepeatKV(Tensor x, int nRep)
    {
        if (nRep == 1)
        {
            return x;
        }

        var batchSize = x.shape[0];
        var nKVHeads = x.shape[1];
        var seqLen = x.shape[2];
        var headDim = x.shape[3];
        return x.unsqueeze(2)
                .expand(batchSize, nKVHeads, nRep, seqLen, headDim)
                .reshape(batchSize, nKVHeads * nRep, seqLen, headDim);
    }

    /// <summary>Rotates the halves of the last dimension: (x1, x2) -> (-x2, x1).</summary>
    private static Tensor RotateHalf(Tensor x)
    {
        var half = (int)(x.shape[^1] / 2);
        var x1 = x[TensorIndex.Ellipsis, ..half];
        var x2 = x[TensorIndex.Ellipsis, half..];
        return torch.cat([-x2, x1], dim: -1);
    }

    /// <summary>Applies rotary position embedding to q and k and returns the embedded pair.</summary>
    private static (Tensor, Tensor) ApplyRotaryPosEmb(Tensor q, Tensor k, Tensor cos, Tensor sin, Tensor? positionIds = null, int unsqueezeDim = 1)
    {
        if (cos.ndim == 4)
        {
            // 4-D tables carry one rotary table per multi-modal position axis;
            // collapse them into a single [B, 1, T, D] table.
            cos = ProcessRotaryComponent(cos);
            sin = ProcessRotaryComponent(sin);
        }
        else
        {
            // Text-only: [B, T, D] -> [B, 1, T, D] so it broadcasts over heads.
            cos = cos.unsqueeze(1);
            sin = sin.unsqueeze(1);
        }

        var qRotated = RotateHalf(q);
        var kRotated = RotateHalf(k);
        var qEmbed = (q * cos) + (qRotated * sin);
        var kEmbed = (k * cos) + (kRotated * sin);
        return (qEmbed, kEmbed);
    }

    /// <summary>
    /// Folds a multi-axis (M-RoPE) rotary table into a single per-position table by
    /// taking section i from position axis i % 3, mirroring HF's multimodal rotary embedding.
    /// NOTE(review): <c>m[index % 3]</c> indexes dim 0, which implies the input layout is
    /// [3, B, T, D] (axis-first, as in HF), not [B, 3, T, D] as a previous comment
    /// claimed — confirm against the caller. Section sizes are hard-coded for
    /// head_dim == 128; this class allows config.HeadDim != 128, in which case the
    /// split would fail — TODO confirm the multi-modal path is only used with D == 128.
    /// </summary>
    private static torch.Tensor ProcessRotaryComponent(torch.Tensor x)
    {
        var sections = x.split(new long[] { 16, 24, 24, 16, 24, 24 }, dim: -1);
        var processed = new List<torch.Tensor>();
        var index = 0;
        foreach (var m in sections)
        {
            processed.Add(m[index % 3]);
            index++;
        }
        return torch.cat(processed, dim: -1).unsqueeze(1);
    }
}