﻿using TorchSharp;
using TorchSharp.Modules;
using static TorchSharp.torch;

namespace Qwen3.Module;

/// <summary>
/// Transformer backbone for the dense Qwen3 variant: token embedding table,
/// a stack of dense decoder blocks, and a final RMS norm.
/// </summary>
public class Qwen3DenseModel : nn.Module<torch.Tensor, torch.Tensor, torch.Tensor>
{
    public readonly Embedding embed_tokens;
    private readonly ModuleList<Qwen3DenseBlock> layers;
    private readonly Qwen3RMSNorm norm;
    private readonly Qwen3RotaryEmbedding rotary_emb;

    public Qwen3DenseModel(Qwen3Config config)
        : base(nameof(Qwen3DenseModel))
    {
        this.embed_tokens = nn.Embedding(config.VocabSize, config.HiddenSize);
        this.rotary_emb = new Qwen3RotaryEmbedding(config);
        this.norm = new Qwen3RMSNorm(config.HiddenSize, config.RmsNormEps);

        this.layers = new ModuleList<Qwen3DenseBlock>();
        for (int layerIndex = 0; layerIndex < config.NumHiddenLayers; layerIndex++)
        {
            this.layers.Add(new Qwen3DenseBlock(config));
        }

        this.RegisterComponents();
    }

    /// <summary>
    /// Runs hidden states through every decoder block and the final norm.
    /// </summary>
    /// <param name="x">Hidden states (already-embedded token representations).</param>
    /// <param name="positionIds">Position indices used to build the rotary tables.</param>
    /// <returns>Normalized hidden states, moved out of the local dispose scope.</returns>
    public override torch.Tensor forward(torch.Tensor x, torch.Tensor positionIds)
    {
        using var scope = NewDisposeScope();

        // The rotary cos/sin tables are computed once and shared by every block.
        var (cos, sin) = this.rotary_emb.forward(x, positionIds);

        var hidden = x;
        foreach (var block in this.layers)
        {
            hidden = block.forward(hidden, cos, sin);
        }

        return this.norm.forward(hidden).MoveToOuterDisposeScope();
    }
}


/// <summary>
/// Transformer backbone for the Mixture-of-Experts Qwen3 variant: token
/// embedding table, a stack of MoE decoder blocks, and a final RMS norm.
/// </summary>
public class Qwen3MoEModel : nn.Module<torch.Tensor, torch.Tensor, torch.Tensor>
{
    public readonly Embedding embed_tokens;
    private readonly ModuleList<Qwen3MoEBlock> layers;
    private readonly Qwen3RMSNorm norm;
    private readonly Qwen3RotaryEmbedding rotary_emb;

    public Qwen3MoEModel(Qwen3Config config)
        : base(nameof(Qwen3MoEModel))
    {
        this.embed_tokens = nn.Embedding(config.VocabSize, config.HiddenSize);
        this.rotary_emb = new Qwen3RotaryEmbedding(config);
        this.norm = new Qwen3RMSNorm(config.HiddenSize, config.RmsNormEps);

        this.layers = new ModuleList<Qwen3MoEBlock>();
        for (int layerIndex = 0; layerIndex < config.NumHiddenLayers; layerIndex++)
        {
            this.layers.Add(new Qwen3MoEBlock(config));
        }

        this.RegisterComponents();
    }

    /// <summary>
    /// Runs hidden states through every decoder block and the final norm.
    /// </summary>
    /// <param name="x">Hidden states (already-embedded token representations).</param>
    /// <param name="positionIds">Position indices used to build the rotary tables.</param>
    /// <returns>Normalized hidden states, moved out of the local dispose scope.</returns>
    public override torch.Tensor forward(torch.Tensor x, torch.Tensor positionIds)
    {
        using var scope = NewDisposeScope();

        // The rotary cos/sin tables are computed once and shared by every block.
        var (cos, sin) = this.rotary_emb.forward(x, positionIds);

        var hidden = x;
        foreach (var block in this.layers)
        {
            hidden = block.forward(hidden, cos, sin);
        }

        return this.norm.forward(hidden).MoveToOuterDisposeScope();
    }
}

/// <summary>
/// Dense Qwen3 causal language model: backbone plus an (untied, bias-free)
/// LM head projecting hidden states to vocabulary logits, with simple
/// greedy (argmax) generation helpers.
/// </summary>
public class Qwen3Dense : nn.Module<torch.Tensor, torch.Tensor>
{
    public readonly Qwen3Config config;
    private readonly Qwen3DenseModel model;
    private readonly Linear lm_head;

    public Qwen3Dense(Qwen3Config config)
        : base(nameof(Qwen3Dense))
    {
        this.config = config;
        this.model = new Qwen3DenseModel(config);
        this.lm_head = nn.Linear(config.HiddenSize, config.VocabSize, hasBias: false);
        this.RegisterComponents();
    }

    /// <summary>
    /// Builds position ids [0..T-1], broadcast over the batch dimension.
    /// </summary>
    /// <param name="inputIds">Token ids; assumed shape (B, T) — the first two dims are read.</param>
    /// <returns>Int64 tensor of shape (B, T) on the same device as <paramref name="inputIds"/>.</returns>
    private static torch.Tensor GetPositionIds(torch.Tensor inputIds)
    {
        var B = inputIds.shape[0];
        var T = inputIds.shape[1];
        var positionIds = torch.arange(T, dtype: torch.@long, inputIds.device);
        return positionIds.unsqueeze(0).expand(B, -1);
    }

    /// <summary>
    /// Full forward pass: embed tokens, run the backbone, project to logits.
    /// </summary>
    /// <param name="inputIds">Token ids of shape (B, T).</param>
    /// <returns>Logits over the vocabulary for every position.</returns>
    public override torch.Tensor forward(torch.Tensor inputIds)
    {
        using var _ = NewDisposeScope();
        var x = this.model.embed_tokens.forward(inputIds);
        using var positionIds = GetPositionIds(inputIds);
        x = this.model.forward(x, positionIds);
        var logits = this.lm_head.forward(x);
        return logits.MoveToOuterDisposeScope();
    }

    /// <summary>
    /// Greedy (argmax) generation, yielding each new token as it is produced.
    /// Stops after <paramref name="maxNewTokens"/> tokens or when a stop token
    /// is sampled (the stop token is neither streamed nor yielded).
    /// NOTE(review): recomputes the full forward pass every step (no KV cache),
    /// and item&lt;int&gt;() implies batch size 1 — confirm with callers.
    /// </summary>
    public IEnumerable<int> GenerateStream(torch.Tensor inputIds, int maxNewTokens, List<int> stopTokens, TextStreamer streamer)
    {
        using var scope = NewDisposeScope();
        var stopTokensSet = new HashSet<int>(stopTokens); // O(1) stop-token lookup

        var sequence = inputIds;
        for (int i = 0; i < maxNewTokens; i++)
        {
            // Per-step tensors are disposed at the end of each iteration so memory
            // stays bounded while the sequence grows (previously they accumulated
            // in the scope, and the int32 temporary was never disposed at all).
            using var logits = this.forward(sequence);
            using var lastLogits = logits[torch.TensorIndex.Colon, -1, torch.TensorIndex.Colon];
            // argmax(softmax(x)) == argmax(x): the softmax was redundant work.
            using var nextTokenTensor = lastLogits.argmax(dim: -1, keepdim: true);
            using var nextTokenInt32 = nextTokenTensor.to(torch.int32);
            int nextToken = nextTokenInt32.item<int>();
            if (stopTokensSet.Contains(nextToken))
            {
                yield break;
            }
            streamer.Put(nextToken);
            yield return nextToken;

            // Append the new token, then release the superseded intermediate
            // sequence (never the caller-owned input tensor).
            var extended = torch.cat(new Tensor[] { sequence, nextTokenTensor }, dim: 1).MoveToOtherDisposeScope(sequence);
            if (!ReferenceEquals(sequence, inputIds))
            {
                sequence.Dispose();
            }
            sequence = extended;
        }
    }

    /// <summary>
    /// Greedy (argmax) generation. Returns the full sequence (prompt plus new
    /// tokens), stopping early when a stop token is produced (the stop token is
    /// not appended).
    /// NOTE(review): recomputes the full forward pass every step (no KV cache),
    /// and item&lt;int&gt;() implies batch size 1 — confirm with callers.
    /// </summary>
    public torch.Tensor Generate(torch.Tensor inputIds, int maxNewTokens, List<int> stopTokens)
    {
        using var scope = NewDisposeScope();
        var stopTokensSet = new HashSet<int>(stopTokens); // O(1) stop-token lookup

        var sequence = inputIds;
        for (int i = 0; i < maxNewTokens; i++)
        {
            // Per-step tensors are disposed at the end of each iteration so memory
            // stays bounded while the sequence grows (previously every iteration's
            // full-vocab logits stayed alive until generation finished).
            using var logits = this.forward(sequence);
            using var lastLogits = logits[torch.TensorIndex.Colon, -1, torch.TensorIndex.Colon];
            // argmax(softmax(x)) == argmax(x): the softmax was redundant work.
            using var nextTokenTensor = lastLogits.argmax(dim: -1, keepdim: true);
            using var nextTokenInt32 = nextTokenTensor.to(torch.int32);
            int nextToken = nextTokenInt32.item<int>();
            if (stopTokensSet.Contains(nextToken))
            {
                break;
            }

            // Append the new token, then release the superseded intermediate
            // sequence (never the caller-owned input tensor).
            var extended = torch.cat(new Tensor[] { sequence, nextTokenTensor }, dim: 1).MoveToOtherDisposeScope(sequence);
            if (!ReferenceEquals(sequence, inputIds))
            {
                sequence.Dispose();
            }
            sequence = extended;
        }
        return sequence.MoveToOuterDisposeScope();
    }
}

/// <summary>
/// Mixture-of-Experts Qwen3 causal language model: backbone plus an (untied,
/// bias-free) LM head projecting hidden states to vocabulary logits, with
/// simple greedy (argmax) generation helpers.
/// </summary>
public class Qwen3MoE : nn.Module<torch.Tensor, torch.Tensor>
{
    public readonly Qwen3Config config;
    private readonly Qwen3MoEModel model;
    private readonly Linear lm_head;

    public Qwen3MoE(Qwen3Config config)
        : base(nameof(Qwen3MoE))
    {
        this.config = config;
        this.model = new Qwen3MoEModel(config);
        this.lm_head = nn.Linear(config.HiddenSize, config.VocabSize, hasBias: false);
        this.RegisterComponents();
    }

    /// <summary>
    /// Builds position ids [0..T-1], broadcast over the batch dimension.
    /// </summary>
    /// <param name="inputIds">Token ids; assumed shape (B, T) — the first two dims are read.</param>
    /// <returns>Int64 tensor of shape (B, T) on the same device as <paramref name="inputIds"/>.</returns>
    private static torch.Tensor GetPositionIds(torch.Tensor inputIds)
    {
        var B = inputIds.shape[0];
        var T = inputIds.shape[1];
        var positionIds = torch.arange(T, dtype: torch.@long, inputIds.device);
        return positionIds.unsqueeze(0).expand(B, -1);
    }

    /// <summary>
    /// Full forward pass: embed tokens, run the backbone, project to logits.
    /// </summary>
    /// <param name="inputIds">Token ids of shape (B, T).</param>
    /// <returns>Logits over the vocabulary for every position.</returns>
    public override torch.Tensor forward(torch.Tensor inputIds)
    {
        using var _ = NewDisposeScope();
        var x = this.model.embed_tokens.forward(inputIds);
        using var positionIds = GetPositionIds(inputIds);
        x = this.model.forward(x, positionIds);
        var logits = this.lm_head.forward(x);
        return logits.MoveToOuterDisposeScope();
    }

    /// <summary>
    /// Greedy (argmax) generation, yielding each new token as it is produced.
    /// Stops after <paramref name="maxNewTokens"/> tokens or when a stop token
    /// is sampled (the stop token is neither streamed nor yielded).
    /// NOTE(review): recomputes the full forward pass every step (no KV cache),
    /// and item&lt;int&gt;() implies batch size 1 — confirm with callers.
    /// </summary>
    public IEnumerable<int> GenerateStream(torch.Tensor inputIds, int maxNewTokens, List<int> stopTokens, TextStreamer streamer)
    {
        using var scope = NewDisposeScope();
        var stopTokensSet = new HashSet<int>(stopTokens); // O(1) stop-token lookup

        var sequence = inputIds;
        for (int i = 0; i < maxNewTokens; i++)
        {
            // Per-step tensors are disposed at the end of each iteration so memory
            // stays bounded while the sequence grows (previously they accumulated
            // in the scope, and the int32 temporary was never disposed at all).
            using var logits = this.forward(sequence);
            using var lastLogits = logits[torch.TensorIndex.Colon, -1, torch.TensorIndex.Colon];
            // argmax(softmax(x)) == argmax(x): the softmax was redundant work.
            using var nextTokenTensor = lastLogits.argmax(dim: -1, keepdim: true);
            using var nextTokenInt32 = nextTokenTensor.to(torch.int32);
            int nextToken = nextTokenInt32.item<int>();
            if (stopTokensSet.Contains(nextToken))
            {
                yield break;
            }
            streamer.Put(nextToken);
            yield return nextToken;

            // Append the new token, then release the superseded intermediate
            // sequence (never the caller-owned input tensor).
            var extended = torch.cat(new Tensor[] { sequence, nextTokenTensor }, dim: 1).MoveToOtherDisposeScope(sequence);
            if (!ReferenceEquals(sequence, inputIds))
            {
                sequence.Dispose();
            }
            sequence = extended;
        }
    }

    /// <summary>
    /// Greedy (argmax) generation. Returns the full sequence (prompt plus new
    /// tokens), stopping early when a stop token is produced (the stop token is
    /// not appended).
    /// NOTE(review): recomputes the full forward pass every step (no KV cache),
    /// and item&lt;int&gt;() implies batch size 1 — confirm with callers.
    /// </summary>
    public torch.Tensor Generate(torch.Tensor inputIds, int maxNewTokens, List<int> stopTokens)
    {
        using var scope = NewDisposeScope();
        var stopTokensSet = new HashSet<int>(stopTokens); // O(1) stop-token lookup

        var sequence = inputIds;
        for (int i = 0; i < maxNewTokens; i++)
        {
            // Per-step tensors are disposed at the end of each iteration so memory
            // stays bounded while the sequence grows (previously every iteration's
            // full-vocab logits stayed alive until generation finished).
            using var logits = this.forward(sequence);
            using var lastLogits = logits[torch.TensorIndex.Colon, -1, torch.TensorIndex.Colon];
            // argmax(softmax(x)) == argmax(x): the softmax was redundant work.
            using var nextTokenTensor = lastLogits.argmax(dim: -1, keepdim: true);
            using var nextTokenInt32 = nextTokenTensor.to(torch.int32);
            int nextToken = nextTokenInt32.item<int>();
            if (stopTokensSet.Contains(nextToken))
            {
                break;
            }

            // Append the new token, then release the superseded intermediate
            // sequence (never the caller-owned input tensor).
            var extended = torch.cat(new Tensor[] { sequence, nextTokenTensor }, dim: 1).MoveToOtherDisposeScope(sequence);
            if (!ReferenceEquals(sequence, inputIds))
            {
                sequence.Dispose();
            }
            sequence = extended;
        }
        return sequence.MoveToOuterDisposeScope();
    }
}