package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"log"
	"math"
	"math/rand"
	"os"
	"runtime"
	"strings"
	"time"

	"github.com/sugarme/gotch"
	"github.com/sugarme/gotch/nn"
	"github.com/sugarme/gotch/ts"
)

func main() {
	// Model configuration.
	config := &ModelConfig{}
	config.MaxPos = 1800          // maximum position index for positional embeddings
	config.Dmodel = 768           // embedding / model dimension
	config.Dff = 2048             // feed-forward inner dimension
	config.Dk, config.Dv = 64, 64 // per-head dimensions of K (=Q) and V
	config.Nlayers = 6            // number of decoder layers
	config.Nheads = 8             // number of attention heads
	config.LinearConfig = nn.DefaultLinearConfig()
	config.EmbeddingConfig = nn.DefaultEmbeddingConfig()
	config.LayerNormConfig = nn.DefaultLayerNormConfig()
	config.Device = gotch.CudaIfAvailable()
	config.Dtype = gotch.Float

	// Load the vocabulary. The original ignored this error; a missing or
	// malformed vocabulary makes everything below meaningless, so fail fast.
	w := &WordData{ModelConfig: config}
	if err := w.Open("./dict_datas.json"); err != nil {
		log.Fatal(err)
	}

	// Training (uncomment to train):
	// Train(config, w, "./dataset.txt", "GPT2.pt")

	// Build the model under the configured default dtype, then restore the
	// previous default so nothing else is affected.
	odtype := gotch.SetDefaultDType(config.Dtype)
	vs := nn.NewVarStore(config.Device) // variable store holding all weights
	net := NewGPT(vs.Root(), config)    // model registered on top of the store
	gotch.SetDefaultDType(odtype)

	// Load pretrained weights into the store.
	if err := vs.Load("GPT2.pt"); err != nil {
		log.Fatal(err)
	}
	// Smoke test: greet the model and print its reply.
	fmt.Println(net.Answer("你好", w))
	// Release native tensor memory.
	ts.CleanUp()
	runtime.GC()
}

// LrCNN is the learning rate passed to the Adam optimizer during training.
var LrCNN = 3 * 1e-4

// Train runs the training loop: it reads token-id pairs from the dataset file
// name, trains the GPT model for a fixed number of epochs, and saves the
// weights to filepath on exit. Any pre-existing weights at filepath are
// loaded first so training can resume.
func Train(config *ModelConfig, w *WordData, name, filepath string) {
	// Load the (input, expected-output) token sequences. The original
	// discarded this error, silently training on an empty dataset.
	inputOut, outputOut, err := w.OpenData(name)
	if err != nil {
		log.Fatal(err)
	}
	// Build the model under the configured default dtype, then restore it.
	odtype := gotch.SetDefaultDType(config.Dtype)
	vs := nn.NewVarStore(config.Device) // variable store holding all weights
	net := NewGPT(vs.Root(), config)
	gotch.SetDefaultDType(odtype)
	// Persist the weights when Train returns; surface (don't swallow) errors.
	defer func() {
		if err := vs.Save(filepath); err != nil {
			log.Println("saving model:", err)
		}
	}()
	// Resuming is best-effort: a missing checkpoint just means a fresh start.
	if err := vs.Load(filepath); err != nil {
		log.Println("starting from scratch:", err)
	}
	// Default Adam optimizer over all variables in the store.
	opt, err := nn.DefaultAdamConfig().Build(vs, LrCNN)
	if err != nil {
		log.Fatal(err)
	}
	count := 10   // number of epochs
	countLen := 1 // batch size
	n := len(inputOut)
	index := make([]int, n)
	for i := range index {
		index[i] = i
	}
	var indexn []int
	var epocLoss float64
	// Reusable staging buffers for the batch token ids.
	InTensor := make([]int64, 0, 1024)
	OutTensor := make([]int64, 0, 1024)
	// Seed once; the original reseeded every epoch, which adds nothing.
	rand.Seed(time.Now().UnixNano())
	for x := 0; x < count; x++ {
		// Shuffle the sample order each epoch.
		rand.Shuffle(len(index), func(i, j int) {
			index[i], index[j] = index[j], index[i]
		})
		for i := n; i > 0; i -= countLen {
			// Slice off the next batch of indices (the last may be short).
			if i > countLen {
				indexn = index[i-countLen : i]
			} else {
				indexn = index[0:i]
			}
			// Each batch is padded to its longest input/output sequence.
			var decoderInputLen, decoderOutputLen int
			for _, l := range indexn {
				decoderInputLen = max(decoderInputLen, len(inputOut[l]))
				decoderOutputLen = max(decoderOutputLen, len(outputOut[l]))
			}
			// <pad>-filled slice long enough for either side.
			pad := make([]int64, max(decoderInputLen, decoderOutputLen))
			padw := int64(w.Word2id["<pad>"])
			for i := range pad {
				pad[i] = padw
			}
			InTensor, OutTensor = InTensor[:0], OutTensor[:0]
			for _, l := range indexn {
				InTensor = append(append(InTensor, inputOut[l]...), pad[:decoderInputLen-len(inputOut[l])]...)
				OutTensor = append(append(OutTensor, outputOut[l]...), pad[:decoderOutputLen-len(outputOut[l])]...)
			}
			// Inputs keep shape (batch, seq); targets are flattened to line
			// up with the (batch*seq, vocab) logits from GPT.ForwardT.
			ins, err := ts.NewTensorFromData(InTensor, []int64{int64(len(indexn)), int64(decoderInputLen)})
			if err != nil {
				log.Fatal(err)
			}
			ins = ins.MustTo(config.Device, true)
			outs, err := ts.NewTensorFromData(OutTensor, []int64{int64(len(indexn) * decoderOutputLen)})
			if err != nil {
				log.Fatal(err)
			}
			outs = outs.MustTo(config.Device, true)
			// Forward pass; outputs are the flattened logits.
			outputs, _ := net.ForwardT(ins, true)
			// Cross-entropy between the logits and the target token ids.
			loss := outputs.CrossEntropyForLogits(outs)
			loss = loss.MustSetRequiresGrad(true, true)
			// Backward pass plus optimizer step.
			// opt.MustBackwardStepClip(loss, 1)
			opt.BackwardStep(loss)
			// Loss of the current batch (not an epoch-wide average).
			epocLoss = loss.Float64Values()[0]
			fmt.Println("Loss:", epocLoss, "次数:", x, ",", i)
			outputs.MustDrop()
			ins.MustDrop()
			outs.MustDrop()
			loss.MustDrop()
		}
		// Release cached native tensors between epochs.
		ts.CleanUp()
		runtime.GC()
	}
}

// ModelConfig holds the hyper-parameters and runtime settings shared by all
// model components (each component embeds *ModelConfig).
type ModelConfig struct {
	Dmodel, Nheads, Dk, Dv, Dff int64 // model dim, head count, K(=Q) dim, V dim, feed-forward dim
	VocabSize, MaxPos           int64 // vocabulary size (set by WordData.Open) and max position index
	Nlayers                     int   // number of decoder layers
	SeqLen                      *ts.Scalar // NOTE(review): never read or written in the visible code — confirm before removing
	Dtype                       gotch.DType
	Device                      gotch.Device
	LinearConfig                *nn.LinearConfig
	EmbeddingConfig             *nn.EmbeddingConfig
	LayerNormConfig             *nn.LayerNormConfig
}

// WordData is the vocabulary loaded from JSON: a token→id map and its
// inverse id→token list. It embeds *ModelConfig so that loading the
// vocabulary can record VocabSize on the shared config.
type WordData struct {
	*ModelConfig
	Word2id map[string]int `json:"word2id"` // token → id
	Id2word []string       `json:"id2word"` // id → token
}

// Open loads the vocabulary from the JSON file name and records its size in
// data.VocabSize. On a decode error, VocabSize is left untouched (the
// original assigned it even on failure) and the error is wrapped with the
// file name for context.
func (data *WordData) Open(name string) error {
	f, e := os.Open(name)
	if e != nil {
		return e
	}
	defer f.Close()
	if e := json.NewDecoder(f).Decode(data); e != nil {
		return fmt.Errorf("decoding vocabulary %q: %w", name, e)
	}
	data.VocabSize = int64(len(data.Word2id))
	return nil
}
// OpenData reads the training file name line by line and converts each line
// of text into token ids: every rune maps through Word2id, a tab becomes the
// <sep> token, and a trailing <sep> is appended. Each line yields a
// next-token-prediction pair: input is the sequence without its last token,
// output the sequence without its first (the two slices share backing
// storage). Runes missing from Word2id map to id 0 (the map's zero value).
func (data *WordData) OpenData(name string) (inputOut [][]int64, outputOut [][]int64, _ error) {
	f, e := os.Open(name)
	if e != nil {
		return inputOut, outputOut, e
	}
	defer f.Close()
	scanner := bufio.NewScanner(f)
	scanner.Split(bufio.ScanLines)
	sep := int64(data.Word2id["<sep>"])
	for scanner.Scan() {
		// Trim surrounding whitespace, then map each rune to its id.
		trainData := make([]int64, 0, 255)
		for _, r := range strings.TrimSpace(scanner.Text()) {
			if r == '\t' {
				trainData = append(trainData, sep)
			} else {
				trainData = append(trainData, int64(data.Word2id[string(r)]))
			}
		}
		trainData = append(trainData, sep)
		// Target is the input shifted left by one position.
		outputOut = append(outputOut, trainData[1:])
		inputOut = append(inputOut, trainData[:len(trainData)-1])
	}
	// The original never checked the scanner, so a read error looked like a
	// short (or empty) dataset. Surface it.
	if e := scanner.Err(); e != nil {
		return inputOut, outputOut, fmt.Errorf("reading dataset %q: %w", name, e)
	}
	return inputOut, outputOut, nil
}

// PoswiseFeedForwardNet is the position-wise feed-forward sub-layer of a
// decoder block: two linear layers with a ReLU in between, plus the
// sub-layer's LayerNorm applied after the residual connection.
type PoswiseFeedForwardNet struct {
	*ModelConfig
	Fc        *nn.SequentialT // Linear(Dmodel→Dff) → ReLU → Linear(Dff→Dmodel)
	Layernorm *nn.LayerNorm   // normalizes the residual sum in ForwardT
}

// NewPoswiseFeedForwardNet builds the feed-forward sub-layer under vs:
// Linear(Dmodel→Dff) → ReLU → Linear(Dff→Dmodel), plus its LayerNorm.
func NewPoswiseFeedForwardNet(vs *nn.Path, config *ModelConfig) (self *PoswiseFeedForwardNet) {
	net := &PoswiseFeedForwardNet{ModelConfig: config}

	seq := nn.SeqT()
	seq.Add(nn.NewLinear(vs.Sub("fc_0"), net.Dmodel, net.Dff, net.LinearConfig))
	seq.AddFn(nn.NewFunc(func(t *ts.Tensor) *ts.Tensor { return t.MustRelu(false) }))
	seq.Add(nn.NewLinear(vs.Sub("fc_2"), net.Dff, net.Dmodel, net.LinearConfig))

	net.Fc = seq
	net.Layernorm = nn.NewLayerNorm(vs.Sub("layernorm"), []int64{net.Dmodel}, net.LayerNormConfig)
	return net
}
// ForwardT applies the feed-forward network with a residual connection and
// layer normalization: LayerNorm(FFN(inputs) + inputs).
func (self *PoswiseFeedForwardNet) ForwardT(inputs *ts.Tensor, train bool) (retVal *ts.Tensor) {
	// Residual add; MustAdd's del=true frees the intermediate FFN output.
	output := inputs.ApplyT(self.Fc, train).MustAdd(inputs, true)
	// The deferred drop fires only after Apply has produced the normalized
	// tensor, so the returned value is not freed here — only the pre-norm sum.
	defer output.MustDrop()
	return output.Apply(self.Layernorm)
}

// MultiHeadAttention is a multi-head attention sub-layer with its own
// residual connection and layer normalization.
type MultiHeadAttention struct {
	*ModelConfig
	Q         *nn.Linear // query projection (Dmodel → Dk*Nheads)
	K         *nn.Linear // key projection (Dmodel → Dk*Nheads)
	V         *nn.Linear // value projection (Dmodel → Dv*Nheads)
	Fc        *nn.Linear // output projection (Dv*Nheads → Dmodel)
	Layernorm *nn.LayerNorm
}

// NewMultiHeadAttention builds the multi-head attention sub-layer under vs:
// the Q/K/V projections, the output projection and the LayerNorm.
func NewMultiHeadAttention(vs *nn.Path, config *ModelConfig) (self *MultiHeadAttention) {
	m := &MultiHeadAttention{ModelConfig: config}
	m.Q = nn.NewLinear(vs.Sub("W_Q"), m.Dmodel, m.Dk*m.Nheads, m.LinearConfig)
	m.K = nn.NewLinear(vs.Sub("W_K"), m.Dmodel, m.Dk*m.Nheads, m.LinearConfig)
	m.V = nn.NewLinear(vs.Sub("W_V"), m.Dmodel, m.Dv*m.Nheads, m.LinearConfig)
	m.Fc = nn.NewLinear(vs.Sub("fc"), m.Dv*m.Nheads, m.Dmodel, m.LinearConfig)
	m.Layernorm = nn.NewLayerNorm(vs.Sub("layernorm"), []int64{m.Dmodel}, m.LayerNormConfig)
	return m
}
// ForwardT computes multi-head attention over (inputQ, inputK, inputV) under
// attnMask, applies the output projection, adds a residual connection to
// inputQ and layer-normalizes. It returns the normalized output and the
// attention weights (the caller owns both and must drop them).
func (self *MultiHeadAttention) ForwardT(inputQ, inputK, inputV, attnMask *ts.Tensor, train bool) (retVal *ts.Tensor, attn *ts.Tensor) {
	batchSize := inputQ.MustSize()[0]
	Q := inputQ.ApplyT(self.Q, train)
	K := inputK.ApplyT(self.K, train)
	V := inputV.ApplyT(self.V, train)
	// Split into heads: (batch, Nheads, seq, Dk/Dv). del=true frees each
	// intermediate view so only the final tensors are tracked.
	Q = Q.MustView([]int64{batchSize, -1, self.Nheads, self.Dk}, true).MustTranspose(1, 2, true)
	K = K.MustView([]int64{batchSize, -1, self.Nheads, self.Dk}, true).MustTranspose(1, 2, true)
	V = V.MustView([]int64{batchSize, -1, self.Nheads, self.Dv}, true).MustTranspose(1, 2, true)
	// Replicate the mask once per head.
	attn_Mask := attnMask.MustUnsqueeze(1, false)
	attn_Mask = attn_Mask.MustRepeat([]int64{1, self.Nheads, 1, 1}, true)
	context, attn := self.scaledDotProductAttention(Q, K, V, attn_Mask)
	// Merge the heads back: (batch, seq, Nheads*Dv).
	context = context.MustTranspose(1, 2, true).MustReshape([]int64{batchSize, -1, self.Nheads * self.Dv}, true)
	output := context.ApplyT(self.Fc, train)
	// Free intermediates no longer referenced.
	Q.MustDrop()
	K.MustDrop()
	V.MustDrop()
	attn_Mask.MustDrop()
	context.MustDrop()
	// Residual connection then LayerNorm; the deferred drop frees the
	// pre-norm sum only after Apply has produced the returned tensor.
	output = output.MustAdd(inputQ, true)
	defer output.MustDrop()
	return output.Apply(self.Layernorm), attn
}

// scaledDotProductAttention computes softmax(QKᵀ/√Dk)·V, with masked
// positions forced to -1e9 before the softmax so they contribute ~0 weight.
// It returns the attention output and the attention weights; the caller owns
// and must drop both.
func (self *MultiHeadAttention) scaledDotProductAttention(Q, K, V, attnMask *ts.Tensor) (context, attn *ts.Tensor) {
	k := K.MustTranspose(-1, -2, false)
	scores := Q.MustMatmul(k, false)
	k.MustDrop() // transposed view no longer needed once matmul is done
	scores = scores.MustDivScalar(ts.FloatScalar(math.Sqrt(float64(self.Dk))), true)
	scores = scores.MustMaskedFill(attnMask, ts.FloatScalar(-1e9), true)
	// del=true here already consumes scores; the original additionally called
	// scores.MustDrop() afterwards, dropping an already-freed tensor.
	attn = scores.MustSoftmax(-1, self.Dtype, true)
	return attn.MustMatmul(V, false), attn
}

// DecoderLayer is one transformer decoder block.
type DecoderLayer struct {
	*ModelConfig
	DecSelfAttn *MultiHeadAttention // masked self-attention
	// DecEncAttn is constructed in NewDecoderLayer but never called from
	// ForwardT (this is a decoder-only model). Its weights are still
	// registered in the var store, so removing it would break existing
	// checkpoints — confirm before cleaning up.
	DecEncAttn *MultiHeadAttention
	PosFfn     *PoswiseFeedForwardNet // position-wise feed-forward network
}

// NewDecoderLayer builds one decoder block under vs with its attention and
// feed-forward sub-layers.
func NewDecoderLayer(vs *nn.Path, config *ModelConfig) (self *DecoderLayer) {
	return &DecoderLayer{
		ModelConfig: config,
		DecSelfAttn: NewMultiHeadAttention(vs.Sub("dec_self_attn"), config), // self-attention
		DecEncAttn:  NewMultiHeadAttention(vs.Sub("dec_enc_attn"), config),  // encoder-decoder attention
		PosFfn:      NewPoswiseFeedForwardNet(vs.Sub("pos_ffn"), config),    // feed-forward network
	}
}
// ForwardT runs masked self-attention over DecInputs followed by the
// position-wise feed-forward network, returning the block output and the
// self-attention weights. DecEncAttn is intentionally not used here.
func (self *DecoderLayer) ForwardT(DecInputs, DecSelfAttnMask *ts.Tensor, train bool) (DecOutputs *ts.Tensor, DecSelfAttn *ts.Tensor) {
	DecOutputs, DecSelfAttn = self.DecSelfAttn.ForwardT(DecInputs, DecInputs, DecInputs, DecSelfAttnMask, train)
	// The defer's receiver is evaluated NOW, so it drops the attention
	// output (after PosFfn has consumed it) — not the tensor the named
	// return DecOutputs is later reassigned to by the return statement.
	defer DecOutputs.MustDrop()
	return self.PosFfn.ForwardT(DecOutputs, train), DecSelfAttn
}

// Decoder is the embedding stage plus the stack of decoder blocks.
type Decoder struct {
	*ModelConfig
	TgtEmb *nn.Embedding   // token embedding (VocabSize × Dmodel)
	PosEmb *nn.Embedding   // learned positional embedding (MaxPos × Dmodel)
	Tayer  []*DecoderLayer // the Nlayers decoder blocks ("Tayer" looks like a typo for "Layer"; kept for compatibility)
}

// NewDecoder builds the token embedding, the learned positional embedding
// and the stack of Nlayers decoder blocks under vs.
func NewDecoder(vs *nn.Path, config *ModelConfig) (self *Decoder) {
	d := &Decoder{ModelConfig: config}
	d.TgtEmb = nn.NewEmbedding(vs.Sub("tgt_emb"), d.VocabSize, d.Dmodel, d.EmbeddingConfig)
	d.PosEmb = nn.NewEmbedding(vs.Sub("pos_emb"), d.MaxPos, d.Dmodel, d.EmbeddingConfig)
	layers := make([]*DecoderLayer, d.Nlayers)
	for i := 0; i < d.Nlayers; i++ {
		layers[i] = NewDecoderLayer(vs.Sub(fmt.Sprint("layers_", i)), config)
	}
	d.Tayer = layers
	return d
}
// ForwardT embeds the input token ids, adds positional embeddings, builds the
// combined padding+causal self-attention mask and runs the layer stack. It
// returns the final hidden states and each layer's attention weights (the
// caller owns all returned tensors).
//
// The receiver is now a pointer for consistency with every other method in
// this file (the original used a value receiver, copying the struct on each
// call); all call sites go through *Decoder fields, so behavior is unchanged.
func (self *Decoder) ForwardT(decInputs *ts.Tensor, train bool) (*ts.Tensor, []*ts.Tensor) {
	seqLen := decInputs.MustSize()[1] // sequence length of this batch
	// Position ids 0..seqLen-1, broadcast to the batch shape.
	pos := ts.MustArange(ts.FloatScalar(float64(seqLen)), gotch.Int, self.Device)
	pos = pos.MustUnsqueeze(0, true).MustExpandAs(decInputs, true)
	defer pos.MustDrop()
	decOutputs := decInputs.ApplyT(self.TgtEmb, train)
	decOutputs = decOutputs.MustAdd(pos.ApplyT(self.PosEmb, train), true)
	// Combined mask: a position is masked if it is padding OR in the future
	// (sum > 0 after adding the two masks).
	decSelfAttnPadMask := GetAttnPadMask(decInputs, decInputs)
	decSelfAttnSubsequenceMask := GetAttnSubsequenceMask(decInputs, self.ModelConfig)
	decSelfAttnMask := decSelfAttnPadMask.MustAdd(decSelfAttnSubsequenceMask, true)
	decSelfAttnMask = decSelfAttnMask.MustGt(ts.FloatScalar(0), true)
	decSelfAttnSubsequenceMask.MustDrop()
	// Run the stack, dropping each intermediate once the next layer consumed it.
	var out *ts.Tensor
	decSelfAttns := make([]*ts.Tensor, len(self.Tayer))
	for i, layer := range self.Tayer {
		out, decSelfAttns[i] = layer.ForwardT(decOutputs, decSelfAttnMask, train)
		decOutputs.MustDrop()
		decOutputs = out
	}
	decSelfAttnMask.MustDrop()
	return decOutputs, decSelfAttns
}

// GetAttnPadMask builds the padding mask for attention: true where the key
// token id equals 0, expanded to shape (batch, lenQ, lenK). (This assumes
// the pad token has id 0 — confirm against the vocabulary file.)
func GetAttnPadMask(seqQ, seqK *ts.Tensor) *ts.Tensor {
	qShape := seqQ.MustSize()
	kShape := seqK.MustSize()
	mask := seqK.MustEq(ts.FloatScalar(0), false).MustUnsqueeze(1, true)
	return mask.MustExpand([]int64{kShape[0], qShape[1], kShape[1]}, true, true)
}

// GetAttnSubsequenceMask builds the causal (look-ahead) mask: a
// (batch, seq, seq) tensor with ones strictly above the main diagonal, so
// position i cannot attend to any position j > i.
func GetAttnSubsequenceMask(seq *ts.Tensor, config *ModelConfig) *ts.Tensor {
	// Renamed from `len`, which shadowed the builtin.
	shape := seq.MustSize()
	out := ts.MustOnes([]int64{shape[0], shape[1], shape[1]}, config.Dtype, config.Device)
	return out.MustTriu(1, true)
}

// GPT is the full decoder-only transformer: the decoder stack followed by a
// linear projection onto the vocabulary.
type GPT struct {
	*ModelConfig
	Decoder    *Decoder
	Projection *nn.Linear // Dmodel → VocabSize logits
}

// NewGPT builds the decoder stack and the vocabulary projection under vs.
func NewGPT(vs *nn.Path, config *ModelConfig) (self *GPT) {
	g := &GPT{ModelConfig: config}
	g.Decoder = NewDecoder(vs.Sub("decoder"), g.ModelConfig)
	g.Projection = nn.NewLinear(vs.Sub("projection"), g.Dmodel, g.VocabSize, g.LinearConfig)
	return g
}
// ForwardT runs the decoder and projects its hidden states to vocabulary
// logits flattened to shape (batch*seq, vocab) — the layout
// CrossEntropyForLogits expects in Train. It also returns the per-layer
// attention weights, which the caller owns.
func (self *GPT) ForwardT(decInputs *ts.Tensor, train bool) (*ts.Tensor, []*ts.Tensor) {
	decOutputs, decSelfAttns := self.Decoder.ForwardT(decInputs, train)
	// The deferred drop frees the hidden states after the projection ran.
	defer decOutputs.MustDrop()
	decLogits := self.Projection.ForwardT(decOutputs, train)
	lenDecLogits := decLogits.MustSize()
	// Collapse all leading dims so each row holds one position's logits.
	return decLogits.MustView([]int64{-1, lenDecLogits[len(lenDecLogits)-1]}, true), decSelfAttns
}

// GreedyDecoder autoregressively extends decInputs one token at a time,
// always choosing the highest-probability next token, until it emits <sep>
// or 100 tokens have been generated. It returns the full sequence (prompt
// plus generated tokens, including the terminating token). The input tensor
// is consumed; the caller owns the returned tensor.
func (self *GPT) GreedyDecoder(decInputs *ts.Tensor, data *WordData) *ts.Tensor {
	terminal := false
	// Prompt length, used to cap the number of generated tokens.
	startDecLen := decInputs.MustSize()[1]
	decInputs = decInputs.MustDetach(false)
	for !terminal {
		if decInputs.MustSize()[1]-startDecLen > 100 {
			break
		}
		// Forward pass over the whole sequence so far.
		decOutputs, _ := self.Decoder.ForwardT(decInputs, false)
		projected := self.Projection.ForwardT(decOutputs, false)
		decOutputs.MustDrop()
		// Argmax over the vocabulary at every position; keep the last one.
		projected = projected.MustSqueezeDim(0, true)
		prob0, prob1 := projected.MustMaxDim(-1, false, true)
		nextWord := prob1.Int64Values()
		nextSymbol := nextWord[len(nextWord)-1]
		prob0.MustDrop()
		prob1.MustDrop()
		if nextSymbol == int64(data.Word2id["<sep>"]) {
			terminal = true
		}
		// Append the chosen token. The original passed a bare int64 scalar
		// and discarded the error; a one-element slice matches shape {1,1}
		// unambiguously, and the error is now surfaced.
		inputVal, err := ts.NewTensorFromData([]int64{nextSymbol}, []int64{1, 1})
		if err != nil {
			log.Fatal(err)
		}
		inputVal = inputVal.MustTo(self.Device, true)
		// MustDetach(true) frees the previous sequence tensor after cat.
		decInputs = ts.MustCat([]*ts.Tensor{decInputs.MustDetach(true), inputVal}, -1)
		inputVal.MustDrop()
	}
	return decInputs
}

// Answer tokenizes sentence rune by rune (tabs map to <sep>, mirroring
// OpenData), runs greedy decoding and converts the resulting token ids back
// to a string. The returned text includes the prompt tokens.
func (self *GPT) Answer(sentence string, data *WordData) string {
	var decInput []int64
	// Renamed from padn: this is the <sep> id, not a padding token.
	sepID := int64(data.Word2id["<sep>"])
	for _, r := range sentence {
		if r == '\t' {
			decInput = append(decInput, sepID)
		} else {
			// Runes absent from Word2id map to id 0 (the map's zero value).
			decInput = append(decInput, int64(data.Word2id[string(r)]))
		}
	}
	// The original discarded this error; a nil tensor would crash below.
	inputVal, err := ts.NewTensorFromData(decInput, []int64{int64(len(decInput))})
	if err != nil {
		log.Fatal(err)
	}
	inputVal = inputVal.MustUnsqueeze(0, true).MustTo(self.Device, true)
	// Greedily generate the continuation and flatten back to 1-D.
	inputVal = self.GreedyDecoder(inputVal, data).MustSqueeze(true)
	defer inputVal.MustDrop()
	// Map ids back to tokens; Builder avoids the quadratic += loop.
	var b strings.Builder
	for _, v := range inputVal.Int64Values() {
		b.WriteString(data.Id2word[int(v)])
	}
	return b.String()
}
