package main

import (
	"fmt"
	"math"
	"math/rand"
	"strings"
	"unicode"

	"gonum.org/v1/gonum/mat"
)

// TransformerLayer holds the learnable parameters of one Transformer
// block: the self-attention projection matrices, the two feed-forward
// weight matrices, and the associated biases.
type TransformerLayer struct {
	// Self-attention query/key/value projections (each dModel x dModel).
	QWeights [][]float32
	KWeights [][]float32
	VWeights [][]float32
	// Attention output projection (dModel x dModel).
	OWeights [][]float32
	// Feed-forward weights: FFNWeights1 is dModel x dFF, FFNWeights2 is dFF x dModel.
	FFNWeights1 [][]float32
	FFNWeights2 [][]float32
	// Biases: Bias has length dModel, FFNBias1 length dFF, FFNBias2 length dModel.
	Bias     []float32
	FFNBias1 []float32
	FFNBias2 []float32
}

// TransformerLanguageModel is a small Transformer language model:
// vocabulary maps, token embeddings, position encodings, a stack of
// Transformer layers, and a final projection to vocabulary logits.
type TransformerLanguageModel struct {
	Vocab           map[string]int // token -> id
	ReverseVocab    map[int]string // id -> token
	VocabSize       int            // number of ids (max id + 1)
	Embedding       [][]float32    // VocabSize x DModel token embeddings
	PosEncodings    [][]float32    // MaxLen x DModel position encodings
	Layers          []TransformerLayer
	OutputProj      [][]float32 // projection from hidden state to vocab logits
	DropoutRate     float32     // dropout probability applied in Forward
	DModel          int         // hidden size
	NLayer          int         // number of Transformer layers
	MaxLen          int         // maximum sequence length for position encodings
	TrendWindowSize int         // window (in epochs) for loss-trend LR adjustment
	LossHistory     []float32   // recent per-epoch training losses
	LrHistory       []float32   // recent per-epoch learning rates
}

// SelfAttention computes scaled dot-product attention independently for
// every batch element.
//
// query, key and value have shape [batch][seq][dModel]; mask, if non-nil,
// has shape [batch][seq][seq] where a 0 entry blocks position t1 from
// attending to position t2. The result has the same shape as the inputs.
//
// Fixes over the previous implementation: the old code flattened the
// batch into a single (batch*seq) x (batch*seq) score matrix, so tokens
// attended across batch boundaries, the mask was applied to the wrong
// columns, and row normalization only summed the first seqLen columns.
// The softmax is now also numerically stable (row max subtracted before
// exponentiating).
func SelfAttention(query, key, value [][][]float32, mask [][][]float32) [][][]float32 {
	batchSize := len(query)
	seqLen := len(query[0])
	dModel := len(query[0][0])
	scale := 1.0 / math.Sqrt(float64(dModel))

	result := make([][][]float32, batchSize)
	for b := 0; b < batchSize; b++ {
		result[b] = make([][]float32, seqLen)
		for t1 := 0; t1 < seqLen; t1++ {
			// Scaled dot-product scores for row t1, with masking.
			scores := make([]float64, seqLen)
			for t2 := 0; t2 < seqLen; t2++ {
				if mask != nil && mask[b][t1][t2] == 0 {
					scores[t2] = math.Inf(-1)
					continue
				}
				dot := 0.0
				for d := 0; d < dModel; d++ {
					dot += float64(query[b][t1][d]) * float64(key[b][t2][d])
				}
				scores[t2] = dot * scale
			}

			out := make([]float32, dModel)

			// Numerically stable softmax: subtract the row maximum
			// before exponentiating to avoid overflow.
			maxScore := math.Inf(-1)
			for _, s := range scores {
				if s > maxScore {
					maxScore = s
				}
			}
			if math.IsInf(maxScore, -1) {
				// Every position is masked out; emit zeros rather than NaN.
				result[b][t1] = out
				continue
			}
			weights := make([]float64, seqLen)
			sum := 0.0
			for t2, s := range scores {
				w := math.Exp(s - maxScore)
				weights[t2] = w
				sum += w
			}

			// Weighted sum of the value vectors.
			for t2 := 0; t2 < seqLen; t2++ {
				w := weights[t2] / sum
				if w == 0 {
					continue
				}
				for d := 0; d < dModel; d++ {
					out[d] += float32(w * float64(value[b][t2][d]))
				}
			}
			result[b][t1] = out
		}
	}

	return result
}

// TransformerFeedForward applies a position-wise two-layer feed-forward
// network (linear -> ReLU -> linear) to every position of x, using Gonum
// for the per-position matrix-vector products.
//
// x has shape [batch][seq][dModel]; weights1 is dModel x dFF, weights2 is
// dFF x dModel; bias1 has length dFF, bias2 length dModel. The result has
// the same shape as x.
func TransformerFeedForward(x [][][]float32, weights1, weights2 [][]float32, bias1, bias2 []float32) [][][]float32 {
	batchSize := len(x)
	seqLen := len(x[0])
	dModel := len(x[0][0])
	dFF := len(weights1[0])
	nPos := batchSize * seqLen

	// Flatten the input: one dModel-sized row per (batch, position) pair.
	flat := make([]float64, nPos*dModel)
	for b := 0; b < batchSize; b++ {
		for t := 0; t < seqLen; t++ {
			base := (b*seqLen + t) * dModel
			for d := 0; d < dModel; d++ {
				flat[base+d] = float64(x[b][t][d])
			}
		}
	}

	// Copy weights and biases into Gonum containers.
	w1 := mat.NewDense(dModel, dFF, nil)
	for r := 0; r < dModel; r++ {
		for c := 0; c < dFF; c++ {
			w1.Set(r, c, float64(weights1[r][c]))
		}
	}
	w2 := mat.NewDense(dFF, dModel, nil)
	for r := 0; r < dFF; r++ {
		for c := 0; c < dModel; c++ {
			w2.Set(r, c, float64(weights2[r][c]))
		}
	}
	b1 := mat.NewVecDense(dFF, nil)
	for r := 0; r < dFF; r++ {
		b1.SetVec(r, float64(bias1[r]))
	}
	b2 := mat.NewVecDense(dModel, nil)
	for r := 0; r < dModel; r++ {
		b2.SetVec(r, float64(bias2[r]))
	}

	// First layer: ReLU(x*W1 + b1), computed position by position.
	hiddenAct := make([]float64, nPos*dFF)
	for p := 0; p < nPos; p++ {
		in := mat.NewVecDense(dModel, flat[p*dModel:(p+1)*dModel])
		var act mat.VecDense
		act.MulVec(w1.T(), in)
		act.AddVec(&act, b1)
		for c := 0; c < dFF; c++ {
			v := act.AtVec(c)
			if v < 0 {
				v = 0 // ReLU
			}
			hiddenAct[p*dFF+c] = v
		}
	}

	// Second layer: hidden*W2 + b2.
	projected := make([]float64, nPos*dModel)
	for p := 0; p < nPos; p++ {
		in := mat.NewVecDense(dFF, hiddenAct[p*dFF:(p+1)*dFF])
		var proj mat.VecDense
		proj.MulVec(w2.T(), in)
		proj.AddVec(&proj, b2)
		for c := 0; c < dModel; c++ {
			projected[p*dModel+c] = proj.AtVec(c)
		}
	}

	// Reshape back to [batch][seq][dModel] float32.
	result := make([][][]float32, batchSize)
	for b := 0; b < batchSize; b++ {
		result[b] = make([][]float32, seqLen)
		for t := 0; t < seqLen; t++ {
			result[b][t] = make([]float32, dModel)
			base := (b*seqLen + t) * dModel
			for d := 0; d < dModel; d++ {
				result[b][t][d] = float32(projected[base+d])
			}
		}
	}

	return result
}

// BuildVocab builds the vocabulary (token -> id and id -> token maps) from
// a training corpus, with optional per-character Chinese segmentation and
// N-gram extraction.
//
// corpus: training sentences.
// minFrequency: minimum frequency for a token to enter the vocabulary.
// ngramSize: N-gram length; values > 1 enable N-gram extraction.
// idMappingPath: path of the persistent char->id mapping file.
// segmentChinese: when true, Han characters are counted individually and
// other runs of non-space characters as whole tokens; when false, each
// corpus line is treated as one pre-segmented token.
func BuildVocab(corpus []string, minFrequency int, ngramSize int, idMappingPath string, segmentChinese bool) (map[string]int, map[int]string, error) {
	// Load (or create) the persistent token-id mapping.
	mapping, err := LoadIDMapping(idMappingPath)
	if err != nil {
		return nil, nil, fmt.Errorf("加载ID映射表失败: %v", err)
	}

	vocab := make(map[string]int)
	reverseVocab := make(map[int]string)

	// register adds token to both maps under its persistent id.
	register := func(token string) {
		id := GetCharID(token, mapping)
		vocab[token] = id
		reverseVocab[id] = token
	}

	// Digits 0-9 are always in the vocabulary.
	for digit := 0; digit < 10; digit++ {
		register(fmt.Sprintf("%d", digit))
	}

	// Special control tokens.
	for _, token := range []string{"<unk>", "<pad>", "<bos>", "<eos>", "<mask>", "<sep>", "<cls>"} {
		register(token)
	}

	// Token frequency counting.
	wordFreq := make(map[string]int)
	for _, sentence := range corpus {
		if !segmentChinese {
			// Pre-segmented corpus: the whole line is one token.
			wordFreq[sentence]++
			continue
		}
		runes := []rune(sentence)
		for pos := 0; pos < len(runes); {
			switch {
			case runes[pos] == ' ':
				// Skip spaces.
				pos++
			case unicode.Is(unicode.Han, runes[pos]):
				// Han characters count individually.
				wordFreq[string(runes[pos])]++
				pos++
			default:
				// Consume a run of non-space, non-Han characters as one token.
				end := pos
				for end < len(runes) && runes[end] != ' ' && !unicode.Is(unicode.Han, runes[end]) {
					end++
				}
				wordFreq[string(runes[pos:end])]++
				pos = end
			}
		}
	}

	// Merge frequent N-grams into the frequency table.
	if ngramSize > 1 {
		for ngram, freq := range ExtractNGrams(corpus, ngramSize, minFrequency) {
			wordFreq[ngram] = freq
		}
	}

	// Keep every token that meets the frequency threshold.
	for word, freq := range wordFreq {
		if freq >= minFrequency {
			register(word)
		}
	}

	// Persist any newly assigned ids.
	if err := SaveIDMapping(mapping, idMappingPath); err != nil {
		return nil, nil, fmt.Errorf("保存ID映射表失败: %v", err)
	}

	return vocab, reverseVocab, nil
}

// NewTransformerLanguageModelWithVocab creates a Transformer language model
// around an existing vocabulary and its reverse map.
//
// Weight matrices use He-style initialization (stddev sqrt(2/fanIn)); the
// embedding table and output projection use a fixed stddev of 0.1.
//
// Fix: the output projection is now sized DModel x vocabSize. Previously it
// was sized from a leftover `layerSizes` table (16 rows), so the bounds
// checks in Forward and Train silently ignored all but the first 16 of the
// 128 hidden dimensions when computing logits.
func NewTransformerLanguageModelWithVocab(vocab map[string]int, reverseVocab map[int]string) *TransformerLanguageModel {
	// Hyperparameters.
	dModel := 128               // hidden size
	nLayer := 6                 // number of Transformer layers
	maxLen := 100               // maximum sequence length
	dropoutRate := float32(0.3) // dropout probability

	// The embedding table must cover the largest id actually assigned.
	maxID := 0
	for _, id := range vocab {
		if id > maxID {
			maxID = id
		}
	}
	vocabSize := maxID + 1
	fmt.Printf("[调试] 实际词汇表大小: %d, 最大ID: %d\n", len(vocab), maxID)

	// Token embeddings: vocabSize x dModel, small random init.
	embedding := make([][]float32, vocabSize)
	for i := 0; i < vocabSize; i++ {
		embedding[i] = make([]float32, dModel)
		for j := 0; j < dModel; j++ {
			embedding[i][j] = float32(rand.NormFloat64() * 0.1)
		}
	}

	// Position encodings: maxLen x dModel.
	posEncodings := PositionalEncoding(maxLen, dModel)

	// Per-layer parameters; dFF is the feed-forward hidden width.
	dFF := 4 * dModel
	// He-style stddevs, hoisted out of the init loops.
	attnStd := math.Sqrt(2.0 / float64(dModel))
	ffn2Std := math.Sqrt(2.0 / float64(dFF))

	layers := make([]TransformerLayer, nLayer)
	for l := 0; l < nLayer; l++ {
		layer := &layers[l]
		layer.QWeights = make([][]float32, dModel)
		layer.KWeights = make([][]float32, dModel)
		layer.VWeights = make([][]float32, dModel)
		layer.OWeights = make([][]float32, dModel)
		layer.FFNWeights1 = make([][]float32, dModel)
		layer.FFNWeights2 = make([][]float32, dFF)
		layer.Bias = make([]float32, dModel)
		layer.FFNBias1 = make([]float32, dFF)
		layer.FFNBias2 = make([]float32, dModel)

		// Attention projections: dModel x dModel each.
		for i := 0; i < dModel; i++ {
			layer.QWeights[i] = make([]float32, dModel)
			layer.KWeights[i] = make([]float32, dModel)
			layer.VWeights[i] = make([]float32, dModel)
			layer.OWeights[i] = make([]float32, dModel)
			for j := 0; j < dModel; j++ {
				layer.QWeights[i][j] = float32(rand.NormFloat64() * attnStd)
				layer.KWeights[i][j] = float32(rand.NormFloat64() * attnStd)
				layer.VWeights[i][j] = float32(rand.NormFloat64() * attnStd)
				layer.OWeights[i][j] = float32(rand.NormFloat64() * attnStd)
			}
		}

		// Feed-forward layer 1: dModel x dFF (fan-in dModel).
		for i := 0; i < dModel; i++ {
			layer.FFNWeights1[i] = make([]float32, dFF)
			for j := 0; j < dFF; j++ {
				layer.FFNWeights1[i][j] = float32(rand.NormFloat64() * attnStd)
			}
		}

		// Feed-forward layer 2: dFF x dModel (fan-in dFF).
		for i := 0; i < dFF; i++ {
			layer.FFNWeights2[i] = make([]float32, dModel)
			for j := 0; j < dModel; j++ {
				layer.FFNWeights2[i][j] = float32(rand.NormFloat64() * ffn2Std)
			}
		}
	}

	// Output projection: dModel x vocabSize, so every hidden dimension
	// contributes to the vocabulary logits.
	outputProj := make([][]float32, dModel)
	for i := 0; i < dModel; i++ {
		outputProj[i] = make([]float32, vocabSize)
		for j := 0; j < vocabSize; j++ {
			outputProj[i][j] = float32(rand.NormFloat64() * 0.1)
		}
	}

	return &TransformerLanguageModel{
		Vocab:           vocab,
		ReverseVocab:    reverseVocab,
		VocabSize:       vocabSize,
		Embedding:       embedding,
		PosEncodings:    posEncodings,
		Layers:          layers,
		OutputProj:      outputProj,
		DropoutRate:     dropoutRate,
		DModel:          dModel,
		NLayer:          nLayer,
		MaxLen:          maxLen,
		TrendWindowSize: 5,
	}
}

// NewTransformerLanguageModel builds a vocabulary from corpus (with
// per-character Chinese segmentation enabled) and constructs a Transformer
// language model on top of it.
func NewTransformerLanguageModel(corpus []string, minFrequency int, ngramSize int, idMappingPath string) (*TransformerLanguageModel, error) {
	vocab, reverseVocab, buildErr := BuildVocab(corpus, minFrequency, ngramSize, idMappingPath, true)
	if buildErr != nil {
		return nil, fmt.Errorf("构建词汇表失败: %v", buildErr)
	}
	return NewTransformerLanguageModelWithVocab(vocab, reverseVocab), nil
}

// Forward runs the Transformer stack over an already-embedded input and
// projects the final hidden states to vocabulary logits.
//
// input has shape [batch][seq][DModel]; callers (Generate, Train, Evaluate)
// pass token embedding + position encoding. The result has shape
// [batch][seq][VocabSize] and contains unnormalized logits.
//
// NOTE(review): dropout is applied unconditionally, so it is also active
// when Forward is called during generation/evaluation — there is no
// train/eval mode switch. Confirm whether that is intended.
func (lm *TransformerLanguageModel) Forward(input [][][]float32) [][][]float32 {
	batchSize := len(input)
	seqLen := len(input[0])
	dModel := lm.DModel

	// Copy the input into the working hidden state so the caller's
	// slices are never mutated.
	hidden := make([][][]float32, batchSize)
	for b := 0; b < batchSize; b++ {
		hidden[b] = make([][]float32, seqLen)
		for t := 0; t < seqLen; t++ {
			hidden[b][t] = make([]float32, dModel)
			copy(hidden[b][t], input[b][t])
		}
	}

	// Input dropout.
	if lm.DropoutRate > 0 {
		hidden = Dropout(hidden, lm.DropoutRate)
	}

	// Transformer layers: self-attention + FFN, each followed by a
	// residual connection and layer normalization (post-LN).
	for l := 0; l < lm.NLayer; l++ {
		layer := lm.Layers[l]
		// Project the hidden state to query/key/value with this layer's
		// weight matrices (single-head attention).
		query := make([][][]float32, batchSize)
		key := make([][][]float32, batchSize)
		value := make([][][]float32, batchSize)
		for b := 0; b < batchSize; b++ {
			query[b] = make([][]float32, seqLen)
			key[b] = make([][]float32, seqLen)
			value[b] = make([][]float32, seqLen)
			for t := 0; t < seqLen; t++ {
				query[b][t] = make([]float32, dModel)
				key[b][t] = make([]float32, dModel)
				value[b][t] = make([]float32, dModel)
				// Q/K/V = hidden x W, accumulated element by element.
				for i := 0; i < dModel; i++ {
					for j := 0; j < dModel; j++ {
						query[b][t][j] += hidden[b][t][i] * layer.QWeights[i][j]
						key[b][t][j] += hidden[b][t][i] * layer.KWeights[i][j]
						value[b][t][j] += hidden[b][t][i] * layer.VWeights[i][j]
					}
				}
			}
		}

		// Causal mask: position t1 may attend to t2 only when t2 <= t1.
		mask := make([][][]float32, batchSize)
		for b := 0; b < batchSize; b++ {
			mask[b] = make([][]float32, seqLen)
			for t1 := 0; t1 < seqLen; t1++ {
				mask[b][t1] = make([]float32, seqLen)
				for t2 := 0; t2 < seqLen; t2++ {
					if t2 <= t1 {
						mask[b][t1][t2] = 1.0
					} else {
						mask[b][t1][t2] = 0.0
					}
				}
			}
		}

		// Scaled dot-product self-attention.
		attnOutput := SelfAttention(query, key, value, mask)

		// Residual connection around the attention block.
		for b := 0; b < batchSize; b++ {
			for t := 0; t < seqLen; t++ {
				for d := 0; d < dModel; d++ {
					attnOutput[b][t][d] += hidden[b][t][d] // residual
				}
			}
		}

		// Post-attention layer normalization.
		attnOutput = LayerNorm(attnOutput)

		// Position-wise feed-forward network.
		ffnOutput := TransformerFeedForward(attnOutput, layer.FFNWeights1, layer.FFNWeights2, layer.FFNBias1, layer.FFNBias2)

		// Residual connection around the FFN block.
		for b := 0; b < batchSize; b++ {
			for t := 0; t < seqLen; t++ {
				for d := 0; d < dModel; d++ {
					ffnOutput[b][t][d] += attnOutput[b][t][d] // residual
				}
			}
		}

		// Post-FFN layer normalization becomes the next layer's input.
		hidden = LayerNorm(ffnOutput)
	}

	// Project hidden states to vocabulary logits. The bounds checks skip
	// OutputProj entries when its dimensions are smaller than
	// DModel x VocabSize, silently dropping the excess hidden dimensions.
	output := make([][][]float32, batchSize)
	for b := 0; b < batchSize; b++ {
		output[b] = make([][]float32, seqLen)
		for i := 0; i < seqLen; i++ {
			output[b][i] = make([]float32, lm.VocabSize)
			for j := 0; j < lm.VocabSize; j++ {
				sum := float32(0)
				for k := 0; k < dModel; k++ {
					if k < len(lm.OutputProj) && j < len(lm.OutputProj[k]) {
						sum += hidden[b][i][k] * lm.OutputProj[k][j]
					}
				}
				output[b][i][j] = sum
			}
		}
	}

	return output
}

// LayerNorm normalizes each feature vector x[b][t] to zero mean and unit
// variance across the model dimension (no learned gain/bias), using an
// epsilon of 1e-5 inside the square root for numerical stability.
func LayerNorm(x [][][]float32) [][][]float32 {
	batchSize := len(x)
	seqLen := len(x[0])
	dModel := len(x[0][0])
	epsilon := 1e-5

	normalized := make([][][]float32, batchSize)
	for b := 0; b < batchSize; b++ {
		normalized[b] = make([][]float32, seqLen)
		for t := 0; t < seqLen; t++ {
			vec := x[b][t]

			// Mean over the feature dimension.
			var mean float32
			for _, v := range vec {
				mean += v
			}
			mean /= float32(dModel)

			// Population variance over the feature dimension.
			var variance float32
			for _, v := range vec {
				diff := v - mean
				variance += diff * diff
			}
			variance /= float32(dModel)

			// Standardize every component.
			std := float32(math.Sqrt(float64(variance) + epsilon))
			out := make([]float32, dModel)
			for d, v := range vec {
				out[d] = (v - mean) / std
			}
			normalized[b][t] = out
		}
	}

	return normalized
}

// Generate autoregressively extends prompt with sampled tokens until it
// emits "。", the last 8 generated tokens are all <unk>, or maxLength
// tokens have been produced.
//
// Sampling uses temperature scaling (0.7), top-p nucleus sampling (0.8),
// and a repetition penalty (1.2, raised further for punctuation) over the
// last 5 generated tokens. Returns the prompt followed by the generated
// tokens, space-separated.
func (lm *TransformerLanguageModel) Generate(prompt string, maxLength int) string {
	// Split the prompt (per-character for Chinese); fall back to a default
	// seed when empty.
	words := splitPrompt(prompt)
	if len(words) == 0 {
		words = []string{"今", "天"}
	}

	fmt.Printf("[信息] 提示\\> %s\n", prompt)

	// Map prompt tokens to vocabulary ids, falling back to <unk>.
	inputIndices := make([]int, len(words))
	for i, word := range words {
		idx, ok := lm.Vocab[word]
		if !ok {
			idx = lm.Vocab["<unk>"]
		}
		inputIndices[i] = idx
	}

	// Causal mask of shape [1][maxLength][maxLength].
	// NOTE(review): this mask is never used below — Forward builds its own
	// causal mask internally — so this block is dead work.
	mask := make([][][]float32, 1)
	mask[0] = make([][]float32, maxLength)
	for i := 0; i < maxLength; i++ {
		mask[0][i] = make([]float32, maxLength)
		for j := 0; j < maxLength; j++ {
			if j <= i {
				mask[0][i][j] = 1.0
			} else {
				mask[0][i][j] = 0.0
			}
		}
	}

	// The generated sequence starts as a copy of the prompt ids.
	generatedIndices := make([]int, len(inputIndices))
	copy(generatedIndices, inputIndices)

	// Sanity checks before entering the generation loop.
	if len(generatedIndices) == 0 {
		panic("生成的索引为空")
	}

	if len(lm.Embedding) == 0 || len(lm.Embedding[0]) == 0 {
		panic("无效的嵌入矩阵")
	}

	if len(lm.PosEncodings) == 0 || len(lm.PosEncodings[0]) == 0 {
		panic("无效的位置编码")
	}

	for len(generatedIndices) < maxLength {
		currentLen := len(generatedIndices)
		// Build the model input ([1][currentLen][DModel]):
		// token embedding + position encoding for each position.
		input := make([][][]float32, 1)
		input[0] = make([][]float32, currentLen)
		for i := 0; i < currentLen; i++ {
			input[0][i] = make([]float32, lm.DModel)
			// Position wraps modulo MaxLen when the sequence is longer
			// than the encoding table.
			for j := 0; j < lm.DModel; j++ {
				pos := i % lm.MaxLen
				input[0][i][j] = lm.Embedding[generatedIndices[i]][j] + lm.PosEncodings[pos][j]
			}
		}

		// Forward pass over the whole sequence so far.
		output := lm.Forward(input)

		if len(output) == 0 || len(output[0]) == 0 {
			panic("无效的模型输出")
		}

		// Logits for the next token come from the last time step.
		logits := make([]float32, lm.VocabSize)
		seqLen := len(output[0])
		lastOutput := output[0][seqLen-1]
		for j := 0; j < lm.VocabSize; j++ {
			logits[j] = lastOutput[j]
		}

		// Sampling hyperparameters.
		// temperature: controls randomness of the softmax.
		temperature := float32(0.7)
		// topP: cumulative probability threshold for nucleus sampling.
		topP := float32(0.8)
		// repeatPenalty: divisor applied to recently generated tokens.
		repeatPenalty := float32(1.2)

		// Apply the repetition penalty, with extra penalties for
		// punctuation and for repeating the immediately preceding
		// punctuation token.
		penalizedLogits := make([]float32, lm.VocabSize)
		for j := 0; j < lm.VocabSize; j++ {
			penalty := float32(1.0)
			if len(generatedIndices) > 0 {
				// Collect the tokens generated in the last 5 steps.
				recentWords := make(map[int]bool)
				windowSize := 5
				startIdx := len(generatedIndices) - windowSize
				if startIdx < 0 {
					startIdx = 0
				}
				for i := startIdx; i < len(generatedIndices); i++ {
					recentWords[generatedIndices[i]] = true
				}

				if recentWords[j] {
					penalty = repeatPenalty

					// Punctuation repeats are penalized harder.
					word := lm.ReverseVocab[j]
					if isPunctuation(word) {
						penalty *= 1.5
					}

					// Back-to-back identical punctuation gets the
					// maximum penalty.
					if len(generatedIndices) > 0 && generatedIndices[len(generatedIndices)-1] == j && isPunctuation(word) {
						penalty *= 2.0
					}
				}
			}
			penalizedLogits[j] = logits[j] / penalty
		}

		// Temperature scaling.
		scaledLogits := make([]float32, lm.VocabSize)
		for j := 0; j < lm.VocabSize; j++ {
			scaledLogits[j] = penalizedLogits[j] / temperature
		}

		// Convert to a probability distribution.
		predictions := Softmax(scaledLogits)

		// Nucleus (top-p) sampling.
		// 1. Sort tokens by descending probability.
		// NOTE(review): this is an O(V^2) selection sort per generated
		// token; fine for small vocabularies only.
		type probIndex struct {
			prob float32
			idx  int
		}
		probIndices := make([]probIndex, lm.VocabSize)
		for j := 0; j < lm.VocabSize; j++ {
			probIndices[j] = probIndex{predictions[j], j}
		}
		for i := 0; i < lm.VocabSize; i++ {
			for j := i + 1; j < lm.VocabSize; j++ {
				if probIndices[i].prob < probIndices[j].prob {
					probIndices[i], probIndices[j] = probIndices[j], probIndices[i]
				}
			}
		}
		// 2. Take tokens until cumulative probability reaches topP.
		topIndices := make([]int, 0)
		topProbs := make([]float32, 0)
		sumProbs := float32(0)
		for _, pi := range probIndices {
			if sumProbs >= topP {
				break
			}
			topIndices = append(topIndices, pi.idx)
			topProbs = append(topProbs, pi.prob)
			sumProbs += pi.prob
		}
		// Always keep at least the most likely token.
		if len(topIndices) == 0 {
			topIndices = append(topIndices, probIndices[0].idx)
			topProbs = append(topProbs, probIndices[0].prob)
		}

		// Renormalize the truncated distribution.
		for i := range topProbs {
			topProbs[i] /= sumProbs
		}

		// Sample from the renormalized distribution.
		r := rand.Float32()
		sum := float32(0)
		nextIdx := topIndices[0]
		for i := 0; i < len(topIndices); i++ {
			sum += topProbs[i]
			if r < sum {
				nextIdx = topIndices[i]
				break
			}
		}

		generatedIndices = append(generatedIndices, nextIdx)

		// Stop condition 1: generated a full stop.
		if lm.ReverseVocab[nextIdx] == "。" {
			break
		}

		// Stop condition 2: the last 8 generated tokens are all <unk>
		// (checked once at least 8 tokens exist).
		if len(generatedIndices) >= 8 {
			unkCount := 0
			for i := len(generatedIndices) - 8; i < len(generatedIndices); i++ {
				if lm.ReverseVocab[generatedIndices[i]] == "<unk>" {
					unkCount++
				}
			}
			if unkCount >= 8 {
				break
			}
		}

		// Stop condition 3: reached the maximum length.
		if len(generatedIndices) >= maxLength {
			break
		}
	}

	// Render the result: prompt followed by the generated tokens.
	result := prompt
	for i := len(words); i < len(generatedIndices); i++ {
		result += " " + lm.ReverseVocab[generatedIndices[i]]
	}

	return result
}

// Thinking generates text for prompt and, when showThought is true, also
// returns a canned description of the generation pipeline; otherwise the
// second return value is empty.
func (lm *TransformerLanguageModel) Thinking(prompt string, maxLength int, showThought bool) (string, string) {
	generatedText := lm.Generate(prompt, maxLength)

	var thoughtProcess string
	if showThought {
		thoughtProcess = strings.Join([]string{
			fmt.Sprintf("收到提示: '%s'\n", prompt),
			"分析提示语义...\n",
			"应用自注意力机制处理上下文...\n",
			"使用softmax和top-p采样选择下一个词...\n",
			"文本生成完成。",
		}, "")
	}

	return generatedText, thoughtProcess
}

// Evaluate computes the model's average next-token cross-entropy loss over
// a validation corpus.
//
// Sentences with fewer than two whitespace-separated tokens are skipped.
// The returned loss is averaged over the sentences actually evaluated
// (0 if none were). This fixes the previous behavior of dividing by
// len(valCorpus), which under-reported the loss whenever sentences were
// skipped and produced NaN for an empty corpus.
func (lm *TransformerLanguageModel) Evaluate(valCorpus []string) float32 {
	totalLoss := float32(0)
	evaluated := 0

	for _, sentence := range valCorpus {
		words := strings.Fields(sentence)
		if len(words) < 2 {
			continue
		}

		// Map tokens to ids, falling back to <unk>.
		indices := make([]int, len(words))
		for i, word := range words {
			idx, ok := lm.Vocab[word]
			if !ok {
				idx = lm.Vocab["<unk>"]
			}
			indices[i] = idx
		}

		// Predict token i+1 from tokens 0..i.
		seqLen := len(indices) - 1
		if seqLen < 1 {
			continue
		}

		// Model input: token embedding + position encoding per position.
		input := make([][][]float32, 1)
		input[0] = make([][]float32, seqLen)
		for i := 0; i < seqLen; i++ {
			input[0][i] = make([]float32, lm.DModel)
			for j := 0; j < lm.DModel; j++ {
				pos := i % lm.MaxLen
				input[0][i][j] = lm.Embedding[indices[i]][j] + lm.PosEncodings[pos][j]
			}
		}

		// One-hot targets: the next token at each position.
		targets := make([][][]float32, 1)
		targets[0] = make([][]float32, seqLen)
		for i := 0; i < seqLen; i++ {
			targets[0][i] = make([]float32, lm.VocabSize)
			targets[0][i][indices[i+1]] = 1.0
		}

		output := lm.Forward(input)

		// Mean cross-entropy over the sequence.
		loss := float32(0)
		for i := 0; i < seqLen; i++ {
			predictions := Softmax(output[0][i])
			loss += CrossEntropyLoss(predictions, targets[0][i])
		}
		totalLoss += loss / float32(seqLen)
		evaluated++
	}

	// Average over the sentences that were actually scored.
	if evaluated == 0 {
		return 0
	}
	return totalLoss / float32(evaluated)
}

// Train trains the model on corpus and returns the final epoch's loss, the
// final learning rate, and whether early stopping was triggered.
//
// The per-epoch learning rate follows linear warmup (first 10% of epochs)
// into cosine annealing between fixed bounds, with a trend-based
// adjustment and a NaN-recovery fallback. Weight updates are a crude
// approximation of backpropagation: the output-layer gradient is reused
// (index-wrapped with %) as the gradient for every layer's parameters.
//
// corpus: training sentences (whitespace-tokenized).
// epochs: number of passes over the corpus.
// learningRate: base LR used only by the NaN-recovery path.
// startEpoch: offset for the early-stopping gate and NaN recovery.
// patience: early-stopping patience in epochs.
// minDelta: minimum loss improvement counted as progress.
func (lm *TransformerLanguageModel) Train(corpus []string, epochs int, learningRate float32, startEpoch int, patience int, minDelta float32) (float32, float32, bool) {
	var lastLoss, lastLR float32

	// Default the trend window used by the dynamic LR adjustment.
	if lm.TrendWindowSize == 0 {
		lm.TrendWindowSize = 5 // default: 5 epochs
	}
	lm.LossHistory = make([]float32, 0, lm.TrendWindowSize*2)
	lm.LrHistory = make([]float32, 0, lm.TrendWindowSize*2)

	for epoch := 0; epoch < epochs; epoch++ {
		// Early stopping once past the warm-up period.
		if epoch > startEpoch+patience && CheckEarlyStopping(lm.LossHistory, patience, minDelta) {
			fmt.Printf("[早停机制] 第 %d 轮训练后触发早停\n", epoch+1)
			return lastLoss, lastLR, true
		}

		totalLoss := float32(0)
		// LR schedule bounds: warmup + cosine annealing between these.
		minLearningRate := float32(5e-5)
		maxLearningRate := float32(0.03)
		// L2 weight-decay coefficient.
		weightDecay := float32(1e-4)
		// Keep maxLearningRate >= minLearningRate.
		maxLearningRate = float32(math.Max(float64(maxLearningRate), float64(minLearningRate)))

		totalSteps := float64(epochs)
		step := float64(epoch)

		var epochLR float32

		// Warmup spans the first 10% of all epochs (at least one step).
		warmupSteps := 0.1 * totalSteps
		if warmupSteps < 1.0 {
			warmupSteps = 1.0
		}
		if step < warmupSteps {
			// Linear warmup from min to max LR.
			epochLR = minLearningRate + (maxLearningRate-minLearningRate)*float32(step/warmupSteps)
		} else {
			// Cosine annealing from max back down to min LR.
			adjustedStep := (step - warmupSteps) / (totalSteps - warmupSteps)
			epochLR = minLearningRate + 0.5*(maxLearningRate-minLearningRate)*(1+float32(math.Cos(math.Pi*adjustedStep)))
		}

		// Periodic LR debug output.
		if epoch%5 == 0 {
			fmt.Printf("[调试] Epoch %d, 计算的学习率: %.8f, maxLR: %.8f, minLR: %.8f\n", epoch+1, epochLR, maxLearningRate, minLearningRate)
		}

		for _, sentence := range corpus {
			// Tokenize; sentences with fewer than 2 tokens are unusable.
			words := strings.Fields(sentence)
			if len(words) < 2 {
				continue
			}

			// Map tokens to ids, falling back to <unk>.
			indices := make([]int, len(words))
			for i, word := range words {
				idx, ok := lm.Vocab[word]
				if !ok {
					idx = lm.Vocab["<unk>"]
				}
				indices[i] = idx
			}

			// Predict token i+1 from tokens 0..i.
			seqLen := len(indices) - 1
			if seqLen < 1 {
				continue
			}

			// Model input: token embedding + position encoding.
			input := make([][][]float32, 1)
			input[0] = make([][]float32, seqLen)
			for i := 0; i < seqLen; i++ {
				input[0][i] = make([]float32, lm.DModel)
				for j := 0; j < lm.DModel; j++ {
					pos := i % lm.MaxLen
					input[0][i][j] = lm.Embedding[indices[i]][j] + lm.PosEncodings[pos][j]
				}
			}

			// One-hot targets: the next token at each position.
			targets := make([][][]float32, 1)
			targets[0] = make([][]float32, seqLen)
			for i := 0; i < seqLen; i++ {
				targets[0][i] = make([]float32, lm.VocabSize)
				targets[0][i][indices[i+1]] = 1.0
			}

			// Forward pass.
			output := lm.Forward(input)

			// Mean cross-entropy over the sequence.
			loss := float32(0)
			for i := 0; i < seqLen; i++ {
				predictions := Softmax(output[0][i])
				loss += CrossEntropyLoss(predictions, targets[0][i])
			}
			totalLoss += loss / float32(seqLen)

			// "Backward" pass.
			// Output-layer gradient: softmax(logits) - one-hot target.
			dOutput := make([][][]float32, 1)
			dOutput[0] = make([][]float32, seqLen)
			for i := 0; i < seqLen; i++ {
				dOutput[0][i] = make([]float32, lm.VocabSize)
				predictions := Softmax(output[0][i])
				for j := 0; j < lm.VocabSize; j++ {
					dOutput[0][i][j] = predictions[j] - targets[0][i][j]
				}
			}

			// Update the output projection.
			// NOTE(review): the "input" to this update is output[0][i][j]
			// (the logits), not the final hidden state — an approximation.
			lastLayerSize := len(lm.Layers[lm.NLayer-1].Bias)
			for i := 0; i < seqLen; i++ {
				// Clip the output gradient to [-5, 5].
				maxGrad := float32(5.0)
				for k := 0; k < lm.VocabSize; k++ {
					if dOutput[0][i][k] > maxGrad {
						dOutput[0][i][k] = maxGrad
					} else if dOutput[0][i][k] < -maxGrad {
						dOutput[0][i][k] = -maxGrad
					}
				}

				for j := 0; j < lastLayerSize; j++ {
					for k := 0; k < lm.VocabSize; k++ {
						if j < len(lm.OutputProj) && k < len(lm.OutputProj[j]) {
							// SGD step with L2 weight decay.
							lm.OutputProj[j][k] -= epochLR * (dOutput[0][i][k]*output[0][i][j] + weightDecay*lm.OutputProj[j][k])
						}
					}
				}
			}

			// Update every layer's weights, last layer first.
			// NOTE(review): this is not true backpropagation — the
			// output-layer gradient is reused for every layer, with
			// indices wrapped via % when dimensions differ.
			for l := lm.NLayer - 1; l >= 0; l-- {
				layer := &lm.Layers[l]
				layerSize := len(layer.Bias)
				dModel := lm.DModel
				dFF := 4 * dModel

				// Simplified per-position gradient application.
				for i := 0; i < seqLen; i++ {
					// Bias update, reusing the output gradient.
					for j := 0; j < layerSize; j++ {
						layer.Bias[j] -= epochLR * dOutput[0][i][j%lm.VocabSize]
					}

					// Attention weight updates. All four matrices get the
					// same approximate gradient.
					for j := 0; j < dModel; j++ {
						for k := 0; k < dModel; k++ {
							grad := float32(0)
							if l == 0 {
								grad = input[0][i][j] * dOutput[0][i][k%lm.VocabSize]
							} else {
								grad = output[0][i][j] * dOutput[0][i][k%lm.VocabSize]
							}
							layer.QWeights[j][k] -= epochLR * (grad + weightDecay*layer.QWeights[j][k])
							layer.KWeights[j][k] -= epochLR * (grad + weightDecay*layer.KWeights[j][k])
							layer.VWeights[j][k] -= epochLR * (grad + weightDecay*layer.VWeights[j][k])
							layer.OWeights[j][k] -= epochLR * (grad + weightDecay*layer.OWeights[j][k])
						}
					}

					// FFN layer 1 weight updates.
					for j := 0; j < dModel; j++ {
						for k := 0; k < dFF; k++ {
							grad := float32(0)
							if l == 0 {
								grad = input[0][i][j] * dOutput[0][i][k%lm.VocabSize]
							} else {
								grad = output[0][i][j] * dOutput[0][i][k%lm.VocabSize]
							}
							if j < len(layer.FFNWeights1) && k < len(layer.FFNWeights1[j]) {
								layer.FFNWeights1[j][k] -= epochLR * (grad + weightDecay*layer.FFNWeights1[j][k])
							}
						}
					}

					// FFN layer 2 weight updates.
					for j := 0; j < dFF; j++ {
						for k := 0; k < dModel; k++ {
							grad := float32(0)
							if l == 0 {
								grad = input[0][i][j%dModel] * dOutput[0][i][k]
							} else {
								grad = output[0][i][j%dModel] * dOutput[0][i][k]
							}
							if j < len(layer.FFNWeights2) && k < len(layer.FFNWeights2[j]) {
								layer.FFNWeights2[j][k] -= epochLR * (grad + weightDecay*layer.FFNWeights2[j][k])
							}
						}
					}
				}
			}

			// Update the embedding rows of the tokens in this sentence,
			// with bounds checks against the embedding table size.
			for i := 0; i < seqLen; i++ {
				wordIdx := indices[i]
				if wordIdx < 0 || wordIdx >= len(lm.Embedding) {
					fmt.Printf("警告: 无效的嵌入索引 %d (矩阵大小: %d)\n", wordIdx, len(lm.Embedding))
					continue
				}
				for j := 0; j < lm.DModel; j++ {
					// Approximate embedding gradient (index-wrapped output
					// gradient) plus weight decay.
					lm.Embedding[wordIdx][j] -= epochLR * (dOutput[0][i][j%lm.VocabSize] + weightDecay*lm.Embedding[wordIdx][j])
				}
			}

		}

		avgLoss := totalLoss / float32(len(corpus))
		// NaN recovery: substitute a large finite loss and adjust the LR.
		// NOTE(review): this epoch's updates already used the original
		// epochLR; the value set here only affects logging/history.
		if math.IsNaN(float64(avgLoss)) {
			fmt.Printf("警告: Epoch %d 出现NaN Loss，将重置学习率\n", epoch+1)
			avgLoss = float32(10.0) // large placeholder loss
			if startEpoch == 0 {
				epochLR = learningRate * float32(math.Pow(0.95, float64(epoch)))
			} else {
				epochLR = learningRate * float32(1.5) // boost LR to escape NaN
			}
			epochLR = float32(math.Min(float64(epochLR), float64(learningRate*2))) // cap the LR
		}
		// Record this epoch's loss and learning rate.
		lm.LossHistory = append(lm.LossHistory, avgLoss)
		lm.LrHistory = append(lm.LrHistory, epochLR)

		// Keep at most 2 * trend-window entries of history.
		if len(lm.LossHistory) > lm.TrendWindowSize*2 {
			lm.LossHistory = lm.LossHistory[len(lm.LossHistory)-lm.TrendWindowSize*2:]
			lm.LrHistory = lm.LrHistory[len(lm.LrHistory)-lm.TrendWindowSize*2:]
		}

		// Trend-based LR adjustment once enough history exists.
		// NOTE(review): as with NaN recovery, this runs after the epoch's
		// updates, so it only changes the logged/recorded LR.
		if len(lm.LossHistory) >= lm.TrendWindowSize {
			windowStart := len(lm.LossHistory) - lm.TrendWindowSize
			recentLosses := lm.LossHistory[windowStart:]
			recentLRs := lm.LrHistory[windowStart:]

			// Loss counts as decreasing only if each epoch improves by
			// more than a 5e-4 relative margin.
			isDecreasing := true
			for i := 1; i < len(recentLosses); i++ {
				if recentLosses[i] >= recentLosses[i-1]*(1-5e-4) {
					isDecreasing = false
					break
				}
			}

			// Loss has plateaued: nudge the LR depending on where it sits.
			if !isDecreasing {
				currentLR := recentLRs[len(recentLRs)-1]
				if currentLR > maxLearningRate*0.8 {
					// LR near the top of the range: reduce it.
					epochLR *= 0.8
					fmt.Printf("[动态调整] 损失不再下降，降低学习率到: %.8f\n", epochLR)
				} else if currentLR < minLearningRate*2 {
					// LR near the bottom of the range: raise it.
					epochLR *= 1.1
					fmt.Printf("[动态调整] 损失不再下降，提高学习率到: %.8f\n", epochLR)
				} else {
					// LR mid-range: decay it very slightly.
					epochLR *= 0.9995
					fmt.Printf("[动态调整] 损失不再下降，轻微降低学习率到: %.8f\n", epochLR)
				}

				// Clamp the LR back into [minLearningRate, maxLearningRate].
				epochLR = float32(math.Max(float64(epochLR), float64(minLearningRate)))
				epochLR = float32(math.Min(float64(epochLR), float64(maxLearningRate)))
			}
		}

		fmt.Printf("Epoch %d, Loss: %.4f, LR: %.6f\n", epoch+1, avgLoss, epochLR)

		// Remember the final epoch's loss and LR for the return values.
		if epoch == epochs-1 {
			lastLoss = avgLoss
			lastLR = epochLR
		}
	}
	return lastLoss, lastLR, false
}
