package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"path/filepath"
	"strings"
	"time"

	"mcp-server/internal/qdrant"
	"mcp-server/internal/storage"
	"mcp-server/internal/types"
)

// SimpleKnowledgeVectorizer is a simplified knowledge-base vectorization tool.
// It walks a directory of markdown files, splits each file into text chunks,
// and stores one placeholder vector per chunk in the configured vector storage.
type SimpleKnowledgeVectorizer struct {
	ctx           context.Context       // NOTE(review): storing a context in a struct is discouraged in Go; prefer passing ctx per call
	storageClient storage.VectorStorage // set by Initialize; nil until then
}

// NewSimpleKnowledgeVectorizer constructs a vectorizer backed by a background
// context. Initialize must be called before any processing methods.
func NewSimpleKnowledgeVectorizer() *SimpleKnowledgeVectorizer {
	skv := &SimpleKnowledgeVectorizer{}
	skv.ctx = context.Background()
	return skv
}

// Initialize sets up the storage manager and ensures the "knowledge_base"
// collection exists, creating it (768-dimensional cosine vectors with an
// HNSW index) when missing. It must be called before ProcessKnowledgeBase.
func (skv *SimpleKnowledgeVectorizer) Initialize() error {
	log.Println("正在初始化存储管理器...")

	// Storage configuration: local data path plus batch/cache sizing.
	config := &storage.StorageConfig{
		QdrantConfig: &qdrant.Config{
			DataPath: "./knowledge_vectors",
		},
		BatchSize: 100,
		CacheSize: 1000,
	}

	// Create and initialize the storage manager, then keep its client.
	manager := storage.NewStorageManager()
	err := manager.Initialize(config)
	if err != nil {
		return fmt.Errorf("初始化存储管理器失败: %w", err)
	}

	skv.storageClient = manager.GetClient()

	// Make sure the knowledge-base collection exists before anything writes to it.
	exists, err := skv.storageClient.CollectionExists(skv.ctx, "knowledge_base")
	if err != nil {
		return fmt.Errorf("检查集合失败: %w", err)
	}

	if !exists {
		log.Println("创建知识库集合...")
		// VectorSize 768 matches the vector length produced by storeVectors.
		// NOTE(review): the HNSW parameters appear to follow common Qdrant
		// defaults (m=16, ef_construct=200) — confirm against the backend docs.
		collectionConfig := &storage.CollectionConfig{
			VectorSize: 768,
			Distance:   "cosine",
			IndexType:  "hnsw",
			IndexParams: map[string]interface{}{
				"m":                   16,
				"ef_construct":        200,
				"full_scan_threshold": 10000,
			},
		}

		err = skv.storageClient.CreateCollection(skv.ctx, "knowledge_base", collectionConfig)
		if err != nil {
			return fmt.Errorf("创建集合失败: %w", err)
		}
		log.Println("知识库集合创建成功")
	}

	log.Println("初始化完成")
	return nil
}

// ProcessKnowledgeBase walks rootPath recursively and vectorizes every
// markdown (*.md) file found. A file that fails to process is logged and
// skipped so the remaining files are still handled — this is what makes the
// "total vs. successfully processed" counters in the final summary
// meaningful. Returns an error only when the directory walk itself fails.
func (skv *SimpleKnowledgeVectorizer) ProcessKnowledgeBase(rootPath string) error {
	log.Printf("开始处理知识库: %s", rootPath)

	var totalFiles, processedFiles, totalChunks, totalVectors int

	err := filepath.Walk(rootPath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Only markdown files are processed; directories and other files are skipped.
		if info.IsDir() || !strings.HasSuffix(strings.ToLower(path), ".md") {
			return nil
		}

		totalFiles++
		log.Printf("处理文件: %s", path)

		chunks, vectors, err := skv.processFile(path)
		if err != nil {
			// Skip the failing file instead of aborting the whole walk;
			// previously the first failure stopped processing entirely,
			// which made processedFiles always equal totalFiles.
			log.Printf("处理文件失败 %s: %v", path, err)
			return nil
		}

		processedFiles++
		totalChunks += chunks
		totalVectors += vectors

		log.Printf("文件 %s 处理完成: %d 个分片, %d 个向量",
			filepath.Base(path), chunks, vectors)
		return nil
	})

	if err != nil {
		return fmt.Errorf("遍历知识库失败: %w", err)
	}

	log.Printf("知识库处理完成: 总文件 %d, 处理成功 %d, 总分片 %d, 总向量 %d",
		totalFiles, processedFiles, totalChunks, totalVectors)
	return nil
}

// processFile reads one markdown file, splits it into text chunks, and stores
// one vector per chunk. It returns (chunk count, stored vector count, error).
func (skv *SimpleKnowledgeVectorizer) processFile(filePath string) (int, int, error) {
	data, err := os.ReadFile(filePath)
	if err != nil {
		return 0, 0, fmt.Errorf("读取文件失败: %w", err)
	}

	chunks := skv.simpleTextSplit(string(data), filePath)
	if len(chunks) == 0 {
		// Nothing worth storing (empty file or all paragraphs too short).
		return 0, 0, nil
	}

	// Store directly (the vectorization service is bypassed in this tool).
	stored, err := skv.storeVectors(chunks, filePath)
	if err != nil {
		return len(chunks), 0, fmt.Errorf("存储向量失败: %w", err)
	}
	return len(chunks), stored, nil
}

// simpleTextSplit splits content into paragraph-level chunks. Paragraphs are
// separated by blank lines; paragraphs shorter than 10 bytes are dropped and
// paragraphs longer than 800 bytes are further split by sentence via
// splitLongParagraph.
//
// CRLF line endings are normalized first so that Windows-authored markdown
// (whose paragraph break is "\r\n\r\n") is split correctly — previously such
// files collapsed into a single huge "paragraph".
func (skv *SimpleKnowledgeVectorizer) simpleTextSplit(content, filePath string) []*types.TextChunk {
	// Normalize CRLF so the blank-line split below also works for Windows files.
	content = strings.ReplaceAll(content, "\r\n", "\n")

	// Split on blank lines (paragraph boundaries).
	paragraphs := strings.Split(content, "\n\n")
	var chunks []*types.TextChunk

	chunkIndex := 0
	for _, paragraph := range paragraphs {
		paragraph = strings.TrimSpace(paragraph)
		// NOTE: length thresholds are in bytes, so CJK text trips them at
		// roughly one third of the character count.
		if len(paragraph) < 10 { // ignore paragraphs that are too short
			continue
		}

		if len(paragraph) > 800 {
			// Too long for one chunk: split further on sentence boundaries.
			chunks = append(chunks, skv.splitLongParagraph(paragraph, filePath, &chunkIndex)...)
			continue
		}

		chunks = append(chunks, &types.TextChunk{
			ID:         fmt.Sprintf("%s_chunk_%d", generateDocID(filePath), chunkIndex),
			Content:    paragraph,
			ChunkIndex: chunkIndex,
			Metadata: map[string]string{
				"file_path": filePath,
				"file_name": filepath.Base(filePath),
			},
			CreatedAt: time.Now(),
		})
		chunkIndex++
	}

	return chunks
}

// splitLongParagraph breaks an over-long paragraph into chunks of at most
// roughly 600 bytes by accumulating sentences, where sentences are delimited
// by the CJK terminators 。！？；. chunkIndex is shared with the caller and is
// advanced once per emitted chunk.
func (skv *SimpleKnowledgeVectorizer) splitLongParagraph(paragraph, filePath string, chunkIndex *int) []*types.TextChunk {
	var chunks []*types.TextChunk

	// flush emits the accumulated text as one chunk and advances the shared index.
	flush := func(text string) {
		chunks = append(chunks, &types.TextChunk{
			ID:         fmt.Sprintf("%s_chunk_%d", generateDocID(filePath), *chunkIndex),
			Content:    text,
			ChunkIndex: *chunkIndex,
			Metadata: map[string]string{
				"file_path": filePath,
				"file_name": filepath.Base(filePath),
			},
			CreatedAt: time.Now(),
		})
		*chunkIndex++
	}

	isTerminator := func(r rune) bool {
		return r == '。' || r == '！' || r == '？' || r == '；'
	}

	current := ""
	for _, sentence := range strings.FieldsFunc(paragraph, isTerminator) {
		sentence = strings.TrimSpace(sentence)
		if sentence == "" {
			continue
		}

		// Flush before the accumulated chunk would exceed the ~600-byte budget.
		if len(current) > 0 && len(current)+len(sentence) > 600 {
			flush(current)
			current = sentence
			continue
		}

		if current == "" {
			current = sentence
		} else {
			// Sentences are re-joined with 。 — the original terminator
			// (！？；) is not preserved by FieldsFunc.
			current += "。" + sentence
		}
	}

	// Emit whatever remains as the final chunk.
	if current != "" {
		flush(current)
	}

	return chunks
}

// storeVectors writes one vector per chunk into the "knowledge_base"
// collection. The vectors are NOT real embeddings: each is derived from a
// simple 31-based polynomial hash of the chunk text, so they are
// deterministic per content but carry no semantic meaning (the vectorization
// service is deliberately bypassed in this tool).
// Returns the number of vectors stored.
func (skv *SimpleKnowledgeVectorizer) storeVectors(chunks []*types.TextChunk, filePath string) (int, error) {
	if len(chunks) == 0 {
		return 0, nil
	}

	// Build one VectorData per text chunk.
	vectors := make([]*storage.VectorData, len(chunks))
	for i, chunk := range chunks {
		// Polynomial hash of the content; may overflow and go negative,
		// which only flips the sign of the fake components below.
		contentHash := 0
		for _, r := range chunk.Content {
			contentHash = contentHash*31 + int(r)
		}

		// Spread the hash across all 768 dimensions, scaled into a small range.
		vector := make([]float32, 768)
		for j := range vector {
			vector[j] = float32((contentHash+j)%10000) * 0.0001
		}

		vectors[i] = &storage.VectorData{
			ID:     chunk.ID,
			Vector: vector,
			Payload: map[string]interface{}{
				"file_path":    filePath,
				"file_name":    filepath.Base(filePath),
				"text":         chunk.Content,
				"chunk_index":  chunk.ChunkIndex,
				"timestamp":    time.Now().Format(time.RFC3339),
				"content_hash": fmt.Sprintf("%x", contentHash),
			},
		}
	}

	// Return the error for the caller to wrap exactly once; previously this
	// both logged and returned it (double handling).
	if err := skv.storageClient.InsertVectors(skv.ctx, "knowledge_base", vectors); err != nil {
		return 0, err
	}

	log.Printf("成功存储 %d 个向量", len(vectors))
	return len(vectors), nil
}

// generateDocID derives a flat document ID from a file path by replacing
// both kinds of path separators ('/' and '\') with underscores.
func generateDocID(filePath string) string {
	return strings.NewReplacer("/", "_", "\\", "_").Replace(filePath)
}

// GetStats fetches storage statistics from the backing client and prints a
// human-readable summary to stdout.
func (skv *SimpleKnowledgeVectorizer) GetStats() error {
	stats, err := skv.storageClient.GetStorageStats(skv.ctx)
	if err != nil {
		return fmt.Errorf("获取存储统计失败: %w", err)
	}

	// Single format string; output is byte-identical to printing line by line.
	fmt.Printf("\n=== 处理完成统计 ===\n总向量数: %d\n总集合数: %d\n存储大小: %d bytes\n",
		stats.TotalVectors, stats.TotalCollections, stats.TotalSize)
	return nil
}

// main drives the vectorizer: validate arguments, initialize storage, process
// the knowledge-base directory, then report statistics and elapsed time.
func main() {
	if len(os.Args) < 2 {
		log.Fatal("用法: go run main.go <知识库目录路径>")
	}
	knowledgeBasePath := os.Args[1]

	vectorizer := NewSimpleKnowledgeVectorizer()
	if err := vectorizer.Initialize(); err != nil {
		log.Fatalf("初始化失败: %v", err)
	}

	// Time only the processing phase, not initialization.
	started := time.Now()
	if err := vectorizer.ProcessKnowledgeBase(knowledgeBasePath); err != nil {
		log.Fatalf("处理知识库失败: %v", err)
	}

	// Statistics are best-effort: a failure here is logged, not fatal.
	if err := vectorizer.GetStats(); err != nil {
		log.Printf("获取统计信息失败: %v", err)
	}

	fmt.Printf("处理时间: %v\n", time.Since(started))
	fmt.Println("\n知识库向量化完成！")
	fmt.Println("可以使用验证工具检查向量化结果")
}
