package document

import (
	"context"
	"fmt"
	"path/filepath"
	"strings"

	"ai-rag/document/loaders"

	"github.com/cloudwego/eino/schema"
	"github.com/sirupsen/logrus"
)

// Document is this package's representation of a loaded document, or of a
// single chunk of one after splitting.
type Document struct {
	Content  string                 `json:"content"` // plain-text content of the document/chunk
	MetaData map[string]interface{} `json:"metadata"` // free-form metadata; chunkers add keys such as chunk_index
	FilePath string                 `json:"file_path"` // path of the source file
}

// DocumentLoader loads the file at path into one or more Documents.
type DocumentLoader interface {
	Load(ctx context.Context, path string) ([]Document, error)
}

// DocumentTransformer transforms a batch of eino schema.Documents
// (for example, splitting them into chunks).
//
// NOTE(review): ChunkTransformer.Transform in this file operates on
// []Document, not []*schema.Document, so it does not satisfy this
// interface — confirm which signature callers actually rely on.
type DocumentTransformer interface {
	Transform(ctx context.Context, docs []*schema.Document) ([]*schema.Document, error)
}

// GenericDocumentLoader dispatches document loading to a format-specific
// loader chosen by file extension.
type GenericDocumentLoader struct {
	supportedFormats map[string]bool // set of allowed lowercase extensions (without the dot)
}

// NewGenericDocumentLoader builds a loader that accepts only the listed
// formats; format matching is case-insensitive.
func NewGenericDocumentLoader(supportedFormats []string) *GenericDocumentLoader {
	loader := &GenericDocumentLoader{
		supportedFormats: make(map[string]bool, len(supportedFormats)),
	}
	for _, f := range supportedFormats {
		loader.supportedFormats[strings.ToLower(f)] = true
	}
	return loader
}

// Load loads the document at path, dispatching to a format-specific loader
// based on the (case-insensitive) file extension.
//
// It returns an error when the extension is not in the configured
// supportedFormats set or has no loader implementation.
func (l *GenericDocumentLoader) Load(ctx context.Context, path string) ([]Document, error) {
	// filepath.Ext returns "" for extension-less paths; the previous version
	// sliced [1:] unconditionally, which panicked on such input. TrimPrefix
	// handles both the normal ".ext" case and the empty case safely.
	ext := strings.TrimPrefix(strings.ToLower(filepath.Ext(path)), ".")

	if !l.supportedFormats[ext] {
		return nil, fmt.Errorf("不支持的文档格式: %s", ext)
	}

	logrus.WithField("path", path).Info("开始加载文档")

	// Pick the loader implementation for this extension.
	var loader loaders.Loader
	switch ext {
	case "pdf":
		loader = loaders.NewPDFLoader(path)
	case "docx":
		loader = loaders.NewDocxLoader(path)
	case "txt":
		loader = loaders.NewTextLoader(path)
	case "html":
		loader = loaders.NewHTMLLoader(path)
	case "markdown", "md":
		loader = loaders.NewMarkdownLoader(path)
	default:
		return nil, fmt.Errorf("未实现的文档格式加载器: %s", ext)
	}

	loadedDocs, err := loader.Load(ctx)
	if err != nil {
		logrus.WithError(err).WithField("path", path).Error("加载文档失败")
		return nil, err
	}

	// Convert the loader's documents into this package's Document type.
	result := make([]Document, len(loadedDocs))
	for i, doc := range loadedDocs {
		result[i] = Document{
			Content:  doc.Content,
			MetaData: doc.MetaData,
			FilePath: doc.FilePath,
		}
	}

	logrus.WithField("path", path).WithField("count", len(result)).Info("文档加载成功")
	return result, nil
}

// ChunkTransformer splits documents into smaller chunks according to a
// configurable strategy.
type ChunkTransformer struct {
	strategy     string // "chunk_by_size" or "chunk_by_paragraph"
	maxChunkSize int    // upper bound on a chunk's byte length
	chunkOverlap int    // bytes of trailing context carried into the next chunk
	minChunkSize int    // chunks shorter than this are treated as too small
}

// NewChunkTransformer returns a ChunkTransformer configured with the given
// splitting strategy and size parameters.
func NewChunkTransformer(strategy string, maxChunkSize, chunkOverlap, minChunkSize int) *ChunkTransformer {
	ct := new(ChunkTransformer)
	ct.strategy = strategy
	ct.maxChunkSize = maxChunkSize
	ct.chunkOverlap = chunkOverlap
	ct.minChunkSize = minChunkSize
	return ct
}

// Transform splits every document in docs using the transformer's configured
// strategy and returns the concatenated chunks.
//
// It fails fast: the first document that cannot be split aborts the whole
// batch. The context is currently unused by the chunking strategies.
func (t *ChunkTransformer) Transform(ctx context.Context, docs []Document) ([]Document, error) {
	var out []Document

	for _, d := range docs {
		var pieces []Document
		var err error

		switch t.strategy {
		case "chunk_by_size":
			pieces, err = t.chunkBySize(d)
		case "chunk_by_paragraph":
			pieces, err = t.chunkByParagraph(d)
		default:
			err = fmt.Errorf("未实现的分割策略: %s", t.strategy)
		}
		if err != nil {
			return nil, err
		}

		out = append(out, pieces...)
	}

	logrus.WithField("count", len(out)).Info("文档转换完成")
	return out, nil
}

// chunkBySize splits a document into overlapping fixed-size chunks,
// preferring to cut at sentence, clause, or word boundaries when possible.
//
// Fixes over the previous version:
//   - builds []Document as the signature declares (the old body built
//     []*schema.Document, which did not compile against the return type);
//   - boundary cut positions are applied relative to the current chunk,
//     not as absolute indices into the whole content (the old absolute
//     indexing could slice out of range or duplicate text);
//   - the loop position advances explicitly and always moves forward,
//     preventing the infinite loop possible when chunkOverlap >= maxChunkSize.
func (t *ChunkTransformer) chunkBySize(doc Document) ([]Document, error) {
	step := t.maxChunkSize - t.chunkOverlap
	if step <= 0 {
		return nil, fmt.Errorf("无效的分块参数: maxChunkSize=%d, chunkOverlap=%d", t.maxChunkSize, t.chunkOverlap)
	}

	var chunks []Document
	content := doc.Content
	contentLength := len(content)

	for start := 0; start < contentLength; {
		end := start + t.maxChunkSize
		if end > contentLength {
			end = contentLength
		}
		chunkContent := content[start:end]

		// A trailing chunk smaller than minChunkSize is merged into the
		// previous chunk instead of being emitted on its own.
		if len(chunkContent) < t.minChunkSize && len(chunks) > 0 {
			chunks[len(chunks)-1].Content += chunkContent
			break
		}

		next := start + step

		// Try to cut at a sentence, clause, or word boundary. All positions
		// here are relative to chunkContent, so slicing uses chunkContent.
		if end < contentLength {
			cut := -1
			if p := strings.LastIndex(chunkContent, ". "); p > 0 {
				cut = p + 1 // keep the period with the chunk
			} else if p := strings.LastIndex(chunkContent, ", "); p > 0 {
				cut = p + 1
			} else if p := strings.LastIndex(chunkContent, " "); p > 0 {
				cut = p
			}
			if cut > 0 && cut >= t.minChunkSize {
				chunkContent = chunkContent[:cut]
				next = start + cut - t.chunkOverlap
			}
		}
		if next <= start {
			next = start + 1 // guarantee forward progress
		}

		// Copy the per-document metadata and annotate the chunk.
		metadata := make(map[string]interface{}, len(doc.MetaData)+2)
		for k, v := range doc.MetaData {
			metadata[k] = v
		}
		metadata["chunk_index"] = len(chunks)
		metadata["chunk_size"] = len(chunkContent)

		chunks = append(chunks, Document{
			Content:  chunkContent,
			MetaData: metadata,
			FilePath: doc.FilePath,
		})

		start = next
	}

	return chunks, nil
}

// chunkByParagraph groups paragraphs (separated by blank lines) into chunks
// no larger than maxChunkSize, carrying up to chunkOverlap bytes of trailing
// context from one chunk into the next.
//
// Fixes over the previous version:
//   - builds []Document as the signature declares (the old body built
//     []*schema.Document, which did not compile against the return type);
//   - paragraph_range records the actual first and last paragraph indices
//     in the chunk (the old arithmetic on len(chunkMetadata) was meaningless);
//   - a final chunk shorter than minChunkSize is merged into the previous
//     chunk instead of being silently dropped;
//   - a new chunk no longer starts with a spurious "\n\n" when there is no
//     overlap text to prepend.
func (t *ChunkTransformer) chunkByParagraph(doc Document) ([]Document, error) {
	var chunks []Document
	paragraphs := strings.Split(doc.Content, "\n\n")

	var currentChunk string
	var paraIndices []int // original indices of the paragraphs in the current chunk

	// emit appends currentChunk as a new chunk with copied metadata.
	// It is only called while paraIndices is non-empty.
	emit := func() {
		metadata := make(map[string]interface{}, len(doc.MetaData)+2)
		for k, v := range doc.MetaData {
			metadata[k] = v
		}
		metadata["chunk_index"] = len(chunks)
		metadata["paragraph_range"] = fmt.Sprintf("%d-%d",
			paraIndices[0], paraIndices[len(paraIndices)-1])

		chunks = append(chunks, Document{
			Content:  currentChunk,
			MetaData: metadata,
			FilePath: doc.FilePath,
		})
	}

	for i, para := range paragraphs {
		para = strings.TrimSpace(para)
		if para == "" {
			continue
		}

		// Start a new chunk when adding this paragraph would overflow.
		if currentChunk != "" && len(currentChunk)+len(para) > t.maxChunkSize {
			emit()

			// Seed the new chunk with the tail of the previous one for context.
			overlap := ""
			if len(currentChunk) > t.chunkOverlap {
				overlap = currentChunk[len(currentChunk)-t.chunkOverlap:]
			}
			if overlap != "" {
				currentChunk = overlap + "\n\n" + para
			} else {
				currentChunk = para
			}
			paraIndices = []int{i}
		} else {
			if currentChunk != "" {
				currentChunk += "\n\n"
			}
			currentChunk += para
			paraIndices = append(paraIndices, i)
		}
	}

	// Flush the final chunk; merge it into the previous one if it is too
	// small to stand alone, so no content is lost.
	if currentChunk != "" {
		if len(currentChunk) >= t.minChunkSize || len(chunks) == 0 {
			emit()
		} else {
			chunks[len(chunks)-1].Content += "\n\n" + currentChunk
		}
	}

	return chunks, nil
}
