package algorithms

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"
)

// AIOptimizer AI推理优化器
// AIOptimizer speeds up AI inference requests by combining response
// caching, text feature extraction, request batching and conversation
// context compression.
type AIOptimizer struct {
	responseCache    *LRUCache         // request fingerprint -> cached response text
	featureExtractor *FeatureExtractor // derives statistical features from text
	requestBatcher   *RequestBatcher   // groups individual requests into batches
	contextOptimizer *ContextOptimizer // trims over-long conversations
	mu               sync.RWMutex      // NOTE(review): not used by any method in this file — confirm before removing
}

// NewAIOptimizer 创建AI优化器
// NewAIOptimizer constructs an AIOptimizer with default-sized caches and
// default batching parameters.
func NewAIOptimizer() *AIOptimizer {
	optimizer := &AIOptimizer{}
	optimizer.responseCache = NewLRUCache(1000) // up to 1000 cached responses
	optimizer.featureExtractor = NewFeatureExtractor()
	optimizer.requestBatcher = NewRequestBatcher(10, 100*time.Millisecond)
	optimizer.contextOptimizer = NewContextOptimizer()
	return optimizer
}

// OptimizeRequest 优化AI请求
// OptimizeRequest checks the response cache and, on a miss, compresses the
// conversation context. It returns the (possibly optimized) messages plus a
// previously cached response, or "" when no cached response exists.
func (ao *AIOptimizer) OptimizeRequest(messages []interface{}) ([]interface{}, string) {
	key := ao.generateFingerprint(messages)

	// Cache hit: return the stored response without touching the context.
	cached, hit := ao.responseCache.Get(key)
	if hit {
		return messages, cached.(string)
	}

	// Cache miss: compress the context if it exceeds the length budget.
	return ao.contextOptimizer.OptimizeContext(messages), ""
}

// CacheResponse 缓存AI响应
// CacheResponse stores an AI response under the request fingerprint so a
// later identical request can be answered from cache.
func (ao *AIOptimizer) CacheResponse(messages []interface{}, response string) {
	const ttl = 30 * time.Minute // cache entries live for 30 minutes
	ao.responseCache.Put(ao.generateFingerprint(messages), response, ttl)
}

// generateFingerprint 生成请求指纹
// generateFingerprint returns a hex-encoded MD5 digest that identifies the
// message sequence. It is used only as a cache key, not for security, so
// MD5's cryptographic weakness is acceptable here.
func (ao *AIOptimizer) generateFingerprint(messages []interface{}) string {
	var content strings.Builder
	for _, msg := range messages {
		s := fmt.Sprintf("%v", msg)
		// Length-prefix each message so adjacent messages cannot merge into
		// the same byte stream: without this, ["ab","c"] and ["a","bc"]
		// would hash to the same fingerprint.
		fmt.Fprintf(&content, "%d:%s;", len(s), s)
	}

	hash := md5.Sum([]byte(content.String()))
	return hex.EncodeToString(hash[:])
}

// FeatureExtractor 特征提取器
// FeatureExtractor derives statistical, keyword and pattern features from text.
type FeatureExtractor struct {
	keywordCache *FastMap  // NOTE(review): declared but not read/written by the methods in this file
	patternCache *LRUCache // NOTE(review): declared but not read/written by the methods in this file
}

// NewFeatureExtractor 创建特征提取器
// NewFeatureExtractor constructs a FeatureExtractor with its caches initialized.
func NewFeatureExtractor() *FeatureExtractor {
	fe := new(FeatureExtractor)
	fe.keywordCache = NewFastMap()
	fe.patternCache = NewLRUCache(500)
	return fe
}

// ExtractFeatures 提取文本特征
// ExtractFeatures computes a bag of features for the given text: basic
// statistics (length, word count, line count), keywords, surface patterns,
// detected language and a vocabulary-based complexity score.
func (fe *FeatureExtractor) ExtractFeatures(text string) map[string]interface{} {
	keywords := fe.extractKeywords(text)

	return map[string]interface{}{
		// Basic statistics.
		"length":     len(text),
		"word_count": len(strings.Fields(text)),
		"line_count": strings.Count(text, "\n") + 1,

		// Keyword features.
		"keywords":      keywords,
		"keyword_count": len(keywords),

		// Pattern, language and complexity features.
		"patterns":   fe.extractPatterns(text),
		"language":   fe.detectLanguage(text),
		"complexity": fe.calculateComplexity(text),
	}
}

// extractKeywords 提取关键词
// extractKeywords returns the repeated non-stop-words of text (lowercased,
// longer than two characters, frequency > 1), ordered by descending
// frequency with alphabetical tie-breaking.
//
// This is a simplified frequency-based extraction; a real system could use
// a proper NLP pipeline.
func (fe *FeatureExtractor) extractKeywords(text string) []string {
	words := strings.Fields(strings.ToLower(text))
	wordFreq := make(map[string]int, len(words))

	for _, word := range words {
		// Skip stop words and very short tokens.
		if len(word) > 2 && !fe.isStopWord(word) {
			wordFreq[word]++
		}
	}

	// Keep only words that occur more than once.
	var keywords []string
	for word, freq := range wordFreq {
		if freq > 1 {
			keywords = append(keywords, word)
		}
	}

	// Map iteration order is random; sort by descending frequency (ties
	// alphabetically) so repeated calls yield the same ordering.
	sort.Slice(keywords, func(i, j int) bool {
		fi, fj := wordFreq[keywords[i]], wordFreq[keywords[j]]
		if fi != fj {
			return fi > fj
		}
		return keywords[i] < keywords[j]
	})

	return keywords
}

// extractPatterns 提取文本模式
// extractPatterns scans text for a few well-known surface patterns
// (PowerShell verbs, URLs, email-like markers, path separators) and returns
// a label for each one detected.
func (fe *FeatureExtractor) extractPatterns(text string) []string {
	containsAny := func(substrings ...string) bool {
		for _, sub := range substrings {
			if strings.Contains(text, sub) {
				return true
			}
		}
		return false
	}

	var patterns []string
	if containsAny("Get-", "Set-") {
		patterns = append(patterns, "powershell_command")
	}
	if containsAny("http://", "https://") {
		patterns = append(patterns, "url")
	}
	if strings.Contains(text, "@") && strings.Contains(text, ".") {
		patterns = append(patterns, "email")
	}
	if containsAny("C:\\", "/") {
		patterns = append(patterns, "file_path")
	}
	return patterns
}

// detectLanguage 检测语言
// detectLanguage classifies text as "chinese" or "english" by counting CJK
// runes versus ASCII letters; ties (including empty input) resolve to
// "english". This is a deliberately simplified heuristic.
func (fe *FeatureExtractor) detectLanguage(text string) string {
	var cjkRunes, latinRunes int
	for _, r := range text {
		switch {
		case r >= 0x4e00 && r <= 0x9fff: // CJK Unified Ideographs block
			cjkRunes++
		case (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z'):
			latinRunes++
		}
	}

	if cjkRunes > latinRunes {
		return "chinese"
	}
	return "english"
}

// calculateComplexity 计算文本复杂度
// calculateComplexity scores text as (unique lowercase words / total words)
// multiplied by the average word length in bytes. Empty input scores 0.
func (fe *FeatureExtractor) calculateComplexity(text string) float64 {
	words := strings.Fields(text)
	n := len(words)
	if n == 0 {
		return 0
	}

	unique := make(map[string]bool, n)
	byteCount := 0
	for _, w := range words {
		unique[strings.ToLower(w)] = true
		byteCount += len(w)
	}

	diversity := float64(len(unique)) / float64(n)
	avgWordLen := float64(byteCount) / float64(n)
	return diversity * avgWordLen
}

// isStopWord 检查是否为停用词
// stopWordSet holds the English and Chinese stop words, built once at
// package init instead of reallocating the map on every isStopWord call.
var stopWordSet = map[string]bool{
	"the": true, "a": true, "an": true, "and": true, "or": true,
	"but": true, "in": true, "on": true, "at": true, "to": true,
	"for": true, "of": true, "with": true, "by": true, "is": true,
	"are": true, "was": true, "were": true, "be": true, "been": true,
	"have": true, "has": true, "had": true, "do": true, "does": true,
	"did": true, "will": true, "would": true, "could": true, "should": true,
	"的": true, "了": true, "在": true, "是": true, "我": true,
	"你": true, "他": true, "她": true, "它": true, "们": true,
}

// isStopWord reports whether word is a common English or Chinese stop word.
// The word is expected to already be lowercased by the caller.
func (fe *FeatureExtractor) isStopWord(word string) bool {
	return stopWordSet[word]
}

// RequestBatcher 请求批处理器
// RequestBatcher collects individual requests and processes them as a
// batch, flushing either when batchSize requests are pending or after
// batchDelay has elapsed since the first request of the batch.
type RequestBatcher struct {
	batchSize   int                                  // flush as soon as this many requests are pending
	batchDelay  time.Duration                        // max wait before a partial batch is flushed
	pendingReqs []BatchRequest                       // requests awaiting the next flush
	mu          sync.Mutex                           // guards pendingReqs and timer
	timer       *time.Timer                          // one-shot delay timer; nil when not armed
	processFunc func([]BatchRequest) []BatchResponse // user-supplied batch handler (see SetProcessFunc)
}

// BatchRequest 批处理请求
// BatchRequest is one request queued for batched processing.
type BatchRequest struct {
	ID       string              // caller-chosen identifier used to match responses
	Messages []interface{}       // payload forwarded to the batch handler
	Response chan BatchResponse  // receives exactly one response, then is closed
}

// BatchResponse 批处理响应
// BatchResponse is the result produced for a single BatchRequest.
type BatchResponse struct {
	ID       string // matches BatchRequest.ID
	Response string // response text; meaningful only when Error is nil
	Error    error  // non-nil when processing failed
}

// NewRequestBatcher 创建请求批处理器
// NewRequestBatcher builds a batcher that flushes when batchSize requests
// are pending or after batchDelay elapses, whichever comes first. A process
// function must be installed via SetProcessFunc before requests succeed.
func NewRequestBatcher(batchSize int, batchDelay time.Duration) *RequestBatcher {
	rb := new(RequestBatcher)
	rb.batchSize = batchSize
	rb.batchDelay = batchDelay
	return rb
}

// SetProcessFunc 设置批处理函数
// SetProcessFunc installs the handler that turns a batch of requests into
// responses. The assignment is done under rb.mu: processFunc is read by the
// batching machinery, so an unguarded write here would be a data race when
// requests are already being submitted concurrently.
func (rb *RequestBatcher) SetProcessFunc(fn func([]BatchRequest) []BatchResponse) {
	rb.mu.Lock()
	defer rb.mu.Unlock()
	rb.processFunc = fn
}

// Submit 提交批处理请求
// Submit enqueues a request for batched processing and returns a channel
// that will deliver exactly one BatchResponse and then be closed.
//
// The batch is flushed immediately once it reaches batchSize; otherwise a
// one-shot timer (armed only by the first request of a batch) flushes
// whatever has accumulated after batchDelay.
func (rb *RequestBatcher) Submit(id string, messages []interface{}) <-chan BatchResponse {
	rb.mu.Lock()
	defer rb.mu.Unlock()
	
	// Buffer of 1 so the flush goroutine's send never blocks, even if the
	// caller has stopped reading.
	responseChan := make(chan BatchResponse, 1)
	req := BatchRequest{
		ID:       id,
		Messages: messages,
		Response: responseChan,
	}
	
	rb.pendingReqs = append(rb.pendingReqs, req)
	
	// Flush right away once the batch is full (called with rb.mu held).
	if len(rb.pendingReqs) >= rb.batchSize {
		rb.processBatch()
	} else if rb.timer == nil {
		// First pending request of this batch: arm the delayed flush. The
		// callback re-acquires rb.mu, so processBatch always runs locked.
		rb.timer = time.AfterFunc(rb.batchDelay, func() {
			rb.mu.Lock()
			defer rb.mu.Unlock()
			rb.processBatch()
		})
	}
	
	return responseChan
}

// processBatch 处理批次
// processBatch flushes all pending requests. It must be called with rb.mu
// held; the actual processing happens in a detached goroutine so the lock
// is never held across the (potentially slow) handler.
//
// Every request is guaranteed to receive exactly one BatchResponse and have
// its channel closed — including requests the handler returned no matching
// response for, which previously blocked their callers forever.
func (rb *RequestBatcher) processBatch() {
	if len(rb.pendingReqs) == 0 {
		return
	}
	
	// Disarm the delayed-flush timer; the batch is being handled now.
	if rb.timer != nil {
		rb.timer.Stop()
		rb.timer = nil
	}
	
	reqs := rb.pendingReqs
	rb.pendingReqs = nil
	
	// Snapshot the handler while the caller still holds rb.mu, so a
	// concurrent SetProcessFunc cannot race with the goroutine below.
	fn := rb.processFunc
	
	go func() {
		if fn == nil {
			// No handler installed: fail every request explicitly.
			for _, req := range reqs {
				req.Response <- BatchResponse{
					ID:    req.ID,
					Error: fmt.Errorf("no process function set"),
				}
				close(req.Response)
			}
			return
		}
		
		// Index requests by ID for O(1) response routing (was O(n^2)).
		byID := make(map[string]BatchRequest, len(reqs))
		for _, req := range reqs {
			byID[req.ID] = req
		}
		
		for _, resp := range fn(reqs) {
			if req, ok := byID[resp.ID]; ok {
				req.Response <- resp
				close(req.Response)
				delete(byID, resp.ID)
			}
		}
		
		// Requests the handler never answered would leave their callers
		// blocked on a channel that is never written; fail them instead.
		for _, req := range byID {
			req.Response <- BatchResponse{
				ID:    req.ID,
				Error: fmt.Errorf("no response produced for request %s", req.ID),
			}
			close(req.Response)
		}
	}()
}

// ContextOptimizer 上下文优化器
// ContextOptimizer compresses over-long conversations so they fit within a
// configured total-length budget.
type ContextOptimizer struct {
	maxContextLength int     // total formatted length allowed before compression kicks in
	compressionRatio float64 // fraction of middle messages kept when compressing
}

// NewContextOptimizer 创建上下文优化器
// NewContextOptimizer returns a ContextOptimizer with default limits: a
// 4000-character context budget and a 0.7 keep ratio for middle messages.
func NewContextOptimizer() *ContextOptimizer {
	co := new(ContextOptimizer)
	co.maxContextLength = 4000
	co.compressionRatio = 0.7
	return co
}

// OptimizeContext 优化上下文
// OptimizeContext returns messages unchanged when their total formatted
// length fits within maxContextLength, and a compressed copy otherwise.
func (co *ContextOptimizer) OptimizeContext(messages []interface{}) []interface{} {
	if len(messages) == 0 {
		return messages
	}
	
	// Within budget: nothing to do.
	if co.calculateTotalLength(messages) <= co.maxContextLength {
		return messages
	}
	
	return co.compressMessages(messages)
}

// calculateTotalLength 计算消息总长度
// calculateTotalLength sums the length of each message's default ("%v")
// string representation.
func (co *ContextOptimizer) calculateTotalLength(messages []interface{}) int {
	total := 0
	for _, m := range messages {
		total += len(fmt.Sprintf("%v", m))
	}
	return total
}

// compressMessages 压缩消息
// compressMessages shrinks a long conversation by always keeping the first
// and last messages and retaining only a compressionRatio-sized slice of
// the most recent middle messages.
func (co *ContextOptimizer) compressMessages(messages []interface{}) []interface{} {
	total := len(messages)
	if total <= 2 {
		// Two or fewer messages: nothing sensible to drop.
		return messages
	}
	
	// Number of middle messages that survive (always at least one).
	keep := int(float64(total-2) * co.compressionRatio)
	if keep < 1 {
		keep = 1
	}
	
	// The surviving middle messages are the most recent ones; never start
	// before index 1 (index 0 is the preserved first message).
	from := total - 1 - keep
	if from < 1 {
		from = 1
	}
	
	result := make([]interface{}, 0, keep+2)
	result = append(result, messages[0])
	result = append(result, messages[from:total-1]...)
	result = append(result, messages[total-1])
	return result
}

// ResponseOptimizer 响应优化器
// ResponseOptimizer cleans up raw AI response text before it is used.
type ResponseOptimizer struct {
	templateMatcher *FastMap  // NOTE(review): declared but not used by the methods in this file
	responseCache   *LRUCache // NOTE(review): declared but not used by the methods in this file
}

// NewResponseOptimizer 创建响应优化器
// NewResponseOptimizer constructs a ResponseOptimizer with its caches ready.
func NewResponseOptimizer() *ResponseOptimizer {
	ro := new(ResponseOptimizer)
	ro.templateMatcher = NewFastMap()
	ro.responseCache = NewLRUCache(200)
	return ro
}

// OptimizeResponse 优化响应
// OptimizeResponse normalizes raw AI output: trims surrounding whitespace,
// collapses runs of blank lines, and strips Markdown code-fence markers and
// stray backticks.
func (ro *ResponseOptimizer) OptimizeResponse(response string) string {
	optimized := strings.TrimSpace(response)
	
	// Collapse runs of 3+ newlines down to exactly two. A single
	// ReplaceAll pass leaves residue for longer runs (e.g. four newlines
	// become three), so repeat until the text is stable.
	for strings.Contains(optimized, "\n\n\n") {
		optimized = strings.ReplaceAll(optimized, "\n\n\n", "\n\n")
	}
	
	// Strip Markdown code-fence markers if present.
	optimized = strings.TrimPrefix(optimized, "```powershell\n")
	optimized = strings.TrimPrefix(optimized, "```PowerShell\n")
	optimized = strings.TrimPrefix(optimized, "```ps\n")
	optimized = strings.TrimPrefix(optimized, "```")
	optimized = strings.TrimSuffix(optimized, "```")
	
	// Remove any leftover surrounding backticks.
	optimized = strings.Trim(optimized, "`")
	
	return strings.TrimSpace(optimized)
}

// InferenceAccelerator 推理加速器
// InferenceAccelerator short-circuits AI inference for common inputs using
// a table of canned responses plus a cache of precomputed results.
type InferenceAccelerator struct {
	precomputedCache *LRUCache         // key -> precomputed result string
	patternMatcher   *FastMap          // NOTE(review): declared but not used by the methods in this file
	quickResponses   map[string]string // lowercase input pattern -> canned command
	mu               sync.RWMutex      // guards quickResponses reads in TryQuickResponse
}

// NewInferenceAccelerator 创建推理加速器
// NewInferenceAccelerator builds an accelerator with its caches ready and
// the built-in quick-response table preloaded.
func NewInferenceAccelerator() *InferenceAccelerator {
	accelerator := &InferenceAccelerator{
		precomputedCache: NewLRUCache(500),
		patternMatcher:   NewFastMap(),
		quickResponses:   map[string]string{},
	}
	accelerator.initQuickResponses()
	return accelerator
}

// initQuickResponses 初始化快速响应
// initQuickResponses seeds the canned input-pattern -> PowerShell-command table.
func (ia *InferenceAccelerator) initQuickResponses() {
	seed := map[string]string{
		"help":              "Get-Help",
		"list files":        "Get-ChildItem",
		"current directory": "Get-Location",
		"change directory":  "Set-Location",
		"copy file":         "Copy-Item",
		"delete file":       "Remove-Item",
		"create folder":     "New-Item -ItemType Directory",
	}
	for pattern, command := range seed {
		ia.quickResponses[pattern] = command
	}
}

// TryQuickResponse 尝试快速响应
// TryQuickResponse returns a canned response when the (case-insensitive)
// input contains one of the known quick-response patterns, along with true
// on a match and ("", false) otherwise.
func (ia *InferenceAccelerator) TryQuickResponse(input string) (string, bool) {
	ia.mu.RLock()
	defer ia.mu.RUnlock()
	
	inputLower := strings.ToLower(input)
	
	// Map iteration order is random in Go, so when an input matches several
	// patterns the original code returned an arbitrary response. Iterate
	// the patterns in sorted order so the result is deterministic.
	patterns := make([]string, 0, len(ia.quickResponses))
	for pattern := range ia.quickResponses {
		patterns = append(patterns, pattern)
	}
	sort.Strings(patterns)
	
	for _, pattern := range patterns {
		if strings.Contains(inputLower, pattern) {
			return ia.quickResponses[pattern], true
		}
	}
	
	return "", false
}

// CachePrecomputed 缓存预计算结果
// CachePrecomputed stores a precomputed result under key for one hour.
func (ia *InferenceAccelerator) CachePrecomputed(key string, result string) {
	const ttl = time.Hour
	ia.precomputedCache.Put(key, result, ttl)
}

// GetPrecomputed 获取预计算结果
// GetPrecomputed looks up a previously cached result; the second return
// value reports whether the key was present (and not expired).
func (ia *InferenceAccelerator) GetPrecomputed(key string) (string, bool) {
	result, found := ia.precomputedCache.Get(key)
	if !found {
		return "", false
	}
	return result.(string), true
}