package algorithms

import (
	"fmt"
	"log"
	"math"
	"regexp"
	"sort"
	"strings"
	"time"

	"navigation-service/internal/models"
)

// DataPreprocessor turns raw browsing records into URL features:
// keyword extraction, category prediction and pairwise URL similarity.
type DataPreprocessor struct {
	stopWords map[string]bool // lowercase Chinese/English stop words excluded from keywords
}

// NewDataPreprocessor returns a DataPreprocessor seeded with a built-in
// list of common Chinese and English stop words.
func NewDataPreprocessor() *DataPreprocessor {
	stop := []string{
		"的", "了", "在", "是", "我", "有", "和", "就",
		"不", "人", "都", "一", "一个", "上", "也", "很",
		"到", "说", "要", "去", "你", "会", "着", "没有",
		"看", "好", "自己", "这", "那", "里", "来", "下",
		"the", "a", "an", "and", "or", "but", "in", "on",
		"at", "to", "for", "of", "with", "by", "is", "are",
		"was", "were", "be", "been", "have", "has", "had",
		"do", "does", "did", "will", "would", "could", "should",
	}
	words := make(map[string]bool, len(stop))
	for _, w := range stop {
		words[w] = true
	}
	return &DataPreprocessor{stopWords: words}
}

// ProcessBrowsingRecords aggregates browsing records by URL and converts each
// unique URL into a URLFeature (hourly visit pattern, keywords, predicted
// category, and an empty similarity map that is then filled pairwise).
// URLs sharing a title are deduplicated, keeping the shortest URL.
// Returns an empty slice (and nil error) for empty input.
func (dp *DataPreprocessor) ProcessBrowsingRecords(records []models.BrowsingRecord) ([]models.URLFeature, error) {
	if len(records) == 0 {
		return []models.URLFeature{}, nil
	}

	// Aggregate per-URL statistics. The first record seen for a URL supplies
	// Title/Domain/Path; later records only extend counts and time bounds.
	urlStats := make(map[string]*URLStats)
	for _, record := range records {
		stats, exists := urlStats[record.URL]
		if !exists {
			stats = &URLStats{
				URL:        record.URL,
				Title:      record.Title,
				Domain:     record.Domain,
				Path:       record.Path,
				FirstVisit: record.FirstVisit,
				LastVisit:  record.LastVisit,
			}
			urlStats[record.URL] = stats
		}

		stats.VisitCount += record.VisitCount
		if record.FirstVisit.Before(stats.FirstVisit) {
			stats.FirstVisit = record.FirstVisit
		}
		if record.LastVisit.After(stats.LastVisit) {
			stats.LastVisit = record.LastVisit
		}
		// BUG FIX: VisitTimes was declared but never populated, so
		// generateVisitPattern always produced an all-zero 24-hour pattern.
		// A record only carries its last visit timestamp, so use that as an
		// approximation of when the visits occurred.
		stats.VisitTimes = append(stats.VisitTimes, record.LastVisit)
	}

	// Convert aggregated stats into URLFeature values.
	features := make([]models.URLFeature, 0, len(urlStats))
	for _, stats := range urlStats {
		features = append(features, models.URLFeature{
			URL:          stats.URL,
			Title:        stats.Title,
			Domain:       stats.Domain,
			Path:         stats.Path,
			VisitCount:   stats.VisitCount,
			FirstVisit:   stats.FirstVisit,
			LastVisit:    stats.LastVisit,
			VisitPattern: dp.generateVisitPattern(stats.VisitTimes),
			Keywords:     dp.extractKeywords(stats.Title, stats.URL),
			Category:     dp.predictCategory(stats.Domain, stats.Path, stats.Title),
			Similarity:   make(map[string]float64),
		})
	}

	// Collapse URLs with identical titles, keeping the shortest URL.
	features = dp.DeduplicateByTitle(features)

	// Fill in pairwise similarities between the remaining URLs.
	dp.calculateSimilarities(features)

	return features, nil
}

// URLStats accumulates per-URL statistics while browsing records are being
// aggregated in ProcessBrowsingRecords.
type URLStats struct {
	URL        string
	Title      string // title from the first record seen for this URL
	Domain     string
	Path       string
	VisitCount int         // sum of VisitCount over all records for this URL
	FirstVisit time.Time   // earliest FirstVisit across records
	LastVisit  time.Time   // latest LastVisit across records
	VisitTimes []time.Time // visit timestamps fed to generateVisitPattern
}

// generateVisitPattern builds a 24-bucket histogram counting visits per
// hour of day (index 0 = 00:00-00:59, ... index 23 = 23:00-23:59).
func (dp *DataPreprocessor) generateVisitPattern(visitTimes []time.Time) []int {
	hourly := make([]int, 24)
	for _, ts := range visitTimes {
		hourly[ts.Hour()]++
	}
	return hourly
}

// extractKeywords derives up to 10 unique keywords from a page title and URL.
// The combined text is cleaned and tokenized, then tokens are filtered against
// the stop-word list; first-appearance order is preserved.
func (dp *DataPreprocessor) extractKeywords(title, url string) []string {
	cleaned := dp.cleanText(title + " " + url)

	seen := make(map[string]bool)
	var result []string
	for _, token := range dp.tokenize(cleaned) {
		// Skip short tokens (byte length, so a single multi-byte CJK rune
		// still passes) and stop words; then deduplicate.
		if len(token) <= 1 || dp.stopWords[strings.ToLower(token)] {
			continue
		}
		if seen[token] {
			continue
		}
		seen[token] = true
		result = append(result, token)
		// Capping at 10 while scanning is equivalent to collecting all
		// unique keywords and truncating afterwards.
		if len(result) == 10 {
			break
		}
	}
	return result
}

// Patterns used by cleanText, compiled once at package init.
// FIX: these were previously compiled with regexp.MustCompile on every call,
// which is expensive on the hot keyword-extraction path.
var (
	nonWordRunRe    = regexp.MustCompile(`[^\p{L}\p{N}\s\-]`)
	whitespaceRunRe = regexp.MustCompile(`\s+`)
)

// cleanText lowercases text, replaces every character that is not a letter,
// digit, whitespace or hyphen with a space, collapses whitespace runs, and
// trims the result.
func (dp *DataPreprocessor) cleanText(text string) string {
	text = strings.ToLower(text)
	text = nonWordRunRe.ReplaceAllString(text, " ")
	text = whitespaceRunRe.ReplaceAllString(text, " ")
	return strings.TrimSpace(text)
}

// tokenize splits text on whitespace and hyphens, dropping empty pieces.
func (dp *DataPreprocessor) tokenize(text string) []string {
	var tokens []string
	for _, field := range strings.Fields(text) {
		// A field contains no whitespace, so turning hyphens into spaces and
		// re-splitting yields exactly the non-empty "-"-separated parts.
		tokens = append(tokens, strings.Fields(strings.ReplaceAll(field, "-", " "))...)
	}
	return tokens
}

// predictCategory assigns a coarse category to a URL by keyword matching, in
// priority order: domain hints first, then path hints, then title hints.
// Falls back to "其他" (other) when nothing matches.
func (dp *DataPreprocessor) predictCategory(domain, path, title string) string {
	domainLower := strings.ToLower(domain)
	pathLower := strings.ToLower(path)
	titleLower := strings.ToLower(title)

	// Domain-based rules, evaluated in declaration order.
	domainRules := []struct {
		category string
		hints    []string
	}{
		{"社交媒体", []string{"facebook", "twitter", "instagram", "linkedin", "weibo", "wechat", "qq"}},
		{"新闻媒体", []string{"news", "cnn", "bbc", "reuters", "xinhua", "sina", "sohu", "163"}},
		{"电商购物", []string{"amazon", "taobao", "tmall", "jd", "ebay", "shop", "buy", "mall"}},
		{"视频娱乐", []string{"youtube", "netflix", "bilibili", "iqiyi", "youku", "video", "movie", "tv"}},
		{"搜索引擎", []string{"google", "baidu", "bing", "search", "yahoo"}},
		{"技术开发", []string{"github", "stackoverflow", "developer", "api", "code", "programming", "tech"}},
		{"教育学习", []string{"edu", "course", "learn", "study", "education", "university", "school"}},
		{"金融理财", []string{"bank", "finance", "money", "invest", "stock", "trading", "pay"}},
	}
	for _, rule := range domainRules {
		if dp.containsAny(domainLower, rule.hints) {
			return rule.category
		}
	}

	// Path-based rules.
	switch {
	case strings.Contains(pathLower, "blog") || strings.Contains(pathLower, "article"):
		return "博客文章"
	case strings.Contains(pathLower, "product") || strings.Contains(pathLower, "item"):
		return "产品页面"
	}

	// Title-based rules.
	if dp.containsAny(titleLower, []string{"教程", "tutorial", "guide", "how to", "学习", "learn"}) {
		return "教程指南"
	}
	if dp.containsAny(titleLower, []string{"新闻", "news", "报道", "report"}) {
		return "新闻资讯"
	}

	return "其他"
}

// containsAny reports whether text contains at least one of the given
// substrings.
func (dp *DataPreprocessor) containsAny(text string, keywords []string) bool {
	for _, kw := range keywords {
		if strings.Contains(text, kw) {
			return true
		}
	}
	return false
}

// calculateSimilarities fills each feature's Similarity map with its score
// against every other feature. Scores are symmetric and computed once per
// pair, then written into both maps.
func (dp *DataPreprocessor) calculateSimilarities(features []models.URLFeature) {
	for a := range features {
		for b := a + 1; b < len(features); b++ {
			sim := dp.calculateURLSimilarity(features[a], features[b])
			features[a].Similarity[features[b].URL] = sim
			features[b].Similarity[features[a].URL] = sim
		}
	}
}

// calculateURLSimilarity scores how similar two URLs are as a weighted blend
// of domain, path, title and keyword similarity.
func (dp *DataPreprocessor) calculateURLSimilarity(f1, f2 models.URLFeature) float64 {
	const (
		domainWeight  = 0.4
		pathWeight    = 0.3
		titleWeight   = 0.2
		keywordWeight = 0.1
	)

	// Accumulate in the same order as the original weighted sum.
	score := dp.domainSimilarity(f1.Domain, f2.Domain) * domainWeight
	score += dp.pathSimilarity(f1.Path, f2.Path) * pathWeight
	score += dp.titleSimilarity(f1.Title, f2.Title) * titleWeight
	score += dp.keywordSimilarity(f1.Keywords, f2.Keywords) * keywordWeight
	return score
}

// domainSimilarity scores two domains: 1.0 for an exact match, 0.8 when
// their main (registrable) domains match, otherwise an edit-distance ratio
// over the main domains.
func (dp *DataPreprocessor) domainSimilarity(domain1, domain2 string) float64 {
	if domain1 == domain2 {
		return 1.0
	}

	main1, main2 := dp.extractMainDomain(domain1), dp.extractMainDomain(domain2)
	if main1 == main2 {
		return 0.8
	}
	return dp.editDistanceSimilarity(main1, main2)
}

// extractMainDomain returns the last two dot-separated labels of a domain
// (e.g. "www.example.com" -> "example.com"); shorter inputs pass through
// unchanged.
func (dp *DataPreprocessor) extractMainDomain(domain string) string {
	labels := strings.Split(domain, ".")
	n := len(labels)
	if n < 2 {
		return domain
	}
	return labels[n-2] + "." + labels[n-1]
}

// pathSimilarity scores two URL paths as the ratio of their longest common
// subsequence of path segments to the larger segment count. Identical paths
// score 1.0.
func (dp *DataPreprocessor) pathSimilarity(path1, path2 string) float64 {
	if path1 == path2 {
		return 1.0
	}

	parts1 := strings.Split(strings.Trim(path1, "/"), "/")
	parts2 := strings.Split(strings.Trim(path2, "/"), "/")

	// strings.Split always returns at least one element (for "" it returns
	// [""]), so both slices are non-empty, maxLen >= 1, and the division
	// below cannot be by zero. A previous unreachable len==0 guard was
	// removed.
	lcs := dp.longestCommonSubsequence(parts1, parts2)
	maxLen := math.Max(float64(len(parts1)), float64(len(parts2)))

	return float64(lcs) / maxLen
}

// titleSimilarity scores two titles via Jaccard similarity over their
// lowercase word sets. Identical titles — including two empty or
// whitespace-only titles — score 1.0.
func (dp *DataPreprocessor) titleSimilarity(title1, title2 string) float64 {
	if title1 == title2 {
		return 1.0
	}

	tokens1 := strings.Fields(strings.ToLower(title1))
	tokens2 := strings.Fields(strings.ToLower(title2))
	if len(tokens1) == 0 && len(tokens2) == 0 {
		return 1.0
	}
	return dp.jaccardSimilarity(tokens1, tokens2)
}

// keywordSimilarity scores two keyword lists with Jaccard similarity; two
// empty lists count as identical (1.0).
func (dp *DataPreprocessor) keywordSimilarity(keywords1, keywords2 []string) float64 {
	// jaccardSimilarity already returns 1.0 when both inputs are empty, so
	// no separate guard is needed here.
	return dp.jaccardSimilarity(keywords1, keywords2)
}

// jaccardSimilarity returns |A ∩ B| / |A ∪ B| for the two string sets
// (duplicates in the input slices are ignored). Two empty sets are defined
// to be identical and score 1.0.
func (dp *DataPreprocessor) jaccardSimilarity(set1, set2 []string) float64 {
	if len(set1) == 0 && len(set2) == 0 {
		return 1.0
	}

	members1 := make(map[string]bool, len(set1))
	for _, s := range set1 {
		members1[s] = true
	}

	// Count the union incrementally: start with |set1|, then add each
	// distinct element of set2, classifying it as overlap or new.
	union := len(members1)
	intersection := 0
	seen2 := make(map[string]bool, len(set2))
	for _, s := range set2 {
		if seen2[s] {
			continue
		}
		seen2[s] = true
		if members1[s] {
			intersection++
		} else {
			union++
		}
	}

	if union == 0 {
		return 0.0
	}
	return float64(intersection) / float64(union)
}

// editDistanceSimilarity maps edit distance onto [0, 1]: 1.0 means identical,
// 0.0 means completely different. NOTE: lengths and distance are byte-based,
// so non-ASCII (e.g. IDN) domains may score lower than a rune-based
// comparison would — acceptable for the mostly-ASCII domains seen here.
func (dp *DataPreprocessor) editDistanceSimilarity(s1, s2 string) float64 {
	if s1 == s2 {
		return 1.0 // also covers both strings being empty
	}

	longer := len(s1)
	if len(s2) > longer {
		longer = len(s2)
	}
	if longer == 0 {
		return 1.0
	}
	return 1.0 - float64(dp.editDistance(s1, s2))/float64(longer)
}

// editDistance computes the Levenshtein distance between s1 and s2
// (byte-oriented). Rewritten to use two rolling rows (O(n) space instead of
// a full (m+1)x(n+1) table) and plain integer comparisons instead of
// round-tripping every value through float64 with math.Min.
func (dp *DataPreprocessor) editDistance(s1, s2 string) int {
	m, n := len(s1), len(s2)

	// prev[j] holds the distance between s1[:i-1] and s2[:j];
	// curr is the row being filled for s1[:i].
	prev := make([]int, n+1)
	curr := make([]int, n+1)
	for j := 0; j <= n; j++ {
		prev[j] = j
	}

	for i := 1; i <= m; i++ {
		curr[0] = i
		for j := 1; j <= n; j++ {
			if s1[i-1] == s2[j-1] {
				curr[j] = prev[j-1]
				continue
			}
			best := prev[j] // deletion
			if curr[j-1] < best {
				best = curr[j-1] // insertion
			}
			if prev[j-1] < best {
				best = prev[j-1] // substitution
			}
			curr[j] = best + 1
		}
		prev, curr = curr, prev
	}

	// After the final swap, prev is the last fully computed row.
	return prev[n]
}

// longestCommonSubsequence returns the length of the longest common
// subsequence of two string slices. Rewritten to use integer comparisons
// instead of converting ints through float64 for math.Max.
func (dp *DataPreprocessor) longestCommonSubsequence(seq1, seq2 []string) int {
	m, n := len(seq1), len(seq2)
	table := make([][]int, m+1)
	for i := range table {
		table[i] = make([]int, n+1)
	}

	for i := 1; i <= m; i++ {
		for j := 1; j <= n; j++ {
			switch {
			case seq1[i-1] == seq2[j-1]:
				table[i][j] = table[i-1][j-1] + 1
			case table[i-1][j] >= table[i][j-1]:
				table[i][j] = table[i-1][j]
			default:
				table[i][j] = table[i][j-1]
			}
		}
	}

	return table[m][n]
}

// ValidateFeatures checks basic invariants of each feature and returns an
// error naming the first offending feature's index: non-empty URL,
// non-negative visit count, FirstVisit not after LastVisit, and a 24-entry
// visit pattern. Returns nil when every feature is valid.
func (dp *DataPreprocessor) ValidateFeatures(features []models.URLFeature) error {
	for i := range features {
		f := &features[i]
		switch {
		case f.URL == "":
			return fmt.Errorf("特征 %d: URL不能为空", i)
		case f.VisitCount < 0:
			return fmt.Errorf("特征 %d: 访问次数不能为负数", i)
		case f.FirstVisit.After(f.LastVisit):
			return fmt.Errorf("特征 %d: 首次访问时间不能晚于最后访问时间", i)
		case len(f.VisitPattern) != 24:
			return fmt.Errorf("特征 %d: 访问模式必须包含24小时数据", i)
		}
	}
	return nil
}

// NormalizeFeatures min-max scales each feature's VisitCount into the 0-100
// range, modifying the slice in place and returning it. When every count is
// equal (zero range) the counts are left untouched.
func (dp *DataPreprocessor) NormalizeFeatures(features []models.URLFeature) []models.URLFeature {
	if len(features) == 0 {
		return features
	}

	// Track min and max in one pass. Note: max starts at 0 (not the first
	// element), matching the original behavior.
	lo, hi := features[0].VisitCount, 0
	for _, f := range features {
		if f.VisitCount < lo {
			lo = f.VisitCount
		}
		if f.VisitCount > hi {
			hi = f.VisitCount
		}
	}

	if span := hi - lo; span > 0 {
		for i := range features {
			scaled := float64(features[i].VisitCount-lo) / float64(span)
			features[i].VisitCount = int(scaled * 100) // truncated into 0-100
		}
	}

	return features
}

// FilterFeatures returns only the features visited at least minVisits times,
// preserving their order. The input slice is not modified.
func (dp *DataPreprocessor) FilterFeatures(features []models.URLFeature, minVisits int) []models.URLFeature {
	var kept []models.URLFeature
	for _, f := range features {
		if f.VisitCount >= minVisits {
			kept = append(kept, f)
		}
	}
	return kept
}

// SortFeaturesByVisits returns a copy of features ordered by VisitCount,
// descending when descending is true, ascending otherwise. The input slice
// is left unmodified.
func (dp *DataPreprocessor) SortFeaturesByVisits(features []models.URLFeature, descending bool) []models.URLFeature {
	out := append([]models.URLFeature(nil), features...)

	less := func(i, j int) bool { return out[i].VisitCount < out[j].VisitCount }
	if descending {
		less = func(i, j int) bool { return out[i].VisitCount > out[j].VisitCount }
	}
	sort.Slice(out, less)

	return out
}

// DeduplicateByTitle collapses features that share the same (trimmed) title,
// keeping the one with the shortest URL. Features with an empty title are
// grouped under "无标题".
//
// FIX: the result previously came out in Go's randomized map-iteration order,
// making the output nondeterministic across runs, and ties on URL length were
// broken arbitrarily by the unstable sort. Titles are now processed in sorted
// order and ties keep the original input order (stable sort), so identical
// input always yields identical output.
func (dp *DataPreprocessor) DeduplicateByTitle(features []models.URLFeature) []models.URLFeature {
	log.Printf("=== 开始去重相同标题的URL ===")
	log.Printf("输入特征数量: %d", len(features))

	if len(features) == 0 {
		log.Printf("特征为空，无需去重")
		return features
	}

	// Group features by normalized title.
	titleGroups := make(map[string][]models.URLFeature)
	for _, feature := range features {
		title := strings.TrimSpace(feature.Title)
		if title == "" {
			title = "无标题"
		}
		titleGroups[title] = append(titleGroups[title], feature)
	}

	log.Printf("发现 %d 个不同的标题", len(titleGroups))

	// Iterate titles in sorted order for deterministic output.
	titles := make([]string, 0, len(titleGroups))
	for title := range titleGroups {
		titles = append(titles, title)
	}
	sort.Strings(titles)

	var deduplicatedFeatures []models.URLFeature
	duplicateCount := 0
	removedCount := 0

	for _, title := range titles {
		group := titleGroups[title]
		if len(group) == 1 {
			// Single URL for this title — keep it as-is.
			deduplicatedFeatures = append(deduplicatedFeatures, group[0])
			log.Printf("标题 '%s': 只有1个URL，直接保留", title)
			continue
		}

		log.Printf("标题 '%s': 发现 %d 个URL，开始去重", title, len(group))

		// Stable sort by URL length so equal-length URLs keep input order.
		sort.SliceStable(group, func(i, j int) bool {
			return len(group[i].URL) < len(group[j].URL)
		})

		shortestFeature := group[0]
		deduplicatedFeatures = append(deduplicatedFeatures, shortestFeature)
		log.Printf("  保留最短URL: %s (长度: %d)", shortestFeature.URL, len(shortestFeature.URL))

		for _, removed := range group[1:] {
			log.Printf("  移除较长URL: %s (长度: %d)", removed.URL, len(removed.URL))
			removedCount++
		}
		duplicateCount++
	}

	log.Printf("=== 去重完成 ===")
	log.Printf("原始特征数量: %d", len(features))
	log.Printf("去重后特征数量: %d", len(deduplicatedFeatures))
	log.Printf("发现重复标题组数: %d", duplicateCount)
	log.Printf("移除的URL数量: %d", removedCount)

	return deduplicatedFeatures
}
