#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
民法典数据服务 - 更新版本
使用正确的MongoDB连接方式
"""

from flask import current_app, g, request
from app.database import get_database, get_collection, DatabaseManager, get_db_client
from collections import defaultdict
from functools import lru_cache
import re
from datetime import datetime
import hashlib
from pymongo import TEXT, ASCENDING, DESCENDING # 导入TEXT，以及其他可能需要的索引类型
from pymongo.errors import PyMongoError # 导入PyMongo异常类

import jieba # 确保jieba在全局范围内可用


@lru_cache(maxsize=128)  # Cache conversions; article numbers repeat frequently.
def chinese_number_to_int(chinese_num: str) -> int:
    """Convert a Chinese numeral string to an integer.

    Supports the digits 零-九 combined with the multipliers 十/百/千/万,
    e.g. '一千二百六十' -> 1260, '十五' -> 15, '十万' -> 100000.
    Characters outside the mapping are ignored, as before.
    """
    # Digit and multiplier values.
    chinese_digits = {
        '零': 0, '一': 1, '二': 2, '三': 3, '四': 4, '五': 5,
        '六': 6, '七': 7, '八': 8, '九': 9, '十': 10,
        '百': 100, '千': 1000, '万': 10000
    }

    # A leading '十' means '一十' ('十五' == '一十五', '十万' == '一十万').
    # Normalizing here replaces the old recursive special case, which
    # mishandled forms like '十万' (it returned 10 instead of 100000).
    if chinese_num.startswith('十'):
        chinese_num = '一' + chinese_num

    result = 0
    temp = 0  # Pending digit waiting for its multiplier.

    for char in chinese_num:
        if char not in chinese_digits:
            continue  # Unknown characters are silently skipped.
        digit = chinese_digits[char]
        if digit < 10:
            temp = digit
        elif digit == 10000:
            # '万' scales everything accumulated so far.
            result = (result + temp) * 10000
            temp = 0
        else:
            # 十 / 百 / 千: an omitted leading digit means 1 (e.g. '百' == '一百').
            result += (temp or 1) * digit
            temp = 0

    return result + temp


def extract_article_number(article_number: str) -> int:
    """Derive a sortable integer from an article label such as '第一条'.

    Unparseable labels map to 9999 so they sort after every real article.
    """
    try:
        # Drop the '第'/'条' markers, e.g. '第一千二百六十条' -> '一千二百六十'.
        numeral = article_number.replace('第', '').replace('条', '')
    except Exception:
        # Non-string input or similar oddity: push to the end of any sort.
        return 9999

    if not numeral:
        return 0

    if numeral.isdigit():
        # Already Arabic digits; convert directly.
        return int(numeral)

    try:
        # Otherwise interpret it as a Chinese numeral.
        return chinese_number_to_int(numeral)
    except Exception:
        # Conversion failed: sort it last.
        return 9999

class CivilCodeService:
    """MongoDB-backed data service for Civil Code articles.

    Provides weighted multi-strategy search, chapter browsing, per-user
    favorites and search history.  The database connection is established
    lazily on first use, and every public method degrades gracefully
    (empty result / False) when the database is unavailable.
    """

    def __init__(self):
        # Collections are resolved lazily in _init_db() so importing this
        # module never triggers a database connection.
        self.db = None
        self.collection = None            # "articles"
        self.favorites_collection = None  # "user_favorites"
        self.history_collection = None    # "search_history"
        self._initialized = False

    def _init_db(self):
        """Lazily initialize the database connection and collection handles."""
        if not self._initialized:
            try:
                self.db = get_database()
                self.collection = self.db.articles if self.db is not None else None
                self.favorites_collection = self.db.user_favorites if self.db is not None else None
                self.history_collection = self.db.search_history if self.db is not None else None
                if self.collection is not None:
                    self._ensure_indexes()
                self._initialized = True
            except Exception as e:
                current_app.logger.error(f"数据库初始化失败: {e}")
                self._initialized = False

    def _ensure_indexes(self):
        """Ensure all required indexes on the articles collection exist.

        Idempotent: each index is created only if its name is missing from
        index_information().
        """
        try:
            # Full-text index over article content and title.
            if "full_text_search" not in self.collection.index_information():
                self.collection.create_index([
                    ("content", TEXT),
                    ("title", TEXT)
                ], name="full_text_search", default_language='none')
                # Fixed: this log used to report a non-existent index name
                # ("civil_code_text_search"); it now matches the real one.
                current_app.logger.info("创建全文搜索索引 full_text_search")

            # Index on the keywords array for keyword lookups.
            if "keywords_index" not in self.collection.index_information():
                self.collection.create_index([("keywords", 1)], name="keywords_index")
                current_app.logger.info("创建关键词索引 keywords_index")

            # Unique index on the article number.
            if "article_number_unique" not in self.collection.index_information():
                self.collection.create_index([("article_number", 1)], unique=True, name="article_number_unique")
                current_app.logger.info("创建条文号唯一索引 article_number_unique")

            # Compound index supporting chapter browsing queries.
            if "part_chapter_index" not in self.collection.index_information():
                self.collection.create_index([
                    ("part", 1),
                    ("chapter", 1)
                ], name="part_chapter_index")
                current_app.logger.info("创建章节复合索引 part_chapter_index")

            current_app.logger.info("所有索引已确保存在")

        except PyMongoError as e:
            current_app.logger.error(f"创建索引失败: {e}")

    def smart_search(self, query: str, page: int = 1, per_page: int = 20):
        """Search articles with a weighted combination of strategies.

        Order of application: exact article-number lookup (short-circuits),
        exact content/title substring match, MongoDB full-text search, and
        several keyword-based fuzzy matches.  Hits from all strategies are
        merged per article and ranked by accumulated score.

        Returns a dict with "articles", "pagination" and "search_info".
        """
        self._init_db()
        if self.collection is None:
            return {"articles": [], "pagination": {}, "search_info": {"query": query, "result_count": 0, "strategy": "db_not_connected"}}

        # Fast path: the query is exactly an article number (e.g. 第十条).
        article_number_regex = re.compile(r"^第([零一二三四五六七八九十百千万\d]+)条$")
        match = article_number_regex.match(query)
        if match:
            try:
                article = self.get_article_by_number(query)
                if article:
                    current_app.logger.debug(f"精准条文号查询成功: {query}")
                    # Return immediately; skip the expensive combined search.
                    return {
                        "articles": [article],
                        "pagination": {"page": 1, "per_page": 1, "total": 1, "pages": 1},
                        "search_info": {"query": query, "result_count": 1, "strategy": "exact_article_number"}
                    }
            except Exception as e:
                current_app.logger.warning(f"尝试精准条文号查询失败: {e}")

        # Cached jieba tokenization of the query.
        seg_list = self._get_cached_seg_list(query)

        current_app.logger.debug(f"搜索查询: '{query}', 处理后关键词: {seg_list}")

        # article _id -> {"article", "score", "strategies", "match_details"}
        combined_results_map = {}

        # Fixed: must be initialized before the conditional block below;
        # previously this name was undefined (NameError at the strategy
        # selection step) whenever the text search was skipped because the
        # query was empty or MONGODB_TEXT_SEARCH_ENABLED was False.
        text_search_results = []

        # 1. Exact content substring match (highest weight).
        exact_content_query = {"content": {"$regex": re.escape(query), "$options": "i"}}
        self._execute_and_merge_results(exact_content_query, combined_results_map, "exact_content", 15, query=query)
        if len(combined_results_map) > 0:
            # Exact hits found; later strategies still run for completeness.
            current_app.logger.debug(f"精确内容匹配找到 {len(combined_results_map)} 条结果，继续细化搜索。")

        # 2. Exact title substring match (high weight).
        exact_title_query = {"title": {"$regex": re.escape(query), "$options": "i"}}
        self._execute_and_merge_results(exact_title_query, combined_results_map, "exact_title", 12, query=query)

        # 3. MongoDB full-text search (high weight), when enabled.
        if query and current_app.config.get('MONGODB_TEXT_SEARCH_ENABLED', True):
            text_search_results = list(self.collection.find(
                {"$text": {"$search": query}},
                {"score": {"$meta": "textScore"}}
            ).sort([("score", {"$meta": "textScore"})]).limit(per_page * 2))  # over-fetch for scoring
            for article in text_search_results:
                article_id = article['_id']
                score = article.get('score', 1) * 10  # weight 10
                self._merge_article_into_results(combined_results_map, article_id, article, score, "text", {"text_score": article.get('score', 1)})
            current_app.logger.debug(f"全文搜索找到 {len(text_search_results)} 条结果")

        # 4. All-keywords content match (medium-high weight).
        if len(seg_list) >= 2:
            multi_conditions = []
            for word in seg_list:
                multi_conditions.append({"content": {"$regex": re.escape(word), "$options": "i"}})
            multi_keyword_query = {"$and": multi_conditions}
            self._execute_and_merge_results(multi_keyword_query, combined_results_map, "multi_keyword", 8, seg_list=seg_list)

        # 5. Any-keyword content match (medium weight).
        single_conditions = []
        for word in seg_list:
            if word != query:  # the full query was already covered by step 1
                single_conditions.append({"content": {"$regex": re.escape(word), "$options": "i"}})
        if single_conditions:
            single_keyword_query = {"$or": single_conditions}
            self._execute_and_merge_results(single_keyword_query, combined_results_map, "single_keyword", 4, seg_list=seg_list, query=query)

        # 6. Keyword match against titles (medium weight).
        title_conditions = []
        for word in seg_list:
            if word != query:  # the full query was already covered by step 2
                title_conditions.append({"title": {"$regex": re.escape(word), "$options": "i"}})
        if title_conditions:
            title_query = {"$or": title_conditions}
            self._execute_and_merge_results(title_query, combined_results_map, "title_keyword", 6, seg_list=seg_list, query=query)

        # 7. Match against the keywords array (low weight, supplemental).
        array_conditions = []
        for word in seg_list:
            array_conditions.append({"keywords": {"$regex": re.escape(word), "$options": "i"}})
        if array_conditions:
            keyword_query = {"$or": array_conditions}
            self._execute_and_merge_results(keyword_query, combined_results_map, "keyword_array", 2, seg_list=seg_list)

        # --- Merge, rank and paginate ---
        sorted_results = sorted(combined_results_map.values(), key=lambda x: x["score"], reverse=True)

        total_results = len(sorted_results)
        start_index = (page - 1) * per_page
        end_index = start_index + per_page
        paginated_results = sorted_results[start_index:end_index]

        articles = []
        for result_data in paginated_results:
            article = result_data["article"]
            if 'score' in article:
                del article['score']  # drop MongoDB's internal textScore field
            if '_id' in article:
                article['_id'] = str(article['_id'])
            articles.append(article)

        pagination = {
            "page": page,
            "per_page": per_page,
            "total": total_results,
            "pages": (total_results + per_page - 1) // per_page
        }

        def _strategy_count(name):
            # Number of merged results that a given strategy contributed to.
            return sum(1 for r in combined_results_map.values() if name in r["strategies"])

        # Fixed: the previous if/elif chain tested len(combined_results_map)
        # in multiple branches (so the first truthy branch always won), and
        # the per-strategy debug counts looked the results map up by strategy
        # name even though its keys are article ids (so they were always 0).
        if total_results == 0:
            strategy = "no_results"
        elif _strategy_count("exact_content") or _strategy_count("exact_title"):
            strategy = "exact_match_primary"
        elif text_search_results:
            strategy = "text_search_primary"
        else:
            strategy = "fuzzy_fallback"

        search_info = {
            "query": query,
            "result_count": total_results,
            "strategy": strategy,
            "debug_info": {
                "processed_keywords": seg_list,
                "exact_content_results": _strategy_count("exact_content"),
                "exact_title_results": _strategy_count("exact_title"),
                "text_results": len(text_search_results),
                "multi_keyword_results": _strategy_count("multi_keyword"),
                "single_keyword_results": _strategy_count("single_keyword"),
                "title_keyword_results": _strategy_count("title_keyword"),
                "keyword_array_results": _strategy_count("keyword_array")
            }
        }

        return {"articles": articles, "pagination": pagination, "search_info": search_info}

    @staticmethod
    @lru_cache(maxsize=256)
    def _get_cached_seg_list(query: str):
        """Tokenize *query* with jieba and drop stop words (result cached).

        Declared static so lru_cache keys only on the query string; caching
        a bound method would also key on (and pin) the instance (B019).
        Callers still invoke it as self._get_cached_seg_list(query).
        """
        stop_words = ['的', '了', '在', '是', '和', '与', '或', '、', '，', '。']
        seg_list = jieba.lcut_for_search(query)
        if len(query) <= 3:
            # Short queries: keep the raw query itself plus its tokens.
            meaningful_words = [query]
            for word in seg_list:
                if len(word) >= 1 and word not in stop_words:
                    meaningful_words.append(word)
            return list(set(meaningful_words))
        else:
            return [word for word in seg_list if len(word) >= 1 and word not in stop_words]

    def _execute_and_merge_results(self, query_filter: dict, results_map: dict, strategy_name: str, weight: int, **kwargs):
        """Run a find() with *query_filter* and fold the hits into *results_map*.

        The base *weight* is boosted per strategy: by substring occurrence
        count for exact matches, and by the number of matched keywords for
        the keyword strategies.  Failures are logged, never raised.
        """
        try:
            limit = kwargs.get('limit', 20) * 3  # over-fetch for score calculation
            articles_found = list(self.collection.find(query_filter).limit(limit))
            current_app.logger.debug(f"{strategy_name} 找到 {len(articles_found)} 条结果")

            for article in articles_found:
                article_id = article['_id']
                score = weight  # base weight

                # Strategy-specific score refinement.
                if strategy_name == "exact_content" or strategy_name == "exact_title":
                    match_query = kwargs.get('query', '').lower()
                    field_content = article.get('content', '').lower() if strategy_name == "exact_content" else article.get('title', '').lower()
                    count = field_content.count(match_query)
                    score += count * weight  # each occurrence adds one weight unit
                    match_details = {f"{strategy_name}_matches": count}
                elif strategy_name == "multi_keyword" or strategy_name == "single_keyword" or strategy_name == "title_keyword":
                    seg_list = kwargs.get('seg_list', [])
                    query = kwargs.get('query', '')
                    field_content = article.get('content', '').lower() if strategy_name != "title_keyword" else article.get('title', '').lower()

                    matched_keywords = [word for word in seg_list if word.lower() in field_content and word != query]
                    score += len(matched_keywords) * weight  # more matched keywords -> higher score
                    match_details = {f"{strategy_name}_matched_keywords": matched_keywords}
                elif strategy_name == "keyword_array":
                    seg_list = kwargs.get('seg_list', [])
                    keywords_array = article.get('keywords', [])
                    matched_array_keywords = []
                    for kw in keywords_array:
                        for word in seg_list:
                            if word.lower() in kw.lower():
                                matched_array_keywords.append(kw)
                    score += len(set(matched_array_keywords)) * weight
                    match_details = {"array_matches": matched_array_keywords}
                else:
                    match_details = {}

                self._merge_article_into_results(results_map, article_id, article, score, strategy_name, match_details)

        except PyMongoError as e:
            current_app.logger.warning(f"执行查询 {strategy_name} 失败: {e}")

    def _merge_article_into_results(self, results_map: dict, article_id, article, score: float, strategy_name: str, match_details: dict):
        """Merge one article hit into *results_map*, accumulating its score.

        A repeated hit from another strategy adds its score, records the
        strategy name, and merges the per-strategy match details.
        """
        if article_id not in results_map:
            results_map[article_id] = {
                "article": article,
                "score": score,
                "strategies": [strategy_name],
                "match_details": match_details
            }
        else:
            results_map[article_id]["score"] += score
            if strategy_name not in results_map[article_id]["strategies"]:
                results_map[article_id]["strategies"].append(strategy_name)
            results_map[article_id]["match_details"].update(match_details)

    def get_articles_by_chapter(self, part: str, chapter_number: int, page: int = 1, per_page: int = 20):
        """Return a page of articles for the given part and chapter number."""
        self._init_db()
        if self.collection is None:
            return {"articles": [], "pagination": {}}

        query = {
            "part": part,
            "chapter": chapter_number
        }
        total_articles = self.collection.count_documents(query)

        articles = list(
            self.collection.find(query)
            # NOTE(review): article_number is a string label like '第一条',
            # so this database-side ascending sort is lexicographic, not
            # numeric — consider sorting via extract_article_number if
            # ordering matters; kept as-is to preserve behavior.
            .sort([("article_number", ASCENDING)])
            .skip((page - 1) * per_page)
            .limit(per_page)
        )

        for article in articles:
            if '_id' in article:
                article['_id'] = str(article['_id'])

        pagination = {
            "page": page,
            "per_page": per_page,
            "total": total_articles,
            "pages": (total_articles + per_page - 1) // per_page
        }
        return {"articles": articles, "pagination": pagination}

    def get_article_by_number(self, article_number: str):
        """Return the article document for *article_number*, or None."""
        self._init_db()
        if self.collection is None:
            return None
        article = self.collection.find_one({"article_number": article_number})
        if article and '_id' in article:
            article['_id'] = str(article['_id'])
        return article

    @lru_cache(maxsize=1)  # Chapter data rarely changes; cache indefinitely.
    def get_chapters_list(self):
        """Return all chapters grouped by part, in Civil Code part order.

        NOTE(review): lru_cache on a bound method keys on self (B019);
        tolerable here because the module exposes one long-lived instance,
        but the cache never invalidates if articles change.
        """
        self._init_db()
        if self.collection is None:
            return []

        # Canonical ordering of the Civil Code parts.
        PART_ORDER = ["总则编", "物权编", "合同编", "人格权编", "婚姻家庭编", "继承编", "侵权责任编"]

        pipeline = [
            {"$group": {
                "_id": {"part": "$part", "chapter_number": "$chapter", "chapter_title": {"$literal": "章节"}},
                "count": {"$sum": 1}
            }},
            {"$sort": {"_id.chapter_number": 1}},  # pre-sort by chapter number
            {"$project": {
                "_id": 0,
                "part": "$_id.part",
                "chapter_number": "$_id.chapter_number",
                "chapter_title": "$_id.chapter_title",
                "article_count": "$count"
            }}
        ]
        chapters = list(self.collection.aggregate(pipeline))

        # Sort by (part order, chapter number); unknown parts go last.
        def sort_key(chapter):
            part = chapter["part"]
            try:
                part_index = PART_ORDER.index(part)
            except ValueError:
                part_index = len(PART_ORDER)
            return (part_index, chapter["chapter_number"])

        chapters.sort(key=sort_key)
        return chapters

    def get_search_suggestions(self, query: str, limit: int = 10):
        """Return up to *limit* distinct stored keywords containing *query*."""
        self._init_db()
        if self.collection is None:
            return []

        pipeline = [
            {"$match": {"keywords": {"$regex": query, "$options": "i"}}},  # documents whose keywords mention the query
            {"$unwind": "$keywords"},                                      # one document per keyword
            {"$match": {"keywords": {"$regex": query, "$options": "i"}}},  # keep only keywords that themselves match
            {"$group": {"_id": "$keywords"}},                              # deduplicate
            {"$project": {"_id": 0, "suggestion": "$_id"}},                # rename _id -> suggestion
            {"$limit": limit}                                              # cap the result size
        ]

        results = list(self.collection.aggregate(pipeline))
        suggestions = [item['suggestion'] for item in results]

        return suggestions

    def add_favorite(self, article_number: str, user_id: str = None, user_ip: str = None):
        """Upsert a favorite for (user_id, article_number); returns success."""
        self._init_db()
        if self.favorites_collection is None:
            return False
        try:
            # Fall back to the IP recorded by the request middleware.
            if user_ip is None:
                user_ip = getattr(g, 'user_ip', '127.0.0.1')

            favorite_item = {
                "user_id": user_id,
                "user_ip": user_ip,
                "article_id": article_number,      # legacy field, kept for compatibility
                "article_number": article_number,  # current field used by the frontend
                "created_at": datetime.now()
            }
            self.favorites_collection.update_one(
                {"user_id": user_id, "article_number": article_number},
                {"$set": favorite_item},
                upsert=True
            )
            # Clean up legacy records that only carry the old field.
            self.favorites_collection.delete_many({
                "user_id": user_id,
                "article_number": {"$exists": False},
                "article_id": article_number
            })
            return True
        except PyMongoError as e:
            current_app.logger.error(f"添加收藏失败: {e}")
            return False

    def remove_favorite(self, article_number: str, user_id: str = "anonymous"):
        """Delete one favorite, matching either the new or the legacy field."""
        self._init_db()
        if self.favorites_collection is None:
            return False
        try:
            self.favorites_collection.delete_one({
                "user_id": user_id,
                "$or": [
                    {"article_number": article_number},
                    {"article_id": article_number}
                ]
            })
            return True
        except PyMongoError as e:
            current_app.logger.error(f"移除收藏失败: {e}")
            return False

    def get_user_favorites(self, user_id: str = "anonymous", page: int = 1, per_page: int = 20):
        """Return a page of the user's favorites, enriched with article details.

        Invalid entries (missing article_number) are skipped; any failure
        yields an empty result rather than raising.
        """
        try:
            current_app.logger.info(f"开始获取用户收藏列表: user_id={user_id}, page={page}, per_page={per_page}")
            self._init_db()
            if self.favorites_collection is None:
                current_app.logger.warning("favorites_collection为None")
                return {"favorites": [], "pagination": {}}

            query = {"user_id": user_id}

            # Total favorites for pagination.
            total_favorites = self.favorites_collection.count_documents(query)
            current_app.logger.info(f"找到收藏数量: {total_favorites}")

            # Simple approach: fetch favorites directly, no join pipeline.
            favorites_cursor = self.favorites_collection.find(query).sort("created_at", -1).skip((page - 1) * per_page).limit(per_page)
            current_app.logger.info(f"执行数据库查询成功，跳过{(page - 1) * per_page}条，限制{per_page}条")

            # Materialize for easier debugging.
            favorites_raw = list(favorites_cursor)
            current_app.logger.info(f"查询到原始收藏数据: {len(favorites_raw)}条")

            favorites = []
            for fav in favorites_raw:
                article_number = fav.get("article_number", "")
                current_app.logger.info(f"处理收藏: _id={fav.get('_id')}, article_number='{article_number}', type={type(article_number)}")

                # Skip favorites whose article_number is empty or "None".
                if not article_number or article_number == "None" or str(article_number) == "None":
                    current_app.logger.warning(f"跳过无效收藏: {fav.get('_id')}, article_number={article_number}")
                    continue

                favorite_item = {
                    "_id": str(fav.get("_id", "")),
                    "user_id": fav.get("user_id", ""),
                    "user_ip": fav.get("user_ip", ""),
                    "article_number": article_number,
                    "created_at": str(fav.get("created_at", "")),  # datetime serialized as string
                    "title": article_number  # placeholder until the article lookup succeeds
                }

                # Try to enrich with the article's details.
                if self.collection is not None:
                    try:
                        article = self.collection.find_one({"article_number": article_number})
                        if article:
                            current_app.logger.debug(f"找到条文: {article_number}, part={article.get('part')}")
                            favorite_item["title"] = article.get("content", article_number)[:50] + "..."
                            favorite_item["content"] = article.get("content", "")

                            # Chapter metadata straight from the article document.
                            favorite_item["part"] = str(article.get("part", "未知"))
                            favorite_item["chapter"] = int(article.get("chapter", 0)) if article.get("chapter") else 0
                            favorite_item["chapter_path"] = str(article.get("chapter_path", ""))
                        else:
                            current_app.logger.warning(f"未找到条文: {article_number}")
                            favorite_item["part"] = "未知"
                    except Exception as e:
                        current_app.logger.warning(f"获取条文详情失败: {e}")
                        favorite_item["part"] = "未知"
                else:
                    favorite_item["part"] = "未知"

                favorites.append(favorite_item)

            # Pagination metadata.
            pagination = {
                "page": page,
                "per_page": per_page,
                "total": total_favorites,
                "pages": (total_favorites + per_page - 1) // per_page if total_favorites > 0 else 0
            }

            current_app.logger.info(f"成功获取收藏列表，共{len(favorites)}项")
            return {"favorites": favorites, "pagination": pagination}

        except Exception as e:
            current_app.logger.error(f"获取用户收藏列表失败: {e}")
            import traceback
            current_app.logger.error(f"详细错误堆栈: {traceback.format_exc()}")
            # Degrade to an empty page instead of raising.
            return {"favorites": [], "pagination": {"page": page, "per_page": per_page, "total": 0, "pages": 0}}

    def add_search_history(self, query: str, result_count: int = 0, user_id: str = None, user_ip: str = None):
        """Upsert a search-history entry for (user_id, query); returns success."""
        self._init_db()
        if self.history_collection is None:
            return False
        try:
            # Fall back to the IP recorded by the request middleware.
            if user_ip is None:
                user_ip = getattr(g, 'user_ip', '127.0.0.1')

            history_item = {
                "user_id": user_id,
                "user_ip": user_ip,
                "query": query,
                "result_count": result_count,
                "created_at": datetime.now()
            }
            self.history_collection.update_one(
                {"user_id": user_id, "query": query},
                {"$set": history_item},
                upsert=True
            )
            return True
        except PyMongoError as e:
            current_app.logger.error(f"添加搜索历史失败: {e}")
            return False

    def get_search_history(self, user_id: str = None, limit: int = 20):
        """Return the user's most recent search-history entries, newest first.

        Fixed: documents store their time in "created_at" (see
        add_search_history), but this method previously projected and
        sorted on a non-existent "timestamp" field, so entries carried no
        time and the ordering was effectively arbitrary.
        """
        self._init_db()
        if self.history_collection is None:
            return []
        history = list(self.history_collection.find(
            {"user_id": user_id},
            projection={"user_id": 1, "query": 1, "created_at": 1, "result_count": 1}  # only needed fields
        ).sort([("created_at", DESCENDING)])
         .limit(limit))
        for item in history:
            if '_id' in item:
                item['_id'] = str(item['_id'])
        return history

    def clear_search_history(self, user_id: str = None):
        """Delete all search-history entries for *user_id*; returns success."""
        self._init_db()
        if self.history_collection is None:
            return False
        try:
            self.history_collection.delete_many({"user_id": user_id})
            return True
        except PyMongoError as e:
            current_app.logger.error(f"清空搜索历史失败: {e}")
            return False

civil_code_service = CivilCodeService()
