from flask import Flask, request, jsonify, Blueprint
from elasticsearch import Elasticsearch
import logging
import re
import datetime
import json
import os
from functools import wraps
import html

# Logging configuration: module-level logger shared by all handlers below.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Blueprint('search_api', __name__)
es = Elasticsearch(["http://localhost:9200"])

# Ensure the on-disk query-log directory exists before any handler writes to it.
QUERY_LOG_DIR = "query_logs"
os.makedirs(QUERY_LOG_DIR, exist_ok=True)

# In-memory per-user session store: maps user_id -> {"history": [...]} with
# the most recent queries first. Lost on process restart (disk logs persist).
user_sessions = {}

def get_user_id():
    """Return an identifier for the current requester.

    Logged-in users get their session ``user_id``; anonymous visitors get an
    id derived from their ``session_id`` cookie. If no cookie is present a
    fresh UUID is generated (NOTE(review): the new id is not written back to
    the client, so it changes on every request — confirm intended).
    """
    from flask import request, session

    try:
        return session['user_id']
    except KeyError:
        pass

    cookie_sid = request.cookies.get('session_id')
    if not cookie_sid:
        import uuid
        cookie_sid = str(uuid.uuid4())
    return f"anonymous_{cookie_sid}"

def log_query(f):
    """Decorator: record each non-empty search query for the current user.

    Maintains two stores, both capped at 10 entries and deduplicated so the
    newest occurrence of a query wins:
      * the in-memory ``user_sessions[user_id]["history"]`` list, and
      * a per-user JSON file under ``QUERY_LOG_DIR``.
    File-write failures are logged and never break the wrapped request.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        user_id = get_user_id()
        query = request.args.get('q', '')
        search_type = request.args.get('type', 'all')

        if query:  # only record non-empty queries
            log_entry = {
                "user_id": user_id,
                "query": query,
                "type": search_type,
                "timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            }

            # In-memory history: drop any older copy of this query, push the
            # new entry to the front, keep at most 10 entries.
            session_data = user_sessions.setdefault(user_id, {"history": []})
            session_data["history"] = (
                [log_entry]
                + [item for item in session_data["history"] if item["query"] != query]
            )[:10]

            # Persist to disk. Sanitize the user id before using it as a
            # filename: anonymous ids are derived from a client-supplied
            # cookie, so an unsanitized value would allow path traversal
            # (e.g. session_id="../../x"). Normal ids (word chars, hyphens,
            # underscores) are unchanged, so existing log files still match.
            safe_id = re.sub(r'[^\w-]', '_', user_id)
            log_file = os.path.join(QUERY_LOG_DIR, f"{safe_id}.json")
            try:
                if os.path.exists(log_file):
                    with open(log_file, 'r', encoding='utf-8') as file_handle:
                        logs = json.load(file_handle)
                    # Remove any older copy of the same query from the file.
                    logs = [log for log in logs if log.get("query") != query]
                else:
                    logs = []

                logs.insert(0, log_entry)
                logs = logs[:10]  # keep at most 10 entries per user

                with open(log_file, 'w', encoding='utf-8') as file_handle:
                    json.dump(logs, file_handle, ensure_ascii=False, indent=2)
            except Exception as e:
                logger.error(f"写入查询日志失败: {e}")

        return f(*args, **kwargs)
    return decorated_function

@app.route('/api/search', methods=['GET'])
@log_query
def search():
    """
    Main search API entry point.

    Query parameters:
    - q: query string (required)
    - type: search type (web, doc, all); default "all"
    - page: page number (>= 1), default 1
    - size: results per page (>= 1), default 10
    - search_mode: normal, phrase or wildcard; default "normal"

    Returns JSON with status/total/results; 400 on bad input, 500 on
    unexpected failures.
    """
    try:
        query = request.args.get('q', '')
        search_type = request.args.get('type', 'all')
        search_mode = request.args.get('search_mode', 'normal')

        # Validate pagination explicitly so malformed input yields a 400
        # instead of the generic 500 the bare int() conversion used to cause.
        try:
            page = int(request.args.get('page', 1))
            size = int(request.args.get('size', 10))
        except ValueError:
            return jsonify({
                "status": "error",
                "message": "page 和 size 必须为整数"
            }), 400
        if page < 1 or size < 1:
            return jsonify({
                "status": "error",
                "message": "page 和 size 必须为正整数"
            }), 400

        if not query:
            return jsonify({
                "status": "error",
                "message": "请提供搜索关键词"
            }), 400

        # Offset of the first hit on the requested page.
        from_val = (page - 1) * size

        # User id and interest model for personalized ranking.
        user_id = get_user_id()
        user_interests = get_user_interests(user_id)

        # Single-index searches.
        if search_type == 'web':
            return jsonify(search_webpages(query, from_val, size, search_mode, user_interests))
        if search_type == 'doc':
            return jsonify(search_documents(query, from_val, size, search_mode, user_interests))

        # Mixed search (webpages + documents).
        if search_type == 'all':
            # Fetch enough hits from BOTH indexes, starting at offset 0, so
            # the merged, re-sorted list covers the requested page.
            fetch_size = page * size

            web_results = search_webpages(query, 0, fetch_size, search_mode, user_interests)
            doc_results = search_documents(query, 0, fetch_size, search_mode, user_interests)

            all_results = web_results.get('results', []) + doc_results.get('results', [])

            # Merge by relevance score, then paginate the combined list.
            all_results.sort(key=lambda hit: hit['score'], reverse=True)
            total_results = web_results.get('total', 0) + doc_results.get('total', 0)
            paginated_results = all_results[from_val:from_val + size]

            return jsonify({
                "status": "success",
                "query": query,
                "total": total_results,
                "page": page,
                "size": size,
                "results": paginated_results
            })

        return jsonify({
            "status": "error",
            "message": f"不支持的搜索类型: {search_type}"
        }), 400

    except Exception as e:
        logger.error(f"搜索错误: {e}")
        return jsonify({
            "status": "error",
            "message": f"搜索处理出错: {e}"
        }), 500

def build_query(field, query_text, search_mode):
    """Build a single-field Elasticsearch query clause for the given mode.

    Always returns a dict of the form ``{query_type: {field: {...}}}`` so
    callers can attach a ``"boost"`` to the inner options dict afterwards.

    Modes:
        'phrase'   -> match_phrase
        'wildcard' -> match_phrase_prefix for a single trailing '*' or '?'
                      (faster and more relevant for CJK text than a true
                      wildcard scan), wildcard for other patterns, plain
                      match when no wildcard characters are present
        other      -> plain match (normal mode)
    """
    if search_mode == 'phrase':
        return {
            "match_phrase": {
                field: {
                    "query": query_text
                }
            }
        }

    if search_mode == 'wildcard':
        if '*' not in query_text and '?' not in query_text:
            # No wildcard characters: fall back to a plain match.
            return {"match": {field: {"query": query_text}}}

        # The original code had two byte-identical branches for a trailing
        # '*' and a trailing '?'; they are consolidated here. A single
        # trailing wildcard (with none elsewhere) becomes a prefix query.
        body = query_text[:-1]
        if query_text[-1] in '*?' and '*' not in body and '?' not in body:
            return {
                "match_phrase_prefix": {
                    field: {
                        "query": body
                    }
                }
            }

        # Any other wildcard placement uses a standard wildcard query.
        return {
            "wildcard": {
                field: {
                    "value": query_text
                }
            }
        }

    # normal mode
    return {"match": {field: {"query": query_text}}}

def _web_interest_functions(user_interests):
    """Build function_score weight clauses for the webpage index.

    Each interest term (single characters skipped) boosts hits containing
    it: title x2.0, meta keywords x1.5, body text x1.0 of the term weight.
    """
    field_factors = (("title", 2.0), ("text", 1.0), ("meta_keywords", 1.5))
    functions = []
    for term, weight in user_interests.items():
        if len(term) > 1:  # ignore single-character terms
            for field, factor in field_factors:
                functions.append({
                    "filter": {"match": {field: term}},
                    "weight": weight * factor
                })
    return functions


def _web_should_clauses(query, search_mode):
    """Build per-field should clauses for the webpage index.

    Boosts: title x3, meta keywords/description x2, body text x1.
    """
    field_boosts = (("title", 3), ("text", 1), ("meta_keywords", 2), ("meta_description", 2))
    if search_mode == 'normal':
        return [{"match": {field: {"query": query, "boost": boost}}}
                for field, boost in field_boosts]
    clauses = []
    for field, boost in field_boosts:
        clause = build_query(field, query, search_mode)
        # build_query returns a single {query_type: {field: {...}}} dict;
        # attach the boost to the field options regardless of query type.
        query_type = next(iter(clause))
        clause[query_type][field]["boost"] = boost
        clauses.append(clause)
    return clauses


def search_webpages(query, from_val, size, search_mode='normal', user_interests=None):
    """Search the webpage index.

    Args:
        query: user query string.
        from_val: result offset for pagination.
        size: number of hits to return.
        search_mode: 'normal', 'phrase' or 'wildcard'.
        user_interests: optional {term: weight} mapping used for
            personalized function_score ranking. Default None (no
            personalization) — replaces the previous mutable ``{}``
            default, with identical behavior for all callers.

    Returns:
        dict with status, query, total hit count and formatted results.
    """
    if user_interests is None:
        user_interests = {}

    main_query = {
        "bool": {
            "should": _web_should_clauses(query, search_mode),
            "minimum_should_match": 1
        }
    }

    # With interest data, wrap the base query in a function_score that adds
    # per-term personalization weights to the relevance score.
    if user_interests:
        es_query = {
            "function_score": {
                "query": main_query,
                "functions": _web_interest_functions(user_interests),
                "score_mode": "sum",
                "boost_mode": "sum"
            }
        }
    else:
        es_query = main_query

    search_body = {
        "from": from_val,
        "size": size,
        "query": es_query,
        "highlight": {
            "fields": {
                "title": {"number_of_fragments": 1, "fragment_size": 150},
                "text": {"number_of_fragments": 1, "fragment_size": 300},
                "meta_description": {"number_of_fragments": 1, "fragment_size": 150}
            },
            "pre_tags": ["<em>"],
            "post_tags": ["</em>"]
        }
    }

    response = es.search(index="nankai_webpages", body=search_body)

    results = []
    for hit in response["hits"]["hits"]:
        source = hit["_source"]
        highlight = hit.get("highlight", {})

        # Strip stray HTML from titles/snippets, keeping only <em> highlights.
        raw_title = highlight.get("title", [source["title"]])[0]
        raw_snippet = get_snippet(highlight.get("text", []), source.get("text", ""))

        results.append({
            "id": hit["_id"],
            "url": source["url"],
            "title": clean_html_except_highlights(raw_title),
            "snippet": clean_html_except_highlights(raw_snippet),
            "score": hit["_score"],
            "type": "webpage",
            "crawl_time": source["crawl_time"],
            "page_hash": source.get("page_hash", "")  # used by the snapshot feature
        })

    return {
        "status": "success",
        "query": query,
        "total": response["hits"]["total"]["value"],
        "results": results
    }

def _doc_interest_functions(user_interests):
    """Build function_score weight clauses for the document index.

    Each interest term (single characters skipped) boosts hits containing
    it: title x2.0, content x1.0 of the term weight.
    """
    field_factors = (("title", 2.0), ("content", 1.0))
    functions = []
    for term, weight in user_interests.items():
        if len(term) > 1:  # ignore single-character terms
            for field, factor in field_factors:
                functions.append({
                    "filter": {"match": {field: term}},
                    "weight": weight * factor
                })
    return functions


def _doc_should_clauses(query, search_mode):
    """Build per-field should clauses for the document index (title x3, content x2)."""
    field_boosts = (("title", 3), ("content", 2))
    if search_mode == 'normal':
        return [{"match": {field: {"query": query, "boost": boost}}}
                for field, boost in field_boosts]
    clauses = []
    for field, boost in field_boosts:
        clause = build_query(field, query, search_mode)
        # build_query returns a single {query_type: {field: {...}}} dict;
        # attach the boost to the field options regardless of query type.
        query_type = next(iter(clause))
        clause[query_type][field]["boost"] = boost
        clauses.append(clause)
    return clauses


def search_documents(query, from_val, size, search_mode='normal', user_interests=None):
    """Search the document index.

    Args:
        query: user query string.
        from_val: result offset for pagination.
        size: number of hits to return.
        search_mode: 'normal', 'phrase' or 'wildcard'.
        user_interests: optional {term: weight} mapping used for
            personalized function_score ranking. Default None (no
            personalization) — replaces the previous mutable ``{}``
            default, with identical behavior for all callers.

    Returns:
        dict with status, query, total hit count and formatted results.
    """
    if user_interests is None:
        user_interests = {}

    main_query = {
        "bool": {
            "should": _doc_should_clauses(query, search_mode),
            "minimum_should_match": 1
        }
    }

    # With interest data, wrap the base query in a function_score that adds
    # per-term personalization weights to the relevance score.
    if user_interests:
        es_query = {
            "function_score": {
                "query": main_query,
                "functions": _doc_interest_functions(user_interests),
                "score_mode": "sum",
                "boost_mode": "sum"
            }
        }
    else:
        es_query = main_query

    search_body = {
        "from": from_val,
        "size": size,
        "query": es_query,
        "highlight": {
            "fields": {
                "title": {"number_of_fragments": 1, "fragment_size": 150},
                "content": {"number_of_fragments": 1, "fragment_size": 300}
            },
            "pre_tags": ["<em>"],
            "post_tags": ["</em>"]
        }
    }

    response = es.search(index="nankai_documents", body=search_body)

    results = []
    for hit in response["hits"]["hits"]:
        source = hit["_source"]
        highlight = hit.get("highlight", {})

        # Strip stray HTML from titles/snippets, keeping only <em> highlights.
        raw_title = highlight.get("title", [source["title"]])[0]
        raw_snippet = get_snippet(highlight.get("content", []), source.get("content", ""))

        results.append({
            "id": hit["_id"],
            "url": source["url"],
            "title": clean_html_except_highlights(raw_title),
            "snippet": clean_html_except_highlights(raw_snippet),
            "score": hit["_score"],
            "type": "document",
            "file_type": source["file_type"],
            "file_size": source["file_size"],
            "crawl_time": source["crawl_time"],
            "file_path": source.get("file_path", "")
        })

    return {
        "status": "success",
        "query": query,
        "total": response["hits"]["total"]["value"],
        "results": results
    }

def get_snippet(highlights, full_text, max_len=200):
    """Pick a display snippet for a search hit.

    Prefers the first highlight fragment; otherwise truncates the full text
    to max_len characters (appending "..."). Returns "" when neither is
    available.
    """
    if highlights:
        return highlights[0]
    if not full_text:
        return ""
    return full_text if len(full_text) <= max_len else full_text[:max_len] + "..."

@app.route('/api/history', methods=['GET'])
def query_history():
    """Return the current user's recent query history, deduplicated.

    Prefers the in-memory session cache; falls back to the per-user JSON
    log file. A corrupt or unreadable log is treated as empty history.
    """
    user_id = get_user_id()

    if user_id in user_sessions and "history" in user_sessions[user_id]:
        history = user_sessions[user_id]["history"]
    else:
        history = []
        # Sanitize the user id before using it as a filename: anonymous ids
        # are derived from a client-supplied cookie, so an unsanitized value
        # would allow path traversal. Normal ids are unchanged by this.
        safe_id = re.sub(r'[^\w-]', '_', user_id)
        log_file = os.path.join(QUERY_LOG_DIR, f"{safe_id}.json")
        if os.path.exists(log_file):
            try:
                with open(log_file, 'r', encoding='utf-8') as file_handle:
                    history = json.load(file_handle)
            except (OSError, ValueError) as e:
                # ValueError covers JSONDecodeError/UnicodeDecodeError; the
                # previous bare except also swallowed KeyboardInterrupt etc.
                logger.warning(f"读取查询日志失败: {e}")
                history = []

    # Deduplicate, keeping the first (most recent) occurrence of each query.
    seen_queries = set()
    unique_history = []
    for item in history:
        q = item.get("query", "")
        if q and q not in seen_queries:
            seen_queries.add(q)
            unique_history.append(item)

    return jsonify({
        "status": "success",
        "history": unique_history
    })

@app.route('/api/snapshot/<page_hash>', methods=['GET'])
def page_snapshot(page_hash):
    """Render a cached snapshot of a crawled webpage.

    Looks the page up by its content hash and returns a minimal standalone
    HTML document containing the stored title, URL, crawl time and text.
    Returns 404 JSON when no page matches, 500 JSON on failure.
    """
    try:
        # Find the original page content by its hash.
        query = {
            "query": {
                "term": {
                    "page_hash": page_hash
                }
            }
        }

        response = es.search(index="nankai_webpages", body=query)

        if response["hits"]["total"]["value"] == 0:
            return jsonify({
                "status": "error",
                "message": "找不到该网页的快照"
            }), 404

        page_data = response["hits"]["hits"][0]["_source"]

        # Escape every stored value before interpolating it into HTML: the
        # crawled title/URL/text are untrusted and interpolating them raw
        # (as before) allowed stored XSS in the snapshot page. The `html`
        # module was already imported at the top of the file but unused.
        title = html.escape(page_data.get('title', ''))
        url = html.escape(page_data.get('url', ''))
        crawl_time = html.escape(str(page_data.get('crawl_time', '')))
        text = html.escape(page_data.get('text', ''))

        # Build a simple standalone snapshot page.
        snapshot_html = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <meta charset="UTF-8">
            <title>网页快照: {title}</title>
            <style>
                body {{ font-family: Arial, sans-serif; margin: 0; padding: 20px; }}
                .snapshot-header {{ background: #f5f5f5; padding: 10px; margin-bottom: 20px; border-bottom: 1px solid #ddd; }}
                .snapshot-content {{ padding: 10px; }}
                .snapshot-meta {{ color: #777; font-size: 0.9em; }}
            </style>
        </head>
        <body>
            <div class="snapshot-header">
                <h2>{title}</h2>
                <p class="snapshot-meta">
                    <strong>原始网址:</strong> <a href="{url}">{url}</a><br>
                    <strong>抓取时间:</strong> {crawl_time}<br>
                </p>
            </div>
            <div class="snapshot-content">
                {text}
            </div>
        </body>
        </html>
        """

        return snapshot_html
    except Exception as e:
        logger.error(f"获取快照失败: {e}")
        return jsonify({
            "status": "error",
            "message": f"获取快照失败: {e}"
        }), 500

@app.route('/api/recommend', methods=['GET'])
def recommend():
    """Suggest queries related to the 'q' parameter.

    Returns 400 JSON when 'q' is missing, 500 JSON on failure, otherwise a
    success payload with the list of related terms.
    """
    query = request.args.get('q', '')
    if not query:
        return jsonify({
            "status": "error",
            "message": "请提供搜索关键词"
        }), 400

    try:
        # Derive suggestion terms from the current query string.
        return jsonify({
            "status": "success",
            "related_terms": get_related_queries(query)
        })
    except Exception as e:
        logger.error(f"推荐失败: {e}")
        return jsonify({
            "status": "error",
            "message": f"推荐失败: {e}"
        }), 500

def get_related_queries(query, max_results=8):
    """
    Generate related query suggestions for *query*.

    Combines several strategies, accumulating a weight per candidate term:
      1. match_phrase against page text (harvesting titles/meta keywords
         of the top hits)
      2. for short queries (<= 2 tokens), a broader match to gather more
         candidate titles
      3. more_like_this for semantically similar pages
      4. hand-curated higher-education domain expansions

    Candidates below a relevance threshold, or overlapping already-selected
    suggestions, are filtered out; at most *max_results* terms are returned,
    best first. Returns [] on any overall failure.

    NOTE(review): tokenization is whitespace split, which does not segment
    CJK text — per-term filters mostly act on whole phrases for Chinese.
    """
    try:
        related_terms = {}  # candidate term -> accumulated weight
        query_terms = query.split()  # naive whitespace tokenization

        # 0. Short queries need expansion; long ones need distillation.
        short_query = len(query_terms) <= 2

        # 1. Exact match: match_phrase finds the most relevant documents.
        phrase_query = {
            "query": {
                "match_phrase": {
                    "text": {
                        "query": query,
                        "slop": 1
                    }
                }
            },
            "_source": ["title", "meta_keywords"],
            "size": 5
        }

        try:
            phrase_response = es.search(index="nankai_webpages", body=phrase_query)

            # Harvest high-quality related terms from the top hits.
            for hit in phrase_response["hits"]["hits"]:
                score = hit["_score"]
                # Terms from the title; skip single characters and terms
                # already present in the query.
                title = hit["_source"].get("title", "")
                if title:
                    for term in title.split():
                        if len(term) > 1 and term not in query_terms:
                            # Weight by the document's relevance score.
                            related_terms[term] = related_terms.get(term, 0) + score * 0.8

                # Terms from meta keywords (usually higher quality).
                keywords = hit["_source"].get("meta_keywords", "")
                if keywords:
                    for keyword in keywords.split(","):
                        keyword = keyword.strip()
                        if keyword and len(keyword) > 1 and keyword not in query_terms:
                            related_terms[keyword] = related_terms.get(keyword, 0) + score * 1.2
        except Exception as e:
            # Best effort: a failed phrase search only skips this strategy.
            # (Was a bare except, which also swallowed KeyboardInterrupt.)
            logger.debug(f"related-query phrase strategy failed: {e}")

        # 2. Short queries: broader match to find related document titles.
        if short_query:
            search_body = {
                "query": {
                    "match": {
                        "text": {
                            "query": query,
                            "minimum_should_match": "80%"  # raised match threshold
                        }
                    }
                },
                "_source": ["title", "meta_keywords"],
                "size": 10  # fetch more documents
            }

            response = es.search(index="nankai_webpages", body=search_body)

            for hit in response["hits"]["hits"]:
                score = hit["_score"] * 0.6  # slightly below exact-match weight

                # Extract multi-character keywords from the title.
                title = hit["_source"].get("title", "")
                if title:
                    title_terms = [t for t in title.split() if len(t) > 1]
                    for term in title_terms:
                        if term not in query_terms:
                            related_terms[term] = related_terms.get(term, 0) + score * 0.5

                # Also use meta_keywords.
                keywords = hit["_source"].get("meta_keywords", "")
                if keywords:
                    for keyword in keywords.split(","):
                        keyword = keyword.strip()
                        if keyword and len(keyword) > 1 and keyword not in query_terms:
                            related_terms[keyword] = related_terms.get(keyword, 0) + score * 0.7

        # 3. more_like_this finds semantically similar content.
        mlt_query = {
            "query": {
                "more_like_this": {
                    "fields": ["title", "text"],
                    "like": query,
                    "min_term_freq": 1,
                    "max_query_terms": 12,
                    "min_doc_freq": 1,
                    "min_word_length": 2  # ignore single-character words
                }
            },
            "_source": ["title"],
            "size": 5
        }

        try:
            mlt_response = es.search(index="nankai_webpages", body=mlt_query)

            for hit in mlt_response["hits"]["hits"]:
                mlt_score = hit["_score"] * 0.9  # MLT hits are usually highly relevant
                title = hit["_source"].get("title", "")

                if title:
                    for word in title.split():
                        if len(word) > 1 and word not in query_terms:
                            related_terms[word] = related_terms.get(word, 0) + mlt_score
        except Exception as e:
            # more_like_this can fail on unusual query terms; skip it.
            logger.debug(f"related-query more_like_this strategy failed: {e}")

        # 4. Higher-education domain expansions, applied when the query
        #    contains one of the curated keys.
        edu_terms = {
            "学院": ["系", "专业", "学科", "课程", "教学"],
            "研究": ["学术", "科研", "论文", "成果", "项目"],
            "招生": ["报考", "考试", "录取", "分数", "入学"],
            "就业": ["工作", "职业", "就业率", "企业", "岗位"],
            "教授": ["老师", "导师", "学者", "院士", "讲师"],
            "学生": ["本科生", "研究生", "博士生", "硕士生", "学子"],
            "大学": ["高校", "学府", "学校", "院校", "教育"]
        }

        for key, expansions in edu_terms.items():
            if key in query:
                for exp in expansions:
                    if exp not in query_terms:
                        related_terms[exp] = related_terms.get(exp, 0) + 4.0  # high base weight

        # 5. Rank by accumulated weight and cap the result count.
        sorted_terms = sorted(related_terms.items(), key=lambda x: x[1], reverse=True)

        filtered_terms = []
        seen_term_parts = set()  # guards against partially duplicated suggestions

        for term, score in sorted_terms:
            # Drop low-relevance candidates.
            if score < 2.0:
                continue

            term_parts = set(term.split())

            # Skip terms that contain (or are contained in) an accepted one,
            # e.g. avoid returning both "软件" and "软件学院".
            if not any(term in kept or kept in term for kept in filtered_terms):
                # Allow partial word overlap only for very high-scoring terms.
                if not term_parts.intersection(seen_term_parts) or score > 6.0:
                    filtered_terms.append(term)
                    seen_term_parts.update(term_parts)

                    if len(filtered_terms) >= max_results:
                        break

        return filtered_terms
    except Exception as e:
        logger.error(f"获取相关查询失败: {e}")
        return []

@app.route('/api/document/<doc_id>/preview', methods=['GET'])
def document_preview(doc_id):
    """Return preview data for a single indexed document.

    Args:
        doc_id: Elasticsearch document id (the ES-internal ``_id``).

    Returns:
        JSON with ``title``, ``file_type``, ``content`` and ``url`` on
        success; an error payload with HTTP 404 when the document does
        not exist, or HTTP 500 when the lookup fails.
    """
    try:
        # Look the document up by its ES-internal id.
        query = {
            "query": {
                "term": {
                    "_id": doc_id
                }
            }
        }

        response = es.search(index="nankai_documents", body=query)

        if response["hits"]["total"]["value"] == 0:
            return jsonify({
                "status": "error",
                "message": "找不到该文档"
            }), 404

        document = response["hits"]["hits"][0]["_source"]

        return jsonify({
            "status": "success",
            "title": document.get("title", "未知文档"),
            "file_type": document.get("file_type", ""),
            "content": document.get("content", ""),
            "url": document.get("url", "")
        })

    except Exception:
        # Log the full traceback server-side, but return a generic message:
        # echoing the raw exception to the client leaks internal details
        # (ES host names, index layout, stack info).
        logger.exception("获取文档预览失败")
        return jsonify({
            "status": "error",
            "message": "获取文档预览失败"
        }), 500

# 清理HTML标签，仅保留<em>标签用于高亮
def clean_html_except_highlights(text):
    if not text:
        return ""
    # 先将<em>标签替换为特殊标记
    text = text.replace("<em>", "###EM_START###")
    text = text.replace("</em>", "###EM_END###")
    # 移除所有HTML标签
    text = re.sub(r'<[^>]+>', '', text)
    # 恢复<em>标签
    text = text.replace("###EM_START###", "<em>")
    text = text.replace("###EM_END###", "</em>")
    # 转义HTML特殊字符
    text = text.replace("&", "&amp;")
    text = text.replace("<", "&lt;").replace(">", "&gt;")
    # 恢复em标签
    text = text.replace("&lt;em&gt;", "<em>").replace("&lt;/em&gt;", "</em>")
    return text

def get_user_interests(user_id, max_terms=10):
    """Build a simple interest model for a user from their query history.

    Each logged query contributes its whitespace-separated terms, weighted
    by position in the log: entry at index ``i`` contributes
    ``DECAY_FACTOR ** i``, so entries earlier in the file dominate.

    Args:
        user_id: Identifier used to locate the per-user query log file
            under ``QUERY_LOG_DIR``.
        max_terms: Maximum number of top-weighted terms to return.

    Returns:
        dict mapping term -> accumulated weight, limited to the
        ``max_terms`` highest-weighted terms. Empty dict when the log
        file is missing, unreadable, or empty.
    """
    # Recency decay factor for per-entry weights.
    DECAY_FACTOR = 0.9

    log_file = os.path.join(QUERY_LOG_DIR, f"{user_id}.json")

    # EAFP: open directly and catch the specific failures instead of the
    # original exists-check plus bare ``except:`` (which hid real bugs).
    # OSError covers a missing/unreadable file; ValueError covers both
    # json.JSONDecodeError and UnicodeDecodeError.
    try:
        with open(log_file, 'r', encoding='utf-8') as f:
            history = json.load(f)
    except (OSError, ValueError):
        return {}

    if not history:
        return {}

    # Accumulate decayed weights per term.
    term_weights = {}
    for idx, entry in enumerate(history):
        query = entry.get("query", "").lower()
        # Naive whitespace tokenization; Chinese queries would need a real
        # segmenter (e.g. jieba) to split into meaningful terms.
        terms = query.split()

        # Newer entries (lower idx) receive larger weights.
        entry_weight = DECAY_FACTOR ** idx

        for term in terms:
            if len(term) > 1:  # skip single-character tokens (mostly noise)
                term_weights[term] = term_weights.get(term, 0.0) + entry_weight

    # Keep only the max_terms highest-weighted terms.
    sorted_terms = sorted(term_weights.items(), key=lambda x: x[1], reverse=True)
    return dict(sorted_terms[:max_terms])

if __name__ == "__main__":
    # Dev entry point: mount this blueprint on a standalone Flask app.
    # Flask is already imported at the top of this module, so the previous
    # redundant local ``from flask import Flask`` is removed.
    app_instance = Flask(__name__)
    app_instance.register_blueprint(app)
    # NOTE(review): debug=True combined with host="0.0.0.0" exposes the
    # interactive Werkzeug debugger to the whole network — development only;
    # never deploy this entry point as-is.
    app_instance.run(debug=True, host="0.0.0.0", port=5000)