import frappe
from frappe.utils import global_search

def apply_global_search_patches():
    """Monkey-patch frappe's global-search helpers with Dameng (DM) support.

    Each patched function delegates to the original frappe implementation
    unless ``frappe.conf.db_type == "dm"``.  For DM, upserts are expressed as
    ``MERGE INTO`` statements whose row sources are fully parameterized —
    document content is never interpolated into the SQL text (the previous
    version built literals with ``str.format``, which broke on apostrophes
    and was a SQL-injection vector).
    """

    # Keep references to the stock implementations for the non-DM fallback.
    original_sync_value = global_search.sync_value
    original_insert_values_for_multiple_docs = global_search.insert_values_for_multiple_docs
    original_sync_values = global_search.sync_values

    # One parameterized row of the MERGE source; repeated with UNION ALL for
    # multi-row upserts.  All six values are bound by the driver.
    _ROW_SELECT = """
        SELECT
            %s AS "DOCTYPE",
            %s AS "NAME",
            %s AS "CONTENT",
            %s AS "PUBLISHED",
            %s AS "TITLE",
            %s AS "ROUTE"
        FROM DUAL
    """

    def _merge_rows(rows, update_on_match):
        """Upsert ``rows`` into ``__global_search`` with a single MERGE.

        :param rows: list of ``(doctype, name, content, published, title,
            route)`` tuples.
        :param update_on_match: when True, existing (doctype, name) rows are
            updated; when False they are left untouched (insert-only).
        """
        if not rows:
            # An empty source would produce invalid SQL ("USING () T2").
            return

        source = " UNION ALL ".join([_ROW_SELECT] * len(rows))
        matched_clause = ""
        if update_on_match:
            matched_clause = """
                WHEN MATCHED THEN
                    UPDATE SET
                        "CONTENT" = T2."CONTENT",
                        "PUBLISHED" = T2."PUBLISHED",
                        "TITLE" = T2."TITLE",
                        "ROUTE" = T2."ROUTE"
            """

        merge_sql = """
            MERGE INTO "__global_search" T1
            USING ({source}) T2
            ON (T1."DOCTYPE" = T2."DOCTYPE" AND T1."NAME" = T2."NAME")
            {matched}
            WHEN NOT MATCHED THEN
                INSERT ("DOCTYPE", "NAME", "CONTENT", "PUBLISHED", "TITLE", "ROUTE")
                VALUES (T2."DOCTYPE", T2."NAME", T2."CONTENT", T2."PUBLISHED", T2."TITLE", T2."ROUTE")
        """.format(source=source, matched=matched_clause)

        # Flatten the row tuples into one positional-parameter list matching
        # the %s placeholders in order.
        params = [item for row in rows for item in row]
        frappe.db.sql(merge_sql, params)

    def patched_sync_value(value: dict):
        """
        Sync a given document to global search with DM support.

        :param value: dict of { doctype, name, content, published, title, route }
        """
        if frappe.conf.db_type != "dm":
            return original_sync_value(value)

        _merge_rows(
            [(
                value["doctype"],
                value["name"],
                value["content"],
                value["published"],
                value["title"],
                value["route"],
            )],
            update_on_match=True,
        )

    def patched_insert_values_for_multiple_docs(all_contents):
        """Bulk-insert global-search rows on DM (insert-only: rows whose
        (doctype, name) already exists are skipped, matching the original's
        lack of a WHEN MATCHED clause)."""
        if frappe.conf.db_type != "dm":
            return original_insert_values_for_multiple_docs(all_contents)

        rows = [
            (
                content["doctype"],
                content["name"],
                content["content"],
                content["published"],
                content["title"],
                content["route"],
            )
            for content in all_contents
        ]

        # Batch conservatively: each row contributes 6 bind parameters and
        # drivers commonly cap the placeholder count per statement.
        batch_size = 5000
        for i in range(0, len(rows), batch_size):
            _merge_rows(rows[i : i + batch_size], update_on_match=False)

    def patched_sync_values(values: list):
        """Sync multiple ``(doctype, name, content, published, title, route)``
        tuples to global search with DM support (insert or update)."""
        if frappe.conf.db_type != "dm":
            return original_sync_values(values)

        rows = [tuple(value) for value in values]
        # Same bind-parameter budget as the bulk insert above.
        batch_size = 5000
        for i in range(0, len(rows), batch_size):
            _merge_rows(rows[i : i + batch_size], update_on_match=True)

    # Install the patches.
    global_search.sync_value = patched_sync_value
    global_search.insert_values_for_multiple_docs = patched_insert_values_for_multiple_docs
    global_search.sync_values = patched_sync_values

def web_search(text: str, scope: str | None = None, start: int = 0, limit: int = 20):
    """Full-text web search with Dameng (DM) database support.

    Delegates to frappe's stock ``web_search`` for non-DM databases.  For DM,
    each "&"-separated term is queried with ``CONTAINS`` full-text search and
    the merged results are re-ranked by combining the DM ``SCORE`` with the
    number of distinct query words appearing in each title.

    Args:
        text: Search text; "&" separates multiple terms that are queried
            individually and merged.
        scope: Optional route prefix restricting results ("ROUTE" LIKE scope%).
        start: Pagination offset passed to each per-term query.
        limit: Maximum rows fetched per term.

    Returns:
        Result rows (dicts) sorted by descending combined relevance.
    """
    if frappe.conf.db_type != "dm":
        return global_search.web_search(text, scope, start, limit)

    # The query template is loop-invariant — build it once, not per term.
    scope_condition = '"ROUTE" like %(scope)s AND ' if scope else ""
    conditions = (
        '"PUBLISHED" = 1 AND '
        + scope_condition
        + 'CONTAINS("CONTENT", %(text)s, 1) > 0'
    )
    query = """ SELECT "DOCTYPE", "NAME", "CONTENT", "TITLE", "ROUTE",
            SCORE(1) as "RELEVANCE"
        FROM "__global_search"
        WHERE {conditions}
        ORDER BY "RELEVANCE" DESC
        LIMIT %(limit)s OFFSET %(start)s""".format(conditions=conditions)

    results = []
    # Iterate under a distinct name: the previous version shadowed the `text`
    # parameter here, so the title-relevance pass below scored titles against
    # only the LAST term instead of the whole query.
    for term in text.split("&"):
        rows = frappe.db.sql(
            query,
            values={
                "scope": scope + "%" if scope else "",
                "limit": limit,
                "start": start,
                "text": term,
            },
            as_dict=True,
        )

        # Merge, skipping rows already collected from earlier terms.
        new_rows = [r for r in rows if r not in results]
        results.extend(new_rows)

    # Boost the CONTAINS score by the number of distinct query words that
    # also appear in the result title.
    words = set(global_search.get_distinct_words(text))
    for r in results:
        title_words = set(global_search.get_distinct_words(r.title))
        words_match = len(words.intersection(title_words))
        r.relevance = float(r.relevance or 0) * (1 + words_match)

    return sorted(results, key=lambda r: r.relevance, reverse=True)