import os
import subprocess
import sqlite3
from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
from typing import Optional, List, Dict

# Database-merge helpers (merge step of the decrypt pipeline)
from database_operations import main
# WeChat data-access layer built on the merged database
from getwxdata import WeixinDbStorage

# FastAPI application instance; the title appears in the OpenAPI docs UI.
app = FastAPI(title="微信数据库解析服务")


class DecryptResponse(BaseModel):
    """Response body for POST /decrypt."""
    # True when decryption, merge, and storage initialisation all succeeded.
    success: bool
    # Human-readable failure reason; None on success.
    error: Optional[str] = None


class ProgressResponse(BaseModel):
    """Response body for GET /progress."""
    # Current progress value (mirrors the module-level counter).
    progress: int


class ChatDataResponse(BaseModel):
    """Generic response wrapper for the chat-statistics endpoints."""
    success: bool
    # Query result rows; None when the query failed.
    data: Optional[List] = None
    # Error message; None on success.
    error: Optional[str] = None


class ExportResponse(BaseModel):
    """Response body for the CSV-export endpoints."""
    success: bool
    # Absolute path of the written CSV file; None on failure.
    file_path: Optional[str] = None
    # Error message; None on success.
    error: Optional[str] = None


# Response model dedicated to the export-all-messages endpoint.
class AllMessagesExportResponse(BaseModel):
    """Response body for GET /export/all-messages."""
    success: bool
    # Absolute path of the written CSV file; None on failure.
    file_path: Optional[str] = None
    # Error message; None on success.
    error: Optional[str] = None
    # Number of exported message rows; None on failure.
    count: Optional[int] = None

class TableInfo(BaseModel):
    """Schema summary of a single SQLite table.

    NOTE(review): not referenced by any endpoint visible in this file —
    confirm whether it is used elsewhere or is dead code.
    """
    table_name: str
    # One dict per column (name/type style entries).
    columns: List[Dict[str, str]]
    row_count: int



# --- Module-level state shared across requests ---

# Decrypt/merge progress reported by GET /progress (0 = not started).
current_progress = 0

# Data-access layer created by POST /decrypt; the query endpoints
# return HTTP 503 while this is still None.
wx_storage: Optional[WeixinDbStorage] = None


def find_files_debug(directory, pattern):
    """Recursively search *directory* for files matching a glob *pattern*.

    Prints the search path and the matches for debugging purposes.

    Args:
        directory: root directory to search under.
        pattern: glob pattern, e.g. ``"*.db"``.

    Returns:
        list[str]: matching paths, including files directly in *directory*
        (``**`` matches zero or more subdirectories).
    """
    import glob
    print(f"在目录 {directory} 中搜索模式 {pattern}")
    recursive_pattern = os.path.join(directory, "**", pattern)
    hits = glob.glob(recursive_pattern, recursive=True)
    print(f"找到匹配项: {hits}")
    return hits


# Internal step counter set by /decrypt (0 before, 1 once merge starts).
# NOTE(review): overlaps with current_progress and is never read by
# GET /progress — candidate for consolidation.
_progress = 0


@app.post("/decrypt", response_model=DecryptResponse)
async def start_decrypt_process():
    """
    Run the full decryption pipeline.

    Invokes the external wechat-dump-rs tool to decrypt the WeChat
    databases into ./db, rebuilds msg/merged.db via
    database_operations.main(), and initialises the global
    WeixinDbStorage handler used by the query endpoints.

    Returns:
        DecryptResponse: success flag plus an error message on failure.
    """
    global _progress, current_progress, wx_storage
    # Reset both progress counters for this run.
    # NOTE(review): current_progress is never advanced past 0 here, so
    # GET /progress always reports 0 — confirm whether the merge step
    # was meant to update it.
    current_progress = 0
    _progress = 0

    try:
        # Make sure the output directory for decrypted databases exists.
        if not os.path.exists("./db"):
            os.makedirs("./db")

        # Snapshot of the pre-existing ./db entries (inspected below).
        dblist = os.listdir("./db") if os.path.exists("./db") else []

        # Locate the external decryption tool (Windows-only binary).
        executable_path = "./bin/wechat-dump-rs.exe"
        if not os.path.exists(executable_path):
            return DecryptResponse(success=False, error=f"未找到解密工具: {executable_path}")

        # -a: dump all; -o: output directory.
        p = subprocess.Popen([executable_path, "-a", "-o", "./db"],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()

        # Map the tool's exit status / stderr onto API error messages.
        if p.returncode != 0:
            if err:
                err_str = err.decode("utf-8")
                print(f"解密工具错误输出: {err_str}")
                if "WeChat is not running" in err_str:
                    return DecryptResponse(success=False, error="微信未运行")
                return DecryptResponse(success=False, error=f"解密工具执行失败: {err_str}")
            else:
                return DecryptResponse(success=False, error="解密工具执行失败")

        # Former cleanup step, now disabled: pre-existing folders are
        # kept (the loop only logs them instead of deleting).
        for db in dblist:
            folder_path = f"./db/{db}"
            if os.path.isdir(folder_path):
                print("保留文件夹", folder_path)  # intentionally kept, not deleted

        # Remove a stale merged database so main() rebuilds it from scratch.
        merged_db_path = 'msg/merged.db'
        if os.path.exists(merged_db_path):
            os.remove(merged_db_path)

        # Merge the decrypted databases into msg/merged.db.
        _progress = 1
        if main():
            # Merge succeeded: build the data-access layer.
            try:
                wx_storage = WeixinDbStorage()
                return DecryptResponse(success=True)
            except Exception as e:
                return DecryptResponse(success=False, error=f"初始化数据处理器失败: {str(e)}")
        else:
            return DecryptResponse(success=False, error="数据库处理失败")

    except Exception as e:
        print(f"解密过程中发生异常: {str(e)}")
        return DecryptResponse(success=False, error=str(e))


@app.get("/progress", response_model=ProgressResponse)
async def get_progress():
    """Report the current decrypt/merge progress counter.

    Returns:
        ProgressResponse: wraps the module-level ``current_progress`` value.
    """
    return ProgressResponse(progress=current_progress)


@app.get("/files")
async def list_files():
    """
    List the current directory tree for debugging.

    Only .db and .exe files are reported; directories are recursed into
    up to four levels deep.

    Returns:
        dict: nested mapping of directory/file names; files map to "file".
    """

    def scan_directory(path, level=0):
        # Hard cap on recursion depth to keep the response small.
        if level > 3:
            return {}

        result = {}
        try:
            items = os.listdir(path)
        except OSError:
            # Unreadable directory (permissions, vanished, ...): report
            # it as empty rather than failing the whole endpoint.
            # (Was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            return result

        for item in items:
            item_path = os.path.join(path, item)
            if os.path.isdir(item_path):
                result[item] = scan_directory(item_path, level + 1)
            elif item.endswith(('.db', '.exe')):
                # Only surface database files and executables.
                result[item] = "file"
        return result

    return scan_directory(".")


@app.get("/db-files")
async def list_db_files():
    """
    List every file under the ./db directory.

    Returns:
        dict: relative paths of the files plus their total count.
    """
    found = []
    if os.path.exists("./db"):
        for root, _dirs, names in os.walk("./db"):
            found.extend(os.path.relpath(os.path.join(root, name), ".") for name in names)
    return {"db_files": found, "count": len(found)}


@app.get("/chatroom/msg-count")
async def get_chatroom_msg_count(username: str, topN: int = 10, start: str = "", end: str = ""):
    """
    Per-sender message counts for a group chat.

    Args:
        username: group-chat username (conversation id).
        topN: number of top senders to return.
        start: optional start-time filter.
        end: optional end-time filter.

    Returns:
        ChatDataResponse: the statistics rows, or an error message.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        counts = wx_storage.get_chatroom_msg_count(username, topN, start, end)
    except Exception as exc:
        return ChatDataResponse(success=False, error=str(exc))
    return ChatDataResponse(success=True, data=counts)


@app.get("/contacts/top")
async def get_top_contacts(top_n: int = 10, start: str = "", end: str = "", contain_chatroom: bool = False):
    """
    Contacts ranked by chat activity.

    Args:
        top_n: number of contacts to return.
        start: optional start-time filter.
        end: optional end-time filter.
        contain_chatroom: include group chats in the ranking when True.

    Returns:
        ChatDataResponse: the ranking rows, or an error message.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        ranking = wx_storage.get_chatted_top_contacts(top_n, start, end, contain_chatroom)
    except Exception as exc:
        return ChatDataResponse(success=False, error=str(exc))
    return ChatDataResponse(success=True, data=ranking)


@app.get("/messages/by-days")
async def get_messages_by_days(username: str, start: str, end: str):
    """
    Daily message counts for one conversation.

    Args:
        username: conversation username.
        start: start-time filter (required).
        end: end-time filter (required).

    Returns:
        ChatDataResponse: per-day counts, or an error message.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        daily_counts = wx_storage.get_messages_by_days(username, start, end)
    except Exception as exc:
        return ChatDataResponse(success=False, error=str(exc))
    return ChatDataResponse(success=True, data=daily_counts)


@app.get("/messages/by-month")
async def get_messages_by_month(username: str, start: str = "", end: str = ""):
    """
    Monthly message counts for one conversation.

    Args:
        username: conversation username.
        start: optional start-time filter.
        end: optional end-time filter.

    Returns:
        ChatDataResponse: per-month counts, or an error message.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        monthly_counts = wx_storage.get_messages_by_month(username, start, end)
    except Exception as exc:
        return ChatDataResponse(success=False, error=str(exc))
    return ChatDataResponse(success=True, data=monthly_counts)


@app.get("/messages/by-hour")
async def get_messages_by_hour(username: str, start: str = "", end: str = ""):
    """
    Hourly message counts for one conversation.

    Args:
        username: conversation username.
        start: optional start-time filter.
        end: optional end-time filter.

    Returns:
        ChatDataResponse: per-hour counts, or an error message.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        hourly_counts = wx_storage.get_messages_by_hour(username, start, end)
    except Exception as exc:
        return ChatDataResponse(success=False, error=str(exc))
    return ChatDataResponse(success=True, data=hourly_counts)


@app.get("/messages/keyword")
async def get_messages_by_keyword(username: str, keyword: str, num: int = 5, max_len: int = 10, start: str = "",
                                  end: str = ""):
    """
    Search one conversation's messages for a keyword.

    Args:
        username: conversation username.
        keyword: substring to search for.
        num: maximum number of results to return.
        max_len: maximum message length to include.
        start: optional start-time filter.
        end: optional end-time filter.

    Returns:
        ChatDataResponse: matching messages, or an error message.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        hits = wx_storage.get_messages_by_keyword(username, keyword, num, max_len, start, end)
    except Exception as exc:
        return ChatDataResponse(success=False, error=str(exc))
    return ChatDataResponse(success=True, data=hits)


@app.get("/export/messages")
async def export_messages_to_csv(username: str, start: str = "", end: str = "", file_name: str = ""):
    """
    Export one conversation's chat history to a CSV file.

    Args:
        username: conversation username whose messages to export.
        start: optional start-time filter.
        end: optional end-time filter.
        file_name: output file name; defaults to
            wechat_messages_<username>_<timestamp>.csv. Directory
            components are stripped, so the file is always written to
            the current working directory.

    Returns:
        ExportResponse: absolute path of the written CSV, or an error.

    Raises:
        HTTPException: 503 when the decrypt step has not run yet.
    """
    if wx_storage is None:
        raise HTTPException(status_code=503, detail="数据未初始化，请先执行解密过程")

    try:
        # Fetch the raw message rows from the data-access layer.
        messages = wx_storage.get_msg_list_by_username(username, start, end)

        # (Removed an unused `import csv`; pandas does the CSV writing.)
        import pandas as pd
        from datetime import datetime

        # Default file name when the caller did not supply one.
        if not file_name:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            file_name = f"wechat_messages_{username}_{timestamp}.csv"

        # Strip any directory components so a caller-supplied name (or a
        # username containing separators) cannot escape the working
        # directory (path traversal).
        file_name = os.path.basename(file_name)

        # Ensure a .csv extension.
        if not file_name.endswith('.csv'):
            file_name += '.csv'

        df = pd.DataFrame(messages, columns=['local_type', 'sender_name', 'create_time', 'message_content'])

        # Content may arrive as BLOBs: decode bytes, stringify the rest.
        df['message_content'] = df['message_content'].apply(
            lambda x: x.decode('utf-8', errors='ignore') if isinstance(x, bytes) else str(x)
        )

        # utf-8-sig adds a BOM so Excel detects the encoding correctly.
        file_path = os.path.join(os.getcwd(), file_name)
        df.to_csv(file_path, index=False, encoding='utf-8-sig')

        return ExportResponse(success=True, file_path=file_path)
    except Exception as e:
        return ExportResponse(success=False, error=str(e))


@app.get("/export/tables-with-data")
async def export_tables_with_data_to_csv(min_rows: int = 1, file_name: str = ""):
    """
    Export a CSV overview of every non-empty table.

    Scans all decrypted databases under ./db (plus msg/merged.db when
    present), records each table whose row count is at least
    ``min_rows`` together with its schema and one sample row, and
    writes the summary to the current working directory.

    Args:
        min_rows: minimum row count for a table to be included.
        file_name: output file name; defaults to
            tables_with_data_<timestamp>.csv (directory components
            are stripped).

    Returns:
        dict with statistics on success, or an ExportResponse carrying
        the error message on failure.
    """
    import logging
    import pandas as pd
    from datetime import datetime

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    try:
        logger.info(f"开始导出包含数据的表信息（最小行数: {min_rows}）")

        # Collect candidate database files (skip SQLite sidecar files).
        db_files = []
        if os.path.exists("./db"):
            for root, dirs, files in os.walk("./db"):
                for file in files:
                    if file.endswith('.db') and not file.endswith(('.db-shm', '.db-wal')):
                        db_files.append(os.path.join(root, file))

        # Include the merged database if it has been built.
        if os.path.exists("msg/merged.db"):
            db_files.append("msg/merged.db")

        logger.info(f"找到数据库文件: {len(db_files)} 个")

        # One summary dict per table that meets the row threshold.
        tables_with_data = []

        for db_file in db_files:
            logger.info(f"正在处理数据库: {db_file}")
            try:
                with sqlite3.connect(db_file) as conn:
                    cursor = conn.cursor()

                    cursor.execute("SELECT name FROM sqlite_master WHERE type='table' ORDER BY name")
                    table_names = [row[0] for row in cursor.fetchall()]

                    for table_name in table_names:
                        try:
                            # Table names come from sqlite_master and may
                            # contain arbitrary characters, so quote them
                            # instead of interpolating bare identifiers.
                            cursor.execute(f'SELECT COUNT(*) FROM "{table_name}"')
                            row_count = cursor.fetchone()[0]

                            # Only keep tables with enough data.
                            if row_count >= min_rows:
                                cursor.execute(f'PRAGMA table_info("{table_name}")')
                                columns_info = cursor.fetchall()

                                column_names = [col[1] for col in columns_info]
                                # col[5] is the pk flag in PRAGMA table_info.
                                primary_keys = [col[1] for col in columns_info if col[5]]

                                # First row as a truncated sample.
                                try:
                                    cursor.execute(f'SELECT * FROM "{table_name}" LIMIT 1')
                                    sample_row = cursor.fetchone()
                                    sample_data = ', '.join([str(val)[:50] + ('...' if len(str(val)) > 50 else '')
                                                             for val in sample_row]) if sample_row else ''
                                except Exception:
                                    # Was a bare `except:`; narrowed so
                                    # KeyboardInterrupt still propagates.
                                    sample_data = 'Unable to fetch sample data'

                                tables_with_data.append({
                                    'database_file': db_file,
                                    'database_name': os.path.basename(db_file),
                                    'table_name': table_name,
                                    'row_count': row_count,
                                    'column_count': len(columns_info),
                                    'column_names': ', '.join(column_names),
                                    'primary_keys': ', '.join(primary_keys) if primary_keys else '',
                                    'sample_data': sample_data
                                })

                        except Exception as e:
                            logger.warning(f"  处理表 {table_name} 时出错: {e}")
                            continue

            except Exception as e:
                logger.error(f"处理数据库 {db_file} 时出错: {e}")
                continue

        if not tables_with_data:
            return ExportResponse(success=False, error=f"未找到行数大于等于 {min_rows} 的表")

        # Largest tables first.
        tables_with_data.sort(key=lambda x: x['row_count'], reverse=True)

        df = pd.DataFrame(tables_with_data)
        logger.info(f"创建DataFrame完成，共 {len(df)} 个有数据的表")

        # Default output file name.
        if not file_name:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            file_name = f"tables_with_data_{timestamp}.csv"

        # Strip directory components so a caller-supplied name cannot
        # escape the working directory (path traversal).
        file_name = os.path.basename(file_name)

        if not file_name.endswith('.csv'):
            file_name += '.csv'

        # utf-8-sig adds a BOM so Excel detects the encoding correctly.
        file_path = os.path.join(os.getcwd(), file_name)
        df.to_csv(file_path, index=False, encoding='utf-8-sig')
        logger.info(f"文件保存完成: {file_path}")

        # Summary statistics returned alongside the file path.
        total_rows = df['row_count'].sum()
        stats = {
            'tables_with_data': len(df),
            'total_rows_across_all_tables': total_rows,
            'largest_table': df.iloc[0]['table_name'] if len(df) > 0 else '',
            'largest_table_rows': df.iloc[0]['row_count'] if len(df) > 0 else 0
        }

        return {
            "success": True,
            "file_path": file_path,
            "statistics": stats,
            "message": f"成功导出 {stats['tables_with_data']} 个有数据的表，总计 {total_rows:,} 行数据"
        }

    except Exception as e:
        logger.error(f"导出有数据的表时发生错误: {str(e)}", exc_info=True)
        return ExportResponse(success=False, error=str(e))


@app.get("/export/all-messages", response_model=AllMessagesExportResponse)
async def export_all_messages_to_csv(start: str = "", end: str = "", file_name: str = ""):
    """
    Export every chat message to a CSV file.

    Reads the raw database tables directly (bypassing wx_storage) and
    uses Contact.NickName as the displayed username. Three passes:
    load Contact display names, load Name2Id rowid-to-username
    mappings, then stream and normalise the message rows.

    Args:
        start: optional start-time filter.
        end: optional end-time filter.
        file_name: output file name; defaults to
            all_wechat_messages_nickname_<timestamp>.csv.

    Returns:
        AllMessagesExportResponse: file path and exported row count.
    """
    import logging
    import pandas as pd
    from datetime import datetime
    from getwxdata import date_solve, msg_solve

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    try:
        logger.info(f"开始从原始数据库导出所有消息，时间范围: {start} 到 {end}")

        # Parse the requested time window into bounds usable in SQL.
        start_time, end_time = date_solve(start, end)
        logger.info(f"解析时间范围: start={start_time}, end={end_time}")

        # Collect decrypted database files (skip SQLite sidecar files).
        db_files = []
        if os.path.exists("./db"):
            for root, dirs, files in os.walk("./db"):
                for file in files:
                    if file.endswith('.db') and not file.endswith(('.db-shm', '.db-wal')):
                        db_files.append(os.path.join(root, file))

        logger.info(f"找到数据库文件: {len(db_files)} 个")

        # Accumulates one record per message across all databases.
        all_messages = []

        # Contact metadata keyed by raw username:
        # {username: {'remark': '', 'nickname': '', 'alias': ''}}
        contact_mapping = {}

        logger.info("开始加载Contact表中的联系人信息...")

        # Pass 1: load contact display names from every Contact table.
        for db_file in db_files:
            try:
                with sqlite3.connect(db_file) as conn:
                    cursor = conn.cursor()

                    # List all tables in this database.
                    cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
                    tables = [row[0] for row in cursor.fetchall()]

                    # Find Contact tables (case-insensitive name match).
                    contact_tables = []
                    for table in tables:
                        if table.lower() == 'contact':
                            contact_tables.append(table)

                    for table in contact_tables:
                        try:
                            # Inspect the schema to confirm column names.
                            cursor.execute(f"PRAGMA table_info({table})")
                            columns_info = cursor.fetchall()
                            columns = [col[1] for col in columns_info]
                            logger.info(f"Contact表 {table} 的列: {columns[:10]}...")

                            # Map the expected Contact columns to actual names.
                            username_col = None
                            remark_col = None
                            nickname_col = None
                            alias_col = None

                            for col in columns:
                                if col == 'UserName':
                                    username_col = col
                                elif col == 'Remark':
                                    remark_col = col
                                elif col == 'NickName':
                                    nickname_col = col
                                elif col == 'Alias':
                                    alias_col = col

                            if username_col and nickname_col:
                                # Select UserName + NickName plus the
                                # optional Remark/Alias columns when present.
                                select_cols = [username_col, nickname_col]
                                if remark_col:
                                    select_cols.append(remark_col)
                                if alias_col:
                                    select_cols.append(alias_col)

                                query = f"SELECT {', '.join(select_cols)} FROM {table} WHERE {username_col} IS NOT NULL AND {username_col} != ''"

                                logger.info(f"执行Contact表查询: {query}")
                                contacts = cursor.execute(query).fetchall()
                                logger.info(f"从 {db_file} 的 {table} 加载了 {len(contacts)} 个联系人")

                                # NOTE(review): the positional unpack below
                                # assumes the order UserName, NickName,
                                # Remark, Alias. If Remark is absent but
                                # Alias exists, contact[2] is actually the
                                # Alias value stored under 'remark' —
                                # confirm whether that case can occur.
                                for contact in contacts:
                                    username = contact[0]  # UserName
                                    nickname = contact[1] if len(contact) > 1 else ''  # NickName
                                    remark = contact[2] if len(contact) > 2 else ''  # Remark
                                    alias = contact[3] if len(contact) > 3 else ''  # Alias

                                    if username:
                                        contact_mapping[username] = {
                                            'nickname': nickname or '',
                                            'remark': remark or '',
                                            'alias': alias or ''
                                        }

                            else:
                                logger.warning(f"Contact表 {table} 缺少必要的列（UserName或NickName）")

                        except Exception as e:
                            logger.warning(f"处理Contact表 {table} 时出错: {e}")
                            continue

            except Exception as e:
                logger.warning(f"从数据库 {db_file} 加载Contact信息时出错: {e}")
                continue

        logger.info(f"总共加载了 {len(contact_mapping)} 个联系人信息（从Contact表）")

        # Pass 2: build the rowid -> username mapping from Name2Id tables.
        name_id_mapping = {}

        for db_file in db_files:
            try:
                with sqlite3.connect(db_file) as conn:
                    cursor = conn.cursor()

                    # Known table/column naming variants to try in order.
                    name_id_patterns = [
                        ("Name2Id", "user_name"),
                        ("Name2Id", "UserName"),
                        ("name2id", "user_name"),
                    ]

                    for table_name, col_name in name_id_patterns:
                        try:
                            cursor.execute(f"SELECT rowid, {col_name} FROM {table_name}")
                            mappings = cursor.fetchall()
                            for rowid, username in mappings:
                                if username:
                                    name_id_mapping[rowid] = username
                            logger.info(f"从 {db_file} 的 {table_name} 加载了 {len(mappings)} 个ID映射")
                            break
                        except:
                            # Variant missing in this database: try the next.
                            continue

            except Exception as e:
                logger.warning(f"从数据库 {db_file} 加载Name2Id映射时出错: {e}")
                continue

        logger.info(f"总共加载了 {len(name_id_mapping)} 个ID映射")

        # Pass 3: extract the message rows themselves.
        # local_type values to export (text, image, voice, etc.).
        # NOTE(review): meaning of the large composite type ids is assumed
        # from the upstream WeChat schema — confirm against getwxdata.
        msgtype_filter = '1, 3, 43, 47, 48, 10000, 25769803825, 21474836529, 244813135921, 154618822705, 17179869233, 73014455032'

        for db_file in db_files:
            logger.info(f"正在处理消息数据库: {db_file}")
            try:
                with sqlite3.connect(db_file) as conn:
                    cursor = conn.cursor()

                    # List all tables in this database.
                    cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
                    tables = [row[0] for row in cursor.fetchall()]

                    # Heuristic: message tables mention msg/message/chat.
                    msg_tables = []
                    for table in tables:
                        table_lower = table.lower()
                        if any(keyword in table_lower for keyword in ['msg', 'message', 'chat']):
                            msg_tables.append(table)

                    logger.info(f"找到消息表: {msg_tables}")

                    for table in msg_tables:
                        try:
                            # Inspect the table's schema.
                            cursor.execute(f"PRAGMA table_info({table})")
                            columns_info = cursor.fetchall()
                            columns = [col[1] for col in columns_info]

                            logger.info(f"消息表 {table} 的列: {columns[:10]}...")  # show only the first 10 column names

                            # Map canonical field names to actual columns.
                            column_mapping = {
                                'local_type': None,
                                'create_time': None,
                                'source': None,  # chat peer (conversation id)
                                'message_content': None,
                                'real_sender_id': None,  # actual sender's id
                                'local_id': None
                            }

                            for col in columns:
                                col_lower = col.lower()
                                if col_lower in ['local_type', 'type']:
                                    column_mapping['local_type'] = col
                                elif col_lower in ['create_time', 'createtime', 'time']:
                                    column_mapping['create_time'] = col
                                elif col_lower in ['source', 'strtalker', 'talker', 'from_user']:
                                    column_mapping['source'] = col
                                elif col_lower in ['message_content', 'strcontent', 'content', 'msg']:
                                    column_mapping['message_content'] = col
                                elif col_lower in ['real_sender_id', 'talkerid', 'sender_id']:
                                    column_mapping['real_sender_id'] = col
                                elif col_lower in ['local_id', 'localid', 'msgid']:
                                    column_mapping['local_id'] = col

                            # Skip tables missing the essential columns.
                            if not (column_mapping['create_time'] and column_mapping['source']):
                                logger.warning(f"表 {table} 缺少关键字段，跳过")
                                continue

                            # Build the SELECT list; NULL placeholders keep
                            # the positional order stable for absent fields.
                            select_fields = []
                            field_order = []  # canonical names matching select_fields

                            for field, col_name in column_mapping.items():
                                if col_name:
                                    select_fields.append(col_name)
                                    field_order.append(field)
                                else:
                                    select_fields.append("NULL")
                                    field_order.append(field)

                            where_clause = ""
                            params = []

                            # Filter by message type when the column exists.
                            if column_mapping['local_type']:
                                where_clause = f"WHERE {column_mapping['local_type']} IN ({msgtype_filter})"

                            # Apply the optional time window bounds.
                            if start_time and column_mapping['create_time']:
                                if where_clause:
                                    where_clause += f" AND {column_mapping['create_time']} > ?"
                                else:
                                    where_clause = f"WHERE {column_mapping['create_time']} > ?"
                                params.append(start_time)

                            if end_time and column_mapping['create_time']:
                                if where_clause:
                                    where_clause += f" AND {column_mapping['create_time']} < ?"
                                else:
                                    where_clause = f"WHERE {column_mapping['create_time']} < ?"
                                params.append(end_time)

                            query = f"SELECT {', '.join(select_fields)} FROM {table} {where_clause} ORDER BY {column_mapping['create_time'] or 'rowid'}"

                            logger.info(f"执行查询: {query[:100]}...")
                            cursor.execute(query, params)

                            # Stream rows in batches to bound memory use.
                            batch_size = 10000
                            while True:
                                rows = cursor.fetchmany(batch_size)
                                if not rows:
                                    break

                                for row in rows:
                                    # Normalise the row into a dict keyed by
                                    # the canonical field names.
                                    row_dict = dict(zip(field_order, row))

                                    # Resolve conversation id and sender id.
                                    source = row_dict.get('source', '')
                                    real_sender_id = row_dict.get('real_sender_id')

                                    # Display name for the chat peer:
                                    # NickName > Remark > raw username.
                                    display_username = source  # fall back to the raw source value
                                    if source in contact_mapping:
                                        contact_info = contact_mapping[source]
                                        if contact_info['nickname']:
                                            display_username = contact_info['nickname']
                                        elif contact_info['remark']:
                                            display_username = contact_info['remark']

                                    # Display name for the actual sender
                                    # (differs from the peer in group chats).
                                    sender_display_name = display_username  # default: same as the peer
                                    if real_sender_id and real_sender_id in name_id_mapping:
                                        sender_username = name_id_mapping[real_sender_id]
                                        # Resolve the sender's display name.
                                        if sender_username in contact_mapping:
                                            sender_contact_info = contact_mapping[sender_username]
                                            if sender_contact_info['nickname']:
                                                sender_display_name = sender_contact_info['nickname']
                                            elif sender_contact_info['remark']:
                                                sender_display_name = sender_contact_info['remark']
                                            else:
                                                sender_display_name = sender_username
                                        else:
                                            sender_display_name = sender_username

                                    # Final record layout:
                                    # [type, sender, time, content, peer].
                                    message_record = [
                                        row_dict.get('local_type', 0),
                                        sender_display_name,  # sender's display name
                                        row_dict.get('create_time', 0),
                                        row_dict.get('message_content', ''),
                                        display_username  # peer's display name (prefers NickName)
                                    ]

                                    all_messages.append(message_record)

                                if len(all_messages) % 10000 == 0:
                                    logger.info(f"已处理 {len(all_messages)} 条消息")

                        except Exception as e:
                            logger.warning(f"处理消息表 {table} 时出错: {e}")
                            continue

            except Exception as e:
                logger.error(f"处理数据库 {db_file} 时出错: {e}")
                continue

        if not all_messages:
            return AllMessagesExportResponse(success=False, error="未找到任何消息数据")

        logger.info(f"总共获取到 {len(all_messages)} 条消息")

        # Sort chronologically (index 2 is create_time).
        all_messages.sort(key=lambda x: x[2] if x[2] else 0)

        # Post-process message bodies via the shared helper.
        try:
            all_messages = msg_solve(all_messages)
            logger.info("消息内容处理完成")
        except Exception as e:
            logger.warning(f"消息内容处理出错: {e}")

        df = pd.DataFrame(all_messages,
                          columns=['local_type', 'sender_name', 'create_time', 'message_content', 'username'])

        # Reorder columns for readability.
        df = df[['username', 'sender_name', 'create_time', 'message_content', 'local_type']]

        # Add a human-readable timestamp column.
        # NOTE(review): assumes create_time is epoch seconds — confirm.
        try:
            df['readable_time'] = pd.to_datetime(df['create_time'], unit='s').dt.strftime('%Y-%m-%d %H:%M:%S')
        except:
            df['readable_time'] = df['create_time']

        # Normalise message content to str (BLOBs decoded as UTF-8).
        def process_message_content(x):
            if isinstance(x, bytes):
                return x.decode('utf-8', errors='ignore')
            elif x is None:
                return ''
            else:
                return str(x)

        df['message_content'] = df['message_content'].apply(process_message_content)

        # Ensure the name columns are non-null strings.
        df['username'] = df['username'].fillna('').astype(str)
        df['sender_name'] = df['sender_name'].fillna('').astype(str)

        # Final column order for the CSV.
        df = df[['username', 'sender_name', 'readable_time', 'create_time', 'local_type', 'message_content']]

        # Default output file name.
        if not file_name:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            file_name = f"all_wechat_messages_nickname_{timestamp}.csv"

        if not file_name.endswith('.csv'):
            file_name += '.csv'

        # utf-8-sig adds a BOM so Excel detects the encoding correctly.
        file_path = os.path.join(os.getcwd(), file_name)
        df.to_csv(file_path, index=False, encoding='utf-8-sig')
        logger.info(f"文件保存完成: {file_path}")

        # Summary statistics (logged; only count is returned).
        stats = {
            'total_messages': len(df),
            'unique_users': len(df['username'].unique()),
            'unique_senders': len(df['sender_name'].unique()),
            'contact_mappings_used': len(contact_mapping),
            'date_range': {
                'earliest': df['readable_time'].min(),
                'latest': df['readable_time'].max()
            }
        }

        logger.info(f"导出完成，共 {stats['total_messages']} 条消息，{stats['unique_users']} 个聊天对象")
        logger.info(f"使用了 {stats['contact_mappings_used']} 个联系人映射")

        return AllMessagesExportResponse(
            success=True,
            file_path=file_path,
            count=len(df)
        )

    except Exception as e:
        logger.error(f"导出所有消息时发生错误: {str(e)}", exc_info=True)
        return AllMessagesExportResponse(success=False, error=str(e))


@app.get("/database/message-stats")
async def get_message_database_stats():
    """
    Collect per-table message statistics across all decrypted databases.

    Recursively scans ./db for *.db files. For each table whose name
    suggests chat data (contains 'msg', 'message' or 'chat'), reports the
    total row count, the count of rows whose type column matches the known
    message-type IDs, the column list, and whether type/timestamp columns
    exist.

    Returns:
        dict: {"success": True, "database_count": int, "statistics": {...}}
              on success, or {"success": False, "error": str} on failure.
    """
    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    # local_type values treated as "real" chat messages.
    msgtype_filter = '1, 3, 43, 47, 48, 10000, 25769803825, 21474836529, 244813135921, 154618822705, 17179869233, 73014455032'

    try:
        # Gather decrypted SQLite files. The plain '.db' suffix check already
        # excludes the '-shm'/'-wal' sidecar files, so no extra filter needed.
        db_files = []
        if os.path.exists("./db"):
            for root, _dirs, files in os.walk("./db"):
                db_files.extend(
                    os.path.join(root, f) for f in files if f.endswith('.db')
                )

        stats = {}
        for db_file in db_files:
            try:
                with sqlite3.connect(db_file) as conn:
                    cursor = conn.cursor()

                    # All user tables in this database.
                    cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
                    tables = [row[0] for row in cursor.fetchall()]

                    db_stats = {}
                    for table in tables:
                        table_lower = table.lower()
                        if not any(k in table_lower for k in ('msg', 'message', 'chat')):
                            continue
                        # Quote the identifier so table names containing
                        # spaces, quotes or SQL keywords cannot break (or
                        # inject into) the statements below.
                        qtable = '"' + table.replace('"', '""') + '"'
                        try:
                            # Inspect the table's columns.
                            cursor.execute(f"PRAGMA table_info({qtable})")
                            columns = [col[1] for col in cursor.fetchall()]

                            has_local_type = any(c.lower() in ('local_type', 'type') for c in columns)
                            has_create_time = any(c.lower() in ('create_time', 'createtime') for c in columns)

                            # Total rows in the table.
                            cursor.execute(f"SELECT COUNT(*) FROM {qtable}")
                            total_count = cursor.fetchone()[0]

                            # Rows whose type matches a known message type.
                            filtered_count = 0
                            if has_local_type:
                                local_type_col = next(
                                    (c for c in columns if c.lower() in ('local_type', 'type')), None)
                                if local_type_col:
                                    qcol = '"' + local_type_col.replace('"', '""') + '"'
                                    cursor.execute(
                                        f"SELECT COUNT(*) FROM {qtable} WHERE {qcol} IN ({msgtype_filter})")
                                    filtered_count = cursor.fetchone()[0]

                            db_stats[table] = {
                                'total_rows': total_count,
                                'message_rows': filtered_count,
                                'columns': columns,
                                'has_local_type': has_local_type,
                                'has_create_time': has_create_time
                            }

                        except Exception as e:
                            # Record the per-table failure but keep scanning
                            # the remaining tables.
                            db_stats[table] = {'error': str(e)}

                    if db_stats:
                        stats[db_file] = db_stats

            except Exception as e:
                # One unreadable database must not abort the whole scan.
                stats[db_file] = {'error': str(e)}

        return {
            "success": True,
            "database_count": len(db_files),
            "statistics": stats
        }

    except Exception as e:
        logger.error(f"获取消息数据库统计时发生错误: {str(e)}")
        return {"success": False, "error": str(e)}


if __name__ == "__main__":
    # Launch the API server when this module is executed directly.
    import uvicorn

    serve_host, serve_port = "0.0.0.0", 8000
    uvicorn.run(app, host=serve_host, port=serve_port)