# Standard library
import json
import os
import shutil
import tempfile
import time
from collections import Counter
from typing import List, Optional, Dict, Any, Set

# Third-party
import redis
import requests
import uvicorn
from fastapi import FastAPI, File, UploadFile, Form, HTTPException, Query, Body, Path, APIRouter
from fastapi.responses import JSONResponse, Response
from pydantic import BaseModel

# 导入Redis公司数据模块中的函数，后续修改支持多数据库
from redis_company_data import (
    retrieve_company_from_redis,
    list_all_companies,
    fuzzy_match_company_id,
    fuzzy_search_company,
    fuzzy_match_company_id_with_spaces,
    fuzzy_search_company_with_spaces
)

# 导入数据库连接模块
from db_connection import get_redis_connection, init_redis_from_json

# FastAPI application instance; the Redis-backed endpoints below are grouped
# on `redis_router` (presumably included on `app` elsewhere — TODO confirm).
app = FastAPI(
    title="公司信息Redis数据库API",
    description="提供Redis公司数据库的增删改查接口，支持多数据库操作",
    version="1.1.0",
)

# Request/response models used by the endpoints below.
class CompanyListResponse(BaseModel):
    """Company listing response: a total count plus id/name dictionaries."""
    total: int
    companies: List[Dict[str, str]]

class FuzzyMatchRequest(BaseModel):
    """Request body for fuzzy-matching companies by (partial) ID or name."""
    query: str
    # Minimum similarity ratio (0..1) a candidate must reach to be returned.
    min_similarity: Optional[float] = 0.6

class FieldSearchRequest(BaseModel):
    """Request body for fuzzy-searching companies on a specific field."""
    query: str
    # Hash field to match against; defaults to the company-name field.
    field: Optional[str] = "公司名称"
    # Minimum similarity ratio (0..1) for a candidate to be returned.
    min_similarity: Optional[float] = 0.3
    # Maximum number of matches to return.
    limit: Optional[int] = 5

class LoadFolderRequest(BaseModel):
    """Request body for bulk-loading every JSON file in a server-side folder."""
    folder_path: str
    # When True, records whose ID already exists are overwritten; otherwise skipped.
    update_existing: Optional[bool] = False

# Router grouping all Redis database endpoints under the /db_redis prefix.
redis_router = APIRouter(prefix="/db_redis", tags=["Redis数据库"])

# Module-level Redis connection shared by every endpoint and helper below.
r = get_redis_connection()

# Attempt to initialize the database when the application starts.
@app.on_event("startup")
async def startup_db_init():
    """Seed Redis with the default dataset on application startup."""
    # Initialize the default database from its JSON source
    # (idempotency depends on init_redis_from_json — TODO confirm).
    init_redis_from_json()

# List every logical database name present in Redis.
def get_all_databases():
    """Return a sorted list of distinct database names under the ``db:*`` prefix.

    Uses SCAN instead of KEYS: KEYS is O(N) and blocks the Redis server while
    it walks the whole keyspace, whereas SCAN iterates incrementally.
    """
    # Keys look like "db:<name>:company:<id>"; the second segment is the db name.
    names = {key.split(':')[1] for key in r.scan_iter("db:*")}
    return sorted(names)

# Build the canonical Redis hash key for a company record.
def company_key(db_name, company_id):
    """Return the Redis key under which the given company's hash is stored."""
    return ":".join(("db", db_name, "company", company_id))

# Load a single JSON file of company records into the given database.
def load_json_to_redis(db_name, file_path, update_existing=False):
    """Load company records from a JSON file into Redis.

    The file must contain a mapping ``{company_id: {field: value, ...}}``.
    Existing records are skipped unless *update_existing* is True, in which
    case their fields are overwritten (fields absent from the file are kept).

    Returns:
        dict with ``loaded_count``, ``updated_count``, ``skipped_count`` and
        ``total_processed`` (loaded + updated).
    """
    loaded_count = 0
    skipped_count = 0
    updated_count = 0

    # Read the JSON file
    with open(file_path, 'r', encoding='utf-8') as f:
        company_data = json.load(f)

    # Store the data in Redis
    for company_id, details in company_data.items():
        key = company_key(db_name, company_id)
        exists = r.exists(key)
        if exists and not update_existing:
            skipped_count += 1
            continue
        # HSET with mapping= writes all fields in one round trip instead of
        # one command per field (guard: redis-py rejects an empty mapping).
        if details:
            r.hset(key, mapping=details)
        if exists:
            updated_count += 1
        else:
            loaded_count += 1

    # Register the database name so it appears in the database listing.
    r.sadd("database_list", db_name)

    return {
        "loaded_count": loaded_count,
        "updated_count": updated_count,
        "skipped_count": skipped_count,
        "total_processed": loaded_count + updated_count,
    }

# Load every JSON file in a folder into the given database.
def load_folder_json_to_redis(db_name, folder_path, update_existing=False):
    """Load all ``*.json`` files under *folder_path* into database *db_name*.

    A file that fails to load is counted and its error recorded instead of
    aborting the whole import, so one bad file cannot block the rest
    (the original silently discarded the exception).

    Raises:
        HTTPException(404): if *folder_path* does not exist.
    """
    if not os.path.exists(folder_path):
        raise HTTPException(status_code=404, detail=f"文件夹 {folder_path} 不存在")

    json_files = [f for f in os.listdir(folder_path) if f.endswith('.json')]

    if not json_files:
        return {
            "total_companies": 0,
            "processed_files": 0,
            "error_files": 0,
            "message": f"警告: 文件夹 {folder_path} 中没有找到JSON文件"
        }

    total_companies = 0
    processed_files = 0
    errors = []

    # Process each JSON file independently.
    for json_file in json_files:
        file_path = os.path.join(folder_path, json_file)
        try:
            result = load_json_to_redis(db_name, file_path, update_existing)
            total_companies += result["total_processed"]
            processed_files += 1
        except Exception as e:
            # Record the failure instead of silently swallowing it.
            errors.append({"file": json_file, "error": str(e)})

    error_files = len(errors)
    return {
        "total_companies": total_companies,
        "processed_files": processed_files,
        "error_files": error_files,
        # New, backward-compatible detail field listing per-file failures.
        "errors": errors,
        "message": f"文件夹处理完成。共处理 {processed_files} 个JSON文件，错误 {error_files} 个，总计导入 {total_companies} 家公司"
    }

# Collect id/name pairs for every company in a database.
def list_db_companies(db_name):
    """Return ``[{"id": ..., "name": ...}, ...]`` for all companies in *db_name*.

    Uses SCAN rather than KEYS so large keyspaces do not block the server.
    """
    companies = []
    for key in r.scan_iter(f"db:{db_name}:company:*"):
        company_id = key.split(":")[-1]
        companies.append({"id": company_id, "name": r.hget(key, "公司名称")})
    return companies

# Fetch the full record for one company in one database.
@redis_router.get("/{db_name}/entity/{company_id}")
async def get_db_company(
    db_name: str = Path(..., description="数据库名称"),
    company_id: str = Path(..., description="公司ID")
):
    """Return the full detail record of a company looked up by ID."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        company_data = retrieve_company_from_redis(db_name, company_id)
        if not company_data:
            raise HTTPException(status_code=404, detail=f"未找到ID为 {company_id} 的公司")
        return company_data
    except HTTPException:
        # Let the deliberate 404s above pass through untouched.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取公司详情时出错: {str(e)}")

# Fetch the raw field/value pairs of one company.
@redis_router.get("/{db_name}/entity/{company_id}/fields")
async def get_company_fields(
    db_name: str = Path(..., description="数据库名称"),
    company_id: str = Path(..., description="公司ID")
):
    """Return the hash fields and values stored for a specific company."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        key = company_key(db_name, company_id)
        if not r.exists(key):
            raise HTTPException(status_code=404, detail=f"未找到ID为 {company_id} 的公司")

        return {
            "database": db_name,
            "company_id": company_id,
            "fields": r.hgetall(key),
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取公司字段时出错: {str(e)}")

# List every distinct field name used across a database's company hashes.
@redis_router.get("/{db_name}/fields")
async def get_db_fields(db_name: str = Path(..., description="数据库名称")):
    """Return all field names that occur in the company data of a database."""
    try:
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        # SCAN instead of KEYS: non-blocking on large keyspaces.
        company_keys = list(r.scan_iter(f"db:{db_name}:company:*"))
        if not company_keys:
            return {"fields": [], "message": f"数据库 {db_name} 中没有公司数据"}

        # Union of field names across every company hash.
        all_fields = set()
        for key in company_keys:
            all_fields.update(r.hkeys(key))

        fields_list = sorted(all_fields)
        return {
            "database": db_name,
            "total": len(fields_list),
            "fields": fields_list,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取字段列表时出错: {str(e)}")

# Fuzzy-match companies in a database by partial ID.
@redis_router.post("/{db_name}/fuzzy_match")
async def match_db_company_id(
    request: FuzzyMatchRequest,
    db_name: str = Path(..., description="数据库名称")
):
    """Fuzzy-match companies by a partial ID."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        result = fuzzy_match_company_id(db_name, request.query, request.min_similarity)
        if not result or result["total_matches"] == 0:
            raise HTTPException(status_code=404, detail=f"未找到与 '{request.query}' 匹配的公司")
        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"模糊匹配公司时出错: {str(e)}")

# Dedicated ID-only fuzzy-match endpoint (same matcher as /fuzzy_match).
@redis_router.post("/{db_name}/fuzzy_match/id")
async def match_db_company_by_id(
    request: FuzzyMatchRequest,
    db_name: str = Path(..., description="数据库名称")
):
    """Fuzzy-match companies by a partial ID."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        result = fuzzy_match_company_id(db_name, request.query, request.min_similarity)
        if not result or result["total_matches"] == 0:
            raise HTTPException(status_code=404, detail=f"未找到与ID '{request.query}' 匹配的公司")
        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"按ID模糊匹配公司时出错: {str(e)}")

# Fuzzy-match companies in a database by company name.
@redis_router.post("/{db_name}/fuzzy_match/name")
async def match_db_company_by_name(
    request: FuzzyMatchRequest,
    db_name: str = Path(..., description="数据库名称")
):
    """Fuzzy-match companies by company name."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        result = fuzzy_search_company(db_name, request.query, field="公司名称", min_similarity=request.min_similarity)
        if not result:
            raise HTTPException(status_code=404, detail=f"未找到与名称 '{request.query}' 匹配的公司")

        # Mirror the response shape of the ID-matching endpoints.
        return {
            "total_matches": len(result),
            "matches": result,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"按名称模糊匹配公司时出错: {str(e)}")

# Request model for space-separated multi-term fuzzy matching.
class BlankSeparatedMatchRequest(BaseModel):
    """Request body for order-independent, space-separated multi-term matching."""
    query: str
    # Minimum similarity ratio (0..1) a candidate must reach to be returned.
    min_similarity: Optional[float] = 0.4
    # Maximum number of matches to return.
    limit: Optional[int] = 10

# Multi-term (space-separated) fuzzy matching against company IDs.
@redis_router.post("/{db_name}/fuzzy_match_blank/id")
async def match_db_company_by_blank_separated_terms(
    request: BlankSeparatedMatchRequest,
    db_name: str = Path(..., description="数据库名称")
):
    """Fuzzy-match company IDs by multiple space-separated terms (order-independent)."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        result = fuzzy_match_company_id_with_spaces(
            db_name,
            request.query,
            min_similarity=request.min_similarity,
            limit=request.limit
        )

        if not result or result["total_matches"] == 0:
            raise HTTPException(status_code=404, detail=f"未找到与查询条件 '{request.query}' 匹配的公司")

        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"多条件模糊匹配公司ID时出错: {str(e)}")

# Multi-term (space-separated) fuzzy matching against company names.
@redis_router.post("/{db_name}/fuzzy_match_blank/name")
async def match_db_company_by_blank_separated_name(
    request: BlankSeparatedMatchRequest,
    db_name: str = Path(..., description="数据库名称")
):
    """Fuzzy-match company names by multiple space-separated terms (order-independent)."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        result = fuzzy_search_company_with_spaces(
            db_name,
            request.query,
            field="公司名称",
            min_similarity=request.min_similarity,
            limit=request.limit
        )

        if not result or result["total_matches"] == 0:
            raise HTTPException(status_code=404, detail=f"未找到与查询条件 '{request.query}' 匹配的公司名称")

        return result
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"多条件模糊匹配公司名称时出错: {str(e)}")

# Paginated listing of every company in a database, sorted by ID.
@redis_router.get("/{db_name}/list")
async def list_all_db_companies(
    db_name: str = Path(..., description="数据库名称"),
    limit: Optional[int] = Query(100, description="返回结果数量限制，默认100"),
    offset: Optional[int] = Query(0, description="结果偏移量，用于分页，默认0")
):
    """Return all companies of a database, sorted by ID, with offset/limit paging."""
    try:
        # Explicit None-coalescing keeps the slice below safe against null params.
        limit = 100 if limit is None else limit
        offset = 0 if offset is None else offset

        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        # SCAN instead of KEYS (non-blocking); extract IDs and sort lexicographically.
        company_ids = sorted(
            key.split(":")[-1] for key in r.scan_iter(f"db:{db_name}:company:*")
        )

        # Fetch details only for the requested page.
        companies = []
        for company_id in company_ids[offset:offset + limit]:
            company_data = r.hgetall(company_key(db_name, company_id))
            company_data["id"] = company_id  # expose the ID alongside the fields
            companies.append(company_data)

        return {
            "database": db_name,
            "total": len(company_ids),
            "offset": offset,
            "limit": limit,
            "companies": companies,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取公司列表时出错: {str(e)}")

# Per-field statistics for one database.
@redis_router.get("/{db_name}/stats")
async def get_db_stats(db_name: str = Path(..., description="数据库名称")):
    """Return company count and per-field coverage statistics for a database."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        # SCAN instead of KEYS: non-blocking on large keyspaces.
        company_keys = list(r.scan_iter(f"db:{db_name}:company:*"))
        company_count = len(company_keys)

        if company_count == 0:
            return {
                "database": db_name,
                "company_count": 0,
                "field_stats": {},
                "message": "数据库中没有公司数据"
            }

        # Count how many companies carry each field.
        field_counts = Counter()
        for key in company_keys:
            field_counts.update(r.hkeys(key))

        # Coverage = share of companies that have the field.
        field_stats = {
            field: {
                "count": count,
                "coverage": f"{(count / company_count) * 100:.2f}%",
            }
            for field, count in field_counts.items()
        }

        return {
            "database": db_name,
            "company_count": company_count,
            "field_stats": field_stats,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取数据库统计信息时出错: {str(e)}")

# Export one database as JSON, inline or as a file download.
@redis_router.get("/{db_name}/export/json")
async def export_db_to_json_api(
    db_name: str = Path(..., description="数据库名称"),
    pretty: bool = Query(False, description="是否美化JSON输出"),
    as_file: bool = Query(False, description="是否作为文件下载")
):
    """Export a database to JSON, either inline or as an attachment download."""
    try:
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            raise HTTPException(status_code=404, detail=f"数据库 {db_name} 不存在")

        # SCAN instead of KEYS: non-blocking on large keyspaces.
        company_keys = list(r.scan_iter(f"db:{db_name}:company:*"))

        if not company_keys:
            if as_file:
                return JSONResponse(
                    content={"message": f"数据库 {db_name} 中没有公司数据"},
                    status_code=404
                )
            return {"message": f"数据库 {db_name} 中没有公司数据"}

        # {company_id: {field: value, ...}, ...}
        result = {key.split(":")[-1]: r.hgetall(key) for key in company_keys}

        if not as_file:
            return result

        # indent=None yields the compact form, matching the non-pretty output.
        json_content = json.dumps(result, ensure_ascii=False, indent=2 if pretty else None)
        response = Response(content=json_content, media_type="application/json")
        response.headers["Content-Disposition"] = f"attachment; filename={db_name}_export.json"
        return response
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"导出数据库时出错: {str(e)}")

# Render a running server's Swagger/OpenAPI description into a Markdown file.
def save_swagger_to_markdown(url: str, output_file: str) -> bool:
    """Fetch ``{url}/openapi.json`` and write it to *output_file* as Markdown.

    The document contains the API title/version, every path and HTTP method
    (with parameters, request bodies and responses), and finally the
    component schemas. All failures are printed rather than raised.

    Returns:
        bool: True on success, False on any fetch/parse/write failure.
    """
    try:
        print(f"获取OpenAPI文档: {url}/openapi.json")
        
        # Make sure the output directory exists
        output_dir = os.path.dirname(output_file)
        if output_dir and not os.path.exists(output_dir):
            print(f"创建输出目录: {output_dir}")
            os.makedirs(output_dir, exist_ok=True)
        
        # Fetch the OpenAPI JSON; each network failure mode gets its own message
        try:
            response = requests.get(f"{url}/openapi.json", timeout=10)
            response.raise_for_status()
        except requests.exceptions.ConnectionError as e:
            print(f"连接错误: 无法连接到 {url}")
            print(f"错误详情: {str(e)}")
            return False
        except requests.exceptions.HTTPError as e:
            print(f"HTTP错误: 服务器返回{e.response.status_code}")
            print(f"错误详情: {str(e)}")
            return False
        except requests.exceptions.Timeout:
            print(f"请求超时: 获取 {url}/openapi.json 时超时")
            return False
        
        swagger_json = response.json()
        
        print(f"正在处理API文档: {swagger_json['info']['title']}")
        print(f"API版本: {swagger_json['info']['version']}")
        print(f"路径数量: {len(swagger_json['paths'])}")
        
        # Create the Markdown file
        with open(output_file, 'w', encoding='utf-8') as f:
            # Write the title block
            f.write(f"# {swagger_json['info']['title']} API文档\n\n")
            f.write(f"**版本**: {swagger_json['info']['version']}\n\n")
            f.write(f"**描述**: {swagger_json['info']['description']}\n\n")
            
            # Write basic server information
            f.write("## 基本信息\n\n")
            f.write(f"- **基础URL**: {url}\n")
            f.write(f"- **Swagger UI**: {url}/docs\n")
            f.write(f"- **ReDoc**: {url}/redoc\n\n")
            
            # Write all endpoints
            f.write("## API端点\n\n")
            
            # Grouped by path
            for path, path_item in swagger_json['paths'].items():
                f.write(f"### `{path}`\n\n")
                
                # One section per HTTP method
                for method, operation in path_item.items():
                    method = method.upper()
                    f.write(f"#### {method}\n\n")
                    
                    # Summary / description, when present
                    if 'summary' in operation:
                        f.write(f"**摘要**: {operation['summary']}\n\n")
                    if 'description' in operation:
                        f.write(f"**描述**: {operation['description']}\n\n")
                    
                    # Parameter table
                    if 'parameters' in operation and operation['parameters']:
                        f.write("**参数**:\n\n")
                        f.write("| 名称 | 位置 | 类型 | 必填 | 描述 |\n")
                        f.write("|------|------|------|------|------|\n")
                        
                        for param in operation['parameters']:
                            required = "是" if param.get('required', False) else "否"
                            param_type = param.get('schema', {}).get('type', '')
                            description = param.get('description', '')
                            f.write(f"| {param['name']} | {param['in']} | {param_type} | {required} | {description} |\n")
                        
                        f.write("\n")
                    
                    # Request body ($ref schemas link to the definitions section below)
                    if 'requestBody' in operation:
                        f.write("**请求体**:\n\n")
                        content = operation['requestBody'].get('content', {})
                        for content_type, content_schema in content.items():
                            f.write(f"- Content Type: `{content_type}`\n\n")
                            if 'schema' in content_schema:
                                ref = content_schema['schema'].get('$ref', '')
                                if ref:
                                    ref_name = ref.split('/')[-1]
                                    f.write(f"  Schema: [{ref_name}](#schema-{ref_name.lower()})\n\n")
                                else:
                                    f.write("  Schema: 内联模式\n\n")
                    
                    # Response table
                    if 'responses' in operation:
                        f.write("**响应**:\n\n")
                        f.write("| 状态码 | 描述 | 内容类型 |\n")
                        f.write("|--------|------|----------|\n")
                        
                        for status, response in operation['responses'].items():
                            description = response.get('description', '')
                            content_types = list(response.get('content', {}).keys())
                            content_type_str = ', '.join(content_types) if content_types else '-'
                            f.write(f"| {status} | {description} | {content_type_str} |\n")
                        
                        f.write("\n")
                
                f.write("\n")
            
            # Write the schema definitions (anchor ids match the request-body links)
            if 'components' in swagger_json and 'schemas' in swagger_json['components']:
                f.write("## 模式定义\n\n")
                
                for schema_name, schema in swagger_json['components']['schemas'].items():
                    f.write(f"### <a id=\"schema-{schema_name.lower()}\"></a>{schema_name}\n\n")
                    
                    if 'description' in schema:
                        f.write(f"{schema['description']}\n\n")
                    
                    if 'properties' in schema:
                        f.write("**属性**:\n\n")
                        f.write("| 属性名 | 类型 | 描述 |\n")
                        f.write("|--------|------|------|\n")
                        
                        for prop_name, prop in schema['properties'].items():
                            prop_type = prop.get('type', '')
                            description = prop.get('description', '')
                            f.write(f"| {prop_name} | {prop_type} | {description} |\n")
                        
                        f.write("\n")
                    
                    f.write("\n")
        
        print(f"Swagger文档已保存到 {output_file}")
        file_size = os.path.getsize(output_file)
        print(f"文档大小: {file_size / 1024:.2f} KB")
        return True
    except json.JSONDecodeError as e:
        print(f"JSON解析错误: 无法解析API文档")
        print(f"错误详情: {str(e)}")
        return False
    except IOError as e:
        print(f"IO错误: 无法写入文档文件 {output_file}")
        print(f"错误详情: {str(e)}")
        return False
    except Exception as e:
        print(f"保存Swagger文档时出错: {str(e)}")
        return False

def start_server_and_docs(host="0.0.0.0", port=8000, docs_output="docs/swagger.md", wait_time=3):
    """
    Start the API server in a daemon thread and generate the Swagger docs.

    Args:
        host: server bind address
        port: server port
        docs_output: path of the Markdown documentation file to write
        wait_time: seconds to wait for the server to come up before fetching

    Returns:
        threading.Thread: the daemon server thread, so the caller can manage it.
    """
    # Create the docs directory only when the path actually has one:
    # os.makedirs("") raises FileNotFoundError for bare filenames.
    docs_dir = os.path.dirname(docs_output)
    if docs_dir:
        os.makedirs(docs_dir, exist_ok=True)

    # Start the API service
    print(f"正在启动FastAPI服务, 地址: {host}:{port}...")
    import threading

    # Run uvicorn in a daemon thread so this function can continue.
    server_thread = threading.Thread(
        target=lambda: uvicorn.run(app, host=host, port=port)
    )
    server_thread.daemon = True
    server_thread.start()

    # Give the server a moment to bind before querying it.
    print(f"等待服务器启动... ({wait_time}秒)")
    time.sleep(wait_time)

    # Save the Swagger documentation
    print(f"正在保存Swagger文档到 {docs_output}...")
    success = save_swagger_to_markdown(f"http://{host}:{port}", docs_output)

    if success:
        print(f"Swagger文档已成功生成: {docs_output}")
    else:
        # Failures used to be silent here; make them visible to the operator.
        print(f"Swagger文档生成失败: {docs_output}")

    # Return the server thread so the caller can control it.
    return server_thread

def generate_api_docs(api_url, docs_output="docs/swagger.md"):
    """
    Generate the API documentation from a running server, without starting one.

    Args:
        api_url: base URL of the API server, e.g. http://localhost:8000
        docs_output: path of the Markdown file to write

    Returns:
        bool: True when the document was generated successfully.
    """
    try:
        # Drop a single trailing slash so we don't request "...//openapi.json".
        base_url = api_url[:-1] if api_url.endswith("/") else api_url

        print(f"正在从 {base_url} 获取API文档...")

        success = save_swagger_to_markdown(base_url, docs_output)

        print(f"API文档已成功保存到 {docs_output}" if success else f"生成API文档失败")

        return success
    except Exception as e:
        print(f"生成API文档时发生错误: {str(e)}")
        return False

def export_db_to_json(db_name, output_file, pretty=False):
    """
    Export the given database to a JSON file.

    Args:
        db_name: database name
        output_file: output file path
        pretty: pretty-print the JSON output

    Returns:
        bool: True when the export succeeded.
    """
    try:
        # Unknown database: report and bail out.
        # SISMEMBER on a missing set returns False, so no EXISTS pre-check needed.
        if not r.sismember("database_list", db_name):
            print(f"错误: 数据库 {db_name} 不存在")
            return False

        print(f"正在导出数据库 {db_name} 到 {output_file}...")

        # Ensure the target directory exists BEFORE any write. The original
        # only created it on the non-empty path, so exporting an empty
        # database into a not-yet-existing directory crashed.
        output_dir = os.path.dirname(output_file)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        # SCAN instead of KEYS keeps the server responsive on large keyspaces.
        company_keys = list(r.scan_iter(f"db:{db_name}:company:*"))

        if not company_keys:
            print(f"警告: 数据库 {db_name} 中没有公司数据")
            # Write an empty JSON object so the output file always exists.
            with open(output_file, 'w', encoding='utf-8') as f:
                json.dump({}, f)
            return True

        # {company_id: {field: value, ...}, ...}
        result = {key.split(":")[-1]: r.hgetall(key) for key in company_keys}

        # indent=None produces the compact form, matching the non-pretty branch.
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(result, f, ensure_ascii=False, indent=2 if pretty else None)

        print(f"成功导出 {len(result)} 条公司数据到 {output_file}")
        file_size = os.path.getsize(output_file)
        print(f"文件大小: {file_size / 1024:.2f} KB")
        return True
    except Exception as e:
        print(f"导出数据库时出错: {str(e)}")
        return False

# Health check for the Redis backend.
@redis_router.get("/health")
async def check_redis_health():
    """Report whether the Redis connection is alive, plus which URL is in use."""
    connection_name = os.getenv("CHOOSE_DB_URL_NAME", "REDIS_URL_LOCAL")
    try:
        # PING is the cheapest round trip to verify the connection.
        r.ping()
    except Exception as e:
        return {
            "status": "offline",
            "message": f"Redis数据库连接失败: {str(e)}",
            "connection": connection_name
        }
    return {
        "status": "online",
        "message": "Redis数据库连接正常",
        "connection": connection_name
    }

# Enumerate every logical database registered in Redis.
@redis_router.get("/databases")
async def list_redis_databases():
    """Return the names of all available Redis databases."""
    try:
        names = get_all_databases()
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取数据库列表时出错: {str(e)}")
    return {"total": len(names), "databases": names}

# Upload a JSON file and load it into a specific database.
@redis_router.post("/{db_name}/upload_json")
async def upload_and_load_json(
    file: UploadFile = File(..., description="JSON文件"),
    db_name: str = Path(..., description="数据库名称"),
    update_existing: bool = Form(False, description="是否更新已存在的记录")
):
    """Upload a JSON file and load its company records into the given database."""
    if not file.filename.endswith('.json'):
        raise HTTPException(status_code=400, detail="只接受JSON文件")

    # Spool the upload into a private temp file. tempfile avoids both
    # collisions between concurrent uploads and path injection through the
    # client-controlled filename (the old scheme wrote f"temp_{filename}").
    with tempfile.NamedTemporaryFile(mode="wb", suffix=".json", delete=False) as buffer:
        temp_file_path = buffer.name
        shutil.copyfileobj(file.file, buffer)

    try:
        # Load the file into the requested Redis database.
        result = load_json_to_redis(db_name, temp_file_path, update_existing)
        return {
            "status": "success",
            "message": f"文件成功加载到数据库 {db_name}",
            "filename": file.filename,
            "database": db_name,
            "loaded_count": result["loaded_count"],
            "updated_count": result["updated_count"],
            "skipped_count": result["skipped_count"],
            "total_processed": result["total_processed"]
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"加载文件时出错: {str(e)}")
    finally:
        # Always remove the temporary file.
        if os.path.exists(temp_file_path):
            os.remove(temp_file_path)

# Initialize the database with seed data.
@redis_router.post("/init")
async def initialize_database(
    json_path: str = Query(None, description="JSON文件路径，为空则使用默认路径")
):
    """Initialize the Redis database by loading its initial dataset."""
    try:
        ok = init_redis_from_json(json_path)
    except Exception as e:
        return {
            "status": "error",
            "message": f"初始化时出错: {str(e)}"
        }
    if ok:
        return {
            "status": "success",
            "message": "数据库初始化成功"
        }
    return {
        "status": "error",
        "message": "数据库初始化失败，详情请查看日志"
    }

# Upload a JSON file (legacy /load route; see also /upload_json).
@redis_router.post("/{db_name}/load")
async def load_json_file(
    file: UploadFile = File(..., description="JSON文件"),
    db_name: str = Path(..., description="数据库名称"),
    update_existing: bool = Form(False, description="是否更新已存在的记录")
):
    """Upload a JSON file and load its company records into the database."""
    if not file.filename.endswith('.json'):
        raise HTTPException(status_code=400, detail="只接受JSON文件")

    # Spool the upload into a private temp file. tempfile avoids both
    # collisions between concurrent uploads and path injection through the
    # client-controlled filename (the old scheme wrote f"temp_{filename}").
    with tempfile.NamedTemporaryFile(mode="wb", suffix=".json", delete=False) as buffer:
        temp_file_path = buffer.name
        shutil.copyfileobj(file.file, buffer)

    try:
        # Load the file into the requested Redis database.
        result = load_json_to_redis(db_name, temp_file_path, update_existing)
        return {
            "message": f"文件成功加载到数据库 {db_name}",
            "filename": file.filename,
            "database": db_name,
            "loaded_count": result["loaded_count"],
            "updated_count": result["updated_count"],
            "skipped_count": result["skipped_count"],
            "total_processed": result["total_processed"]
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"加载文件时出错: {str(e)}")
    finally:
        # Always remove the temporary file.
        if os.path.exists(temp_file_path):
            os.remove(temp_file_path)

# Load company data posted directly in the request body.
@redis_router.post("/{db_name}/load_json_body")
async def load_json_from_body(
    db_name: str = Path(..., description="数据库名称"),
    update_existing: bool = Query(False, description="是否更新已存在的记录"),
    company_data: Dict[str, Dict[str, Any]] = Body(..., description="公司数据，格式为 {公司ID: {字段1: 值1, 字段2: 值2, ...}, ...}")
):
    """Load JSON company data from the request body into the given database."""
    try:
        # SADD is idempotent, so register the database unconditionally
        # (it is auto-created if it did not exist before).
        r.sadd("database_list", db_name)

        loaded_count = 0
        skipped_count = 0
        updated_count = 0

        # Store the posted company records in Redis.
        for company_id, details in company_data.items():
            key = company_key(db_name, company_id)
            exists = r.exists(key)
            if exists and not update_existing:
                skipped_count += 1
                continue
            # Single round-trip write; redis-py rejects an empty mapping.
            if details:
                r.hset(key, mapping=details)
            if exists:
                updated_count += 1
            else:
                loaded_count += 1

        total_processed = loaded_count + updated_count

        return {
            "status": "success",
            "message": f"数据成功加载到数据库 {db_name}",
            "database": db_name,
            "loaded_count": loaded_count,
            "updated_count": updated_count,
            "skipped_count": skipped_count,
            "total_processed": total_processed
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"加载JSON数据时出错: {str(e)}")

# Request model for loading arbitrary (non-company) entities into Redis.
class GenericDataRequest(BaseModel):
    entity_type: str  # logical entity name used as a Redis key segment
    prefix: Optional[str] = ""  # optional extra key segment inserted before entity_type
    data: Dict[str, Dict[str, Any]]  # {entity_id: {field: value, ...}, ...}
    update_existing: Optional[bool] = False  # overwrite records whose key already exists

# Shared field writer for generic entity loading.
def _hset_serialized(key, details):
    """Write *details* into the Redis hash at *key*, JSON-encoding dict/list
    values because Redis hash fields only hold flat strings."""
    for field, value in details.items():
        if isinstance(value, (dict, list)):
            r.hset(key, field, json.dumps(value, ensure_ascii=False))
        else:
            r.hset(key, field, value)


# Load arbitrary entities posted as a JSON request body.
@redis_router.post("/{db_name}/load_generic_json_body")
async def load_generic_json_from_body(
    db_name: str = Path(..., description="数据库名称"),
    request_data: GenericDataRequest = Body(..., description="通用数据结构，支持自定义实体类型和前缀")
):
    """Load generic JSON data into *db_name* with a custom entity type and
    optional key prefix.

    Key layout: ``db:{db_name}[:{prefix}]:{entity_type}:{entity_id}``.
    Existing keys are skipped unless ``update_existing`` is set.
    """
    try:
        # sismember on a missing key is False, so a separate exists() check
        # is unnecessary before auto-registering the database name.
        if not r.sismember("database_list", db_name):
            r.sadd("database_list", db_name)

        entity_type = request_data.entity_type
        prefix = request_data.prefix
        update_existing = request_data.update_existing

        loaded_count = 0
        skipped_count = 0
        updated_count = 0

        for entity_id, details in request_data.data.items():
            # Build the Redis key, inserting the optional prefix segment.
            if prefix:
                key = f"db:{db_name}:{prefix}:{entity_type}:{entity_id}"
            else:
                key = f"db:{db_name}:{entity_type}:{entity_id}"

            if r.exists(key):
                if not update_existing:
                    skipped_count += 1
                    continue
                _hset_serialized(key, details)
                updated_count += 1
            else:
                _hset_serialized(key, details)
                loaded_count += 1

        return {
            "status": "success",
            "message": f"通用数据成功加载到数据库 {db_name}",
            "database": db_name,
            "entity_type": entity_type,
            "prefix": prefix if prefix else "(无)",
            "loaded_count": loaded_count,
            "updated_count": updated_count,
            "skipped_count": skipped_count,
            "total_processed": loaded_count + updated_count
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"加载通用JSON数据时出错: {str(e)}")

# Register the Redis database router on the main FastAPI application.
app.include_router(redis_router)

if __name__ == "__main__":
    # Command-line entry point: server / docs / export sub-commands, with
    # a default "start server" behavior when no command is given.
    import argparse

    def _wait_until_stopped(server_thread):
        # Keep the main thread alive until the server thread exits or the
        # user presses Ctrl+C (shared by the "server" and default branches).
        try:
            while server_thread.is_alive():
                time.sleep(1)
        except KeyboardInterrupt:
            print("服务器已停止")

    parser = argparse.ArgumentParser(
        description="Redis公司数据库API服务 - 基于FastAPI的公司信息管理系统",
        epilog="无参数时默认启动服务器并生成文档。使用子命令可以进行更精细的控制。"
    )
    subparsers = parser.add_subparsers(dest="command", help="可用命令")

    # "server": start the API server and generate docs.
    server_parser = subparsers.add_parser(
        "server",
        help="启动API服务器并生成文档",
        description="启动FastAPI服务器并生成API文档"
    )
    server_parser.add_argument("--host", default="0.0.0.0", help="服务器主机地址")
    server_parser.add_argument("--port", type=int, default=8000, help="服务器端口号")
    server_parser.add_argument("--docs", default="docs/swagger.md", help="生成的Swagger文档路径")
    server_parser.add_argument("--wait", type=int, default=3, help="等待服务器启动的时间(秒)")

    # "docs": generate documentation from an already-running server.
    docs_parser = subparsers.add_parser(
        "docs",
        help="仅生成API文档",
        description="从已运行的API服务器生成文档，不启动新服务器"
    )
    docs_parser.add_argument("--url", required=True, help="API服务器的URL地址，如http://localhost:8000")
    docs_parser.add_argument("--output", default="docs/swagger.md", help="生成的Swagger文档输出路径")

    # "export": dump a database's company data to a JSON file.
    export_parser = subparsers.add_parser(
        "export",
        help="将数据库导出为JSON文件",
        description="将指定的Redis数据库中的公司数据导出为JSON文件"
    )
    export_parser.add_argument("--db", required=True, help="要导出的数据库名称")
    export_parser.add_argument("--output", required=True, help="导出的JSON文件路径")
    export_parser.add_argument("--pretty", action="store_true", help="美化JSON输出")

    args = parser.parse_args()

    if args.command == "server":
        # Start the server with the user-supplied options.
        server_thread = start_server_and_docs(
            host=args.host,
            port=args.port,
            docs_output=args.docs,
            wait_time=args.wait
        )
        _wait_until_stopped(server_thread)
    elif args.command == "docs":
        # Docs only — no new server is started.
        generate_api_docs(args.url, args.output)
    elif args.command == "export":
        export_db_to_json(args.db, args.output, args.pretty)
    else:
        # No command given: start the server with built-in defaults.
        print("未指定命令，使用默认参数启动服务器并生成文档...")
        print("提示: 使用 'python api_server.py --help' 查看更多选项")
        print()

        default_host = "0.0.0.0"
        default_port = 8000
        default_docs = "docs/swagger.md"
        default_wait = 3

        print(f"主机地址: {default_host}")
        print(f"端口号: {default_port}")
        print(f"文档输出路径: {default_docs}")

        server_thread = start_server_and_docs(
            host=default_host,
            port=default_port,
            docs_output=default_docs,
            wait_time=default_wait
        )

        print(f"服务器已启动，访问地址: http://{default_host}:{default_port}")
        print(f"API文档: http://{default_host}:{default_port}/docs")
        print("使用Ctrl+C停止服务器")

        _wait_until_stopped(server_thread)