import logging
import os
import pickle
from functools import lru_cache
from typing import Any

import jieba
from fastapi import FastAPI

# Shared FastAPI application instance; route handlers defined elsewhere
# register against this object.
app = FastAPI()

def returnData(code: int, msg: str, data: Any):
    """Build the standard API response envelope.

    Args:
        code: Numeric status code for the response.
        msg: Human-readable message; defensively coerced to ``str``.
        data: Arbitrary payload to hand back to the client.

    Returns:
        A dict with the fixed keys ``code``, ``msg`` and ``data``.
    """
    envelope = {"code": code, "msg": str(msg), "data": data}
    return envelope

def setLog(log_file_name: str, max_bytes=10*1024*1024, backup_count=30):
    """Create or refresh a per-module daily rotating logger.

    Log files are written under ``<cwd>/novelog/<YYYYMMDD>/<log_file_name>``.
    Handlers are rebuilt when the calendar day changes so new records land
    in the new day's directory.

    Args:
        log_file_name: File name such as ``"api.log"``; also determines the
            logger name ``chicken.<name without .log>``.
        max_bytes: Unused; kept for backward compatibility only — rotation
            here is time-based (midnight), not size-based.
        backup_count: Number of rotated files to keep. Default is 30, which
            matches the value that was previously hard-coded (the parameter
            used to be silently ignored).

    Returns:
        The configured ``logging.Logger`` instance.
    """
    from logging.handlers import TimedRotatingFileHandler
    import datetime

    # One logger per module so each module writes to its own file.
    logger_name = f"chicken.{log_file_name.replace('.log', '')}"
    logger = logging.getLogger(logger_name)
    logger.setLevel(logging.INFO)

    today = datetime.datetime.now().strftime('%Y%m%d')

    # Track the last configured date *per logger* (stored on the logger
    # object) instead of one shared module-level global: with a shared
    # global, the first logger to roll over to the new day updated it for
    # everyone, so every other logger's date check passed and they kept
    # writing into the previous day's directory.
    if not logger.handlers or today != getattr(logger, '_last_log_date', None):
        logger._last_log_date = today

        log_dir = os.path.join(os.getcwd(), 'novelog', today)
        os.makedirs(log_dir, exist_ok=True)
        log_file = os.path.join(log_dir, log_file_name)

        # Drop stale handlers pointing at the previous day's file and close
        # them so the old file descriptors are released.
        for handler in logger.handlers[:]:
            logger.removeHandler(handler)
            handler.close()

        time_handler = TimedRotatingFileHandler(
            log_file,
            when='midnight',
            interval=1,
            backupCount=backup_count,  # previously ignored, hard-coded to 30
            encoding='utf-8',
            errors='ignore'
        )
        time_handler.setLevel(logging.INFO)
        time_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
        logger.addHandler(time_handler)

    return logger
    
def load_sensitive_words(filepath):
    """Deserialize and return the sensitive-word collection stored at *filepath*.

    NOTE(review): ``pickle.load`` executes arbitrary code embedded in the
    file — only load trusted, locally generated word lists.
    """
    with open(filepath, 'rb') as handle:
        return pickle.load(handle)
        
@lru_cache(maxsize=1)
def _get_sensitive_words():
    # Load (and unpickle) the word list once per process instead of
    # re-reading the file on every single check, which was per-call disk
    # I/O plus deserialization.
    # NOTE(review): edits to the .pkl file now require a process restart
    # (or _get_sensitive_words.cache_clear()) to take effect.
    return load_sensitive_words('word/sensitive_words.pkl')


def check_sensitive_words(text):
    """Scan *text* for sensitive words.

    The text is tokenized with jieba and each token is tested for
    membership in the cached sensitive-word collection.

    Args:
        text: The string to check.

    Returns:
        ``(True, word)`` for the first sensitive token found, otherwise
        ``(False, None)``.
    """
    sensitive_words = _get_sensitive_words()

    for word in jieba.cut(text):
        if word in sensitive_words:
            return True, word  # report the first match
    return False, None




# def check_sensitive_words(content):
#     # 读取敏感词库文件
#     sensitive_words_file_path = "word/sensitive_words.txt"
#     with open(sensitive_words_file_path, 'r', encoding='utf-8') as f:
#         sensitive_words = f.read().splitlines()

#     # 对评论内容进行分词
#     words = jieba.cut(content)

#     # 检测是否存在敏感词
#     for word in words:
#         if word in sensitive_words:
#             return True

#     return False
    