# 工具函数模块

import json
import logging
import os
import random
import re
import sys
import time
from datetime import datetime
from functools import wraps

# Add the project root (parent of this file's directory) to the Python
# search path so that `config` resolves when this module is run directly.
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from config import settings

# Configure root logging: level comes from settings.LOG_LEVEL (e.g. "INFO"),
# and records go to both the configured log file (UTF-8) and the console.
logging.basicConfig(
    level=getattr(logging, settings.LOG_LEVEL),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(settings.LOG_FILE, encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Shared module-level logger used by the helper functions below.
logger = logging.getLogger('music_crawler')


def setup_logger(name):
    """Return a named logger (thin wrapper over ``logging.getLogger``)."""
    named_logger = logging.getLogger(name)
    return named_logger


def create_dir_if_not_exists(dir_path):
    """Create *dir_path* (including parents) if it does not already exist.

    Args:
        dir_path: directory path to ensure.
    """
    if not os.path.exists(dir_path):
        # exist_ok=True guards against the race where another process creates
        # the directory between the exists() check and makedirs().
        os.makedirs(dir_path, exist_ok=True)
        logger.info(f"创建目录: {dir_path}")


def get_random_user_agent():
    """Return one User-Agent string picked at random from a built-in pool."""
    # Built-in pool of current desktop/mobile UAs; avoids the external
    # fake_useragent dependency.
    user_agents = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/135.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.4.1 Safari/605.1.15",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
        "Mozilla/5.0 (iPad; CPU OS 17_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.4 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 17_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.4 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0",
        "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:135.0) Gecko/20100101 Firefox/135.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/133.0.0.0 Safari/537.36 Edg/133.0.0.0",
    )
    return random.choice(user_agents)


def clean_filename(filename):
    """Sanitize *filename* for safe use on common filesystems.

    Replaces characters illegal on Windows/POSIX with '_', removes ASCII
    control characters, and truncates to 255 characters while keeping the
    extension intact.
    """
    # Map every illegal character to an underscore in a single pass.
    translation = str.maketrans({ch: '_' for ch in '<>:/\\|?*"'})
    cleaned = filename.translate(translation)
    # Drop non-printable control characters (NUL..US and DEL).
    cleaned = re.sub(r'[\x00-\x1f\x7f]', '', cleaned)
    # Enforce the common 255-character filename limit, preserving the extension.
    limit = 255
    if len(cleaned) > limit:
        stem, ext = os.path.splitext(cleaned)
        cleaned = stem[:limit - len(ext)] + ext
    return cleaned.strip()


def retry_on_error(max_retries=settings.MAX_RETRY, delay=settings.DELAY):
    """Decorator factory: retry the wrapped function on any exception.

    Args:
        max_retries: maximum number of attempts before re-raising.
        delay: base delay in seconds for the exponential backoff.

    Raises:
        The last exception from the wrapped function once max_retries
        attempts have failed.
    """
    def decorator(func):
        # functools.wraps preserves __name__/__doc__ of the wrapped function;
        # without it the log messages below would report "wrapper" after
        # stacked decoration, and introspection/debugging would break.
        @wraps(func)
        def wrapper(*args, **kwargs):
            retries = 0
            while retries < max_retries:
                try:
                    return func(*args, **kwargs)
                except Exception as e:
                    retries += 1
                    logger.warning(f"函数 {func.__name__} 执行失败: {e}, 第 {retries} 次重试...")
                    if retries >= max_retries:
                        logger.error(f"函数 {func.__name__} 达到最大重试次数，执行失败")
                        raise
                    # Exponential backoff with jitter in [0.5, 1.5) to avoid
                    # synchronized retry storms.
                    wait_time = delay * (2 ** (retries - 1)) * (0.5 + random.random())
                    time.sleep(wait_time)
        return wrapper
    return decorator


def get_file_size(file_path):
    """Return the size of *file_path* in bytes, or 0 if it does not exist."""
    if not os.path.exists(file_path):
        return 0
    return os.path.getsize(file_path)


def format_file_size(size_bytes):
    """Format a byte count as a human-readable string (B, KB, MB, GB)."""
    # Largest-first threshold table; sizes below 1 KB fall through to bytes.
    for threshold, unit in ((1024 ** 3, 'GB'), (1024 ** 2, 'MB'), (1024, 'KB')):
        if size_bytes >= threshold:
            return f"{size_bytes / threshold:.2f} {unit}"
    return f"{size_bytes} B"


def write_log_to_file(message, level='INFO'):
    """Append a timestamped entry to the log file and echo it via the logger.

    Args:
        message: text to record.
        level: severity label ('INFO', 'WARNING', 'ERROR', 'CRITICAL').

    NOTE(review): the module logger already has a FileHandler on
    settings.LOG_FILE, so entries likely appear twice in that file —
    confirm this duplication is intended.
    """
    now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    with open(settings.LOG_FILE, 'a', encoding='utf-8') as log_file:
        log_file.write(f"[{now}] [{level}] {message}\n")

    # Mirror the entry through the configured logger (console handler).
    if level in ('ERROR', 'CRITICAL'):
        logger.error(message)
    elif level == 'WARNING':
        logger.warning(message)
    else:
        logger.info(message)


def get_song_id_from_url(url):
    """Extract the song id (the `sid` query parameter) from *url*, or None."""
    # NOTE: adjust the pattern if the site's URL scheme changes.
    found = re.search(r'sid=(\d+)', url)
    return found.group(1) if found else None


def parse_page_number(url):
    """Return the page number from *url*'s `page` parameter (default 1)."""
    found = re.search(r'page=(\d+)', url)
    return int(found.group(1)) if found else 1


def get_current_timestamp(format_type='seconds'):
    """Return the current time in the requested representation.

    Args:
        format_type: 'seconds' for an integer Unix timestamp,
            'datetime' for a 'YYYY-MM-DD HH:MM:SS' string.

    Returns:
        int | str: according to *format_type*.
    """
    if format_type != 'datetime':
        return int(time.time())
    return datetime.now().strftime('%Y-%m-%d %H:%M:%S')


def timestamp_to_datetime(timestamp):
    """Convert a Unix timestamp to a local 'YYYY-MM-DD HH:MM:SS' string."""
    local_time = time.localtime(timestamp)
    return time.strftime('%Y-%m-%d %H:%M:%S', local_time)


def read_json_file(file_path):
    """Read and parse a UTF-8 JSON file.

    Args:
        file_path: path to the JSON file.

    Returns:
        The parsed object, or None if the file is missing or unreadable.
    """
    if not os.path.exists(file_path):
        return None
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    # OSError covers the race where the file vanishes (or becomes
    # unreadable) after the exists() check; UnicodeDecodeError covers
    # non-UTF-8 content — previously both crashed despite the function's
    # "None on failure" contract.
    except (OSError, UnicodeDecodeError, json.JSONDecodeError):
        logger.error(f"解析JSON文件失败: {file_path}")
        return None


def write_json_file(file_path, data):
    """Serialize *data* to *file_path* as pretty-printed UTF-8 JSON.

    Args:
        file_path: destination path.
        data: JSON-serializable object.

    Returns:
        bool: True on success, False on any failure (which is logged).
    """
    try:
        with open(file_path, 'w', encoding='utf-8') as out:
            json.dump(data, out, ensure_ascii=False, indent=2)
    except Exception as e:
        logger.error(f"写入JSON文件失败: {file_path}, 错误: {e}")
        return False
    return True


def get_file_extension(filename):
    """Return *filename*'s extension, lowercased ('' if there is none)."""
    return os.path.splitext(filename)[1].lower()


def is_valid_url(url):
    """Check whether *url* looks like an HTTP(S) URL.

    Only validates the scheme and the first host-like characters — a cheap
    sanity check, not a full RFC 3986 validator.

    Returns:
        bool: True if *url* starts with http:// or https:// followed by at
        least one host character.
    """
    # `re` is imported at module level; the previous function-local
    # `import re` was redundant and has been removed.
    pattern = r'^https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+'
    return bool(re.match(pattern, url))


def extract_domain(url):
    """Return the network location (host[:port]) component of *url*."""
    from urllib.parse import urlparse
    return urlparse(url).netloc