import os
import datetime
from pathlib import Path
from django.core.cache import cache
from .models import FileIndex

# Cache TTLs in seconds: frequently requested paths get a longer lifetime.
BASE_CACHE_TIMEOUT = 300      # default: 5 minutes
HOT_PATH_CACHE_TIMEOUT = 900  # hot paths: 15 minutes
# Per-path access counter used to decide which paths count as "hot".
path_access_count = {}

def get_cache_timeout(path):
    """Return the cache TTL (seconds) to use for *path*.

    A path that has been accessed more than 10 times is considered hot
    and gets the extended timeout; all other paths use the base timeout.
    """
    hits = path_access_count.get(path, 0)
    return HOT_PATH_CACHE_TIMEOUT if hits > 10 else BASE_CACHE_TIMEOUT

def get_file_info(path):
    """Build a metadata dict describing one file or directory.

    Args:
        path: str or Path of the entry to describe.

    Returns:
        dict with keys:
            name         -- final path component.
            path         -- full path as a string.
            display_path -- portion after the 'file_storage/' segment if
                            present, otherwise the full path unchanged.
            is_dir       -- True if the entry is a directory.
            size         -- bytes; recursive total for directories.
            mtime        -- naive local datetime of last modification.
    """
    def safe_is_dir(p):
        # is_dir() can raise on entries we are not allowed to inspect.
        try:
            return p.is_dir()
        except PermissionError:
            return False

    item = Path(path)
    is_dir = safe_is_dir(item)

    # Derive the user-facing path: everything after 'file_storage/'.
    # Normalize Windows separators first so the marker search works.
    full_path = str(item)
    normalized_path = full_path.replace('\\', '/')
    marker = 'file_storage/'
    storage_index = normalized_path.find(marker)
    if storage_index >= 0:
        display_path = normalized_path[storage_index + len(marker):]
    else:
        display_path = full_path

    # stat() (and the size helpers) can fail on broken symlinks or
    # permission-restricted entries; fall back to size 0 / epoch mtime
    # instead of raising, consistent with safe_is_dir above.
    try:
        size = get_folder_size(item) if is_dir else get_size(item)
        mtime = datetime.datetime.fromtimestamp(item.stat().st_mtime)
    except OSError:
        size = 0
        mtime = datetime.datetime.fromtimestamp(0)

    return {
        'name': item.name,
        'path': full_path,
        'display_path': display_path,
        'is_dir': is_dir,
        'size': size,
        'mtime': mtime,
    }

def get_size(file_path):
    """Return the size in bytes of the file at *file_path* (a Path)."""
    stat_result = file_path.stat()
    return stat_result.st_size

# Folder-size cache: maps str(path) -> (computed_at_epoch, total_bytes).
# Entries expire after FOLDER_SIZE_CACHE_TTL seconds; the previous
# implementation kept a bare dict that never expired, despite the
# comment advertising a 5-minute cache.
folder_size_cache = {}
FOLDER_SIZE_CACHE_TTL = 300  # seconds (5 minutes)

def get_folder_size(folder_path):
    """Compute the total size of a folder (cached, parallel recursion).

    Args:
        folder_path: Path object of the folder to measure.

    Returns:
        int: total size of all regular files under the folder, in bytes.
             Entries that cannot be read are silently skipped.
    """
    import time  # local import, matching this module's style below

    # Serve a cached value while it is still fresh.
    cache_key = str(folder_path)
    cached = folder_size_cache.get(cache_key)
    if cached is not None and time.time() - cached[0] < FOLDER_SIZE_CACHE_TTL:
        return cached[1]

    total = 0
    try:
        from concurrent.futures import ThreadPoolExecutor

        # Partition direct children into subdirectories and files.
        subdirs = []
        files = []
        for entry in folder_path.iterdir():
            try:
                # is_dir()/is_file() can themselves raise (e.g. broken
                # symlink, permission); skip just that entry rather than
                # aborting the whole scan.
                if entry.is_dir():
                    subdirs.append(entry)
                elif entry.is_file():
                    files.append(entry)
            except OSError:
                continue

        # Recurse into subdirectories in parallel while summing the
        # files of this level on the current thread.
        with ThreadPoolExecutor() as executor:
            futures = [executor.submit(get_folder_size, d) for d in subdirs]

            for file in files:
                try:
                    total += file.stat().st_size
                except (PermissionError, FileNotFoundError):
                    continue

            for future in futures:
                try:
                    total += future.result()
                except (PermissionError, FileNotFoundError):
                    continue

    except (PermissionError, FileNotFoundError):
        pass

    # Store with a timestamp so the TTL check above can expire it.
    folder_size_cache[cache_key] = (time.time(), total)
    return total

def fuzzy_match(pattern, string):
    """Case-insensitive fuzzy (subsequence) match of *pattern* in *string*.

    Features:
    1. Prefix priority: if pattern is a prefix of string, match immediately.
    2. Fast paths for the empty pattern, empty string and exact equality.
    3. Optimized iterative scan with early termination and a skip-ahead
       over characters that appear nowhere in the pattern.

    Returns:
        bool: True if every character of pattern appears in string in
        order (not necessarily contiguously).
    """
    pattern = pattern.lower()
    string = string.lower()
    len_pattern = len(pattern)
    len_string = len(string)

    # Fast path 1: the empty pattern matches anything.
    if len_pattern == 0:
        return True
    # Fast path 2: a non-empty pattern never matches the empty string.
    if len_string == 0:
        return False
    # Fast paths 3 & 4: exact match / prefix match.
    if pattern == string or string.startswith(pattern):
        return True

    # Main subsequence scan. (The previous version also tracked an
    # unused `last_match_pos` local; it has been removed.)
    i = j = 0
    pattern_chars = set(pattern)  # membership test for the skip-ahead

    while i < len_pattern and j < len_string:
        if pattern[i] == string[j]:
            i += 1
        j += 1

        # Early termination: not enough characters left in the string
        # to match what remains of the pattern.
        if len_string - j < len_pattern - i:
            return False

        # Skip-ahead: a character absent from the pattern can never
        # match, so step over it (bounds-checked).
        if j < len_string and string[j] not in pattern_chars:
            j += 1

    return i == len_pattern

def list_files(path, search_query=None, recursive=True, limit=1000, offset=0, sort='name', order='asc'):
    """List directory contents with caching, sorting and pagination.

    Args:
        path: directory to list; also used as the hot-path/cache key.
        search_query: if given, query the FileIndex table instead of disk.
        recursive: reserved for index queries; currently unused there.
        limit, offset: pagination window over the full result list.
        sort: 'name' or 'mtime'.
        order: 'asc' or 'desc'.

    Returns:
        dict with 'results' (the requested page), 'total', 'has_more'.
    """
    # Track popularity so get_cache_timeout can lengthen hot-path TTLs.
    path_access_count[path] = path_access_count.get(path, 0) + 1

    # The key deliberately excludes offset/limit: the FULL result list is
    # cached once and sliced per request on cache hits below.
    cache_key = f"file_list:{path}:{search_query}:{recursive}:{sort}:{order}"
    cached_result = cache.get(cache_key)
    if cached_result is not None:
        return {
            'results': cached_result[offset:offset + limit],
            'total': len(cached_result),
            'has_more': (offset + limit) < len(cached_result)
        }

    # With a search term, query the FileIndex table instead of walking disk.
    if search_query:
        qs = FileIndex.objects.filter(name__icontains=search_query)
        # `recursive` could be mapped to a path filter here; not implemented.
        if sort == 'mtime':
            order_by = '-mtime' if order == 'desc' else 'mtime'
        else:
            order_by = '-name' if order == 'desc' else 'name'
        qs = qs.order_by(order_by)
        # BUG FIX: the previous version cached only the current page
        # (qs[offset:offset+limit]) under a key without offset/limit, so a
        # later cache hit re-sliced that single page and reported the page
        # length as the total. Cache the full ordered list instead, matching
        # the cache-hit path above and the disk branch below.
        results = list(qs.values())
        total = len(results)
        cache.set(cache_key, results, get_cache_timeout(path))
        return {
            'results': results[offset:offset + limit],
            'total': total,
            'has_more': (offset + limit) < total
        }

    # No search term: walk the directory on disk. (Path is imported at the
    # top of the module; the shadowing local re-import was removed.)
    path_obj = Path(path)
    items = [get_file_info(p) for p in path_obj.iterdir() if p.exists()]
    total_count = len(items)
    if sort == 'mtime':
        sort_key = lambda x: x['mtime']
    else:
        sort_key = lambda x: x['name']
    reverse = (order == 'desc')
    # Directories sort before files (in ascending order), then by the key.
    sorted_items = sorted(items, key=lambda x: (not x['is_dir'], sort_key(x)), reverse=reverse)
    cache.set(cache_key, sorted_items, get_cache_timeout(path))
    return {
        'results': sorted_items[offset:offset + limit],
        'total': total_count,
        'has_more': (offset + limit) < total_count
    }