# coding=utf-8
"""
    @project: MaxKB
    @Author：虎虎
    @file： common.py
    @date：2025/7/28 14:53
    @desc:
"""
import os
import pickle
import re
import shutil
import zipfile
from math import ceil
from pathlib import Path

from tqdm import tqdm

from migrate import BASE_DIR, APP_DIR

source_dir_size = 1000


class ImportQuerySet:
    """Minimal QuerySet-like wrapper over exported pickle files on disk.

    Mimics just enough of the Django QuerySet API (``count`` / ``order_by`` /
    ``all`` / slicing) for the pagination helpers to iterate exported batches.
    """

    def __init__(self, source_name: str):
        root = Path(f"{BASE_DIR}/data/{source_name}/")
        self.file_list = [entry for entry in root.rglob('*') if entry.is_file()]

    def count(self):
        """Return the number of exported batch files found."""
        return len(self.file_list)

    def order_by(self, k):
        # Batch files are named "<page>.pickle"; sorting is always numeric by
        # page number — the key argument exists only for API compatibility.
        self.file_list.sort(key=lambda entry: int(entry.stem))
        return self

    def all(self):
        """QuerySet-compat no-op; returns self so slicing can follow."""
        return self

    def __getitem__(self, item):
        # Only slice access is used by the pagination helpers (step ignored).
        return self.file_list[item.start:item.stop]


def _check(source_name, current_page):
    """Return True when ``current_page`` has not been exported yet.

    @param source_name:  resource name (storage sub-directory)
    @param current_page: 1-based page number
    @return: True if the page's ``.pickle`` file does not exist on disk
    """
    target = f"{get_dir_path(source_name, current_page)}/{current_page}.pickle"
    return not os.path.exists(target)


def import_check(source_name, current_page):
    """Return True when ``current_page`` has not been imported yet.

    A page is considered imported once its pickle has been renamed to the
    ``.pickle_done`` marker name.
    """
    marker = f"{get_dir_path(source_name, current_page)}/{current_page}.pickle_done"
    return not os.path.exists(marker)


def page(query_set, page_size, handler, source_name, desc, primary_key="id", check=_check):
    """Cursor-based pagination: iterate ``query_set`` in ``page_size`` batches.

    Uses keyset (primary-key) pagination instead of OFFSET for efficiency on
    large tables.

    @param query_set:   query set to iterate
    @param page_size:   rows fetched per batch
    @param handler:     callback(data_list, source_name, page_number)
    @param source_name: resource name (storage sub-directory)
    @param desc:        progress-bar description
    @param primary_key: ordering / cursor key
    @param check:       predicate deciding whether a page still needs exporting
    """
    query = query_set.order_by(primary_key)
    count = query_set.count()

    with tqdm(
            range(count),
            desc=desc,
            bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] {postfix}",
    ) as pbar:
        last_id = None
        current_page = 1
        processed_count = 0

        while processed_count < count:
            # BUG FIX: the batch must be fetched even for pages that `check`
            # says are already exported — otherwise `last_id` goes stale and
            # the *next* page would re-fetch and re-export the same rows.
            if last_id is None:
                data_list = query.all()[:page_size]
            else:
                # Keyset pagination: filter past the cursor instead of OFFSET.
                data_list = query.filter(**{f"{primary_key}__gt": last_id})[
                            :page_size
                            ]

            if not data_list:
                break

            # Only hand off pages that still need exporting.
            if check(source_name, current_page):
                handler(data_list, source_name, current_page)

            # Advance the cursor to the last row's primary key.
            data_list_items = list(data_list)
            last_id = getattr(data_list_items[-1], primary_key)

            # Progress accounting (clamped so the bar never overshoots).
            batch_size = min(page_size, count - processed_count)
            pbar.update(batch_size)
            processed_count += batch_size
            current_page += 1


def import_page(query_set, page_size, handler, source_name, desc, primary_key="id", check=_check):
    """Offset-based pagination over ``query_set``.

    @param primary_key: primary key used for ordering
    @param desc:        progress-bar description
    @param query_set:   query set to iterate
    @param page_size:   rows fetched per batch
    @param handler:     callback(data_list, source_name, page_number)
    @param source_name: resource name (storage sub-directory)
    @param check:       predicate deciding whether a page still needs handling
    @return: None
    """
    ordered = query_set.order_by(primary_key)
    total = query_set.count()
    bar_fmt = "{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}] {postfix}"
    with tqdm(range(total), desc=desc, bar_format=bar_fmt) as pbar:
        for index in range(ceil(total / page_size)):
            offset = index * page_size
            page_number = index + 1
            if check(source_name, page_number):
                batch = ordered.all()[offset: offset + page_size]
                handler(batch, source_name, page_number)
                pbar.refresh()
            # Last page may be partial: never advance past the total.
            remaining = total - offset
            pbar.update(page_size if page_size <= remaining else remaining)


def get_dir_path(source_name, current_page):
    """Return the bucket directory for a given export page.

    Pages are grouped into sub-directories of ``source_dir_size`` pages each
    to avoid one huge flat directory. CONSISTENCY FIX: use the module-level
    ``source_dir_size`` constant instead of a duplicated magic ``1000``.

    @param source_name:  resource name (storage sub-directory)
    @param current_page: 1-based page number
    @return: directory path string (with trailing slash)
    """
    dir_path = f"{BASE_DIR}/data/{source_name}/{ceil(current_page / source_dir_size)}/"
    return dir_path


def get_model_dir_path(source_name):
    """Return the root data directory for ``source_name`` (trailing slash)."""
    return f"{BASE_DIR}/data/{source_name}/"


def save_batch_file(data_list, source_name, current_page):
    """Atomically persist one exported page as a pickle file.

    The batch is first written to a ``.pickle_tmp`` file and then renamed to
    the final ``.pickle`` name, so readers never observe a half-written file.
    """
    dir_path = get_dir_path(source_name, current_page)
    tmp_path = f"{dir_path}/{current_page}.pickle_tmp"
    final_path = f"{dir_path}/{current_page}.pickle"
    # Drop any stale temp file left over from an interrupted previous run.
    if os.path.exists(tmp_path):
        os.remove(tmp_path)
    os.makedirs(dir_path, exist_ok=True)
    with open(tmp_path, 'wb') as fh:
        # Serialize the whole batch into the temp file.
        pickle.dump(data_list, fh)
    # os.replace is atomic on the same filesystem.
    os.replace(tmp_path, final_path)


def rename(file):
    """Mark ``file`` as processed by appending the ``_done`` suffix.

    @param file: pathlib.Path of the batch file to mark
    """
    file.rename(file.with_name(f"{file.name}_done"))


def zip_folder():
    """Archive the export data directory into ``migrate.zip`` (idempotent)."""
    archive_base = f"{BASE_DIR}/migrate"
    # Never overwrite an archive that was already produced.
    if os.path.exists(f"{archive_base}.zip"):
        return
    shutil.make_archive(archive_base, 'zip', f"{BASE_DIR}/data/")


def un_zip():
    """Extract ``migrate.zip`` into the data directory (no-op if it exists)."""
    extract_dir = Path(f"{BASE_DIR}/data/")
    # Data already extracted (or produced locally) — nothing to do.
    if os.path.exists(extract_dir):
        return
    extract_dir.mkdir(exist_ok=True)
    archive = Path(f"{BASE_DIR}/migrate.zip")
    with zipfile.ZipFile(archive, 'r') as zf:
        zf.extractall(extract_dir)


def contains_xpack():
    """Return True if any ``xpack*`` sub-directory exists under APP_DIR."""
    return any(
        entry.is_dir() and entry.name.startswith("xpack")
        for entry in Path(f"{APP_DIR}/").iterdir()
    )


def base_version(version: str) -> str:
    """Extract the base version from a string like 'v1.10.10-lts (build ...)'.

    Returns e.g. 'v1.10.10-lts'. For strings without a leading 'v' tag, falls
    back to the first whitespace-separated token. Returns '' for empty,
    None-like, or whitespace-only input.

    BUG FIX: a whitespace-only string previously slipped past the
    ``not version`` guard and crashed on ``split()[0]`` (IndexError).
    """
    if not version:
        return ''
    m = re.match(r'^\s*(v[\w\.\-]+)', version)
    if m:
        return m.group(1)
    tokens = version.strip().split()
    return tokens[0] if tokens else ''


def ver_tuple(v: str):
    """Parse a version string into a ``(major, minor, patch)`` int tuple.

    Leading 'v' and trailing '-suffix' (e.g. ``-lts``) are stripped; missing
    components default to 0. Any non-numeric component yields ``(0, 0, 0)``.
    """
    core = (base_version(v) or "").strip().lower()
    if core.startswith("v"):
        core = core[1:]
    core = core.partition("-")[0]  # drop suffixes such as -lts
    pieces = core.split(".")
    try:
        # Pad to three components, ignore anything past the patch number.
        return tuple(int(piece) for piece in (pieces + ["0", "0"])[:3])
    except ValueError:
        return (0, 0, 0)


def to_workspace_user_resource_permission(user_id: str, auth_target_type, target_id, permission_list=None):
    """Build a default-workspace resource-permission row for one user.

    @param user_id:          owner of the permission row
    @param auth_target_type: resource category being granted
    @param target_id:        id of the granted resource
    @param permission_list:  granted permissions; defaults to MANAGE + VIEW
    @return: an unsaved WorkspaceUserResourcePermission instance
    """
    from system_manage.models import WorkspaceUserResourcePermission
    permissions = ['MANAGE', 'VIEW'] if permission_list is None else permission_list
    return WorkspaceUserResourcePermission(
        workspace_id='default',
        user_id=user_id,
        auth_target_type=auth_target_type,
        target=target_id,
        auth_type='RESOURCE_PERMISSION_GROUP',
        permission_list=permissions,
    )


import pickle
from functools import wraps


def preserve_time_fields(model_class, *fields):
    """
    Decorator factory: while the wrapped function runs, disable
    ``auto_now`` / ``auto_now_add`` on the named time fields of
    ``model_class`` so imported rows keep their original timestamps,
    then restore the previous settings (even on error).
    """

    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Snapshot the current auto-timestamp flags before touching them.
            originals = {}
            for name in fields:
                f = model_class._meta.get_field(name)
                originals[name] = (f.auto_now_add, f.auto_now)
            try:
                # Turn off automatic timestamping for the call.
                for name in fields:
                    f = model_class._meta.get_field(name)
                    f.auto_now_add = False
                    f.auto_now = False

                return func(*args, **kwargs)
            finally:
                # Always restore the recorded settings.
                for name, (add_flag, now_flag) in originals.items():
                    f = model_class._meta.get_field(name)
                    f.auto_now_add = add_flag
                    f.auto_now = now_flag

        return wrapper

    return decorator
