import hashlib
import logging
import os
import subprocess
from contextlib import contextmanager
from pathlib import Path

from app.core.config import BILBAO_DATA, CACHE_DIR, SCRIPT_DIR

logger = logging.getLogger(__name__)


class ProcessingError(Exception):
    """Raised when a processing step (external tool run or data parsing) fails."""


@contextmanager
def change_directory(path: Path):
    """Context manager that chdirs into *path* and always restores the old cwd.

    Args:
        path: directory to switch into for the duration of the block.

    Yields:
        The target path, for convenience.
    """
    previous = Path.cwd()
    os.chdir(path)
    try:
        yield path
    finally:
        # Restore even when the body raises.
        os.chdir(previous)


def setup_cache_directory(poscar_content: str, generate_pposcar: bool = True):
    """Create (or reuse) a content-addressed cache directory for a POSCAR.

    The directory name is the MD5 hash of the POSCAR content, so identical
    inputs share one cache directory.

    Args:
        poscar_content: full text of the POSCAR file.
        generate_pposcar: when True, also run phonopy to produce PPOSCAR.

    Returns:
        The cache directory path.
    """
    cache_dir = CACHE_DIR / get_file_hash(poscar_content)
    cache_dir.mkdir(exist_ok=True, parents=True)
    logger.info(f"使用缓存目录: {cache_dir}")

    # Only write POSCAR on first use; the hash guarantees content matches.
    poscar_path = cache_dir / "POSCAR"
    if not poscar_path.exists():
        poscar_path.write_text(poscar_content)

    if generate_pposcar:
        generate_pposcar_with_phonopy(cache_dir)

    return cache_dir


def setup_tqc_data(cache_dir: Path, tqc_data: str) -> None:
    """Write tqc.data into *cache_dir*, clearing stale derived files on change.

    If an identical tqc.data already exists, nothing is touched. Otherwise all
    previous decompose* outputs and tqc.data* files are deleted before the new
    content is written.
    """
    tqc_path = cache_dir / "tqc.data"

    # Unchanged content: keep the existing file and its derived outputs.
    if tqc_path.exists() and get_file_hash(tqc_path.read_text()) == get_file_hash(tqc_data):
        return

    # decompose* files were derived from the old tqc.data; remove them.
    stale_decompose = list(cache_dir.glob("decompose*"))
    if stale_decompose:
        logger.info(f"清理 {len(stale_decompose)} 个旧的分解文件")
        for stale in stale_decompose:
            stale.unlink()
    stale_tqc = list(cache_dir.glob("tqc.data*"))
    if stale_tqc:
        logger.info(f"清理 {len(stale_tqc)} 个旧的tqc.data文件")
        for stale in stale_tqc:
            stale.unlink()

    tqc_path.write_text(tqc_data)
    logger.info(f"创建tqc.data文件: {tqc_path}")


def get_file_hash(file_content: str) -> str:
    """Return the hex MD5 digest of *file_content* (UTF-8 encoded).

    Used as a cache key / change detector, not for security.
    """
    digest = hashlib.md5(file_content.encode("utf-8"))
    return digest.hexdigest()


def _validate_poscar_format(poscar_path: Path) -> None:
    """Validate that *poscar_path* looks like a well-formed VASP POSCAR file.

    Checks the minimum line count, that the scaling factor is numeric, that
    lines 3-5 each hold a three-component lattice vector, and — when line 7
    parses as per-element atom counts (modern format with an element-symbol
    line) — that enough coordinate lines are present.

    Raises:
        ValueError: if the file is malformed.
    """
    try:
        with poscar_path.open("r") as f:
            lines = f.readlines()

        if len(lines) < 8:
            raise ValueError("文件行数不足")

        # The scaling factor must parse as a float; the parse is the check
        # (the previous unused `scale` local is dropped).
        float(lines[1].strip())

        # Lines 3-5: lattice vectors, three floats each.
        for i in range(2, 5):
            lattice_vec = list(map(float, lines[i].strip().split()))
            if len(lattice_vec) != 3:
                raise ValueError(f"第{i+1}行晶格向量格式错误")

        if len(lines) > 6:
            # Line 7 holds per-element atom counts in the modern format; in
            # the legacy format (no element-symbol line) it is something else,
            # so a failed int-parse only skips the completeness check.
            try:
                atom_counts = list(map(int, lines[6].strip().split()))
            except ValueError:
                atom_counts = None

            if atom_counts is not None:
                total_atoms = sum(atom_counts)
                # Coordinates start after an optional "Direct"/"Cartesian" line.
                coord_start = 8 if len(lines) > 7 and lines[7].strip().lower().startswith(("d", "c")) else 7
                # BUG FIX: this check used to sit inside the try above, so the
                # ValueError it raised was swallowed by the legacy-format
                # `except ValueError: pass` and incomplete coordinate data was
                # never reported.
                if len(lines) < coord_start + total_atoms:
                    raise ValueError("原子位置数据不完整")

    except (ValueError, IndexError) as e:
        raise ValueError(f"POSCAR文件格式错误: {str(e)}") from e


def generate_pposcar_with_phonopy(cache_dir: Path, filename: str = "POSCAR"):
    """Generate the primitive-cell PPOSCAR in *cache_dir* using phonopy.

    Runs ``phonopy --symmetry`` on *filename* inside *cache_dir*, then sanity
    checks the resulting PPOSCAR.

    Args:
        cache_dir: working directory containing the input structure file.
            (The old docstring documented a nonexistent ``temp_dir`` param.)
        filename: name of the input structure file (default "POSCAR").

    Raises:
        ProcessingError: when phonopy fails or the command is not installed.
        ValueError: from _validate_poscar_format if PPOSCAR is malformed.
            NOTE(review): a missing PPOSCAR raises FileNotFoundError inside
            the validator and is reported as "phonopy not found" below —
            confirm whether that conflation is acceptable.
    """
    try:
        phonopy_cmd = ["phonopy", "--symmetry", "--tolerance", "0.01", "-c", filename]
        subprocess.run(phonopy_cmd, cwd=cache_dir, capture_output=True, text=True, check=True)
        _validate_poscar_format(cache_dir / "PPOSCAR")
    except subprocess.CalledProcessError as e:
        # Chain the original error so the phonopy stderr/traceback is kept.
        raise ProcessingError(f"使用phonopy生成PPOSCAR失败: {e.stderr or '未知错误'}") from e
    except FileNotFoundError as e:
        raise ProcessingError("phonopy命令未找到，请确保已安装phonopy") from e


def determine_soc(tqc_data_path: Path):
    """Determine whether tqc.data contains spin-orbit-coupling (SOC) data.

    Args:
        tqc_data_path: path to the tqc.data file.

    Returns:
        0: no SOC
        1: SOC present

    Raises:
        ProcessingError: if tqc.data is malformed, a k-point's SOC state is
            ambiguous/mixed, or the state is inconsistent across k-points.
    """
    if not tqc_data_path.exists():
        raise ProcessingError(f"tqc.data文件在 {tqc_data_path} 未找到")

    with tqc_data_path.open("r") as f:
        lines = f.readlines()

    if len(lines) < 1:  # at least the header line is required
        raise ProcessingError("tqc.data文件为空或格式错误 (缺少表头)")

    # Header line: space group, number of k-points, number of bands.
    parts = lines[0].strip().split()
    if len(parts) < 3:
        raise ProcessingError("tqc.data第一行格式错误 (应包含SG, num_k, num_bands)")

    try:
        sg = int(parts[0])
        numk = int(parts[1])
        numb = int(parts[2])
    except ValueError as e:
        # BUG FIX: a non-integer header used to escape as a bare ValueError
        # instead of the documented ProcessingError.
        raise ProcessingError("tqc.data第一行格式错误 (应包含SG, num_k, num_bands)") from e

    if numk == 0:
        logger.info("tqc.data中没有k点数据，返回默认SOC状态 (无SOC)。")
        return 0

    if len(lines) < numk + 1:
        raise ProcessingError(f"tqc.data文件行数不足以容纳 {numk} 个k点的数据")

    # Parse the band-representation (BR) rows, one per k-point:
    # [k_index, BR1, BR2, ...]
    collect = []
    for i in range(1, numk + 1):
        line_content = lines[i].strip()
        brdata_str = line_content.split()
        # BUG FIX: the emptiness check must come before indexing the first
        # token; an empty row previously raised IndexError instead of the
        # intended ProcessingError.
        if not brdata_str:
            raise ProcessingError(f"tqc.data第{i+1}行为空或格式错误")
        k_index_in_br = brdata_str[0]

        if "?" in line_content:
            raise ProcessingError(f"tqc.data第{i+1}行 (k点 {k_index_in_br}) 包含未知符号 '?'")

        try:
            brdata = list(map(int, brdata_str))
        except ValueError as e:
            raise ProcessingError(f"tqc.data第{i+1}行 (k点 {k_index_in_br}) 包含非整数表示") from e

        if len(brdata) <= 1:  # need the k-point index plus at least one BR
            raise ProcessingError(f"tqc.data第{i+1}行 (k点 {k_index_in_br}) BR数据格式错误")
        if len(brdata) > numb + 1:  # k-point index + at most numb BRs
            raise ProcessingError(f"tqc.data第{i+1}行 (k点 {k_index_in_br}) 包含过多BR数据 (预期最多 {numb} 个)")
        collect.append(brdata)

    # Load the reference table for this space group from the Bilbao data file.
    ref = {}
    kname = []
    kfile_path = BILBAO_DATA / "kvec_list_A.txt"

    with kfile_path.open("r") as kfile:
        for _ in range(230):
            line = kfile.readline()
            if not line:
                # NOTE(review): an early EOF breaks out without triggering the
                # "space group not found" error below (pre-existing behavior).
                break
            nowsg, totk = list(map(int, line.strip().split()))
            if sg != nowsg:
                for _ in range(totk):
                    kfile.readline()  # skip this space group's k-point rows
            else:
                for _ in range(totk):
                    tmp0 = kfile.readline().strip().split()
                    if len(tmp0) < 6:
                        continue  # malformed row: needs at least 6 fields
                    tmp = list(map(int, tmp0[0:4]))
                    ref[tmp[0]] = tuple(tmp[1:4])
                    kname.append(tmp0[5])
                break  # found the target space group
        else:
            raise ProcessingError(f"空间群 {sg} 在参考文件中未找到")

    # Normalize negative k-point indices onto their "A"-suffixed counterparts.
    for ik_collect in range(len(collect)):  # index loop: mutate in place
        if collect[ik_collect][0] < 0:
            absk = abs(collect[ik_collect][0])
            if absk - 1 < 0 or absk - 1 >= len(kname):
                raise ProcessingError(
                    f"k点索引 {collect[ik_collect][0]} (绝对值 {absk}) 超出kname列表范围 (大小 {len(kname)})"
                )
            abskname = kname[absk - 1]
            knameA = abskname + "A"
            # Anti-symmetric k-points map onto their "A" twin when one exists.
            if knameA in kname:
                kaindex = kname.index(knameA) + 1
                collect[ik_collect][0] = kaindex
            # Otherwise the index stays negative (original behavior); it will
            # then fail the reference lookup below.

    determined_soc_status = -1  # -1: undetermined, 0: no SOC, 1: SOC

    for br in collect:
        k_idx, *reps = br

        if k_idx not in ref:
            raise ProcessingError(f"tqc.data中k点 {k_idx} 在参考数据中未找到。")

        D_max, D_nsoc, _ = ref[k_idx]
        # The third reference value (D_soc) is unused: D_nsoc alone is the
        # threshold separating no-SOC from SOC representation numbers.

        # Representation numbers may never exceed the reference maximum.
        if any(rep > D_max for rep in reps):
            raise ProcessingError(
                f"tqc.data中k点 {k_idx} 的表示编号超出范围 (允许的最大编号: {D_max}, BRs: {reps})"
            )

        all_no_soc = all(rep <= D_nsoc for rep in reps)
        all_soc = all(rep > D_nsoc for rep in reps)

        if all_no_soc:  # exclusively no-SOC representations
            current_k_point_soc = 0
        elif all_soc:  # exclusively SOC representations
            current_k_point_soc = 1
        else:  # mixed representations on one k-point: ambiguous
            raise ProcessingError(
                f"tqc.data中k点 {k_idx} 的SOC状态不明确或混合。BRs: {reps} (无SOC最大表示维度: {D_nsoc})"
            )

        if determined_soc_status == -1:
            determined_soc_status = current_k_point_soc
        elif determined_soc_status != current_k_point_soc:
            raise ProcessingError(
                f"tqc.data中不同k点的SOC状态不一致。先前确定为 {determined_soc_status}, "
                f"但k点 {k_idx} 指示为 {current_k_point_soc}。"
            )

    return determined_soc_status


def run_pos2abr(cache_dir: Path):
    """Run the external pos2aBR program in *cache_dir*, stdout to 'posout'.

    Raises:
        ProcessingError: when the executable is missing or exits non-zero.
    """
    pos2abr_path = SCRIPT_DIR / "pos2aBR"
    if not (pos2abr_path.exists() and os.access(pos2abr_path, os.X_OK)):
        raise ProcessingError(f"找不到可执行文件: {pos2abr_path}")

    # Run pos2aBR directly with the inherited environment; its stdout is the
    # 'posout' file, stderr is captured for error reporting.
    with open(cache_dir / "posout", "w") as posout_file:
        result = subprocess.run(
            [pos2abr_path], cwd=cache_dir, stdout=posout_file, stderr=subprocess.PIPE, text=True
        )

    if result.returncode != 0:
        raise ProcessingError(f"pos2aBR程序执行失败: {result.stderr or '未知错误'}")


def run_mom2msg(cache_dir: Path):
    """Run the external mom2msg program in *cache_dir*.

    Raises:
        ProcessingError: when the executable is missing or exits non-zero.
    """
    mom2msg_path = SCRIPT_DIR / "mom2msg"
    if not (mom2msg_path.exists() and os.access(mom2msg_path, os.X_OK)):
        raise ProcessingError(f"找不到可执行文件: {mom2msg_path}")

    # Capture both streams so a failure can report mom2msg's stderr.
    result = subprocess.run([mom2msg_path], cwd=cache_dir, capture_output=True, text=True)
    if result.returncode != 0:
        raise ProcessingError(f"mom2msg脚本执行失败: {result.stderr or '未知错误'}")


def remove_He(poscar: str) -> str:
    """Strip a trailing He pseudo-element (and its coordinates) from a POSCAR string.

    Only acts when "He" is the LAST entry on the element line (line 6); the
    matching count is dropped from line 7 and that many coordinate lines are
    removed from the end. On any parse failure or implausible count, the input
    is returned unchanged.
    """
    lines = poscar.splitlines(True)
    if len(lines) < 7:
        return poscar

    species = lines[5].rstrip("\n").split()
    if not species or species[-1] != "He":
        return poscar

    try:
        counts = lines[6].rstrip("\n").split()
        # int(float(...)) tolerates counts written as "1.0".
        he_count = int(float(counts[-1]))
        lines[5] = " ".join(species[:-1]) + "\n"
        lines[6] = " ".join(counts[:-1]) + "\n"
    except (ValueError, IndexError):
        return poscar

    # Header occupies lines 0-7 (including the coordinate-mode line), so the
    # file carries len(lines) - 8 coordinate rows; reject counts beyond that.
    if he_count <= 0 or he_count > len(lines) - 8:
        return poscar
    return "".join(lines[:-he_count])
