#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Filename: capture_get_dir_jsp.py
# Author: ChenChangSong
# Date: 2025-08-24


"""
功能描述: 获取测试用例目录(通过jsp获取的比较全)
"""
import re
from comm.common_code import *

def capture_get_dir_jsp(max_retries=3):
    """Fetch the test-case directory tree via the TAPD jsp page.

    Requests the ``tcase_list`` page, extracts the ``var ZNODES = [...]``
    JSON array embedded in the returned HTML, and flattens the tree into a
    mapping of full directory path ("Root-Child-Leaf") -> category id.

    Args:
        max_retries: number of attempts before giving up; an HTTP failure,
            a missing ZNODES marker, or a JSON decode error each consume
            one attempt.

    Returns:
        dict[str, str] mapping full directory path to node id, or ``None``
        when every attempt failed.
    """
    excel_data = get_excel_data()
    # BUG FIX: the query keys must be the literal text "data[Filter][name]".
    # The previous string used "\[" / "\]" (shell escapes copied from a curl
    # export), which are invalid Python escapes and put stray backslashes
    # into the URL.
    url = (
        f"https://www.tapd.cn/{excel_data['项目ID']}/sparrow/tcase/tcase_list"
        f"?data[Filter][name]=&async=1&category_id=0"
        f"&select_workspace={excel_data['项目ID']}"
    )
    headers = {
        'authority': 'www.tapd.cn',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'cache-control': 'max-age=0',
        'cookie': excel_data['Cookie'],
        'sec-ch-ua': '"Not)A;Brand";v="24", "Chromium";v="116"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'document',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-user': '?1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.5845.97 Safari/537.36 Core/1.116.554.400 QQBrowser/19.5.6663.400'
    }
    for attempt in range(max_retries):
        response_code, response = api_request(method="GET", url=url, headers=headers, data={})
        if response_code != 200:
            logger.error(f"请求失败，状态码：{response_code}")
            time.sleep(2)  # back off before retrying, same as the JSON path
            continue

        # ZNODES is an inline JS array; DOTALL because it may span lines.
        match = re.search(r'var ZNODES = (\[.*?\]);', response, re.DOTALL)
        if not match:
            logger.error("未匹配到ZNODES")
            time.sleep(2)
            continue

        try:
            # BUG FIX: json.loads used to run *before* this try block, so
            # the JSONDecodeError handler below could never fire and a
            # malformed payload crashed instead of being retried.
            nodes = json.loads(match.group(1))
        except json.JSONDecodeError as e:
            logger.error(f"JSON解析失败（尝试{attempt+1}/{max_retries}）: {e}")
            if attempt < max_retries - 1:
                time.sleep(2)  # wait 2s, then retry
                continue
            logger.error("达到最大重试次数，无法获取完整目录数据")
            return None

        dir_dict = _build_dir_dict(nodes)
        logger.info(f'目录映射字典:\n {dir_dict}')
        return dir_dict
    return None


def _build_dir_dict(nodes):
    """Flatten a ZNODES node list into ``{full_path: node_id}``.

    ``nodes`` are dicts with at least ``id`` and ``name`` and usually
    ``pId``.  The payload mixes ints and strings (the root id is the
    number 0), so every id is normalised to ``str``; a node whose ``pId``
    is ``'0'`` is treated as a top-level directory.
    """
    # Sort by parent id so the mind-map generation order is stable — the
    # API returns the nodes in arbitrary order.
    node_map = {}
    for item in sorted(nodes, key=lambda x: str(x.get('pId', ''))):
        node_map[str(item['id'])] = {
            'pId': str(item.get('pId', 'None')),
            'name': str(item['name']),
        }

    def full_path_of(node_id, seen=None):
        """Return "Root-...-name" for node_id, or None if unknown."""
        seen = set() if seen is None else seen
        if node_id in seen:
            return None  # defensive: break parent cycles in bad data
        node = node_map.get(node_id)
        if node is None:
            return None
        seen.add(node_id)
        if node['pId'] == '0':
            return node['name']
        parent_path = full_path_of(node['pId'], seen)
        return f"{parent_path}-{node['name']}" if parent_path else node['name']

    dir_dict = {}
    for node_id in node_map:
        if node_id == '0':
            continue  # skip the synthetic root node itself
        full_path = full_path_of(node_id)
        if full_path:
            dir_dict[full_path] = node_id
    return dir_dict

if __name__ == "__main__":
    capture_get_dir_jsp()