#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time

from json import JSONDecodeError

import allure
import httpx
import requests
import stamina
import json


from _pytest.outcomes import Skipped
from httpx import Response as HttpxResponse
from requests import Response as RequestsResponse

from autoapi.common.errors import AssertError, SendRequestError
from autoapi.common.log import log
from autoapi.core.get_conf import autoapi_config
from autoapi.db.mysql import mysql_client
from autoapi.enums.query_fetch_type import QueryFetchType
from autoapi.enums.request.body import BodyType
from autoapi.enums.request.engin import EnginType
from autoapi.enums.setup_type import SetupType
from autoapi.enums.teardown_type import TeardownType
from autoapi.utils.allure_control import allure_attach_file, allure_step
from autoapi.utils.assert_control import asserter
from autoapi.utils.enum_control import get_enum_values
from autoapi.utils.relate_testcase_executor import exec_setup_testcase
from autoapi.utils.request.hook_executor import hook_executor
from autoapi.utils.request.request_data_parse import RequestDataParse
from autoapi.utils.request.vars_extractor import var_extractor
from autoapi.utils.time_control import get_current_time
from autoapi.common.variable_cache import variable_cache


class SendRequests:
    """Send test-case HTTP requests and handle setup/teardown around them."""

    @property
    def init_response_metadata(self) -> dict:
        """
        Build a fresh, empty response-metadata template.

        :return: metadata skeleton with default values; a new dict per access
        """
        # Fields that start out unset share a single None default.
        metadata: dict = dict.fromkeys(
            ("url", "headers", "cookies", "json", "content", "text", "request")
        )
        metadata["status_code"] = 200
        metadata["elapsed"] = 0
        metadata["stat"] = {"execute_time": None}
        return metadata

    @staticmethod
    def _requests_engin(**kwargs) -> RequestsResponse:
        """
        requests engine.

        Fills unset connection options from the framework config, sends the
        request with retry support, and wraps any failure in
        ``SendRequestError``.

        :param kwargs: keyword arguments forwarded to ``requests.Session.request``
        :return: the final ``requests.Response``
        :raises SendRequestError: when the request ultimately fails
        """
        # Use `is None` so explicitly-passed falsy values (timeout=0,
        # verify=False, allow_redirects=False) are honoured instead of being
        # silently replaced by the config defaults (the old `x or default`
        # pattern discarded them).
        if kwargs["timeout"] is None:
            kwargs["timeout"] = autoapi_config.REQUEST_TIMEOUT
        if kwargs["verify"] is None:
            kwargs["verify"] = autoapi_config.REQUEST_VERIFY
        if kwargs["proxies"] is None:
            kwargs["proxies"] = autoapi_config.REQUEST_PROXIES_REQUESTS
        if kwargs["allow_redirects"] is None:
            kwargs["allow_redirects"] = autoapi_config.REQUEST_REDIRECTS
        # retry keeps the `or` fallback: 0 retries is not a meaningful request
        request_retry = kwargs.pop("retry") or autoapi_config.REQUEST_RETRY
        # Silence the insecure-request warning emitted when verify is False
        requests.packages.urllib3.disable_warnings()  # type: ignore
        log.info("📤 开始发送请求")
        try:
            for attempt in stamina.retry_context(
                on=requests.HTTPError, attempts=request_retry
            ):
                with attempt:
                    if attempt.num > 1:
                        log.warning("🔁 请求响应异常重试")
                    response = requests.session().request(**kwargs)
                    response.raise_for_status()
        except Exception as e:
            log.error(f"❗️ 发送 requests 请求响应异常: {e}")
            raise SendRequestError(str(e)) from e
        else:
            log.info("📥 请求完成")
            return response  # type: ignore

    @staticmethod
    def _httpx_engin(**kwargs) -> HttpxResponse:
        """
        httpx engine.

        Fills unset connection options from the framework config, sends the
        request inside a short-lived ``httpx.Client`` with retry support, and
        wraps any failure in ``SendRequestError``.

        :param kwargs: keyword arguments forwarded to ``httpx.Client.request``
        :return: the final ``httpx.Response``
        :raises SendRequestError: when the request ultimately fails
        """
        # Use `is None` so explicitly-passed falsy values (timeout=0,
        # verify=False, allow_redirects=False) are honoured instead of being
        # silently replaced by the config defaults (the old `x or default`
        # pattern discarded them).
        if kwargs["timeout"] is None:
            kwargs["timeout"] = autoapi_config.REQUEST_TIMEOUT
        # These three are client-level options in httpx, so pop them out of
        # the per-request kwargs.
        verify = kwargs.pop("verify")
        if verify is None:
            verify = autoapi_config.REQUEST_VERIFY
        proxies = kwargs.pop("proxies")
        if proxies is None:
            proxies = autoapi_config.REQUEST_PROXIES_HTTPX
        redirects = kwargs.pop("allow_redirects")
        if redirects is None:
            redirects = autoapi_config.REQUEST_REDIRECTS
        # retry keeps the `or` fallback: 0 retries is not a meaningful request
        request_retry = kwargs.pop("retry") or autoapi_config.REQUEST_RETRY
        log.info("📤 开始发送请求")
        try:
            with httpx.Client(verify=verify, proxies=proxies, follow_redirects=redirects) as client:  # type: ignore
                for attempt in stamina.retry_context(
                    on=httpx.HTTPError, attempts=request_retry
                ):
                    with attempt:
                        if attempt.num > 1:
                            log.warning("🔁 请求响应异常重试")
                        response = client.request(**kwargs)
                        response.raise_for_status()
        except Exception as e:
            log.error(f"❗️ 发送 httpx 请求响应异常: {e}")
            raise SendRequestError(str(e)) from e
        else:
            log.info("📥 请求完成")
            return response  # type: ignore

    def send_request(
        self,
        request_data: dict,
        *,
        request_engin: EnginType = EnginType.requests,
        log_data: bool = True,
        relate_log: bool = False,
        **kwargs,
    ) -> dict:
        """
        Send the request for one test case.

        Pipeline: parse the case data, run setup steps (related cases, SQL,
        hooks, waits), fire the HTTP request — batched over multiple IDs when
        a setup SQL returned several rows — then run teardown steps (SQL,
        hooks, variable extraction, assertions, waits).

        :param request_data: raw test-case request data
        :param request_engin: request engine to use (requests / httpx)
        :param log_data: whether to log request/response data
        :param relate_log: True when executing a related (dependency) test case
        :return: response metadata dict (shape of ``init_response_metadata``)
        """
        if request_engin not in get_enum_values(EnginType):
            raise SendRequestError("❌️ 请求发起失败，请使用合法的请求引擎")

        # Parse the raw case data into a normalized structure
        log.info("🔛 开始解析用例数据" if not relate_log else "🔛 开始解析关联用例数据")
        try:
            request_data_parse = RequestDataParse(request_data, request_engin)
            parsed_data = request_data_parse.get_request_data_parsed(relate_log)
        except Skipped as e:
            # pytest skip markers must propagate untouched
            raise e
        except Exception as e:
            if not relate_log:
                log.error(f"❌️ 用例数据解析失败: {e}")
            raise e
        log.info("✅️ 用例数据解析完成" if not relate_log else "✅️ 关联用例数据解析完成")

        # Log the setup data before executing it; related-case variables in it
        # are logged unreplaced because substitution has not happened yet
        setup = parsed_data["setup"]
        if log_data:
            if parsed_data["is_setup"]:
                self.log_request_setup(setup)

        # Setup handling
        if parsed_data["is_setup"]:
            log.info("🔛 开始处理请求前置")
            try:
                for item in setup:
                    for key, value in item.items():
                        if value is not None:
                            if key == SetupType.TESTCASE:
                                # A related case may rewrite the parsed data
                                relate_parsed_data = exec_setup_testcase(
                                    parsed_data, value
                                )
                                if relate_parsed_data:
                                    parsed_data = relate_parsed_data
                            elif key == SetupType.SQL:
                                setup_sql = var_extractor.vars_replace(value, parsed_data["env"])
                                sql_fetch = QueryFetchType.ALL
                                db_name = None  # None -> fall back to the default database

                                if isinstance(setup_sql, dict):
                                    sql = setup_sql.get("sql")
                                    sql_fetch = setup_sql.get("fetch")
                                    db_name = setup_sql.get("db")  # optional database-name override
                                    if db_name is None:
                                        db_name = autoapi_config.MYSQL_DEFAULT  # use the default database

                                    # Log which database is used
                                    log.info(f"📊 使用数据库: {db_name}")

                                    # Execute the SQL and collect the result
                                    if sql is not None:
                                        result = mysql_client.exec_case_sql(sql, sql_fetch, parsed_data["env"], db_name)
                                    else:
                                        log.warning("SQL语句为空，跳过执行")
                                        result = None
                                    log.info(f"SQL查询结果: {result}")

                                    # Extract a variable from the result and store it
                                    if "key" in setup_sql and "jsonpath" in setup_sql:
                                        from autoapi.utils.request.vars_recorder import record_variables
                                        try:
                                            # Wrap the SQL result so it has the same
                                            # shape as a response ({"json": ...})
                                            if isinstance(result, list):
                                                if len(result) == 1:
                                                    wrapped_result = {"json": result[0]}
                                                else:
                                                    wrapped_result = {"json": result}
                                            else:
                                                wrapped_result = {"json": result}

                                            record_variables(
                                                setup_sql["jsonpath"],
                                                wrapped_result,
                                                setup_sql["key"],
                                                setup_sql.get("type", "cache"),
                                                parsed_data["env"]
                                            )
                                            log.info(f"成功提取变量 {setup_sql['key']}")

                                            # Multiple rows returned: remember them so the
                                            # send phase can fan the request out per ID
                                            if isinstance(result, list) and len(result) > 1:
                                                log.info(f"🔍 检测到多个ID: {result}")
                                                # Keep the original URL template for later substitution
                                                original_url = parsed_data["url"]
                                                parsed_data["_original_url"] = original_url
                                                parsed_data["_multiple_ids"] = result

                                        except Exception as e:
                                            log.error(f"提取变量失败: {e}")
                                            raise
                                else:
                                    if isinstance(setup_sql, str):
                                        # Plain-string SQL: execute without extraction
                                        mysql_client.exec_case_sql(setup_sql, sql_fetch, parsed_data["env"], db_name or "")
                                    else:
                                        log.error(f"❌ SQL 语句类型错误，期望字符串类型，实际得到: {type(setup_sql)}")
                                        raise SendRequestError(f"SQL 语句类型错误，期望字符串类型，实际得到: {type(setup_sql)}")
                            elif key == SetupType.HOOK:
                                hook_executor.exec_hook_func(value)
                            elif key == SetupType.WAIT_TIME:
                                time.sleep(value)
                                log.info(f"⏳️ 执行请求前等待：{value} s")
            except Exception as e:
                log.error(f"❗️ 请求前置处理异常: {e}")
                raise e
            log.info("✅️ 请求前置处理完成")

        # Record dynamic case data in the allure report
        self.allure_dynamic_data(parsed_data)

        # Assemble request parameters: connection options vs. request payload
        request_conf = {
            "timeout": parsed_data["timeout"],
            "verify": parsed_data["verify"],
            "proxies": parsed_data["proxies"],
            "allow_redirects": parsed_data["redirects"],
            "retry": parsed_data["retry"],
        }
        request_data_parsed = {
            "method": parsed_data["method"],
            "url": parsed_data["url"],
            "params": parsed_data["params"],
            "headers": parsed_data["headers"],
            "cookies": parsed_data["cookies"],
            "body": parsed_data["body"],
            "files": parsed_data["files"],
        }
        try:
            # Detach file objects so variable replacement cannot try to serialize them
            files_temp = request_data_parsed.pop("files", None)
            request_data_parsed: dict = var_extractor.vars_replace(request_data_parsed, parsed_data["env"])  # type: ignore # noqa: ignore
            # Re-attach the file objects after replacement
            if files_temp is not None:
                request_data_parsed["files"] = files_temp
            body = request_data_parsed.pop("body")
            # Map the abstract body onto the engine-specific keyword argument
            if (
                parsed_data["body_type"] == BodyType.JSON
                or parsed_data["body_type"] == BodyType.GraphQL
            ):
                request_data_parsed.update({"json": body})
            elif parsed_data["body_type"] == BodyType.binary:
                # NOTE(review): binary bodies are only forwarded for httpx;
                # with the requests engine the body is dropped — confirm intended
                if request_engin == EnginType.httpx:
                    request_data_parsed.update({"content": body})
            else:
                request_data_parsed.update({"data": body})
            parsed_data.update(
                body=request_data_parsed.get("json")
                or request_data_parsed.get("data")
                or request_data_parsed.get("content")
            )
        except Exception as e:
            log.error(e)
            raise e

        # Log the outgoing request data
        if log_data:
            self.log_request_up(parsed_data)
            self.allure_request_up(parsed_data)

        # Send the request
        response_data = self.init_response_metadata
        response_data["stat"]["execute_time"] = get_current_time()

        # Batch mode: a setup SQL returned multiple rows/IDs
        if "_multiple_ids" in parsed_data and parsed_data["_multiple_ids"]:
            log.info(f"🔄 检测到多个ID，开始批量处理: {parsed_data['_multiple_ids']}")
            all_responses = []

            # URL template captured before variable replacement
            original_url = parsed_data["_original_url"]

            # Fire one request per ID
            for i, id_item in enumerate(parsed_data["_multiple_ids"]):
                # A row may be a dict with an "id" column or a bare value
                if isinstance(id_item, dict) and "id" in id_item:
                    user_id = id_item["id"]
                else:
                    user_id = id_item

                log.info(f"📤 处理第 {i+1}/{len(parsed_data['_multiple_ids'])} 个ID: {user_id}")

                # Substitute the ID into the URL template
                # (only the literal ${user_id} placeholder is supported)
                current_url = original_url.replace("${user_id}", str(user_id))
                request_data_parsed["url"] = current_url

                # Send this single request
                if request_engin == EnginType.requests:
                    response = self._requests_engin(
                        **request_conf, **request_data_parsed, **kwargs
                    )
                elif request_engin == EnginType.httpx:
                    response = self._httpx_engin(
                        **request_conf, **request_data_parsed, **kwargs
                    )
                else:
                    raise SendRequestError(
                        "❌️ 请求发起失败，请使用合法的请求引擎：requests / httpx"
                    )

                # Deserialize the response body (JSON only when declared)
                res_headers = dict(response.headers)
                res_content_type = res_headers.get("Content-Type")
                try:
                    if res_content_type and "application/json" in res_content_type:
                        json_data = response.json()
                    else:
                        json_data = {}
                except JSONDecodeError:
                    err_msg = "❌️ 响应数据解析失败，响应数据不是有效的 json 格式"
                    log.warning(err_msg)
                    raise SendRequestError(err_msg)

                # Record this response's metadata
                single_response = self.init_response_metadata.copy()
                single_response["url"] = str(response.url)
                single_response["status_code"] = int(response.status_code)
                # NOTE(review): .microseconds is only the sub-second component,
                # so requests over 1 s report a wrong elapsed value — probably
                # should be total_seconds() * 1000; confirm intended
                single_response["elapsed"] = response.elapsed.microseconds / 1000.0
                single_response["headers"] = res_headers
                single_response["cookies"] = dict(response.cookies)
                single_response["json"] = json_data
                single_response["content"] = response.content
                single_response["text"] = response.text
                single_response["request"] = request_data_parsed.copy()

                all_responses.append(single_response)

                # Log each response body as it completes
                try:
                    response_content = json.dumps(json_data, ensure_ascii=False, indent=2)
                    log.info(f"✅ 第 {i+1} 个请求完成，状态码: {response.status_code}")
                    log.info(f"📄 第 {i+1} 个请求响应内容:\n{response_content}")
                except (json.JSONDecodeError, TypeError):
                    log.info(f"✅ 第 {i+1} 个请求完成，状态码: {response.status_code}")
                    log.info(f"📄 第 {i+1} 个请求响应内容: {response.text}")

            # Use the last response as the primary one (keeps the single-request
            # return contract); all responses remain reachable via _all_responses
            if all_responses:
                response_data = all_responses[-1]
                response_data["_all_responses"] = all_responses  # keep every response
                log.info(f"✅ 批量处理完成，共处理 {len(all_responses)} 个请求")

                # Each response body was already logged inside the loop,
                # so only a summary is emitted here
                if log_data:
                    self._log_batch_response_summary(response_data, len(all_responses))
                    self.allure_request_down(response_data)
                    if parsed_data["is_teardown"]:
                        self.log_request_teardown(parsed_data["teardown"])
            else:
                raise SendRequestError("❌️ 批量处理失败，没有成功执行的请求")
        else:
            # Single-request path (original behaviour)
            if request_engin == EnginType.requests:
                response = self._requests_engin(
                    **request_conf, **request_data_parsed, **kwargs
                )
            elif request_engin == EnginType.httpx:
                response = self._httpx_engin(
                    **request_conf, **request_data_parsed, **kwargs
                )
            else:
                raise SendRequestError(
                    "❌️ 请求发起失败，请使用合法的请求引擎：requests / httpx"
                )

            # Deserialize the response body (JSON only when declared)
            res_headers = dict(response.headers)
            res_content_type = res_headers.get("Content-Type")
            try:
                if res_content_type and "application/json" in res_content_type:
                    json_data = response.json()
                else:
                    json_data = {}
            except JSONDecodeError:
                err_msg = "❌️ 响应数据解析失败，响应数据不是有效的 json 格式"
                log.warning(err_msg)
                raise SendRequestError(err_msg)

            # Record the response metadata
            response_data["url"] = str(response.url)
            response_data["status_code"] = int(response.status_code)
            # NOTE(review): .microseconds is only the sub-second component —
            # probably should be total_seconds() * 1000; confirm intended
            response_data["elapsed"] = response.elapsed.microseconds / 1000.0
            response_data["headers"] = res_headers
            response_data["cookies"] = dict(response.cookies)
            response_data["json"] = json_data
            response_data["content"] = response.content
            response_data["text"] = response.text
            response_data["request"] = request_data_parsed

            # Log the response data (single request)
            if log_data:
                self.log_request_down(response_data)
                self.allure_request_down(response_data)
                if parsed_data["is_teardown"]:
                    self.log_request_teardown(parsed_data["teardown"])

        # Teardown handling
        if parsed_data["is_teardown"]:
            log.info("🛠️ 开始执行后置操作")
            try:
                for item in parsed_data["teardown"]:
                    for key, value in item.items():
                        if value is not None:
                            if key == TeardownType.SQL:
                                teardown_sql = var_extractor.vars_replace(value, parsed_data["env"])
                                sql_fetch = QueryFetchType.ALL
                                db_name = None  # None -> fall back to the default database

                                if isinstance(teardown_sql, dict):
                                    sql = teardown_sql.get("sql")
                                    sql_fetch = teardown_sql.get("fetch")
                                    db_name = teardown_sql.get("db")  # optional database-name override
                                    if db_name is None:
                                        db_name = autoapi_config.MYSQL_DEFAULT  # use the default database

                                    # Log which database is used
                                    log.info(f"📊 使用数据库: {db_name}")
                                else:
                                    sql = teardown_sql
                                    db_name = autoapi_config.MYSQL_DEFAULT  # use the default database
                                    log.info(f"📊 使用数据库: {db_name}")

                                mysql_client.exec_case_sql(sql, sql_fetch, parsed_data["env"], db_name)  # type: ignore
                            if key == TeardownType.HOOK:
                                hook_executor.exec_hook_func(value)
                            if key == TeardownType.EXTRACT:
                                var_extractor.teardown_var_extract(
                                    response_data, value, parsed_data["env"]
                                )
                            if key == TeardownType.ASSERT:
                                assert_data = var_extractor.vars_replace(
                                    value, env=parsed_data["env"]
                                )
                                try:
                                    if isinstance(assert_data, dict):
                                        log.info(
                                            f"  🔸 执行断言: {assert_data.get('check')}"
                                        )
                                        log.info(
                                            f"    🔹 类型: {assert_data.get('type')}"
                                        )
                                        log.info(
                                            f"    🔹 路径: {assert_data.get('jsonpath')}"
                                        )
                                        log.info(
                                            f"    🔹 预期值: {assert_data.get('value')}"
                                        )
                                    else:
                                        log.info(f"  🔹 执行断言: {assert_data}")

                                    # Pre-compute the actual value purely for logging
                                    jsonpath_expr = (
                                        assert_data["jsonpath"]
                                        if isinstance(assert_data, dict)
                                        and isinstance(assert_data.get("jsonpath"), str)
                                        else ""
                                    )
                                    actual_value = var_extractor.extract_json_value(
                                        response_data, jsonpath_expr
                                    )
                                    log.info(f"    🔹 实际值: {actual_value}")
                                    asserter.exec_asserter(response_data, assert_data)
                                    log.info("  ✅️ 断言通过")
                                except AssertionError as e:

                                    # Re-extract the actual value for the failure message
                                    jsonpath_expr = (
                                        assert_data["jsonpath"]
                                        if isinstance(assert_data, dict)
                                        and isinstance(assert_data.get("jsonpath"), str)
                                        else ""
                                    )
                                    actual_value = var_extractor.extract_json_value(
                                        response_data, jsonpath_expr
                                    )
                                    # Log the assertion-failure details
                                    if isinstance(assert_data, dict):
                                        log.error(
                                            f"  ❌️ 断言失败：类型{assert_data.get('type')}, 预期值\"{assert_data.get('value')}\", 实际值\"{actual_value}\""
                                        )

                                    # Re-raise as the framework's assertion error
                                    raise AssertError(f"❌️ 断言失败: {e}")
                            elif key == TeardownType.WAIT_TIME:
                                log.info(f"⏳️ 执行请求后等待：{value} s")
                                time.sleep(value)
            except AssertionError as e:
                raise AssertError(f"❌️ 断言失败: {e}")
            except Exception as e:
                log.error(f"❗️ 请求后置处理异常: {e}")
                raise e
            log.info("☑️ 后置处理完成")
            # Drop variables that were extracted for related test cases
            for var_key in list(parsed_data.get("extracted_vars", [])):
                variable_cache.delete(var_key, tag="relate_testcase")
        return response_data

    def log_request_setup(self, setup: list) -> None:
        """Log every setup item and attach it to the allure report."""
        # Map each setup type to its log template and allure attachment key
        dispatch = {
            SetupType.TESTCASE: ("🔧 前置依赖用例: {}", "setup_testcase"),
            SetupType.SQL: ("📜 前置SQL语句: {}", "setup_sql"),
            SetupType.HOOK: ("🪝 前置Hook函数: {}", "setup_hook"),
            SetupType.WAIT_TIME: ("⏳ 前置等待时间: {}", "setup_wait_time"),
        }
        for item in setup:
            for key, value in item.items():
                entry = dispatch.get(key)
                if entry is None:
                    continue
                template, allure_key = entry
                log.info(template.format(value))
                self.allure_request_setup({allure_key: value})

    @staticmethod
    def log_request_up(parsed_data: dict) -> None:
        """Log the outgoing request: case metadata followed by request details."""
        log.info("  🔸 模块: {}".format(parsed_data["module"]))
        log.info("  🔸 标题: {}".format(parsed_data["name"]))
        if parsed_data["description"]:
            log.info("  🔸 描述: {}".format(parsed_data["description"]))
        log.info("  🌐 环境: {}".format(parsed_data["env"]))

        log.info("  🔸 请求信息:")
        log.info("    🔹 URL: {}".format(parsed_data["url"]))
        log.info("    🔹 方法: {}".format(parsed_data["method"]))
        # Remaining fields are logged only when they hold a truthy value
        optional_fields = (
            ("headers", "    🔹 请求头: {}"),
            ("body_type", "    🔹 Body类型: {}"),
            ("body", "    🔹 Body内容: {}"),
            ("params", "    🔹 参数: {}"),
            ("files_no_parse", "    🔹 文件: {}"),
        )
        for field, template in optional_fields:
            if parsed_data[field]:
                log.info(template.format(parsed_data[field]))

    def log_request_teardown(self, teardown: list) -> None:
        """
        Log every teardown item and attach it to the allure report.

        :param teardown: list of teardown dicts keyed by ``TeardownType``
        """
        log.info("  🔸 后置内容:")
        for item in teardown:
            for key, value in item.items():
                if key == TeardownType.SQL:
                    # dict-style and plain-string SQL are logged identically,
                    # so the former isinstance branch was a dead duplicate
                    log.info("📜 后置SQL语句: {}".format(value))
                    self.allure_request_teardown({"teardown_sql": value})
                elif key == TeardownType.HOOK:
                    log.info("    🔹 钩子: {}".format(value))
                    self.allure_request_teardown({"teardown_hook": value})
                elif key == TeardownType.EXTRACT:
                    log.info(
                        "    🔹 提取变量: {} = {}".format(
                            value["key"], value["jsonpath"]
                        )
                    )
                    self.allure_request_teardown({"teardown_extract": value})
                elif key == TeardownType.ASSERT:
                    # dict-style asserts only log their "check" description
                    if isinstance(value, dict):
                        log.info("    🔹 断言: {}".format(value.get("check")))
                    else:
                        log.info("    🔹 断言: {}".format(value))
                    self.allure_request_teardown({"teardown_assert": value})
                elif key == TeardownType.WAIT_TIME:
                    log.info("    🔹 等待: {}秒".format(value))
                    self.allure_request_teardown({"teardown_wait_time": value})

    def _log_batch_response_summary(self, response_data: dict, total_count: int) -> None:
        """Log a batch-run summary without re-printing each response body."""
        log.info("  🔸 批量响应摘要:")
        log.info("    🔹 时间: {}".format(response_data["stat"]["execute_time"]))

        status_code = response_data["status_code"]
        # 4xx / 5xx status codes are reported at error level
        emit = log.error if str(status_code).startswith(("4", "5")) else log.info
        emit("    🔹 状态码: {}".format(status_code))

        log.info("    🔹 耗时: {} ms".format(response_data["elapsed"]))
        log.info("    🔹 总请求数: {}".format(total_count))
        log.info("    🔹 内容: [已在前面的日志中记录每个请求的详细内容]")

    @staticmethod
    def log_request_down(response_data: dict) -> None:
        """Log the response, pretty-printing JSON bodies when possible."""
        log.info("  🔸 响应信息:")
        log.info("    🔹 时间: {}".format(response_data["stat"]["execute_time"]))

        status_code = response_data["status_code"]
        # 4xx / 5xx status codes are reported at error level
        emit = log.error if str(status_code).startswith(("4", "5")) else log.info
        emit("    🔹 状态码: {}".format(status_code))

        log.info("    🔹 耗时: {} ms".format(response_data["elapsed"]))

        content = response_data["text"]
        try:
            pretty = json.dumps(json.loads(content), ensure_ascii=False, indent=2)
        except (json.JSONDecodeError, TypeError):
            # Not valid JSON (or not a string) — log the body verbatim
            log.info("    🔹 内容: {}".format(content))
        else:
            log.info("    🔹 内容:\n{}".format(pretty))

    @staticmethod
    def allure_request_setup(setup_log: dict) -> None:
        """Attach one setup record to the allure report."""
        allure_step("请求前置", setup_log)

    @staticmethod
    def allure_request_up(parsed_data: dict) -> None:
        """Attach the outgoing request data to the allure report."""
        # Fields copied through under their own names
        shared_keys = (
            "env",
            "module",
            "name",
            "case_id",
            "description",
            "method",
            "url",
            "params",
            "headers",
            "body_type",
            "body",
        )
        step_data = {field: parsed_data[field] for field in shared_keys}
        # The attachment exposes the unparsed files under the "files" key
        step_data["files"] = parsed_data["files_no_parse"]
        allure_step("请求数据", step_data)

    @staticmethod
    def allure_request_teardown(teardown_log: dict) -> None:
        """Attach one teardown record to the allure report."""
        allure_step("请求后置", teardown_log)

    @staticmethod
    def allure_request_down(response_data: dict) -> None:
        """Attach the response summary to the allure report."""
        summary = {
            field: response_data[field]
            for field in ("status_code", "elapsed", "json")
        }
        allure_step("响应数据", summary)

    @staticmethod
    def allure_dynamic_data(parsed_data: dict) -> None:
        """Populate allure's dynamic report fields from the parsed case data."""
        allure.dynamic.parameter(
            "case_data", {"module": parsed_data["module"], "id": parsed_data["case_id"]}
        )
        # Simple one-argument setters, applied in the original order
        for setter, field in (
            (allure.dynamic.id, "case_id"),
            (allure.dynamic.tag, "module"),
            (allure.dynamic.epic, "allure_epic"),
            (allure.dynamic.feature, "allure_feature"),
            (allure.dynamic.story, "allure_story"),
            (allure.dynamic.title, "name"),
            (allure.dynamic.description, "description"),
            (allure.dynamic.link, "url"),
        ):
            setter(parsed_data[field])
        if parsed_data["allure_severity"] is not None:
            allure.dynamic.severity(parsed_data["allure_severity"])
        files = parsed_data["files_no_parse"]
        if files is not None:
            # Each value is either a single path or a list of paths
            for entry in files.values():
                paths = entry if isinstance(entry, list) else [entry]
                for path in paths:
                    allure_attach_file(path)


# Shared module-level sender instance used throughout the framework
send_request = SendRequests()
