"""
Test orchestrator module.

Combines the API parser, API caller, validator and rule executors to run end-to-end API tests.
"""

import logging
import json
import time
import re  # re module import (used for path parameters and dynamic model names)
from typing import Dict, List, Any, Optional, Union, Tuple, Type, ForwardRef
from enum import Enum
import datetime
import datetime as dt
from uuid import UUID

from pydantic import BaseModel, Field, create_model
from pydantic.networks import EmailStr

from .input_parser.parser import InputParser, YAPIEndpoint, SwaggerEndpoint, ParsedYAPISpec, ParsedSwaggerSpec
from .api_caller.caller import APICaller, APIRequest, APIResponse
from .json_schema_validator.validator import JSONSchemaValidator
from .test_framework_core import ValidationResult, TestSeverity, APIRequestContext, APIResponseContext, BaseAPITestCase
from .test_case_registry import TestCaseRegistry

# Try to import LLMService; failure is tolerated because the LLM features are optional.
try:
    from .llm_utils.llm_service import LLMService
except ImportError:
    LLMService = None
    logging.getLogger(__name__).info("LLMService 未找到,LLM 相关功能将不可用。")

# Cache for dynamically created Pydantic models to avoid redefinition issues
_dynamic_model_cache: Dict[str, Type[BaseModel]] = {}


class ExecutedTestCaseResult:
    """Stores the result of a single APITestCase executed against an applicable endpoint."""

    class Status(str, Enum):
        """Execution status of a single test case."""
        PASSED = "通过"
        FAILED = "失败"
        ERROR = "执行错误"  # The test case code itself failed, as opposed to an API validation failure
        SKIPPED = "跳过"  # The test case was skipped because of some condition

    def __init__(self,
                 test_case_id: str,
                 test_case_name: str,
                 test_case_severity: TestSeverity,
                 status: Status,
                 validation_points: List[ValidationResult],
                 message: str = "",  # Overall message, e.g. the error text when execution failed
                 duration: float = 0.0):
        self.test_case_id = test_case_id
        self.test_case_name = test_case_name
        self.test_case_severity = test_case_severity
        self.status = status
        self.validation_points = validation_points or []
        self.message = message
        self.duration = duration  # Time spent executing this test case
        self.timestamp = datetime.datetime.now()

    def to_dict(self) -> Dict[str, Any]:
        return {
            "test_case_id": self.test_case_id,
            "test_case_name": self.test_case_name,
            "test_case_severity": self.test_case_severity.value,  # Use the enum value
            "status": self.status.value,
            "message": self.message,
            "duration_seconds": self.duration,
            "timestamp": self.timestamp.isoformat(),
            "validation_points": [vp.details if vp.details else {"passed": vp.passed, "message": vp.message} for vp in self.validation_points]
        }
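
    # Illustrative example (added; not from the original source): a passing case serialized
    # with to_dict() has roughly this shape (field values are hypothetical):
    #   {"test_case_id": "status_code_check", "test_case_name": "Status code is 2xx",
    #    "test_case_severity": "...", "status": "通过", "message": "",
    #    "duration_seconds": 0.12, "timestamp": "2024-01-01T12:00:00",
    #    "validation_points": [{"passed": True, "message": "HTTP 200 as expected"}]}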


class TestResult:  # The original TestResult, restructured into an endpoint-level execution result
    """
    Stores the overall result of running all applicable APITestCases against a single API endpoint.
    (This class takes over the role of the old TestResult, with an adjusted structure.)
    """

    class Status(str, Enum):  # The enum itself is unchanged, but it now describes the endpoint's overall state
        """Endpoint test status."""
        PASSED = "通过"  # All critical test cases passed
        FAILED = "失败"  # At least one critical test case failed
        ERROR = "错误"  # An error occurred while running the tests (test code or environment, not the API itself)
        SKIPPED = "跳过"  # Testing of the whole endpoint was skipped
        PARTIAL_SUCCESS = "部分成功"  # Some non-critical test cases failed, but the critical ones passed

    def __init__(self,
                 endpoint_id: str,  # Usually method + path
                 endpoint_name: str,  # Human-readable name/title of the API
                 overall_status: Status = Status.SKIPPED,  # Defaults to skipped; updated later from the test case results
                 start_time: Optional[datetime.datetime] = None
                 ):
        self.endpoint_id = endpoint_id
        self.endpoint_name = endpoint_name
        self.overall_status = overall_status
        self.executed_test_cases: List[ExecutedTestCaseResult] = []
        self.start_time = start_time if start_time else datetime.datetime.now()
        self.end_time: Optional[datetime.datetime] = None
        self.error_message: Optional[str] = None  # If the whole endpoint test errored, the error is recorded here

    def add_executed_test_case_result(self, result: ExecutedTestCaseResult):
        self.executed_test_cases.append(result)

    def finalize_endpoint_test(self):
        self.end_time = datetime.datetime.now()
        # Derive overall_status from the status and severity of all executed_test_cases.
        if not self.executed_test_cases and self.overall_status == TestResult.Status.SKIPPED:  # No test cases ran and the status is still the initial SKIPPED
            pass  # Keep SKIPPED
        elif any(tc.status == ExecutedTestCaseResult.Status.ERROR for tc in self.executed_test_cases):
            self.overall_status = TestResult.Status.ERROR
            # Record the message of the first ERROR encountered in self.error_message.
            first_error = next((tc.message for tc in self.executed_test_cases if tc.status == ExecutedTestCaseResult.Status.ERROR), None)
            if first_error:
                self.error_message = f"测试用例执行错误: {first_error}"
        else:
            # Collect the failed test cases.
            failed_tcs = [tc for tc in self.executed_test_cases if tc.status == ExecutedTestCaseResult.Status.FAILED]
            if not failed_tcs:
                if not self.executed_test_cases:  # Nothing was executed but the status is not SKIPPED; treat as passed (or define a "NO_CASES_RUN" status)
                    self.overall_status = TestResult.Status.PASSED
                else:
                    self.overall_status = TestResult.Status.PASSED
            else:
                # Check whether any failed test case has CRITICAL or HIGH severity.
                if any(tc.test_case_severity in [TestSeverity.CRITICAL, TestSeverity.HIGH] for tc in failed_tcs):
                    self.overall_status = TestResult.Status.FAILED
                else:  # All failures are MEDIUM, LOW or INFO
                    self.overall_status = TestResult.Status.PARTIAL_SUCCESS

        if not self.executed_test_cases and self.overall_status not in [TestResult.Status.SKIPPED, TestResult.Status.ERROR]:
            # No test cases were executed, and not because of an error or an explicit skip;
            # this is probably a configuration problem or an unexpected situation.
            self.overall_status = TestResult.Status.ERROR  # Or a more specific status
            self.error_message = "没有为该端点找到或执行任何适用的测试用例。"

    @property
    def duration(self) -> float:
        if self.start_time and self.end_time:
            return (self.end_time - self.start_time).total_seconds()
        return 0.0

    def to_dict(self) -> Dict[str, Any]:
        data = {
            "endpoint_id": self.endpoint_id,
            "endpoint_name": self.endpoint_name,
            "overall_status": self.overall_status.value,
            "duration_seconds": self.duration,
            "start_time": self.start_time.isoformat() if self.start_time else None,
            "end_time": self.end_time.isoformat() if self.end_time else None,
            "executed_test_cases": [tc.to_dict() for tc in self.executed_test_cases]
        }
        if self.error_message:
            data["error_message"] = self.error_message
        return data
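
    # Illustrative mapping (added; derived from finalize_endpoint_test above):
    #   executed test cases                                 -> overall_status
    #   all PASSED                                          -> PASSED ("通过")
    #   only MEDIUM/LOW/INFO severity cases FAILED          -> PARTIAL_SUCCESS ("部分成功")
    #   any CRITICAL/HIGH severity case FAILED              -> FAILED ("失败")
    #   any case with status ERROR                          -> ERROR ("错误")
    #   no cases executed, initial status still SKIPPED     -> SKIPPED ("跳过")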


class TestSummary:
    """Summary of the test results (updated for the new result structure)."""

    def __init__(self):
        self.total_endpoints_defined: int = 0  # Total number of endpoints defined in the YAPI/Swagger spec
        self.total_endpoints_tested: int = 0  # Number of endpoints actually tested (at least one test case executed)

        self.endpoints_passed: int = 0
        self.endpoints_failed: int = 0
        self.endpoints_partial_success: int = 0
        self.endpoints_error: int = 0
        self.endpoints_skipped: int = 0  # Whole endpoint skipped because of configuration or filters

        self.total_test_cases_applicable: int = 0  # Sum of applicable test cases across all endpoints
        self.total_test_cases_executed: int = 0  # Total number of test cases actually executed across all endpoints
        self.test_cases_passed: int = 0
        self.test_cases_failed: int = 0
        self.test_cases_error: int = 0  # The test case code itself raised an error
        self.test_cases_skipped_in_endpoint: int = 0  # Test case skipped while executing the endpoint

        self.start_time = datetime.datetime.now()
        self.end_time: Optional[datetime.datetime] = None
        self.detailed_results: List[TestResult] = []  # Stores the new TestResult (endpoint execution result) objects

    def add_endpoint_result(self, result: TestResult):  # result is now the new TestResult type
        self.detailed_results.append(result)

        if result.executed_test_cases or result.overall_status not in [TestResult.Status.SKIPPED, TestResult.Status.ERROR]:  # Only endpoints where tests were actually attempted count as tested
            if not (len(result.executed_test_cases) == 0 and result.overall_status == TestResult.Status.ERROR and result.error_message and "没有为该端点找到或执行任何适用的测试用例" in result.error_message):
                self.total_endpoints_tested += 1

        if result.overall_status == TestResult.Status.PASSED:
            self.endpoints_passed += 1
        elif result.overall_status == TestResult.Status.FAILED:
            self.endpoints_failed += 1
        elif result.overall_status == TestResult.Status.PARTIAL_SUCCESS:
            self.endpoints_partial_success += 1
        elif result.overall_status == TestResult.Status.ERROR:
            self.endpoints_error += 1
        elif result.overall_status == TestResult.Status.SKIPPED:  # Skipped at the endpoint level
            self.endpoints_skipped += 1

        for tc_result in result.executed_test_cases:
            self.total_test_cases_executed += 1  # Each APITestCase counts as one execution
            if tc_result.status == ExecutedTestCaseResult.Status.PASSED:
                self.test_cases_passed += 1
            elif tc_result.status == ExecutedTestCaseResult.Status.FAILED:
                self.test_cases_failed += 1
            elif tc_result.status == ExecutedTestCaseResult.Status.ERROR:
                self.test_cases_error += 1
            elif tc_result.status == ExecutedTestCaseResult.Status.SKIPPED:
                self.test_cases_skipped_in_endpoint += 1

    def set_total_endpoints_defined(self, count: int):
        self.total_endpoints_defined = count

    def set_total_test_cases_applicable(self, count: int):
        self.total_test_cases_applicable = count

    def finalize_summary(self):
        self.end_time = datetime.datetime.now()

    @property
    def duration(self) -> float:
        if not self.end_time:
            return 0.0
        return (self.end_time - self.start_time).total_seconds()

    @property
    def endpoint_success_rate(self) -> float:
        if self.total_endpoints_tested == 0:
            return 0.0
        # Only PASSED counts as success.
        return (self.endpoints_passed / self.total_endpoints_tested) * 100

    @property
    def test_case_success_rate(self) -> float:
        if self.total_test_cases_executed == 0:
            return 0.0
        return (self.test_cases_passed / self.total_test_cases_executed) * 100

    def to_dict(self) -> Dict[str, Any]:
        return {
            "summary_metadata": {
                "start_time": self.start_time.isoformat(),
                "end_time": self.end_time.isoformat() if self.end_time else None,
                "duration_seconds": f"{self.duration:.2f}",
            },
            "endpoint_stats": {
                "total_defined": self.total_endpoints_defined,
                "total_tested": self.total_endpoints_tested,
                "passed": self.endpoints_passed,
                "failed": self.endpoints_failed,
                "partial_success": self.endpoints_partial_success,
                "error": self.endpoints_error,
                "skipped": self.endpoints_skipped,
                "success_rate_percentage": f"{self.endpoint_success_rate:.2f}",
            },
            "test_case_stats": {
                "total_applicable": self.total_test_cases_applicable,  # Total test cases planned for execution
                "total_executed": self.total_test_cases_executed,  # Total test cases actually executed
                "passed": self.test_cases_passed,
                "failed": self.test_cases_failed,
                "error_in_execution": self.test_cases_error,
                "skipped_during_endpoint_execution": self.test_cases_skipped_in_endpoint,
                "success_rate_percentage": f"{self.test_case_success_rate:.2f}",
            },
            "detailed_results": [result.to_dict() for result in self.detailed_results]
        }

    def to_json(self, pretty=True) -> str:
        indent = 2 if pretty else None
        return json.dumps(self.to_dict(), indent=indent, ensure_ascii=False)

    def print_summary_to_console(self):  # Renamed from print_summary
        # (Implementation can be more detailed based on the new stats)
        print("\n===== 测试运行摘要 =====")
        print(f"开始时间: {self.start_time.isoformat()}")
        if self.end_time:
            print(f"结束时间: {self.end_time.isoformat()}")
        print(f"总耗时: {self.duration:.2f} 秒")

        print("\n--- 端点统计 ---")
        print(f"定义的端点总数: {self.total_endpoints_defined}")
        print(f"实际测试的端点数: {self.total_endpoints_tested}")
        print(f" 通过: {self.endpoints_passed}")
        print(f" 失败: {self.endpoints_failed}")
        print(f" 部分成功: {self.endpoints_partial_success}")
        print(f" 执行错误: {self.endpoints_error}")
        print(f" 跳过执行: {self.endpoints_skipped}")
        print(f" 端点通过率: {self.endpoint_success_rate:.2f}%")

        print("\n--- 测试用例统计 ---")
        print(f"适用的测试用例总数 (计划执行): {self.total_test_cases_applicable}")
        print(f"实际执行的测试用例总数: {self.total_test_cases_executed}")
        print(f" 通过: {self.test_cases_passed}")
        print(f" 失败: {self.test_cases_failed}")
        print(f" 执行错误 (测试用例代码问题): {self.test_cases_error}")
        print(f" 跳过 (在端点内被跳过): {self.test_cases_skipped_in_endpoint}")
        print(f" 测试用例通过率: {self.test_case_success_rate:.2f}%")

        # Optional: print a summary of failed endpoints and test cases.
        failed_endpoints = [res for res in self.detailed_results if res.overall_status == TestResult.Status.FAILED]
        if failed_endpoints:
            print("\n--- 失败的端点摘要 ---")
            for ep_res in failed_endpoints:
                print(f" 端点: {ep_res.endpoint_id} ({ep_res.endpoint_name}) - 状态: {ep_res.overall_status.value}")
                for tc_res in ep_res.executed_test_cases:
                    if tc_res.status == ExecutedTestCaseResult.Status.FAILED:
                        print(f" - 测试用例失败: {tc_res.test_case_id} ({tc_res.test_case_name})")
                        for vp in tc_res.validation_points:
                            if not vp.passed:
                                print(f" - 验证点: {vp.message}")

class APITestOrchestrator:
    """API test orchestrator."""

    def __init__(self, base_url: str,
                 custom_test_cases_dir: Optional[str] = None,  # Directory containing the new custom test cases
                 llm_api_key: Optional[str] = None,
                 llm_base_url: Optional[str] = None,
                 llm_model_name: Optional[str] = None,
                 use_llm_for_request_body: bool = False
                 ):
        """
        Initialize the API test orchestrator.

        Args:
            base_url: Base URL of the API under test.
            custom_test_cases_dir: Directory containing custom APITestCase classes. If None, no custom test cases are loaded.
            llm_api_key: API key of the LLM service.
            llm_base_url: OpenAI-compatible base URL of the LLM service.
            llm_model_name: Name of the concrete model to use.
            use_llm_for_request_body: Whether to use the LLM to generate request bodies; defaults to False.
        """
        self.base_url = base_url.rstrip('/')
        self.logger = logging.getLogger(__name__)

        # Initialize the components.
        self.parser = InputParser()
        self.api_caller = APICaller()
        self.validator = JSONSchemaValidator()  # JSON Schema validator; may be used internally by test cases

        # Initialize the (new) test case registry.
        self.test_case_registry: Optional[TestCaseRegistry] = None
        if custom_test_cases_dir:
            self.logger.info(f"初始化 TestCaseRegistry,扫描目录: {custom_test_cases_dir}")
            try:
                self.test_case_registry = TestCaseRegistry(test_cases_dir=custom_test_cases_dir)
                self.logger.info(f"TestCaseRegistry 初始化完成,发现 {len(self.test_case_registry.get_all_test_case_classes())} 个测试用例类。")
            except Exception as e:
                self.logger.error(f"初始化 TestCaseRegistry 失败: {e}", exc_info=True)
        else:
            self.logger.info("未提供 custom_test_cases_dir,不加载自定义 APITestCase。")

        # Initialize the LLM service (if configured).
        self.llm_service: Optional[LLMService] = None
        self.use_llm_for_request_body = use_llm_for_request_body

        if LLMService is None:  # Check whether the import succeeded
            self.logger.warning("LLMService 类未能导入,LLM 相关功能将完全禁用。")
            self.use_llm_for_request_body = False  # Force-disable
        elif self.use_llm_for_request_body:  # Only try to initialize when the user wants it and the class was imported
            if llm_api_key and llm_base_url and llm_model_name:
                try:
                    self.llm_service = LLMService(
                        api_key=llm_api_key,
                        base_url=llm_base_url,
                        model_name=llm_model_name
                    )
                    self.logger.info(f"LLMService 已成功初始化,模型: {llm_model_name}。将尝试使用LLM生成请求体。")
                except ValueError as ve:  # LLMService init might raise ValueError for bad args
                    self.logger.error(f"LLMService 初始化失败 (参数错误): {ve}。将回退到非LLM请求体生成。")
                    self.llm_service = None
                    self.use_llm_for_request_body = False  # Initialization failed; disable LLM usage
                except Exception as e:
                    self.logger.error(f"LLMService 初始化时发生未知错误: {e}。将回退到非LLM请求体生成。", exc_info=True)
                    self.llm_service = None
                    self.use_llm_for_request_body = False  # Initialization failed; disable LLM usage
            else:
                self.logger.warning("希望使用LLM生成请求体,但未提供完整的LLM配置 (api_key, base_url, model_name)。将回退到非LLM请求体生成。")
                self.use_llm_for_request_body = False  # Incomplete configuration; disable LLM usage
        elif not self.use_llm_for_request_body:
            self.logger.info("配置为不使用LLM生成请求体。")

    def _create_pydantic_model_from_schema(
            self,
            schema: Dict[str, Any],
            model_name: str,
            recursion_depth: int = 0
    ) -> Optional[Type[BaseModel]]:
        """
        Dynamically create a Pydantic model class from a JSON Schema dict.
        Nested objects and arrays are supported.

        Args:
            schema: JSON Schema dict.
            model_name: Name of the Pydantic model to create.
            recursion_depth: Current recursion depth, used to guard against infinite loops.

        Returns:
            A subclass of pydantic.BaseModel, or None if creation failed.
        """
        MAX_RECURSION_DEPTH = 10
        if recursion_depth > MAX_RECURSION_DEPTH:
            self.logger.error(f"创建Pydantic模型 '{model_name}' 时达到最大递归深度 {MAX_RECURSION_DEPTH}。可能存在循环引用。")
            return None

        # Sanitize the model name so that it is a valid Python identifier.
        safe_model_name = "".join(c if c.isalnum() or c == '_' else '_' for c in model_name)
        if not safe_model_name or not safe_model_name[0].isalpha() and safe_model_name[0] != '_':
            safe_model_name = f"DynamicModel_{safe_model_name}"

        # Check the cache (using the sanitized name).
        if safe_model_name in _dynamic_model_cache:
            self.logger.debug(f"从缓存返回动态模型: {safe_model_name}")
            return _dynamic_model_cache[safe_model_name]

        self.logger.debug(f"开始从Schema创建Pydantic模型: '{safe_model_name}' (原始名: '{model_name}', 深度: {recursion_depth})")

        if not isinstance(schema, dict) or schema.get('type') != 'object':
            # Safely get the type for logging if schema is not a dict or does not have 'type'.
            schema_type_for_log = schema.get('type') if isinstance(schema, dict) else type(schema).__name__
            self.logger.error(f"提供的Schema用于模型 '{safe_model_name}' 的必须是 type 'object' 且是一个字典, 实际: {schema_type_for_log}")
            return None

        properties = schema.get('properties', {})
        required_fields = set(schema.get('required', []))
        field_definitions: Dict[str, Tuple[Any, Any]] = {}

        for prop_name, prop_schema in properties.items():
            if not isinstance(prop_schema, dict):
                self.logger.warning(f"属性 '{prop_name}' 在模型 '{safe_model_name}' 中的Schema无效,已跳过。")
                continue

            python_type: Any = Any
            field_args: Dict[str, Any] = {}

            default_value: Any = ...  # Ellipsis for required fields with no default
            if 'default' in prop_schema:
                default_value = prop_schema['default']
            elif prop_name not in required_fields:
                default_value = None

            if 'description' in prop_schema:
                field_args['description'] = prop_schema['description']

            json_type = prop_schema.get('type')
            json_format = prop_schema.get('format')

            if json_type == 'object':
                nested_model_name_base = f"{safe_model_name}_{prop_name}"
                python_type = self._create_pydantic_model_from_schema(prop_schema, nested_model_name_base, recursion_depth + 1)
                if python_type is None:
                    self.logger.warning(f"无法为 '{safe_model_name}' 中的嵌套属性 '{prop_name}' 创建模型,已跳过。")
                    continue
            elif json_type == 'array':
                items_schema = prop_schema.get('items')
                if not isinstance(items_schema, dict):
                    self.logger.warning(f"数组属性 '{prop_name}' 在模型 '{safe_model_name}' 中的 'items' schema无效,已跳过。")
                    continue

                item_type: Any = Any
                item_json_type = items_schema.get('type')
                item_json_format = items_schema.get('format')

                if item_json_type == 'object':
                    item_model_name_base = f"{safe_model_name}_{prop_name}_Item"
                    item_type = self._create_pydantic_model_from_schema(items_schema, item_model_name_base, recursion_depth + 1)
                    if item_type is None:
                        self.logger.warning(f"无法为 '{safe_model_name}' 中的数组属性 '{prop_name}' 的项创建模型,已跳过。")
                        continue
                elif item_json_type == 'string':
                    if item_json_format == 'date-time': item_type = dt.datetime
                    elif item_json_format == 'date': item_type = dt.date
                    elif item_json_format == 'email': item_type = EmailStr
                    elif item_json_format == 'uuid': item_type = UUID
                    else: item_type = str
                elif item_json_type == 'integer': item_type = int
                elif item_json_type == 'number': item_type = float
                elif item_json_type == 'boolean': item_type = bool
                else:
                    self.logger.warning(f"数组 '{prop_name}' 中的项具有未知类型 '{item_json_type}',默认为 Any。")

                python_type = List[item_type]  # type: ignore
            elif json_type == 'string':
                if json_format == 'date-time': python_type = dt.datetime
                elif json_format == 'date': python_type = dt.date
                elif json_format == 'email': python_type = EmailStr
                elif json_format == 'uuid': python_type = UUID
                else: python_type = str
                if 'minLength' in prop_schema: field_args['min_length'] = prop_schema['minLength']
                if 'maxLength' in prop_schema: field_args['max_length'] = prop_schema['maxLength']
                if 'pattern' in prop_schema: field_args['pattern'] = prop_schema['pattern']
            elif json_type == 'integer':
                python_type = int
                if 'minimum' in prop_schema: field_args['ge'] = prop_schema['minimum']
                if 'maximum' in prop_schema: field_args['le'] = prop_schema['maximum']
            elif json_type == 'number':
                python_type = float
                if 'minimum' in prop_schema: field_args['ge'] = prop_schema['minimum']
                if 'maximum' in prop_schema: field_args['le'] = prop_schema['maximum']
            elif json_type == 'boolean':
                python_type = bool
            elif json_type is None and '$ref' in prop_schema:
                self.logger.warning(f"Schema $ref '{prop_schema['$ref']}' in '{safe_model_name}.{prop_name}' not yet supported. Defaulting to Any.")
                python_type = Any
            else:
                self.logger.warning(f"属性 '{prop_name}' 在模型 '{safe_model_name}' 中具有未知类型 '{json_type}',默认为 Any。")
                python_type = Any

            if 'enum' in prop_schema:
                enum_values = prop_schema['enum']
                if enum_values:
                    enum_desc = f" (Enum values: {', '.join(map(str, enum_values))})"
                    field_args['description'] = field_args.get('description', '') + enum_desc

            current_field_is_optional = prop_name not in required_fields
            if current_field_is_optional and python_type is not Any and default_value is None:
                # For Pydantic v1/v2, if a field is not required and has no other default, it's Optional.
                # The `python_type` itself might already be an `Optional` if it came from a nested optional model.
                # We only wrap with Optional if it's not already wrapped effectively.
                # A simple check: whether the type is already a Union that includes NoneType.
                if not (hasattr(python_type, '__origin__') and python_type.__origin__ is Union and type(None) in python_type.__args__):
                    python_type = Optional[python_type]

            field_definitions[prop_name] = (python_type, Field(default_value, **field_args))

        if not field_definitions:
            self.logger.warning(f"模型 '{safe_model_name}' 没有有效的字段定义,无法创建。")
            # Return a very basic BaseModel if no properties are defined but an object schema was given.
            # This might happen for an empty object schema {}.
            try:
                EmptyModel = create_model(safe_model_name, __base__=BaseModel)
                _dynamic_model_cache[safe_model_name] = EmptyModel
                self.logger.info(f"创建了一个空的动态Pydantic模型: '{safe_model_name}' (由于无属性定义)")
                return EmptyModel
            except Exception as e_empty:
                self.logger.error(f"尝试为 '{safe_model_name}' 创建空模型时失败: {e_empty}", exc_info=True)
                return None

        try:
            # ForwardRef for self-referencing models is complex; not fully handled here yet.
            # If a type in field_definitions is a string (e.g., a ForwardRef string), create_model handles it.
            DynamicModel = create_model(safe_model_name, **field_definitions, __base__=BaseModel)  # type: ignore
            _dynamic_model_cache[safe_model_name] = DynamicModel
            self.logger.info(f"成功创建/缓存了动态Pydantic模型: '{safe_model_name}'")

            # Attempt to update forward refs if any were string types that are now defined.
            # This is a simplified approach. Pydantic's update_forward_refs is usually called on the module or specific model.
            # For dynamically created models, this might need careful handling if true circular deps are common.
            # For now, we assume nested creation order mostly handles dependencies.
            # if hasattr(DynamicModel, 'update_forward_refs'):
            #     try:
            #         DynamicModel.update_forward_refs(**_dynamic_model_cache)
            #         self.logger.debug(f"Attempted to update forward refs for {safe_model_name}")
            #     except Exception as e_fwd:
            #         self.logger.warning(f"Error updating forward_refs for {safe_model_name}: {e_fwd}")

            return DynamicModel
        except Exception as e:
            self.logger.error(f"使用Pydantic create_model创建 '{safe_model_name}' 时失败: {e}", exc_info=True)
            return None
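
    # Illustrative sketch (added; model name hypothetical): given a request-body schema like
    #   {"type": "object", "required": ["name"],
    #    "properties": {"name": {"type": "string", "minLength": 1},
    #                   "age": {"type": "integer", "minimum": 0}}}
    # _create_pydantic_model_from_schema(schema, "UsersPostBody") builds roughly the equivalent of:
    #   class UsersPostBody(BaseModel):
    #       name: str = Field(..., min_length=1)
    #       age: Optional[int] = Field(None, ge=0)
    # and caches it in _dynamic_model_cache under the sanitized model name.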

    def _execute_single_test_case(
            self,
            test_case_class: Type[BaseAPITestCase],
            endpoint_spec: Union[YAPIEndpoint, SwaggerEndpoint],  # Spec of the current endpoint
            global_api_spec: Union[ParsedYAPISpec, ParsedSwaggerSpec]  # Spec of the whole API
    ) -> ExecutedTestCaseResult:
        """
        Instantiate and execute a single APITestCase.
        """
        tc_start_time = time.time()
        validation_points: List[ValidationResult] = []
        test_case_instance: Optional[BaseAPITestCase] = None

        endpoint_spec_dict: Dict[str, Any]
        if hasattr(endpoint_spec, 'to_dict') and callable(endpoint_spec.to_dict):
            endpoint_spec_dict = endpoint_spec.to_dict()
        elif isinstance(endpoint_spec, (YAPIEndpoint, SwaggerEndpoint)):
            endpoint_spec_dict = {
                "method": getattr(endpoint_spec, 'method', 'UNKNOWN_METHOD'),
                "path": getattr(endpoint_spec, 'path', 'UNKNOWN_PATH'),
                "title": getattr(endpoint_spec, 'title', getattr(endpoint_spec, 'summary', '')),
                "summary": getattr(endpoint_spec, 'summary', ''),
                "description": getattr(endpoint_spec, 'description', ''),  # Make sure the description is passed along as well
                "_original_object_type": type(endpoint_spec).__name__
            }
            if isinstance(endpoint_spec, YAPIEndpoint):
                for attr_name in dir(endpoint_spec):
                    if not attr_name.startswith('_') and not callable(getattr(endpoint_spec, attr_name)):
                        try:
                            json.dumps({attr_name: getattr(endpoint_spec, attr_name)})
                            endpoint_spec_dict[attr_name] = getattr(endpoint_spec, attr_name)
                        except (TypeError, OverflowError):
                            pass
            elif isinstance(endpoint_spec, SwaggerEndpoint):
                if hasattr(endpoint_spec, 'parameters'): endpoint_spec_dict['parameters'] = endpoint_spec.parameters
                if hasattr(endpoint_spec, 'request_body'): endpoint_spec_dict['request_body'] = endpoint_spec.request_body
                if hasattr(endpoint_spec, 'responses'): endpoint_spec_dict['responses'] = endpoint_spec.responses
        else:
            endpoint_spec_dict = endpoint_spec if isinstance(endpoint_spec, dict) else {}
            if not endpoint_spec_dict:
                self.logger.warning(f"endpoint_spec 无法转换为字典,实际类型: {type(endpoint_spec)}")

        global_api_spec_dict: Dict[str, Any]
        if hasattr(global_api_spec, 'to_dict') and callable(global_api_spec.to_dict):
            global_api_spec_dict = global_api_spec.to_dict()
        else:
            global_api_spec_dict = global_api_spec if isinstance(global_api_spec, dict) else {}
            if not global_api_spec_dict:
                self.logger.warning(f"global_api_spec 无法转换为字典,实际类型: {type(global_api_spec)}")

        try:
            test_case_instance = test_case_class(
                endpoint_spec=endpoint_spec_dict,
                global_api_spec=global_api_spec_dict
            )
            test_case_instance.logger.info(f"开始执行测试用例 '{test_case_instance.id}' for endpoint '{endpoint_spec_dict.get('method')} {endpoint_spec_dict.get('path')}'")

            # Pass test_case_instance along when calling _prepare_initial_request_data.
            initial_request_data = self._prepare_initial_request_data(endpoint_spec, test_case_instance=test_case_instance)

            current_q_params = test_case_instance.generate_query_params(initial_request_data['query_params'])
            current_headers = test_case_instance.generate_headers(initial_request_data['headers'])
            current_body = test_case_instance.generate_request_body(initial_request_data['body'])

            current_path_params = initial_request_data['path_params']

            final_url = self.base_url + endpoint_spec_dict.get('path', '')
            for p_name, p_val in current_path_params.items():
                placeholder = f"{{{p_name}}}"
                if placeholder in final_url:
                    final_url = final_url.replace(placeholder, str(p_val))
                else:
                    self.logger.warning(f"路径参数 '{p_name}' 在路径模板 '{endpoint_spec_dict.get('path')}' 中未找到占位符。")

            api_request_context = APIRequestContext(
                method=endpoint_spec_dict.get('method', 'GET').upper(),
                url=final_url,
                path_params=current_path_params,
                query_params=current_q_params,
                headers=current_headers,
                body=current_body,
                endpoint_spec=endpoint_spec_dict
            )

            validation_points.extend(test_case_instance.validate_request_url(api_request_context.url, api_request_context))
            validation_points.extend(test_case_instance.validate_request_headers(api_request_context.headers, api_request_context))
            validation_points.extend(test_case_instance.validate_request_body(api_request_context.body, api_request_context))

            critical_pre_validation_failure = False
            failure_messages = []
            for vp in validation_points:
                if not vp.passed and test_case_instance.severity in [TestSeverity.CRITICAL, TestSeverity.HIGH]:  # Check the severity of the test case for pre-validation
                    critical_pre_validation_failure = True
                    failure_messages.append(vp.message)

            if critical_pre_validation_failure:
                self.logger.warning(f"测试用例 '{test_case_instance.id}' 因请求预校验失败而中止 (TC严重级别: {test_case_instance.severity.value})。失败信息: {'; '.join(failure_messages)}")
                tc_duration = time.time() - tc_start_time
                return ExecutedTestCaseResult(
                    test_case_id=test_case_instance.id,
                    test_case_name=test_case_instance.name,
                    test_case_severity=test_case_instance.severity,
                    status=ExecutedTestCaseResult.Status.FAILED,
                    validation_points=validation_points,
                    message=f"请求预校验失败: {'; '.join(failure_messages)}",
                    duration=tc_duration
                )

            api_request_obj = APIRequest(
                method=api_request_context.method,
                url=api_request_context.url,
                params=api_request_context.query_params,
                headers=api_request_context.headers,
                json_data=api_request_context.body
            )

            response_call_start_time = time.time()
            api_response_obj = self.api_caller.call_api(api_request_obj)
            response_call_elapsed_time = time.time() - response_call_start_time

            actual_text_content: Optional[str] = None
            if hasattr(api_response_obj, 'text_content') and api_response_obj.text_content is not None:
                actual_text_content = api_response_obj.text_content
            elif api_response_obj.json_content is not None:
                if isinstance(api_response_obj.json_content, str):  # Should not happen if json_content is parsed
                    actual_text_content = api_response_obj.json_content
                else:
                    try:
                        actual_text_content = json.dumps(api_response_obj.json_content, ensure_ascii=False)
                    except TypeError:  # If json_content is not serializable (e.g. bytes)
                        actual_text_content = str(api_response_obj.json_content)

            api_response_context = APIResponseContext(
                status_code=api_response_obj.status_code,
                headers=api_response_obj.headers,
                json_content=api_response_obj.json_content,
                text_content=actual_text_content,
                elapsed_time=response_call_elapsed_time,
                original_response=getattr(api_response_obj, 'raw_response', None),  # Pass the raw response if available
                request_context=api_request_context
            )

            validation_points.extend(test_case_instance.validate_response(api_response_context, api_request_context))
            validation_points.extend(test_case_instance.check_performance(api_response_context, api_request_context))

            final_status = ExecutedTestCaseResult.Status.PASSED
            if any(not vp.passed for vp in validation_points):
                final_status = ExecutedTestCaseResult.Status.FAILED

            tc_duration = time.time() - tc_start_time
            return ExecutedTestCaseResult(
                test_case_id=test_case_instance.id,
                test_case_name=test_case_instance.name,
                test_case_severity=test_case_instance.severity,
                status=final_status,
                validation_points=validation_points,
                duration=tc_duration
            )

        except Exception as e:
            self.logger.error(f"执行测试用例 '{test_case_class.id if test_case_instance else test_case_class.__name__}' 时发生严重错误: {e}", exc_info=True)
            tc_duration = time.time() - tc_start_time
            return ExecutedTestCaseResult(
                test_case_id=test_case_instance.id if test_case_instance else test_case_class.id if hasattr(test_case_class, 'id') else "unknown_tc_id",
                test_case_name=test_case_instance.name if test_case_instance else test_case_class.name if hasattr(test_case_class, 'name') else "Unknown Test Case Name",
                test_case_severity=test_case_instance.severity if test_case_instance else TestSeverity.CRITICAL,
                status=ExecutedTestCaseResult.Status.ERROR,
                validation_points=validation_points,
                message=f"测试用例执行时发生内部错误: {str(e)}",
                duration=tc_duration
            )
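
    # Illustrative note (added): for an endpoint spec with path "/users/{user_id}" and
    # generated path_params {"user_id": "42"}, the substitution loop in
    # _execute_single_test_case yields final_url == self.base_url + "/users/42";
    # query parameters, headers and the body are then sent via APIRequest/APICaller.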

    def _prepare_initial_request_data(self, endpoint_spec: Union[YAPIEndpoint, SwaggerEndpoint], test_case_instance: Optional[BaseAPITestCase] = None) -> Dict[str, Any]:
        """
        Build an initial request data structure from the endpoint spec.
        Returns a dict with 'path_params', 'query_params', 'headers' and 'body'.
        Args:
            endpoint_spec: Spec of the current endpoint.
            test_case_instance: (Optional) test case instance currently being executed, used for fine-grained control over LLM usage.
        """
        self.logger.debug(f"Preparing initial request data for: {endpoint_spec.method} {endpoint_spec.path}")

        path_params_spec_list: List[Dict[str, Any]] = []
        query_params_spec_list: List[Dict[str, Any]] = []
        headers_spec_list: List[Dict[str, Any]] = []
        body_schema_dict: Optional[Dict[str, Any]] = None
        path_str = getattr(endpoint_spec, 'path', '')

        if isinstance(endpoint_spec, YAPIEndpoint):
            query_params_spec_list = endpoint_spec.req_query or []
            headers_spec_list = endpoint_spec.req_headers or []
            if endpoint_spec.req_body_type == 'json' and endpoint_spec.req_body_other:
                try:
                    body_schema_dict = json.loads(endpoint_spec.req_body_other) if isinstance(endpoint_spec.req_body_other, str) else endpoint_spec.req_body_other
                except json.JSONDecodeError:
                    self.logger.warning(f"YAPI req_body_other for {path_str} is not valid JSON: {endpoint_spec.req_body_other}")

        elif isinstance(endpoint_spec, SwaggerEndpoint):
            # Prefer the OpenAPI 3.0+ requestBody first.
            if endpoint_spec.request_body and 'content' in endpoint_spec.request_body:
                json_content_spec = endpoint_spec.request_body['content'].get('application/json', {})
                if 'schema' in json_content_spec:
                    body_schema_dict = json_content_spec['schema']
                    self.logger.debug("从 Swagger 3.0+ 'requestBody' 中提取到 body schema。")

            # If nothing was found in requestBody, fall back to a Swagger 2.0 in: "body" parameter.
            if not body_schema_dict and endpoint_spec.parameters:
                for param_spec in endpoint_spec.parameters:
                    if param_spec.get('in') == 'body':
                        if 'schema' in param_spec:
                            body_schema_dict = param_spec['schema']
                            self.logger.debug(f"从 Swagger 2.0 'in: body' 参数 '{param_spec.get('name')}' 中提取到 body schema (作为回退)。")
                        break  # One body parameter is enough

            # Handle path, query and header parameters (this logic must be kept here).
            if endpoint_spec.parameters:
                for param_spec in endpoint_spec.parameters:
                    param_in = param_spec.get('in')
                    if param_in == 'path':
                        path_params_spec_list.append(param_spec)
                    elif param_in == 'query':
                        query_params_spec_list.append(param_spec)
                    elif param_in == 'header':
                        headers_spec_list.append(param_spec)

        path_params_data: Dict[str, Any] = {}
        path_param_names_in_url = re.findall(r'{(.*?)}', path_str)

        for p_name in path_param_names_in_url:
            found_spec = None
            for spec in path_params_spec_list:
                if spec.get('name') == p_name:
                    found_spec = spec
                    break

            if found_spec and isinstance(found_spec, dict):
                value = found_spec.get('example')
                if value is None and found_spec.get('schema'):
                    value = self._generate_data_from_schema(found_spec['schema'])
                path_params_data[p_name] = value if value is not None else f"example_{p_name}"
            else:
                path_params_data[p_name] = f"example_{p_name}"
            self.logger.debug(f"Path param '{p_name}' generated value: {path_params_data[p_name]}")

        query_params_data: Dict[str, Any] = {}
        for q_param_spec in query_params_spec_list:
            name = q_param_spec.get('name')
            if name:
                value = q_param_spec.get('example')
                if value is None and 'value' in q_param_spec:
                    value = q_param_spec['value']

                if value is None and q_param_spec.get('schema'):
                    value = self._generate_data_from_schema(q_param_spec['schema'])
                elif value is None and q_param_spec.get('type'):
                    value = self._generate_data_from_schema({'type': q_param_spec.get('type')})

                query_params_data[name] = value if value is not None else f"example_query_{name}"

        headers_data: Dict[str, str] = {"Content-Type": "application/json", "Accept": "application/json"}
        for h_param_spec in headers_spec_list:
            name = h_param_spec.get('name')
            if name and name.lower() not in ['content-type', 'accept']:
                value = h_param_spec.get('example')
                if value is None and 'value' in h_param_spec:
                    value = h_param_spec['value']

                if value is None and h_param_spec.get('schema'):
                    value = self._generate_data_from_schema(h_param_spec['schema'])
                elif value is None and h_param_spec.get('type'):
                    value = self._generate_data_from_schema({'type': h_param_spec.get('type')})

                if value is not None:
                    headers_data[name] = str(value)
                else:
                    headers_data[name] = f"example_header_{name}"

        body_data: Optional[Any] = None
        if body_schema_dict:
            generated_by_llm = False

            # Decide whether the LLM should be tried for this particular run:
            # 1. the global switch self.use_llm_for_request_body must be True,
            # 2. the LLM service self.llm_service must be available,
            # 3. a test-case-level setting test_case_instance.use_llm_for_body (if present and not None) overrides the global setting.
            attempt_llm_globally = self.use_llm_for_request_body and self.llm_service
            should_try_llm_for_this_run = attempt_llm_globally

            if test_case_instance and hasattr(test_case_instance, 'use_llm_for_body') and test_case_instance.use_llm_for_body is not None:
                should_try_llm_for_this_run = test_case_instance.use_llm_for_body
                if should_try_llm_for_this_run and not self.llm_service:
                    self.logger.warning(f"测试用例 '{test_case_instance.id}' 配置为使用LLM,但LLM服务不可用。将回退。")
                    should_try_llm_for_this_run = False  # Even if the test case asks for it, the LLM cannot be used without the service
                self.logger.debug(f"测试用例 '{test_case_instance.id}' 的 use_llm_for_body 设置为 {test_case_instance.use_llm_for_body},最终决策是否尝试LLM: {should_try_llm_for_this_run}")
            elif not attempt_llm_globally and test_case_instance and hasattr(test_case_instance, 'use_llm_for_body') and test_case_instance.use_llm_for_body is True and not self.llm_service:
                # Special case: the global LLM switch is off, the test case wants it on, but the LLM service is unavailable.
                self.logger.warning(f"测试用例 '{test_case_instance.id}' 配置为使用LLM,但全局LLM服务不可用或未配置。将回退。")
                should_try_llm_for_this_run = False

            if should_try_llm_for_this_run:  # Only try when the final decision is True
                self.logger.debug(f"尝试使用 LLM 为端点 {endpoint_spec.method} {endpoint_spec.path} 生成请求体 (TC覆盖: {test_case_instance.use_llm_for_body if test_case_instance else 'N/A'})。")
                try:
                    # Build a slightly unique but predictable model name to help caching and debugging.
                    model_base_name = "".join(part.capitalize() for part in re.split(r'[^a-zA-Z0-9]+', endpoint_spec.path.strip('/')) if part)
                    dynamic_model_name = f"{model_base_name}{endpoint_spec.method.capitalize()}Body"
                    if not dynamic_model_name or not dynamic_model_name[0].isalpha():  # Make sure the name is valid
                        dynamic_model_name = f"Dynamic{endpoint_spec.method.capitalize()}Body_{abs(hash(endpoint_spec.path))}"

                    DynamicPydanticModel = self._create_pydantic_model_from_schema(body_schema_dict, dynamic_model_name)

                    if DynamicPydanticModel:
                        # Try to get a readable endpoint name, preferring title, then summary, then path.
                        readable_endpoint_name = getattr(endpoint_spec, 'title', None) or \
                                                 getattr(endpoint_spec, 'summary', None) or \
                                                 endpoint_spec.path

                        prompt_instr = f"请为API端点 '{readable_endpoint_name}' (方法: {endpoint_spec.method}) 生成一个符合其定义的请求体。"

                        # The endpoint description can give the LLM more detailed context.
                        ep_description = getattr(endpoint_spec, 'description', None)
                        if ep_description:
                            prompt_instr += f" API描述: {ep_description}"

                        llm_generated_body = self.llm_service.generate_parameters_from_schema(
                            pydantic_model_class=DynamicPydanticModel,
                            prompt_instructions=prompt_instr
                        )
                        if llm_generated_body is not None:
                            try:
                                # Validate the LLM output against the generated model to make sure it really follows the schema.
                                DynamicPydanticModel.model_validate(llm_generated_body)
                                body_data = llm_generated_body
                                generated_by_llm = True
                                self.logger.info(f"LLM 成功为 {endpoint_spec.method} {endpoint_spec.path} 生成并验证了请求体。")
                            except Exception as p_val_error:  # Catches Pydantic's ValidationError
                                self.logger.warning(f"LLM为 {endpoint_spec.method} {endpoint_spec.path} 生成的请求体未能通过动态Pydantic模型验证: {p_val_error}. 将回退。LLM输出: {json.dumps(llm_generated_body, indent=2, ensure_ascii=False)[:500]}...")
                        else:
                            self.logger.warning(f"LLM未能为 {endpoint_spec.method} {endpoint_spec.path} 生成请求体内容,将回退到默认方法。")
                    else:
                        self.logger.warning(f"未能从Schema动态创建Pydantic模型用于LLM请求体生成 (端点: {endpoint_spec.method} {endpoint_spec.path}),将回退。")
                except Exception as e:
                    self.logger.error(f"使用LLM生成请求体时发生错误: {e}。将回退到默认方法。", exc_info=True)

            if not generated_by_llm:
                # Only log the fallback when the LLM was actually attempted (should_try_llm_for_this_run was True)
                # but failed, or when the test case explicitly disabled the LLM.
                log_fallback = False
                if should_try_llm_for_this_run:  # The LLM should have been tried, but generated_by_llm is False, so it failed
                    log_fallback = True
                elif test_case_instance and hasattr(test_case_instance, 'use_llm_for_body') and test_case_instance.use_llm_for_body is False:
                    # The test case explicitly disabled the LLM.
                    log_fallback = True
                    self.logger.debug(f"测试用例 '{test_case_instance.id}' 明确配置不使用LLM,使用基于规则的生成方法 for {endpoint_spec.method} {endpoint_spec.path}。")

                if log_fallback and not (test_case_instance and hasattr(test_case_instance, 'use_llm_for_body') and test_case_instance.use_llm_for_body is False):  # Avoid duplicate logging
                    self.logger.debug(f"LLM生成请求体失败或未启用 (最终决策: {should_try_llm_for_this_run}), 回退到基于规则的生成方法 for {endpoint_spec.method} {endpoint_spec.path}。")
                body_data = self._generate_data_from_schema(body_schema_dict)

        return {
            "path_params": path_params_data,
            "query_params": query_params_data,
            "headers": headers_data,
            "body": body_data
        }
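
    # Illustrative sketch (added; values hypothetical): for a GET /pets/{petId} endpoint with
    # an integer "limit" query parameter and no request body, the returned structure is
    # shaped roughly like:
    #   {"path_params": {"petId": "example_petId"},
    #    "query_params": {"limit": 0},
    #    "headers": {"Content-Type": "application/json", "Accept": "application/json"},
    #    "body": None}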

    def run_test_for_endpoint(self, endpoint: Union[YAPIEndpoint, SwaggerEndpoint],
                              global_api_spec: Union[ParsedYAPISpec, ParsedSwaggerSpec]
                              ) -> TestResult:
        endpoint_id = f"{getattr(endpoint, 'method', 'GET').upper()} {getattr(endpoint, 'path', '/')}"
        endpoint_name = getattr(endpoint, 'title', '') or getattr(endpoint, 'summary', '') or endpoint_id

        self.logger.info(f"开始为端点测试: {endpoint_id} ({endpoint_name})")

        endpoint_test_result = TestResult(
            endpoint_id=endpoint_id,
            endpoint_name=endpoint_name,
        )

        if not self.test_case_registry:
            self.logger.warning(f"TestCaseRegistry 未初始化,无法为端点 '{endpoint_id}' 执行自定义测试用例。")
            endpoint_test_result.overall_status = TestResult.Status.SKIPPED
            endpoint_test_result.error_message = "TestCaseRegistry 未初始化。"
            endpoint_test_result.finalize_endpoint_test()
            return endpoint_test_result

        applicable_test_case_classes = self.test_case_registry.get_applicable_test_cases(
            endpoint_method=endpoint.method.upper(),
            endpoint_path=endpoint.path
        )

        if not applicable_test_case_classes:
            self.logger.info(f"端点 '{endpoint_id}' 没有找到适用的自定义测试用例。")
            endpoint_test_result.finalize_endpoint_test()
            return endpoint_test_result

        self.logger.info(f"端点 '{endpoint_id}' 发现了 {len(applicable_test_case_classes)} 个适用的测试用例: {[tc.id for tc in applicable_test_case_classes]}")

        for tc_class in applicable_test_case_classes:
            self.logger.debug(f"准备执行测试用例 '{tc_class.id}' for '{endpoint_id}'")
            executed_case_result = self._execute_single_test_case(
                test_case_class=tc_class,
                endpoint_spec=endpoint,
                global_api_spec=global_api_spec
            )
            endpoint_test_result.add_executed_test_case_result(executed_case_result)
            self.logger.debug(f"测试用例 '{tc_class.id}' 执行完毕,状态: {executed_case_result.status.value}")

        endpoint_test_result.finalize_endpoint_test()
        self.logger.info(f"端点 '{endpoint_id}' 测试完成,最终状态: {endpoint_test_result.overall_status.value}")

        return endpoint_test_result

    def run_tests_from_yapi(self, yapi_file_path: str,
                            categories: Optional[List[str]] = None,
                            custom_test_cases_dir: Optional[str] = None
                            ) -> TestSummary:
        if custom_test_cases_dir and (not self.test_case_registry or self.test_case_registry.test_cases_dir != custom_test_cases_dir):
            self.logger.info(f"从 run_tests_from_yapi 使用新的目录重新初始化 TestCaseRegistry: {custom_test_cases_dir}")
            try:
                self.test_case_registry = TestCaseRegistry(test_cases_dir=custom_test_cases_dir)
                self.logger.info(f"TestCaseRegistry (re)initialization complete, found {len(self.test_case_registry.get_all_test_case_classes())} test case classes.")
            except Exception as e:
                self.logger.error(f"从 run_tests_from_yapi 重新初始化 TestCaseRegistry 失败: {e}", exc_info=True)

        self.logger.info(f"从YAPI文件加载API定义: {yapi_file_path}")
        parsed_yapi = self.parser.parse_yapi_spec(yapi_file_path)
        summary = TestSummary()

        if not parsed_yapi:
            self.logger.error(f"解析YAPI文件失败: {yapi_file_path}")
            summary.finalize_summary()
            return summary

        endpoints_to_test = parsed_yapi.endpoints
        if categories:
            endpoints_to_test = [ep for ep in endpoints_to_test if ep.category_name in categories]

        summary.set_total_endpoints_defined(len(endpoints_to_test))

        total_applicable_tcs = 0
        if self.test_case_registry:
            for endpoint_spec in endpoints_to_test:
                total_applicable_tcs += len(
                    self.test_case_registry.get_applicable_test_cases(
                        endpoint_spec.method.upper(), endpoint_spec.path
                    )
                )
        summary.set_total_test_cases_applicable(total_applicable_tcs)

        for endpoint in endpoints_to_test:
            result = self.run_test_for_endpoint(endpoint, global_api_spec=parsed_yapi)
            summary.add_endpoint_result(result)

        summary.finalize_summary()
        return summary

    def run_tests_from_swagger(self, swagger_file_path: str,
                               tags: Optional[List[str]] = None,
                               custom_test_cases_dir: Optional[str] = None
                               ) -> TestSummary:
        if custom_test_cases_dir and (not self.test_case_registry or self.test_case_registry.test_cases_dir != custom_test_cases_dir):
            self.logger.info(f"从 run_tests_from_swagger 使用新的目录重新初始化 TestCaseRegistry: {custom_test_cases_dir}")
            try:
                self.test_case_registry = TestCaseRegistry(test_cases_dir=custom_test_cases_dir)
                self.logger.info(f"TestCaseRegistry (re)initialization complete, found {len(self.test_case_registry.get_all_test_case_classes())} test case classes.")
            except Exception as e:
                self.logger.error(f"从 run_tests_from_swagger 重新初始化 TestCaseRegistry 失败: {e}", exc_info=True)

        self.logger.info(f"从Swagger文件加载API定义: {swagger_file_path}")
        parsed_swagger = self.parser.parse_swagger_spec(swagger_file_path)
        summary = TestSummary()

        if not parsed_swagger:
            self.logger.error(f"解析Swagger文件失败: {swagger_file_path}")
            summary.finalize_summary()
            return summary

        endpoints_to_test = parsed_swagger.endpoints
        if tags:
            endpoints_to_test = [ep for ep in endpoints_to_test if any(tag in ep.tags for tag in tags)]

        summary.set_total_endpoints_defined(len(endpoints_to_test))

        total_applicable_tcs = 0
        if self.test_case_registry:
            for endpoint_spec in endpoints_to_test:
                total_applicable_tcs += len(
                    self.test_case_registry.get_applicable_test_cases(
                        endpoint_spec.method.upper(), endpoint_spec.path
                    )
                )
        summary.set_total_test_cases_applicable(total_applicable_tcs)

        for endpoint in endpoints_to_test:
            result = self.run_test_for_endpoint(endpoint, global_api_spec=parsed_swagger)
            summary.add_endpoint_result(result)

        summary.finalize_summary()
        return summary

    def _generate_data_from_schema(self, schema: Dict[str, Any]) -> Any:
        """
        Generate test data from a JSON Schema (largely unchanged; may be used by test cases or internally by the orchestrator).
        """
        if not schema or not isinstance(schema, dict):
            self.logger.debug(f"_generate_data_from_schema: 提供的 schema 无效或为空: {schema}")
            return None

        schema_type = schema.get('type')

        if 'example' in schema:
            return schema['example']
        if 'default' in schema:
            return schema['default']

        if schema_type == 'object':
            result = {}
            properties = schema.get('properties', {})

            for prop_name, prop_schema in properties.items():
                result[prop_name] = self._generate_data_from_schema(prop_schema)
            return result if result else {}

        elif schema_type == 'array':
            items_schema = schema.get('items', {})
            min_items = schema.get('minItems', 1 if schema.get('default') is None and schema.get('example') is None else 0)
            if min_items == 0 and (schema.get('default') == [] or schema.get('example') == []):
                return []

            num_items_to_generate = max(1, min_items)
            generated_array = [self._generate_data_from_schema(items_schema) for _ in range(num_items_to_generate)]
            return generated_array

        elif schema_type == 'string':
            string_format = schema.get('format', '')
            if 'enum' in schema and schema['enum']:
                return schema['enum'][0]
            if string_format == 'date': return '2023-01-01'
            if string_format == 'date-time': return datetime.datetime.now().isoformat()
            if string_format == 'email': return 'test@example.com'
            if string_format == 'uuid':
                import uuid
                return str(uuid.uuid4())
            return schema.get('default', schema.get('example', 'example_string'))

        elif schema_type == 'number' or schema_type == 'integer':
            val = schema.get('default', schema.get('example'))
            if val is not None: return val

            minimum = schema.get('minimum')
            maximum = schema.get('maximum')  # Not used for generation yet, but could be
            if minimum is not None: return minimum
            return 0 if schema_type == 'integer' else 0.0

        elif schema_type == 'boolean':
            return schema.get('default', schema.get('example', False))

        elif schema_type == 'null':
            return None

        self.logger.debug(f"_generate_data_from_schema: 未知或不支持的 schema 类型 '{schema_type}' for schema: {schema}")
        return None
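

# Minimal usage sketch (added for illustration; the file paths and base URL are hypothetical
# and must be adapted to a real deployment). Because the module uses relative imports, run it
# as part of its package, e.g. `python -m <package>.test_orchestrator`.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    orchestrator = APITestOrchestrator(
        base_url="http://localhost:8080",              # hypothetical API under test
        custom_test_cases_dir="./custom_test_cases",   # hypothetical directory of BaseAPITestCase subclasses
        use_llm_for_request_body=False,                # rule-based request bodies only
    )

    # Run every endpoint defined in a (hypothetical) Swagger/OpenAPI export.
    run_summary = orchestrator.run_tests_from_swagger("./specs/swagger.json")
    run_summary.print_summary_to_console()

    # The full structured report can also be persisted as JSON.
    with open("test_report.json", "w", encoding="utf-8") as report_file:
        report_file.write(run_summary.to_json(pretty=True))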