fix: CRUD flow test fixes

gongwenxin 2025-07-12 19:53:21 +08:00
parent 25789568a2
commit 6fb15f1840
33 changed files with 26857 additions and 52894 deletions

View File

@ -1,5 +1,6 @@
import uuid
import logging
import copy
from typing import List, Dict, Any, Optional, Union
from collections import defaultdict
@ -7,7 +8,9 @@ from ddms_compliance_suite.stage_framework import BaseAPIStage, StageStepDefinit
from ddms_compliance_suite.input_parser.parser import Endpoint, DMSEndpoint, ParsedAPISpec
from ddms_compliance_suite.test_framework_core import ValidationResult, APIResponseContext
from ddms_compliance_suite.api_caller.caller import APICaller
from ddms_compliance_suite.utils.data_generator import DataGenerator
from ddms_compliance_suite.input_parser.parser import BaseEndpoint, YAPIEndpoint, SwaggerEndpoint
# --- Helper function to get value from nested dict ---
def _get_value_by_path(data: Optional[Dict[str, Any]], path_str: Optional[str]):
if data is None or not path_str:
@ -56,6 +59,26 @@ def validate_resource_details(response_ctx: APIResponseContext, stage_ctx: dict)
return ValidationResult(passed=True, message="Resource details successfully validated against payload.")
def validate_resource_details_after_update(response_ctx: APIResponseContext, stage_ctx: dict) -> ValidationResult:
"""Validates the details of a resource against the *update* payload from the context."""
pk_name = stage_ctx.get("pk_name")
pk_value = stage_ctx.get("pk_value")
payload = stage_ctx.get("update_payload") # Use the specific update payload
response_data = _get_value_by_path(response_ctx.json_content, "data")
if not isinstance(response_data, dict):
return ValidationResult(passed=False, message=f"Response 'data' field is not a JSON object. Got: {response_data}")
# Check if all fields from the payload exist in the response and match
for key, expected_value in payload.items():
if key not in response_data:
return ValidationResult(passed=False, message=f"Field '{key}' from update_payload not found in response.")
if response_data[key] != expected_value:
return ValidationResult(passed=False, message=f"Field '{key}' mismatch. Expected '{expected_value}' from update_payload, got '{response_data[key]}'.")
return ValidationResult(passed=True, message="Resource details successfully validated against update_payload.")
def validate_resource_is_deleted(response_ctx: APIResponseContext, stage_ctx: dict) -> ValidationResult:
"""Checks if the resource is no longer in the list response."""
pk_name = stage_ctx.get("pk_name")
@ -139,21 +162,51 @@ class DmsCrudScenarioStage(BaseAPIStage):
self.logger.info(f"Setting up before_stage for scenario: {list(current_scenario.keys())}")
# Get the 'create' endpoint to determine the primary key
create_op = current_scenario['create']
# This is a bit of a hack. The primary key should ideally be discoverable
# from the schema without relying on request body definitions.
# Based on our parser, the delete request body contains the PK.
delete_op = current_scenario['delete']
pk_name = next(iter(delete_op.request_body['content']['application/json']['schema']['properties']['data']['items']['properties']))
create_op: DMSEndpoint = current_scenario.get('create')
if not create_op or not isinstance(create_op, DMSEndpoint):
raise Exception(f"Could not find a valid DMS 'create' operation for scenario.")
# The primary key name is now passed by the parser.
pk_name = create_op.model_pk_name
if not pk_name:
# Fallback for safety, though the parser should always provide it.
self.logger.warning("Could not find 'model_pk_name' on create endpoint. Falling back to inspecting delete request.")
delete_op = current_scenario['delete']
pk_name = next(iter(delete_op.request_body['content']['application/json']['schema']['properties']['data']['items']['properties']))
pk_value = str(uuid.uuid4())
# Prepare a sample payload. We'd need a proper data generator for this.
# For now, let's create a placeholder. The test framework should handle generation.
# Let's assume the framework's parameter generator will handle the real payload.
# We just need to provide the PK.
create_payload = { pk_name: pk_value, "description": "test-entry-from-scenario" }
update_payload = { pk_name: pk_value, "description": "updated-test-entry-from-scenario" }
# Use the test framework's data generator to build a complete, valid request payload
# Get the request-body schema for the create operation
create_schema = None
if create_op.request_body and 'content' in create_op.request_body:
content = create_op.request_body['content']
if 'application/json' in content and 'schema' in content['application/json']:
create_schema = content['application/json']['schema']
# Generate the create request payload
data_generator = DataGenerator(logger_param=self.logger)
if create_schema:
# Generate data based on the schema
generated_data = data_generator.generate_data_from_schema(create_schema)
# Ensure the primary-key field exists and is set correctly
if isinstance(generated_data, dict) and 'data' in generated_data and isinstance(generated_data['data'], list) and len(generated_data['data']) > 0:
generated_data['data'][0][pk_name] = pk_value
create_payload = generated_data['data'][0]
else:
# If the generated data structure is not as expected, use a minimal payload
self.logger.warning("Generated data structure was not as expected. Falling back to a minimal payload.")
create_payload = { pk_name: pk_value }
else:
# If there is no schema, use a minimal payload
self.logger.warning("No create schema found. Falling back to a minimal payload.")
create_payload = { pk_name: pk_value }
# The update payload is based on the create payload, with the description field changed
update_payload = copy.deepcopy(create_payload)
update_payload["description"] = "updated-test-entry-from-scenario"
# Populate stage context
stage_context["pk_name"] = pk_name
@ -161,6 +214,10 @@ class DmsCrudScenarioStage(BaseAPIStage):
stage_context["current_payload"] = create_payload
stage_context["update_payload"] = update_payload
stage_context["scenario_endpoints"] = current_scenario
# Pre-build the delete body to avoid key-templating issues later
# Per user request, the delete body should be an array of PK values
stage_context["delete_request_body"] = {"data": [pk_value]}
def get_api_spec_for_operation(self, lookup_key: str, *args, **kwargs) -> Optional[Endpoint]:
"""
@ -201,8 +258,6 @@ class DmsCrudScenarioStage(BaseAPIStage):
endpoint_spec_lookup_key="UPDATE",
request_overrides={
"body": {"data": ["{{stage_context.update_payload}}"]},
# The context needs to be updated for the next validation step
"context_updates": {"current_payload": "{{stage_context.update_payload}}"}
},
response_assertions=[validate_response_is_true]
),
@ -212,13 +267,13 @@ class DmsCrudScenarioStage(BaseAPIStage):
request_overrides={
"path_params": {"id": "{{stage_context.pk_value}}"}
},
response_assertions=[validate_resource_details]
response_assertions=[validate_resource_details_after_update]
),
StageStepDefinition(
name="Step 5: Delete Resource",
endpoint_spec_lookup_key="DELETE",
request_overrides={
"body": {"data": [{"{{stage_context.pk_name}}": "{{stage_context.pk_value}}"}]}
"body": "{{stage_context.delete_request_body}}"
},
response_assertions=[validate_response_is_true]
),

View File

@ -385,7 +385,8 @@ class DMSEndpoint(BaseEndpoint):
category_name: Optional[str] = None,
raw_record: Optional[Dict[str, Any]] = None,
test_mode: str = 'standalone',
operation_id: Optional[str] = None):
operation_id: Optional[str] = None,
model_pk_name: Optional[str] = None):
super().__init__(method=method.upper(), path=path)
self.title = title
self.request_body = request_body
@ -395,6 +396,7 @@ class DMSEndpoint(BaseEndpoint):
self._raw_record = raw_record
self.test_mode = test_mode
self.operation_id = operation_id or f"{self.method.lower()}_{self.category_name or 'dms'}_{title.replace(' ', '_')}"
self.model_pk_name = model_pk_name
def to_dict(self) -> Dict[str, Any]:
"""Converts the DMS endpoint data into a standardized OpenAPI-like dictionary."""
@ -411,7 +413,8 @@ class DMSEndpoint(BaseEndpoint):
"responses": self.responses,
"_source_format": "dms",
"_dms_raw_record": self._raw_record,
"_test_mode": self.test_mode
"_test_mode": self.test_mode,
"_dms_model_pk_name": self.model_pk_name
}
return endpoint_dict
@ -599,12 +602,25 @@ class InputParser:
continue
model_data = model_schema_response['data']
model = model_data.get('model')
# Based on user feedback, model_data itself is the schema, not model_data['model']
model = model_data
if not model or 'properties' not in model or not model['properties']:
self.logger.warning(f"Skipping API '{name}' due to missing or invalid 'model' object in schema.")
continue
pk_name = next(iter(model['properties']), None)
pk_name = None
# Find primary key by looking for top-level "identityId" array.
identity_id_list = model.get("identityId")
if isinstance(identity_id_list, list) and len(identity_id_list) > 0:
pk_name = identity_id_list[0]
self.logger.info(f"Found identityId property '{pk_name}' for model '{name}'.")
# Fallback to original behavior if no identityId found
if not pk_name:
pk_name = next(iter(model['properties']), None)
if pk_name:
self.logger.warning(f"No 'identityId' array found for model '{name}'. Falling back to using the first property '{pk_name}' as the primary key.")
if not pk_name:
self.logger.warning(f"Skipping API '{name}' because no properties found in model to identify a primary key.")
continue
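# A hedged sketch of the model shape this lookup expects (names hypothetical):
#   model = {
#       "identityId": ["wellId"],
#       "properties": {"wellId": {"type": "string"}, "description": {"type": "string"}},
#   }
# identityId wins here, so pk_name == "wellId"; without it, the first key of
# 'properties' is used as a fallback.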
@ -620,27 +636,27 @@ class InputParser:
# Create Endpoint (POST)
create_path = f"/api/dms/{dms_instance_code}/v1/{name}"
create_request_body_schema = {"type": "object", "properties": {"version": {"type": "string", "example": version}, "act": {"type": "integer", "example": 0}, "data": {"type": "array", "items": model}}, "required": ["data"]}
endpoints.append(DMSEndpoint(path=create_path, method='post', title=f"Create {name}", request_body={'content': {'application/json': {'schema': create_request_body_schema}}}, responses=success_response, test_mode='standalone', operation_id=f"create_{name}", category_name=category_name, raw_record=item))
endpoints.append(DMSEndpoint(path=create_path, method='post', title=f"Create {name}", request_body={'content': {'application/json': {'schema': create_request_body_schema}}}, responses=success_response, test_mode='scenario_only', operation_id=f"create_{name}", category_name=category_name, raw_record=item, model_pk_name=pk_name))
# List Endpoint (POST)
list_path = f"/api/dms/{dms_instance_code}/v1/{name}/{version}"
list_response_schema = {"type": "object", "properties": {"code": {"type": "integer"}, "message": {"type": "string"}, "data": {"type": "array", "items": model}}}
endpoints.append(DMSEndpoint(path=list_path, method='post', title=f"List {name}", request_body={'content': {'application/json': {'schema': {}}}}, responses={'200': {'description': 'Successful Operation', 'content': {'application/json': {'schema': list_response_schema}}}}, test_mode='scenario_only', operation_id=f"list_{name}", category_name=category_name, raw_record=item))
endpoints.append(DMSEndpoint(path=list_path, method='post', title=f"List {name}", request_body={'content': {'application/json': {'schema': {}}}}, responses={'200': {'description': 'Successful Operation', 'content': {'application/json': {'schema': list_response_schema}}}}, test_mode='standalone', operation_id=f"list_{name}", category_name=category_name, raw_record=item, model_pk_name=pk_name))
# Read Endpoint (GET)
read_path = f"/api/dms/{dms_instance_code}/v1/{name}/{version}/{{id}}"
read_response_schema = {"type": "object", "properties": {"code": {"type": "integer"}, "message": {"type": "string"}, "data": model}}
read_parameters = [{'name': 'id', 'in': 'path', 'required': True, 'description': f'The ID of the {name}, maps to {pk_name}', 'schema': pk_schema}]
endpoints.append(DMSEndpoint(path=read_path, method='get', title=f"Read {name}", request_body=None, responses={'200': {'description': 'Successful Operation', 'content': {'application/json': {'schema': read_response_schema}}}}, parameters=read_parameters, test_mode='scenario_only', operation_id=f"read_{name}", category_name=category_name, raw_record=item))
endpoints.append(DMSEndpoint(path=read_path, method='get', title=f"Read {name}", request_body=None, responses={'200': {'description': 'Successful Operation', 'content': {'application/json': {'schema': read_response_schema}}}}, parameters=read_parameters, test_mode='scenario_only', operation_id=f"read_{name}", category_name=category_name, raw_record=item, model_pk_name=pk_name))
# Update Endpoint (PUT)
update_path = f"/api/dms/{dms_instance_code}/v1/{name}"
endpoints.append(DMSEndpoint(path=update_path, method='put', title=f"Update {name}", request_body={'content': {'application/json': {'schema': create_request_body_schema}}}, responses=success_response, test_mode='scenario_only', operation_id=f"update_{name}", category_name=category_name, raw_record=item))
endpoints.append(DMSEndpoint(path=update_path, method='put', title=f"Update {name}", request_body={'content': {'application/json': {'schema': create_request_body_schema}}}, responses=success_response, test_mode='scenario_only', operation_id=f"update_{name}", category_name=category_name, raw_record=item, model_pk_name=pk_name))
# Delete Endpoint (DELETE)
delete_path = f"/api/dms/{dms_instance_code}/v1/{name}"
delete_request_body_schema = {"type": "object", "properties": {"version": {"type": "string", "example": version}, "data": {"type": "array", "items": {"type": "object", "properties": { pk_name: pk_schema }, "required": [pk_name]}}}, "required": ["data"]}
endpoints.append(DMSEndpoint(path=delete_path, method='delete', title=f"Delete {name}", request_body={'content': {'application/json': {'schema': delete_request_body_schema}}}, responses=success_response, test_mode='scenario_only', operation_id=f"delete_{name}", category_name=category_name, raw_record=item))
endpoints.append(DMSEndpoint(path=delete_path, method='delete', title=f"Delete {name}", request_body={'content': {'application/json': {'schema': delete_request_body_schema}}}, responses=success_response, test_mode='scenario_only', operation_id=f"delete_{name}", category_name=category_name, raw_record=item, model_pk_name=pk_name))
# The 'spec' for ParsedDMSSpec should represent the whole document.
# We can construct a dictionary holding all the raw data we fetched.

View File

@ -7,6 +7,7 @@ from typing import List, Dict, Any, Callable, Optional, Union
from enum import Enum
from datetime import datetime
from dataclasses import dataclass, field
import copy # Added for deepcopy
# Add Pydantic BaseModel for APIOperationSpec
from pydantic import BaseModel
@ -14,7 +15,8 @@ from pydantic import BaseModel
from .test_framework_core import ValidationResult, APIResponseContext
from .api_caller.caller import APICallDetail
# Import ParsedAPISpec and endpoint types for type hinting and usage
from .input_parser.parser import ParsedAPISpec, BaseEndpoint, YAPIEndpoint, SwaggerEndpoint, DMSEndpoint
from .input_parser.parser import ParsedAPISpec, YAPIEndpoint, SwaggerEndpoint, BaseEndpoint, DMSEndpoint
from .utils.context_utils import serialize_context_recursively
# Try importing from .llm_utils; if that fails, LLMService is None
try:
@ -89,6 +91,18 @@ class ExecutedStageStepResult:
self.context_after_step = context_after_step if context_after_step is not None else {}
self.timestamp: datetime = datetime.now()
def finalize_step_result(self):
"""如果步骤的主消息为空,则根据验证点结果更新它。"""
if not self.message and hasattr(self, 'validation_points') and self.validation_points:
# Note: self.validation_points is a list of DICTS here, not objects.
# We need to access dict keys.
failed_vp_messages = [
vp.get("message") for vp in self.validation_points
if isinstance(vp, dict) and not vp.get("passed") and vp.get("message")
]
if failed_vp_messages:
self.message = "; ".join(failed_vp_messages)
def to_dict(self) -> Dict[str, Any]:
return {
"step_name": self.step_name,
@ -99,10 +113,10 @@ class ExecutedStageStepResult:
"resolved_endpoint": self.resolved_endpoint,
"duration_seconds": f"{self.duration_seconds:.4f}",
"timestamp": self.timestamp.isoformat(),
"request_details": self.request_details,
"request_details": serialize_context_recursively(self.request_details),
"api_call_details": self.api_call_details,
"validation_points": self.validation_points,
"context_snapshot_after_step": self.context_after_step # 可以选择是否完整记录上下文
"context_snapshot_after_step": serialize_context_recursively(self.context_after_step) # 可以选择是否完整记录上下文
}
@ -119,15 +133,17 @@ class ExecutedStageResult:
stage_id: str,
stage_name: str,
description: Optional[str] = None,
api_group_metadata: Optional[Dict[str, Any]] = None, # metadata of the API group this stage applies to
tags: Optional[List[str]] = None, # <-- Added tags parameter
api_group_metadata: Optional[Dict[str, Any]] = None,
apis_in_group: Optional[List[BaseEndpoint]] = None,
tags: Optional[List[str]] = None,
overall_status: Status = Status.PENDING,
message: str = ""):
self.stage_id = stage_id
self.stage_name = stage_name
self.description = description
self.api_group_metadata = api_group_metadata
self.tags = tags if tags is not None else [] # <-- Store tags
self.apis_in_group = apis_in_group if apis_in_group is not None else []
self.tags = tags if tags is not None else []
self.overall_status = overall_status
self.message = message
self.executed_steps: List[ExecutedStageStepResult] = []
@ -139,24 +155,31 @@ class ExecutedStageResult:
def add_step_result(self, step_result: ExecutedStageStepResult):
self.executed_steps.append(step_result)
def finalize_stage_result(self, final_context: Optional[Dict[str, Any]] = None): # Renamed from finalize_result
def finalize_stage_result(self, final_context: Optional[Dict[str, Any]] = None):
self.end_time = datetime.now()
self.duration = (self.end_time - self.start_time).total_seconds()
self.final_context_snapshot = final_context if final_context is not None else {}
self.final_context_snapshot = serialize_context_recursively(final_context) if final_context is not None else {}
# Final-status determination can live here, or the orchestrator can set it after the call
if not self.executed_steps and self.overall_status == ExecutedStageResult.Status.PENDING:
self.overall_status = ExecutedStageResult.Status.SKIPPED # skip when no steps ran and status was not set elsewhere
self.overall_status = ExecutedStageResult.Status.SKIPPED
self.message = self.message or "阶段中没有步骤被执行或所有步骤被跳过。"
# More elaborate final-status determination may require orchestrator logic
def to_dict(self) -> Dict[str, Any]:
serializable_apis = []
if self.apis_in_group:
for endpoint in self.apis_in_group:
if hasattr(endpoint, 'to_dict') and callable(endpoint.to_dict):
serializable_apis.append(endpoint.to_dict())
else:
serializable_apis.append(str(endpoint))
return {
"stage_id": self.stage_id,
"stage_name": self.stage_name,
"description": self.description,
"api_group_metadata": self.api_group_metadata,
"tags": self.tags, # <-- Added tags to output
"api_group_metadata": serialize_context_recursively(self.api_group_metadata),
"apis_in_group": serializable_apis,
"tags": self.tags,
"overall_status": self.overall_status.value,
"message": self.message,
"duration_seconds": f"{self.duration:.2f}",
@ -199,7 +222,7 @@ class BaseAPIStage:
def __init__(self,
api_group_metadata: Dict[str, Any],
apis_in_group: List[BaseEndpoint], # MODIFIED TYPE HINT to use BaseEndpoint
apis_in_group: List[Union[YAPIEndpoint, SwaggerEndpoint]], # MODIFIED TYPE HINT
llm_service: Optional[LLMService] = None,
global_api_spec: Optional[ParsedAPISpec] = None, # <-- updated type annotation
operation_keywords: Optional[Dict[str, List[str]]] = None):
@ -523,205 +546,3 @@ class BaseAPIStage:
def after_step(self, step: StageStepDefinition, step_result: 'ExecutedStageStepResult', stage_context: Dict[str, Any], global_api_spec: Optional[ParsedAPISpec] = None, api_group_name: Optional[str] = None):
"""在每个步骤执行之后调用。"""
self.logger.debug(f"Executing after_step for step '{step.name}' in stage '{self.id}'")
class ExecutedStageStepResult:
"""存储单个API测试阶段步骤执行后的结果。"""
class Status(str, Enum):
PASSED = "通过"
FAILED = "失败"
ERROR = "执行错误"
SKIPPED = "跳过"
PENDING = "处理中"
def __init__(self,
step_name: str,
status: Status,
message: str = "",
validation_points: Optional[List[ValidationResult]] = None,
duration: float = 0.0,
api_call_detail: Optional[APICallDetail] = None,
api_operation_spec: Optional[BaseEndpoint] = None, # <-- added
extracted_outputs: Optional[Dict[str, Any]] = None,
description: Optional[str] = None,
lookup_key: Optional[Union[str, Dict[str, str]]] = None,
resolved_endpoint: Optional[str] = None,
request_details: Optional[Dict[str, Any]] = None,
context_after_step: Optional[Dict[str, Any]] = None
):
self.step_name = step_name
self.status = status
self.message = message
self.validation_points = validation_points or []
self.duration = duration
self.timestamp = time.time()
self.api_call_detail = api_call_detail
self.api_operation_spec = api_operation_spec # <-- added
self.extracted_outputs = extracted_outputs or {}
self.description = description
self.lookup_key = lookup_key
self.resolved_endpoint = resolved_endpoint
self.request_details = request_details
self.context_after_step = context_after_step
def finalize_step_result(self):
"""如果步骤的主消息为空,则根据验证点结果更新它。"""
if not self.message and hasattr(self, 'validation_points') and self.validation_points:
failed_vp_messages = [
vp.message for vp in self.validation_points
if isinstance(vp, ValidationResult) and not vp.passed and vp.message
]
if failed_vp_messages:
self.message = "; ".join(failed_vp_messages)
def to_dict(self) -> Dict[str, Any]:
vps_details_for_output = []
if self.validation_points: # self.validation_points is List[ValidationResult]
for vp_obj in self.validation_points: # vp_obj is a ValidationResult object
if not isinstance(vp_obj, ValidationResult): # Defensive check
logger.warning(f"Step '{self.step_name}': Found non-ValidationResult item in validation_points: {type(vp_obj)}")
continue
processed_detail = {"passed": vp_obj.passed, "message": vp_obj.message}
details_content = getattr(vp_obj, 'details', None)
if details_content and isinstance(details_content, dict):
try:
# Take only key fields, or ensure serializability
if "status_code" in details_content:
processed_detail["status_code_in_validation"] = details_content["status_code"]
# Other serializable fields from details_content could be added here
except TypeError:
# If details cannot be fully serialized, update the message
processed_detail["message"] = f"{vp_obj.message} (Details not fully serializable)"
elif details_content: # if details exists but is not a dict
processed_detail["details_type"] = type(details_content).__name__
vps_details_for_output.append(processed_detail)
# If finalize_step_result has already been called, self.message may already be updated;
# otherwise, the logic here tries to consolidate again (if self.message is still empty)
current_message = self.message
if not current_message and self.validation_points:
failed_vp_messages = [
vp_obj.message for vp_obj in self.validation_points
if isinstance(vp_obj, ValidationResult) and not vp_obj.passed and vp_obj.message
]
if failed_vp_messages:
current_message = "; ".join(failed_vp_messages)
api_op_dict = self.api_operation_spec # Changed from self.api_operation
if isinstance(api_op_dict, BaseEndpoint):
api_op_dict = api_op_dict.model_dump()
return {
"step_name": self.step_name,
"description": self.description,
"lookup_key": str(self.lookup_key) if self.lookup_key is not None else None,
"resolved_endpoint": self.resolved_endpoint,
"status": self.status.value,
"message": current_message, # 使用当前或整合后的消息
"duration_seconds": f"{self.duration:.4f}",
"timestamp": time.strftime('%Y-%m-%dT%H:%M:%S%z', time.localtime(self.timestamp)),
"validation_points": vps_details_for_output,
"api_call_curl": getattr(getattr(self, 'api_call_detail', None), 'curl_command', 'N/A'),
"api_operation_spec": api_op_dict, # <--- 添加此行
"request_details": self.request_details,
"extracted_outputs": {k: str(v)[:200] + '...' if isinstance(v, (str, bytes)) and len(v) > 200 else v
for k, v in self.extracted_outputs.items()},
"context_after_step_summary": {
k: str(v)[:50] + '...' if isinstance(v, str) and len(v) > 50 else (
type(v).__name__ if not isinstance(v, (str, int, float, bool, list, dict, type(None))) else v
) for k,v in (self.context_after_step or {}).items()
}
}
class ExecutedStageResult:
"""存储整个API测试阶段执行后的结果。"""
class Status(str, Enum):
PASSED = "通过"
FAILED = "失败"
SKIPPED = "跳过" # 如果整个阶段因is_applicable_to_api_group返回False或其他原因被跳过
PENDING = "处理中" # 新增状态:表示阶段正在处理中
ERROR = "执行错误" # <--- 新增 ERROR 状态
def __init__(self,
stage_id: str,
stage_name: str,
api_group_metadata: Optional[Dict[str, Any]] = None,
description: Optional[str] = None, # <-- added description parameter
tags: Optional[List[str]] = None, # <-- Added tags parameter
overall_status: Status = Status.PENDING,
message: str = ""):
self.stage_id = stage_id
self.stage_name = stage_name
self.description = description
self.api_group_metadata = api_group_metadata
self.tags = tags if tags is not None else [] # <-- Store tags
self.overall_status = overall_status
self.message = message
self.executed_steps: List[ExecutedStageStepResult] = [] # ensure this starts as an empty list
self.final_context: Optional[Dict[str, Any]] = None
# executed_steps_count should be a property, or be computed in to_dict
self.start_time: datetime = datetime.now() # Corrected
self.end_time: Optional[datetime] = None # Corrected type hint
self.duration: float = 0.0
def add_step_result(self, step_result: ExecutedStageStepResult):
self.executed_steps.append(step_result)
def finalize_stage_result(self, final_context: Optional[Dict[str, Any]] = None):
self.end_time = datetime.now() # Corrected
self.duration = (self.end_time - self.start_time).total_seconds()
self.final_context = final_context
if not self.executed_steps and self.overall_status == ExecutedStageResult.Status.SKIPPED:
# If no steps ran and the status is the initial SKIPPED, keep it
if not self.message: self.message = "此阶段没有执行任何步骤,被跳过。"
elif any(step.status == ExecutedStageResult.Status.ERROR for step in self.executed_steps):
self.overall_status = ExecutedStageResult.Status.FAILED # a step execution error fails the stage
if not self.message: self.message = "一个或多个步骤执行时发生内部错误。"
elif any(step.status == ExecutedStageResult.Status.FAILED for step in self.executed_steps):
self.overall_status = ExecutedStageResult.Status.FAILED
if not self.message: self.message = "一个或多个步骤验证失败。"
elif all(step.status == ExecutedStageResult.Status.SKIPPED for step in self.executed_steps) and self.executed_steps:
self.overall_status = ExecutedStageResult.Status.SKIPPED # all steps were skipped
if not self.message: self.message = "所有步骤均被跳过。"
elif all(step.status == ExecutedStageResult.Status.PASSED or step.status == ExecutedStageResult.Status.SKIPPED for step in self.executed_steps) and \
any(step.status == ExecutedStageResult.Status.PASSED for step in self.executed_steps) :
self.overall_status = ExecutedStageResult.Status.PASSED # at least one step passed; the rest passed or were skipped
if not self.message: self.message = "阶段执行成功。"
else: # other cases: e.g. no steps but status not SKIPPED (should not happen), or an unhandled mix of states
if self.executed_steps: # steps ran, but with no clear pass or fail
self.overall_status = ExecutedStageResult.Status.FAILED
self.message = self.message or "阶段执行结果不明确,默认标记为失败。"
# else: status stays as the initial SKIPPED; the message should already have been set
def to_dict(self) -> Dict[str, Any]:
# Process final_context to avoid emitting oversized or sensitive data directly
processed_context = {}
if self.final_context:
for k, v in self.final_context.items():
if isinstance(v, (str, bytes)) and len(v) > 200: # truncate long strings
processed_context[k] = str(v)[:200] + '...'
elif isinstance(v, (dict, list)): # for dicts and lists, show only the type and size
processed_context[k] = f"Type: {type(v).__name__}, Keys/Count: {len(v)}"
else:
processed_context[k] = v
return {
"stage_id": self.stage_id,
"stage_name": self.stage_name,
"description": self.description, # <--- 添加 description 到输出
"api_group_name": self.api_group_metadata.get("name", "N/A"),
"tags": self.tags, # <-- Added tags to output
"overall_status": self.overall_status.value,
"duration_seconds": f"{self.duration:.2f}",
"start_time": self.start_time.strftime('%Y-%m-%dT%H:%M:%S%z'),
"end_time": self.end_time.strftime('%Y-%m-%dT%H:%M:%S%z') if self.end_time else None,
"message": self.message,
"executed_steps_count": len(self.executed_steps),
"executed_steps": [step.to_dict() for step in self.executed_steps],
"final_stage_context_summary": processed_context # 可选: 输出处理后的上下文摘要
}

View File

@ -3,6 +3,7 @@ from typing import Any, Dict, Optional, List, Tuple, Type, Union
import logging
from .utils import schema_utils
from pydantic import BaseModel, Field
from .utils.context_utils import serialize_context_recursively
class TestSeverity(Enum):
"""测试用例的严重程度"""
@ -28,11 +29,13 @@ class ValidationResult:
self.details = details or {} # extra details, e.g. actual and expected values
def to_dict(self) -> Dict[str, Any]:
"""将 ValidationResult 对象转换为字典。"""
"""将 ValidationResult 对象转换为字典,并递归序列化其 details 字段。"""
# Recursively serialize details to handle nested objects like DMSEndpoint
serialized_details = serialize_context_recursively(self.details) if self.details else {}
return {
"passed": self.passed,
"message": self.message,
"details": self.details
"details": serialized_details
}
def __repr__(self):

View File

@ -24,7 +24,8 @@ from pydantic import BaseModel, Field, create_model, HttpUrl # Added HttpUrl for
from pydantic.networks import EmailStr
from pydantic.types import Literal # Explicitly import Literal
from .input_parser.parser import InputParser, BaseEndpoint, YAPIEndpoint, SwaggerEndpoint, ParsedYAPISpec, ParsedSwaggerSpec, ParsedAPISpec, DMSEndpoint, ParsedDMSSpec
from .input_parser.parser import InputParser, YAPIEndpoint, SwaggerEndpoint, ParsedYAPISpec, ParsedSwaggerSpec, ParsedAPISpec, DMSEndpoint, ParsedDMSSpec
from .input_parser.parser import BaseEndpoint
from .api_caller.caller import APICaller, APIRequest, APIResponse, APICallDetail # Ensure APICallDetail is imported
from .json_schema_validator.validator import JSONSchemaValidator
from .test_framework_core import ValidationResult, TestSeverity, APIRequestContext, APIResponseContext, BaseAPITestCase
@ -74,20 +75,26 @@ class ExecutedTestCaseResult:
self.timestamp = datetime.datetime.now()
def to_dict(self) -> Dict[str, Any]:
message=""
if self.message:
message = self.message
message = self.message
if not message and self.validation_points:
# Revert to dictionary access since self.validation_points contains dicts
failed_messages = [vp.get("message") for vp in self.validation_points if isinstance(vp, dict) and not vp.get("passed") and vp.get("message")]
if failed_messages:
message = "; ".join(failed_messages)
else:
message= ";".join([vp.message for vp in self.validation_points])
# Fallback message if no specific failure messages are available
message = "One or more validation points failed without a detailed message." if self.status == self.Status.FAILED else "All validation points passed."
return {
"test_case_id": self.test_case_id,
"test_case_name": self.test_case_name,
"test_case_severity": self.test_case_severity.name, # 使用枚举名称
"test_case_severity": self.test_case_severity.name,
"status": self.status.value,
"message": message,
"duration_seconds": self.duration,
"timestamp": self.timestamp.isoformat(),
"validation_points": [vp.details if vp.details else {"passed": vp.passed, "message": vp.message} for vp in self.validation_points]
# The list already contains dictionaries, so just return it
"validation_points": self.validation_points
}
class TestResult: # the original TestResult was refactored into EndpointExecutionResult
@ -342,14 +349,8 @@ class TestSummary:
return data
def to_json(self, pretty=True) -> str:
def custom_serializer(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, BaseEndpoint):
return obj.to_dict()
raise TypeError(f"Object of type {type(obj).__name__} is not JSON serializable")
indent = 4 if pretty else None
return json.dumps(self.to_dict(), indent=indent, ensure_ascii=False, default=custom_serializer)
indent = 2 if pretty else None
return json.dumps(self.to_dict(), indent=indent, ensure_ascii=False)
def print_summary_to_console(self): # Renamed from print_summary
# (Implementation can be more detailed based on the new stats)
@ -1076,7 +1077,7 @@ class APITestOrchestrator:
test_case_name=test_case_instance.name,
test_case_severity=test_case_instance.severity,
status=ExecutedTestCaseResult.Status.FAILED,
validation_points=validation_results,
validation_points=[vp.to_dict() for vp in validation_results],
message=f"请求预校验失败: {'; '.join(failure_messages)}",
duration=tc_duration
)
@ -1131,7 +1132,7 @@ class APITestOrchestrator:
test_case_name=test_case_instance.name,
test_case_severity=test_case_instance.severity,
status=final_status,
validation_points=validation_results,
validation_points=[vp.to_dict() for vp in validation_results],
duration=tc_duration
)
@ -1150,7 +1151,7 @@ class APITestOrchestrator:
test_case_name=tc_name_for_log,
test_case_severity=tc_severity_for_log,
status=ExecutedTestCaseResult.Status.ERROR,
validation_points=validation_results, # Ensure validation_results is defined (it is, at the start of the function)
validation_points=[vp.to_dict() for vp in validation_results], # Ensure validation_results is defined (it is, at the start of the function)
message=f"测试用例执行时发生内部错误 (可能在实例化期间): {str(e)}",
duration=tc_duration
)
@ -1486,6 +1487,16 @@ class APITestOrchestrator:
def run_test_for_endpoint(self, endpoint: Union[YAPIEndpoint, SwaggerEndpoint, DMSEndpoint],
global_api_spec: Union[ParsedYAPISpec, ParsedSwaggerSpec, ParsedDMSSpec]
) -> TestResult:
# Check for scenario-only endpoints and skip standalone testing for them
if isinstance(endpoint, DMSEndpoint) and hasattr(endpoint, 'test_mode') and endpoint.test_mode == 'scenario_only':
self.logger.info(f"跳过对仅场景测试端点的独立测试: {endpoint.method} {endpoint.path}")
result = TestResult(
endpoint_id=f"{endpoint.method}_{endpoint.path}",
endpoint_name=endpoint.title or f"{endpoint.method} {endpoint.path}"
)
result.overall_status = TestResult.Status.SKIPPED
result.message = "此端点标记为仅在场景中测试 (test_mode='scenario_only')"
return result
endpoint_id = f"{getattr(endpoint, 'method', 'GET').upper()} {getattr(endpoint, 'path', '/')}"
endpoint_name = getattr(endpoint, 'title', '') or getattr(endpoint, 'summary', '') or endpoint_id
@ -2141,7 +2152,8 @@ class APITestOrchestrator:
stage_id=stage_id,
stage_name=stage_name,
description=stage_instance.description,
api_group_metadata=stage_instance.current_api_group_metadata, # Changed from api_group_metadata
api_group_metadata=stage_instance.current_api_group_metadata,
apis_in_group=stage_instance.apis_in_group, # pass the list of endpoint objects
tags=stage_instance.tags
)
@ -2267,32 +2279,56 @@ class APITestOrchestrator:
request_context=request_context_for_assertion
)
for i, assertion_func in enumerate(step_definition.response_assertions):
assertion_name = getattr(assertion_func, '__name__', f"custom_assertion_{i+1}")
try:
self.logger.debug(f"{step_log_prefix}: 执行断言 '{assertion_name}'")
val_res = assertion_func(response_context_for_assertion, stage_context)
step_validation_points.append(val_res)
if not val_res.passed:
current_step_result.status = ExecutedStageStepResult.Status.FAILED
self.logger.warning(f"{step_log_prefix}: 断言 '{assertion_name}' 失败: {val_res.message}")
except Exception as assert_exc:
current_step_result.status = ExecutedStageStepResult.Status.ERROR
errMsg = f"断言 '{assertion_name}' 执行错误: {assert_exc}"
step_validation_points.append(ValidationResult(passed=False, message=errMsg, details={"error": str(assert_exc)}))
self.logger.error(f"{step_log_prefix}: {errMsg}", exc_info=True)
current_step_result.validation_points = step_validation_points # fix: store the ValidationResult objects themselves
# Use a separate list for this step's validation points
step_validation_results: List[ValidationResult] = []
if current_step_result.status != ExecutedStageStepResult.Status.ERROR:
# Assertions are callables that return ValidationResult
for assertion in step_definition.response_assertions:
try:
validation_result = assertion(response_context_for_assertion, stage_context)
step_validation_results.append(validation_result)
except Exception as e_assert:
self.logger.error(f"{step_log_prefix}: Assertion function '{getattr(assertion, '__name__', 'N/A')}' raised an exception: {e_assert}", exc_info=True)
failed_vr = ValidationResult(passed=False, message=f"Assertion function raised an unhandled exception: {e_assert}")
step_validation_results.append(failed_vr)
# Check status codes
if step_definition.expected_status_codes:
status_code_vr = self._validate_status_code(
actual_code=response_context_for_assertion.status_code,
expected_codes=step_definition.expected_status_codes
)
step_validation_results.append(status_code_vr)
current_step_result.validation_points = [vp.to_dict() for vp in step_validation_results]
if any(not vp['passed'] for vp in current_step_result.validation_points):
current_step_result.status = ExecutedStageStepResult.Status.FAILED
# Store API call details
if api_call_detail_obj:
if hasattr(api_call_detail_obj, 'model_dump') and callable(api_call_detail_obj.model_dump):
current_step_result.api_call_details = api_call_detail_obj.model_dump()
elif hasattr(api_call_detail_obj, 'dict') and callable(api_call_detail_obj.dict):
current_step_result.api_call_details = api_call_detail_obj.dict()
else:
# Fallback if it's some other object, though it should be APICallDetail
current_step_result.api_call_details = str(api_call_detail_obj)
else:
current_step_result.api_call_details = {}
self.logger.debug(f"{step_log_prefix}: 提取输出到上下文. Map: {step_definition.outputs_to_context}")
response_data_for_extraction = {
self._extract_outputs_to_context(
response_data={
"json_content": api_response_obj.json_content,
"headers": api_response_obj.headers,
"status_code": api_response_obj.status_code
}
self._extract_outputs_to_context(response_data_for_extraction, step_definition.outputs_to_context, stage_context, current_step_name)
current_step_result.context_after_step = copy.deepcopy(stage_context)
},
outputs_map=step_definition.outputs_to_context,
stage_context=stage_context,
step_name_for_log=current_step_name
)
current_step_result.context_after_step = copy.deepcopy(stage_context)
except Exception as step_exec_exc:
current_step_result.status = ExecutedStageStepResult.Status.ERROR
@ -2627,3 +2663,14 @@ class APITestOrchestrator:
summary.finalize_summary()
return summary, parsed_spec
def _validate_status_code(self, actual_code: int, expected_codes: List[int]) -> ValidationResult:
"""Helper to validate the HTTP status code."""
if actual_code in expected_codes:
return ValidationResult(passed=True, message=f"响应状态码 {actual_code} 符合预期。")
else:
return ValidationResult(
passed=False,
message=f"响应状态码不匹配。预期: {expected_codes}, 实际: {actual_code}",
details={"expected": expected_codes, "actual": actual_code}
)
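# A minimal usage sketch (orch stands in for an APITestOrchestrator instance):
#   vr = orch._validate_status_code(actual_code=404, expected_codes=[200, 201])
#   vr.passed   # False
#   vr.details  # {"expected": [200, 201], "actual": 404}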

View File

@ -0,0 +1,27 @@
import logging
from typing import Any
logger = logging.getLogger(__name__)
def serialize_context_recursively(context: Any, _path: str = "root") -> Any:
"""
Recursively traverses a data structure (dict, list) and converts any object
with a to_dict() method into its dictionary representation.
Includes logging to trace the serialization process.
"""
if hasattr(context, 'to_dict') and callable(context.to_dict):
logger.debug(f"Serializing object of type {type(context).__name__} at path: {_path}")
# If the object itself is serializable, serialize it and then process its dict representation
return serialize_context_recursively(context.to_dict(), _path)
if isinstance(context, dict):
logger.debug(f"Serializing dict at path: {_path}")
return {k: serialize_context_recursively(v, f"{_path}.{k}") for k, v in context.items()}
if isinstance(context, list):
logger.debug(f"Serializing list at path: {_path}")
return [serialize_context_recursively(i, f"{_path}[{idx}]") for idx, i in enumerate(context)]
logger.debug(f"Returning primitive at path: {_path}, type: {type(context).__name__}")
# Return primitives and other JSON-serializable types as-is
return context
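# A hedged usage sketch (FakeEndpoint is hypothetical, standing in for DMSEndpoint):
#   class FakeEndpoint:
#       def to_dict(self):
#           return {"method": "GET", "path": "/api/demo"}
#   ctx = {"endpoint": FakeEndpoint(), "nested": [FakeEndpoint()]}
#   serialize_context_recursively(ctx)
#   # -> {'endpoint': {'method': 'GET', 'path': '/api/demo'},
#   #     'nested': [{'method': 'GET', 'path': '/api/demo'}]}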

View File

@ -0,0 +1,114 @@
"""
This module contains the DataGenerator class for creating test data from JSON schemas.
"""
import logging
import datetime
import uuid
from typing import Dict, Any, Optional, List
class DataGenerator:
"""
Generates test data based on a JSON Schema.
"""
def __init__(self, logger_param: Optional[logging.Logger] = None):
"""
Initializes the data generator.
Args:
logger_param: Optional logger instance. If not provided, a module-level logger is used.
"""
self.logger = logger_param or logging.getLogger(__name__)
def generate_data_from_schema(self, schema: Dict[str, Any],
context_name: Optional[str] = None,
operation_id: Optional[str] = None) -> Any:
"""
Generates test data from a JSON Schema.
This method was extracted and generalized from APITestOrchestrator.
Args:
schema: The JSON schema to generate data from.
context_name: A name for the context (e.g., 'requestBody'), for logging.
operation_id: The operation ID, for logging.
Returns:
Generated data that conforms to the schema.
"""
log_prefix = f"[{operation_id}] " if operation_id else ""
context_log = f" (context: {context_name})" if context_name else ""
if not schema or not isinstance(schema, dict):
self.logger.debug(f"{log_prefix}generate_data_from_schema: Invalid or empty schema provided{context_log}: {schema}")
return None
# Handle schema composition keywords
if 'oneOf' in schema or 'anyOf' in schema:
schemas_to_try = schema.get('oneOf') or schema.get('anyOf')
if isinstance(schemas_to_try, list) and schemas_to_try:
self.logger.debug(f"{log_prefix}Processing oneOf/anyOf, selecting the first schema for{context_log}")
return self.generate_data_from_schema(schemas_to_try[0], context_name, operation_id)
if 'allOf' in schema:
merged_schema = {}
for sub_schema in schema.get('allOf', []):
merged_schema.update(sub_schema) # note: shallow merge; nested keys like 'properties' are overwritten, not merged
self.logger.debug(f"{log_prefix}Processing allOf, merging schemas for{context_log}")
schema = merged_schema
# Use example or default values if available
if 'example' in schema:
self.logger.debug(f"{log_prefix}Using 'example' value from schema for{context_log}: {schema['example']}")
return schema['example']
if 'default' in schema:
self.logger.debug(f"{log_prefix}Using 'default' value from schema for{context_log}: {schema['default']}")
return schema['default']
schema_type = schema.get('type')
if schema_type == 'object':
result = {}
properties = schema.get('properties', {})
self.logger.debug(f"{log_prefix}Generating object data for{context_log}. Properties: {list(properties.keys())}")
for prop_name, prop_schema in properties.items():
nested_context = f"{context_name}.{prop_name}" if context_name else prop_name
result[prop_name] = self.generate_data_from_schema(prop_schema, nested_context, operation_id)
additional_properties = schema.get('additionalProperties')
if isinstance(additional_properties, dict):
self.logger.debug(f"{log_prefix}Generating an example property for additionalProperties for{context_log}")
result['additionalProp1'] = self.generate_data_from_schema(additional_properties, f"{context_name}.additionalProp1", operation_id)
return result
elif schema_type == 'array':
items_schema = schema.get('items', {})
min_items = schema.get('minItems', 1)
self.logger.debug(f"{log_prefix}Generating array data for{context_log}. Items schema: {items_schema}, minItems: {min_items}")
num_items_to_generate = max(1, min_items)
generated_array = []
for i in range(num_items_to_generate):
item_context = f"{context_name}[{i}]" if context_name else f"array_item[{i}]"
generated_array.append(self.generate_data_from_schema(items_schema, item_context, operation_id))
return generated_array
elif schema_type == 'string':
string_format = schema.get('format', '')
if 'enum' in schema and schema['enum']: return schema['enum'][0]
if string_format == 'date': return datetime.date.today().isoformat()
if string_format == 'date-time': return datetime.datetime.now().isoformat()
if string_format == 'email': return 'test@example.com'
if string_format == 'uuid': return str(uuid.uuid4())
return 'example_string'
elif schema_type in ['number', 'integer']:
minimum = schema.get('minimum')
if minimum is not None: return minimum
return 0 if schema_type == 'integer' else 0.0
elif schema_type == 'boolean':
return schema.get('default', False)
elif schema_type == 'null':
return None
self.logger.warning(f"{log_prefix}Unsupported schema type '{schema_type}' in {context_log}. Schema: {schema}")
return None
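# A hedged usage sketch against an illustrative schema (not part of this commit):
#   gen = DataGenerator()
#   schema = {"type": "object", "properties": {
#       "id": {"type": "string", "format": "uuid"},
#       "count": {"type": "integer", "minimum": 5},
#       "tags": {"type": "array", "items": {"type": "string"}, "minItems": 2}}}
#   gen.generate_data_from_schema(schema, context_name="requestBody")
#   # -> {"id": "<random uuid>", "count": 5, "tags": ["example_string", "example_string"]}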

log_dms.txt (18032 changed lines)

File diff suppressed because it is too large.

View File

@ -45,26 +45,15 @@ def generate_fake_schema(record: dict) -> dict:
"type": "object",
"title": title,
"properties": schema_properties,
"required": [pk_name, "status"]
}
# Build the delete-request schema following the logic in parser.py
delete_pk_schema = {
"type": "array",
"items": {
"type": "object",
"properties": { pk_name: schema_properties[pk_name] },
"required": [pk_name]
}
"required": [pk_name, "status"],
"identityId": [pk_name]
}
return {
"code": 0, "message": "操作处理成功",
"data": {
"version": version,
"model": model,
"_delete_schema_for_mock": delete_pk_schema # 内部使用,方便查找主键
}
"data":model,
}
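# With the nested 'model' key removed, a mock schema response now looks like
# (field values hypothetical):
#   {"code": 0, "message": "操作处理成功",
#    "data": {"type": "object", "title": "...", "properties": {...},
#             "required": ["wellId", "status"], "identityId": ["wellId"]}}
# matching the parser change that treats model_data itself as the schema.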
def preload_schemas():
@ -147,21 +136,34 @@ def read_resource(dms_instance_code, name, version, id):
def delete_resource(dms_instance_code, name):
"""Delete (DELETE): 删除资源"""
logging.info(f"Mock服务器: 收到对 '{name}' 的DELETE请求")
request_data = request.get_json(silent=True)
if not request_data or 'data' not in request_data or not isinstance(request_data['data'], list):
return jsonify({"code": 400, "message": "请求体格式错误,应为 {'data': [{pk: value}, ...]}"}), 400
data = request.json.get('data', [])
if not data or not isinstance(data, list):
return jsonify({"code": 400, "message": "请求体格式错误,需要'data'字段且为数组", "data": False}), 400
pk_name = get_pk_name_from_model(name)
deleted_count = 0
for item_to_delete in request_data['data']:
if pk_name in item_to_delete:
pk_value = item_to_delete[pk_name]
if IN_MEMORY_DB.get(name, {}).pop(pk_value, None):
logging.info(f" > 从 '{name}' 删除了资源 ID: {pk_value}")
deleted_count += 1
ids_to_delete = set()
# Check if the items in data are dicts (old format) or strings (new format)
if data and isinstance(data[0], dict):
# Legacy format: [{"pk_name": "value"}]
ids_to_delete = {item.get(pk_name) for item in data if pk_name in item}
else:
# New format: ["value1", "value2"]
ids_to_delete = set(data)
if not ids_to_delete:
return jsonify({"code": 400, "message": "未在请求中提供有效的ID", "data": False}), 400
deleted_count = 0
if name in IN_MEMORY_DB:
# Correctly pop items from the dictionary
for id_val in ids_to_delete:
if IN_MEMORY_DB[name].pop(id_val, None):
logging.info(f" > 从 '{name}' 删除了资源 ID: {id_val}")
deleted_count += 1
if deleted_count > 0:
return jsonify({"code": 0, "message": "删除成功", "data": True})
return jsonify({"code": 0, "message": f"成功删除 {deleted_count} 条记录", "data": True})
else:
return jsonify({"code": 404, "message": "未找到要删除的资源", "data": False})
@ -177,6 +179,17 @@ def list_resources(dms_instance_code, name, version):
"data": all_resources
})
# --- Schema Endpoints ---
@app.route('/api/dms/wb_ml/v1/schemas/<name>', methods=['GET'])
def get_schema_by_name(name):
"""模拟根据名称获取单个API模型(schema)的接口。"""
logging.info(f"Mock服务器: 收到名称为 '{name}' 的schema详情请求。")
schema_data = MOCK_SCHEMAS_CACHE.get(name)
if schema_data:
return jsonify(schema_data)
else:
return jsonify({"code": 404, "message": f"未找到名称为 '{name}' 的schema。"}), 404
def print_routes(app):
"""打印应用中所有已注册的路由。"""
logging.info("\n--- 已注册的API路由 ---")

Nine additional file diffs suppressed because they are too large.

Binary file not shown.

File diff suppressed because it is too large.