"""
Pydantic models for Test Cases, Test Steps, and Test Suites.
"""
|
|
from typing import Optional, List, Dict, Any, Union, Literal
|
|
from pydantic import BaseModel, Field, HttpUrl
|
|
from enum import Enum
|
|
|
|
from .rule_models import SeverityLevel # Assuming rule_models.py is in the same directory
|
|
from ..api_caller.caller import APIRequest # Adjust path as necessary
|
|
|
|
class TestDataGenerationStrategy(str, Enum):
    """Enumerates the supported ways of producing test data for an API request."""

    # Data is explicitly provided by the author of the test.
    STATIC = "static"
    # Data is generated from an OpenAPI/JSON schema.
    FROM_SCHEMA = "from_schema"
    # Data is extracted from an earlier test step's output.
    FROM_PREVIOUS_STEP = "from_previous_step"
    # Data is produced by an AI model.
    AI_GENERATED = "ai_generated"
|
|
|
|
class TestDataGenerationConfig(BaseModel):
    """Configuration for generating test data for an API request.

    Which optional fields are meaningful depends on ``strategy``; fields
    belonging to other strategies are left as ``None``.
    """

    strategy: TestDataGenerationStrategy = TestDataGenerationStrategy.STATIC

    # For STATIC strategy:
    static_data: Optional[Dict[str, Any]] = Field(default=None, description="Explicit data for the request (e.g., body, params).")

    # For FROM_SCHEMA strategy:
    schema_source: Optional[str] = Field(default=None, description="Identifier for the schema to be used (e.g., operationId, or path to schema file).")

    # For FROM_PREVIOUS_STEP strategy:
    source_step_id: Optional[str] = Field(default=None, description="The ID of the previous TestStep whose output will be used.")
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    extraction_rules: Optional[Dict[str, str]] = Field(default=None, description="Rules to extract data (e.g., JSONPath expressions like {'user_id': '$.body.id'}).")

    # For AI_GENERATED strategy:
    ai_prompt: Optional[str] = Field(default=None, description="Prompt to be used by an AI model to generate data.")
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    ai_model_config: Optional[Dict[str, Any]] = Field(default=None, description="Configuration for the AI model (e.g., temperature, max_tokens).")
|
|
|
|
class APIRequestContext(BaseModel):
    """
    Defines how to construct the APIRequest for a TestStep, potentially using
    data from previous steps or other dynamic sources.
    """

    method: str = Field(description="HTTP method (GET, POST, PUT, DELETE, etc.).")
    url_template: str = Field(description="URL template, can contain placeholders like {base_url} or {resource_id}.")
    # Fixed below: all four optional sources used `default_None` (undefined
    # name -> NameError at import time) instead of `default=None`.
    path_params_source: Optional[Dict[str, TestDataGenerationConfig]] = Field(default=None, description="How to generate/get path parameters.")
    query_params_source: Optional[Dict[str, TestDataGenerationConfig]] = Field(default=None, description="How to generate/get query parameters.")
    headers_source: Optional[Dict[str, TestDataGenerationConfig]] = Field(default=None, description="How to generate/get request headers. Default headers can be added by the orchestrator.")
    body_source: Optional[TestDataGenerationConfig] = Field(default=None, description="How to generate/get the request body.")

    # Stores the actual APIRequest object after context resolution.
    # This field will be populated by the TestOrchestrator before execution;
    # `exclude=True` keeps it out of serialized output.
    resolved_request: Optional[APIRequest] = Field(default=None, exclude=True)
|
|
|
|
|
|
class TestStep(BaseModel):
    """
    Represents a single step within a TestCase, typically involving one API call
    and a set of rules to be validated against its request/response.
    """

    step_id: str = Field(description="Unique identifier for this test step within the TestCase.")
    description: Optional[str] = Field(default=None, description="Description of what this test step does.")
    api_request_context: APIRequestContext = Field(description="Context to build the API request for this step.")

    # Rules to be applied at different phases of this step.
    # Applied after the request is prepared but before it's sent.
    # Useful for validating the constructed request itself.
    request_preparation_rule_ids: List[str] = Field(default_factory=list, description="Rules to apply to the APIRequest before sending.")

    # Applied after the API response is received. This is the most common place for validation rules.
    response_validation_rule_ids: List[str] = Field(default_factory=list, description="Rules to apply to the APIResponse.")

    # Applied after response validation; can be used for data cleanup or post-test actions.
    post_validation_rule_ids: List[str] = Field(default_factory=list, description="Rules to apply after response validation (e.g., cleanup).")

    # Configuration for expected outcomes, more specific than just rule pass/fail.
    expected_status_code: Optional[int] = Field(default=None, description="Expected HTTP status code.")
    # Allows defining assertions on the response body using something like JSONPath,
    # e.g., {"$.data.status": "completed", "$.errors": null}.
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    response_body_assertions: Optional[Dict[str, Any]] = Field(default=None, description="Assertions to make on the response body (e.g., using JSONPath).")

    # Execution control.
    skip: bool = Field(default=False, description="If True, this test step will be skipped.")
    # Allows storing output from this step to be used by subsequent steps,
    # e.g., {"created_user_id": "$.response.body.id"}.
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    outputs_to_extract: Optional[Dict[str, str]] = Field(default=None, description="JSONPath expressions to extract data from response to be used in later steps.")
|
|
|
|
|
|
class TestCaseExecutionMode(str, Enum):
    """How the steps inside a TestCase are scheduled for execution."""

    # One step after another, in declaration order.
    SEQUENTIAL = "sequential"
    # Independent steps may run concurrently.
    PARALLEL = "parallel"
|
|
|
|
class TestCase(BaseModel):
    """
    Defines a test case, which consists of one or more test steps,
    metadata, and configurations for execution.
    """

    id: str = Field(description="Unique identifier for the test case.")
    name: str = Field(description="Name of the test case.")
    description: Optional[str] = Field(default=None, description="Detailed description of the test case.")
    tags: List[str] = Field(default_factory=list, description="Tags for categorizing and filtering test cases.")
    severity: SeverityLevel = Field(default=SeverityLevel.INFO, description="Severity of the test case if it fails.")
    author: Optional[str] = Field(default=None, description="Author of the test case.")
    creation_date: Optional[str] = Field(default=None, description="Date when the test case was created (ISO format).")  # Consider using datetime
    version: str = Field(default="1.0.0", description="Version of the test case.")

    # Test steps.
    steps: List[TestStep] = Field(description="A list of test steps to be executed for this test case.")
    execution_mode: TestCaseExecutionMode = Field(default=TestCaseExecutionMode.SEQUENTIAL, description="How the steps in this test case should be executed.")

    # Data that can be shared across steps within this test case.
    # Steps can read from and write to this context.
    # Fixed: description contained a garbled word ("televisão" -> "shared").
    shared_context: Dict[str, Any] = Field(default_factory=dict, description="Data context shared across steps.")

    # Overall expected outcome for the test case (can be high-level).
    expected_overall_result: Optional[str] = Field(default=None, description="A high-level description of the expected outcome for the entire test case.")

    # For iterative/load testing.
    execution_count: int = Field(default=1, ge=1, description="Number of times this test case should be executed.")
    # If execution_count > 1, defines delay between iterations in seconds.
    delay_between_iterations_sec: float = Field(default=0, ge=0)
|
|
|
|
|
|
class TestSuite(BaseModel):
    """
    A collection of TestCases, possibly with a shared configuration or setup/teardown logic.
    """

    id: str = Field(description="Unique identifier for the test suite.")
    name: str = Field(description="Name of the test suite.")
    description: Optional[str] = Field(default=None, description="Description of the test suite.")
    tags: List[str] = Field(default_factory=list, description="Tags for categorizing and filtering test suites.")

    # List of TestCase IDs or full TestCase objects. Using IDs allows for referencing.
    # For simplicity in this initial design, full TestCase objects are embedded.
    # Later, we could support referencing TestCase definitions stored elsewhere.
    test_cases: List[TestCase] = Field(default_factory=list, description="List of test cases included in this suite.")

    # Global parameters or configurations that can apply to all TestCases in the suite.
    # These could override individual TestCase settings or provide defaults.
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    global_parameters: Optional[Dict[str, Any]] = Field(default=None, description="Parameters applicable to all test cases in the suite.")

    # Setup: API calls or actions to perform before running any TestCase in the suite.
    # Similar structure to TestStep but without specific rule validations, more for setup.
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    setup_steps: Optional[List[TestStep]] = Field(default=None, description="Steps to execute before running test cases in the suite.")

    # Teardown: API calls or actions to perform after all TestCases in the suite have run.
    # Fixed: was `default_None` (undefined name -> NameError at import time).
    teardown_steps: Optional[List[TestStep]] = Field(default=None, description="Steps to execute after running test cases in the suite.")

    # Execution control for the suite,
    # e.g., run all, run tagged, run failed from previous.
    execution_strategy: Optional[str] = Field(default=None, description="Strategy for executing test cases within the suite.")