gongwenxin 2025-08-18 23:56:08 +08:00
parent 2ae75b8c75
commit 336913fbd0
8 changed files with 1087 additions and 55 deletions

View File

@@ -5,7 +5,7 @@ import logging
import datetime
import traceback
from pathlib import Path
from typing import List, Optional
from typing import List, Optional, Dict, Any
import unicodedata
import html
@@ -630,6 +630,7 @@ def run_tests_logic(config: dict):
test_summary: Optional[TestSummary] = None
parsed_spec: Optional[ParsedAPISpec] = None
pagination_info: Dict[str, Any] = {}
if 'yapi' in config:
logger.info(f"Running tests from YAPI file: {config['yapi']}")
@@ -647,10 +648,11 @@ def run_tests_logic(config: dict):
)
elif 'dms' in config:
logger.info(f"Running tests from DMS service discovery: {config['dms']}")
test_summary, parsed_spec = orchestrator.run_tests_from_dms(
test_summary, parsed_spec, pagination_info = orchestrator.run_tests_from_dms(
domain_mapping_path=config['dms'],
categories=config.get('categories'),
custom_test_cases_dir=config.get('custom-test-cases-dir')
custom_test_cases_dir=config.get('custom-test-cases-dir'),
page_size=config.get('page-size', 1000)
)
if not parsed_spec:
@@ -684,12 +686,18 @@ def run_tests_logic(config: dict):
failed_count = getattr(test_summary, 'endpoints_failed', 0) + getattr(test_summary, 'test_cases_failed', 0)
error_count = getattr(test_summary, 'endpoints_error', 0) + getattr(test_summary, 'test_cases_error', 0)
return {
result = {
"status": "completed",
"message": "Tests finished." if failed_count == 0 and error_count == 0 else "Tests finished with failures or errors.",
"report_directory": str(output_directory.resolve()),
"summary": test_summary.to_dict()
}
# If pagination info was collected, add it to the returned result
if pagination_info:
result["pagination"] = pagination_info
return result
else:
raise RuntimeError("Test execution failed to produce a summary.")

create-compose-package-simple.sh (new executable file, 488 lines)
View File

@@ -0,0 +1,488 @@
#!/bin/bash
# DMS compliance test tool - Docker Compose deployment package creation script (simplified)
# Uses Alpine Linux + multi-stage build + Docker Compose management
# Automatically detects the current platform architecture
set -e
# Configuration variables
EXPORT_DIR="dms-compliance-compose-$(date +%Y%m%d-%H%M%S)"
IMAGE_NAME="compliance-dms-mini"
ARCHIVE_NAME="$EXPORT_DIR.tar.gz"
echo "=== DMS compliance test tool - Docker Compose deployment package creation script (simplified) ==="
echo "[INFO] Managed with Docker Compose, fully compatible with the original architecture"
echo "[INFO] Automatically detecting the current platform architecture"
# Check whether Docker is running
if ! docker info >/dev/null 2>&1; then
echo "[ERROR] Docker is not running or not accessible"
exit 1
fi
# Detect the current platform architecture
CURRENT_ARCH=$(docker version --format '{{.Server.Arch}}' 2>/dev/null || uname -m)
case "$CURRENT_ARCH" in
x86_64|amd64)
TARGET_PLATFORM="linux/amd64"
ARCH_NAME="AMD64 (x86_64)"
;;
aarch64|arm64)
TARGET_PLATFORM="linux/arm64"
ARCH_NAME="ARM64 (aarch64)"
;;
*)
TARGET_PLATFORM="linux/amd64" # Default to amd64
ARCH_NAME="AMD64 (x86_64) - default"
echo "[WARN] Unrecognized architecture $CURRENT_ARCH, falling back to amd64"
;;
esac
echo "[INFO] Detected architecture: $ARCH_NAME"
echo "[INFO] Target platform: $TARGET_PLATFORM"
# Create the export directory
echo "[INFO] Creating export directory: $EXPORT_DIR"
rm -rf "$EXPORT_DIR"
mkdir -p "$EXPORT_DIR"
# 1. Create a temporary build directory containing only the required files
echo "[INFO] Creating temporary build directory..."
TEMP_BUILD_DIR=$(mktemp -d)
trap "rm -rf $TEMP_BUILD_DIR" EXIT
# Whitelist: copy only the files that are actually needed
echo "[INFO] Copying required files (minimal mode)..."
mkdir -p "$TEMP_BUILD_DIR"/{ddms_compliance_suite,custom_stages,custom_testcases,templates,static,assets}
# Copy only the core Python files
echo "[INFO] Copying core Python files..."
for file in api_server.py history_viewer.py flask_app.py; do
[ -f "$file" ] && cp "$file" "$TEMP_BUILD_DIR/"
done
# Copy the core directories (excluding caches and temporary files)
echo "[INFO] Copying core directories..."
rsync -av --exclude='__pycache__' --exclude='*.pyc' --exclude='*.log' ddms_compliance_suite/ "$TEMP_BUILD_DIR/ddms_compliance_suite/"
rsync -av --exclude='__pycache__' --exclude='*.pyc' custom_stages/ "$TEMP_BUILD_DIR/custom_stages/"
rsync -av --exclude='__pycache__' --exclude='*.pyc' custom_testcases/ "$TEMP_BUILD_DIR/custom_testcases/"
# Make sure the templates directory structure is correct
echo "[INFO] Copying templates and static files..."
rsync -av templates/ "$TEMP_BUILD_DIR/templates/"
rsync -av static/ "$TEMP_BUILD_DIR/static/"
rsync -av assets/ "$TEMP_BUILD_DIR/assets/"
# Verify the contents of the templates directory
echo "[INFO] Verifying templates directory: $(ls "$TEMP_BUILD_DIR/templates/" 2>/dev/null | wc -l) files"
echo "[INFO] templates file list: $(ls "$TEMP_BUILD_DIR/templates/" 2>/dev/null | tr '\n' ' ')"
# Copy the full requirements.txt
echo "[INFO] Copying full requirements.txt..."
cp requirements.txt "$TEMP_BUILD_DIR/"
# Create an ultra-lightweight Dockerfile
echo "[INFO] Creating ultra-lightweight Dockerfile..."
# First try the optimized multi-stage build Dockerfile
cat > "$TEMP_BUILD_DIR/Dockerfile" << 'EOF'
# Multi-stage build: stage one installs dependencies
FROM python:3.9-alpine AS builder
# Update the package index and install build dependencies
RUN apk update && \
apk add --no-cache \
gcc \
musl-dev \
linux-headers \
libffi-dev \
openssl-dev \
cargo \
rust && \
rm -rf /var/cache/apk/*
# Set the working directory
WORKDIR /app
# Copy requirements and install Python dependencies
COPY requirements.txt .
# Upgrade pip and install dependencies
RUN pip install --upgrade pip setuptools wheel && \
pip install --no-cache-dir --user -r requirements.txt
# Stage two: runtime image
FROM python:3.9-alpine
# Install runtime dependencies
RUN apk update && \
apk add --no-cache \
supervisor \
curl \
bash && \
rm -rf /var/cache/apk/*
# Copy the Python packages from the build stage
COPY --from=builder /root/.local /root/.local
# Set the working directory
WORKDIR /app
# Copy the application code
COPY . .
# Create the supervisor configuration
RUN mkdir -p /etc/supervisor/conf.d
COPY supervisord.conf /etc/supervisor/conf.d/
# Create log directories
RUN mkdir -p /var/log/supervisor /app/logs /app/test_reports /app/uploads
# Set environment variables
ENV PATH=/root/.local/bin:$PATH
ENV PYTHONPATH=/app
ENV FLASK_ENV=production
ENV PYTHONUNBUFFERED=1
# Expose the ports for both services
EXPOSE 5050 5051
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:5050/ || exit 1
# Start supervisor
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
EOF
# Create a fallback simplified Dockerfile in case the multi-stage build fails
cat > "$TEMP_BUILD_DIR/Dockerfile.simple" << 'EOF'
# Simplified version: single-stage build
FROM python:3.9-alpine
# Install all required dependencies
RUN apk update && \
apk add --no-cache \
gcc \
musl-dev \
linux-headers \
libffi-dev \
openssl-dev \
supervisor \
curl \
bash && \
rm -rf /var/cache/apk/*
# Set the working directory
WORKDIR /app
# Copy the application code
COPY . .
# Install Python dependencies
RUN pip install --upgrade pip setuptools wheel && \
pip install --no-cache-dir -r requirements.txt
# Create the supervisor configuration directory
RUN mkdir -p /etc/supervisor/conf.d
COPY supervisord.conf /etc/supervisor/conf.d/
# Create required directories
RUN mkdir -p /var/log/supervisor /app/logs /app/test_reports /app/uploads
# Set environment variables
ENV PYTHONPATH=/app
ENV FLASK_ENV=production
ENV PYTHONUNBUFFERED=1
# Expose ports
EXPOSE 5050 5051
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:5050/ || exit 1
# Start supervisor
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
EOF
# Create the supervisor configuration
echo "[INFO] Creating supervisor configuration..."
cat > "$TEMP_BUILD_DIR/supervisord.conf" << 'EOF'
[supervisord]
nodaemon=true
logfile=/var/log/supervisor/supervisord.log
pidfile=/var/run/supervisord.pid
childlogdir=/var/log/supervisor
logfile_maxbytes=50MB
logfile_backups=10
loglevel=info
[unix_http_server]
file=/tmp/supervisor.sock
chmod=0700
[supervisorctl]
serverurl=unix:///tmp/supervisor.sock
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
# DMS API server (main service)
[program:api_server]
command=python api_server.py
directory=/app
autostart=true
autorestart=true
redirect_stderr=true
stdout_logfile=/var/log/supervisor/api_server.log
stdout_logfile_maxbytes=10MB
stdout_logfile_backups=5
environment=PYTHONPATH="/app",PYTHONUNBUFFERED="1"
# History viewer service
[program:history_viewer]
command=python history_viewer.py
directory=/app
autostart=true
autorestart=true
redirect_stderr=true
stdout_logfile=/var/log/supervisor/history_viewer.log
stdout_logfile_maxbytes=10MB
stdout_logfile_backups=5
environment=PYTHONPATH="/app",PYTHONUNBUFFERED="1"
# Process group configuration
[group:dms_services]
programs=api_server,history_viewer
priority=999
EOF
# Show the build directory size
echo "[INFO] Temporary build directory size: $(du -sh "$TEMP_BUILD_DIR" | cut -f1)"
# 2. Build the Docker image
echo "[INFO] Building ultra-lightweight Docker image ($TARGET_PLATFORM)..."
cd "$TEMP_BUILD_DIR"
# Try the multi-stage build first, fall back to the simplified build on failure
if docker build --platform "$TARGET_PLATFORM" -t "$IMAGE_NAME:latest" .; then
echo "[OK] Docker build finished (multi-stage build)"
else
echo "[WARN] Multi-stage build failed, trying simplified build..."
if docker build --platform "$TARGET_PLATFORM" -t "$IMAGE_NAME:latest" -f Dockerfile.simple .; then
echo "[OK] Docker build finished (simplified build)"
else
echo "[ERROR] All build methods failed"
exit 1
fi
fi
cd - > /dev/null
# 3. Export the Docker image
echo "[INFO] Exporting Docker image..."
docker save "$IMAGE_NAME:latest" | gzip > "$EXPORT_DIR/docker-image.tar.gz"
# 4. Create docker-compose.yml
echo "[INFO] Creating docker-compose.yml..."
cat > "$EXPORT_DIR/docker-compose.yml" << 'EOF'
version: '3.8'
services:
dms-compliance-tool:
image: compliance-dms-mini:latest
container_name: dms-compliance-mini
ports:
- "5050:5050" # API server port
- "5051:5051" # History viewer port
volumes:
# Persist test reports
- ./test_reports:/app/test_reports
# Persist uploaded files
- ./uploads:/app/uploads
# Persist logs
- ./logs:/app/logs
# Optional custom configuration files
- ./config:/app/config:ro
environment:
- FLASK_ENV=production
- PYTHONUNBUFFERED=1
- TZ=Asia/Shanghai
restart: unless-stopped
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:5050/"]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
networks:
- dms-network
networks:
dms-network:
driver: bridge
volumes:
test_reports:
uploads:
logs:
EOF
# 5. Create the deployment script
echo "[INFO] Creating deployment script..."
cat > "$EXPORT_DIR/deploy.sh" << 'EOF'
#!/bin/bash
# DMS compliance test tool - Docker Compose deployment script
set -e
echo "=== DMS compliance test tool - Docker Compose deployment ==="
# Check Docker and Docker Compose
if ! docker info >/dev/null 2>&1; then
echo "[ERROR] Docker is not running"
exit 1
fi
if ! command -v docker-compose >/dev/null 2>&1 && ! docker compose version >/dev/null 2>&1; then
echo "[ERROR] Docker Compose is not installed"
echo "Please install Docker Compose or use Docker Desktop"
exit 1
fi
# Create the required directories
echo "[INFO] Creating data directories..."
mkdir -p test_reports uploads logs config
# Load the image
echo "[INFO] Loading Docker image..."
docker load < docker-image.tar.gz
# Stop any existing services
echo "[INFO] Stopping existing services..."
docker-compose down 2>/dev/null || docker compose down 2>/dev/null || true
# Start the services
echo "[INFO] Starting services..."
if command -v docker-compose >/dev/null 2>&1; then
docker-compose up -d
else
docker compose up -d
fi
echo "[OK] Deployment finished!"
echo "URL: http://localhost:5050 (API server)"
echo "URL: http://localhost:5051 (history viewer)"
echo ""
echo "Management commands:"
echo "- Check status: docker-compose ps"
echo "- View logs: docker-compose logs"
echo "- Stop services: docker-compose down"
echo "- Restart services: docker-compose restart"
echo ""
echo "Data directories:"
echo "- Test reports: $(pwd)/test_reports"
echo "- Uploads: $(pwd)/uploads"
echo "- Logs: $(pwd)/logs"
echo "- Config: $(pwd)/config"
EOF
chmod +x "$EXPORT_DIR/deploy.sh"
# 6. Create the README
echo "[INFO] Creating README..."
cat > "$EXPORT_DIR/README.md" << 'EOF'
# DMS compliance test tool - Docker Compose version (simplified)
## Features
- Based on Alpine Linux, very small image (about 300MB)
- Multi-stage build with an optimized layer structure
- Fully compatible with the original architecture (ports 5050 + 5051)
- Services managed with Docker Compose
- Supports data persistence and health checks
- Automatically detects the current platform architecture, no manual selection needed
## Deployment
1. Extract the deployment package
2. Run the deployment script:
```bash
./deploy.sh
```
3. Access the services:
- API server: http://localhost:5050
- History viewer: http://localhost:5051
## Management commands
- Check service status: `docker-compose ps`
- View logs: `docker-compose logs`
- Stop services: `docker-compose down`
- Restart services: `docker-compose restart`
- Follow live logs: `docker-compose logs -f`
## Files
- `docker-image.tar.gz` - Docker image file
- `docker-compose.yml` - Docker Compose configuration
- `deploy.sh` - one-click deployment script
- `README.md` - documentation
## Data persistence
All important data is persisted to local directories:
- `test_reports/` - test reports
- `uploads/` - uploaded files
- `logs/` - log files
- `config/` - configuration files (read-only)
## Architecture support
This version automatically detects the current platform architecture:
- AMD64 (x86_64) - most Intel/AMD servers
- ARM64 (aarch64) - Apple Silicon Macs and ARM servers
## Troubleshooting
If you run into problems:
1. Check that Docker is running: `docker info`
2. Check whether the ports are already in use: `netstat -tlnp | grep 505`
3. Check the container logs: `docker-compose logs`
4. Restart the services: `docker-compose restart`
EOF
# 7. Show image information
echo "[INFO] Docker image information:"
docker images "$IMAGE_NAME:latest"
# 8. Compress the final package
echo "[INFO] Compressing final deployment package..."
tar -czf "$ARCHIVE_NAME" "$EXPORT_DIR"
# 9. Show the results
echo ""
echo "=== Done ==="
echo "Deployment package: $ARCHIVE_NAME"
echo "Package size: $(du -sh "$ARCHIVE_NAME" | cut -f1)"
echo "Build architecture: $TARGET_PLATFORM ($ARCH_NAME)"
echo "Docker image size: $(docker images "$IMAGE_NAME:latest" --format "{{.Size}}" 2>/dev/null || echo "about 300MB")"
echo ""
echo "Deployment steps:"
echo "1. Extract: tar -xzf $ARCHIVE_NAME"
echo "2. Enter the directory: cd $EXPORT_DIR"
echo "3. Run: ./deploy.sh"
echo ""
# Clean up the local Docker image (optional)
read -p "Delete the local Docker image? (y/N): " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
docker rmi "$IMAGE_NAME:latest"
echo "[INFO] Local Docker image deleted"
fi
echo "[DONE] Docker Compose deployment package created (simplified)"

View File

@@ -100,25 +100,43 @@ cp requirements.txt "$TEMP_BUILD_DIR/"
# Create an ultra-lightweight Dockerfile
echo "[INFO] Creating ultra-lightweight Dockerfile..."
# First try the optimized multi-stage build Dockerfile
cat > "$TEMP_BUILD_DIR/Dockerfile" << 'EOF'
# Multi-stage build: stage one installs dependencies
FROM python:3.9-alpine AS builder
# Install build dependencies
RUN apk add --no-cache gcc musl-dev linux-headers
# Update the package index and install build dependencies
RUN apk update && \
apk add --no-cache \
gcc \
musl-dev \
linux-headers \
libffi-dev \
openssl-dev \
cargo \
rust && \
rm -rf /var/cache/apk/*
# Set the working directory
WORKDIR /app
# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir --user -r requirements.txt
# Upgrade pip and install dependencies
RUN pip install --upgrade pip setuptools wheel && \
pip install --no-cache-dir --user -r requirements.txt
# Stage two: runtime image
FROM python:3.9-alpine
# Install runtime dependencies
RUN apk add --no-cache supervisor curl && \
RUN apk update && \
apk add --no-cache \
supervisor \
curl \
bash && \
rm -rf /var/cache/apk/*
# Copy the Python packages from the build stage
@@ -146,6 +164,61 @@ ENV PYTHONUNBUFFERED=1
# Expose the ports for both services
EXPOSE 5050 5051
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:5050/ || exit 1
# Start supervisor
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
EOF
# Create a fallback simplified Dockerfile in case the multi-stage build fails
cat > "$TEMP_BUILD_DIR/Dockerfile.simple" << 'EOF'
# Simplified version: single-stage build
FROM python:3.9-alpine
# Install all required dependencies
RUN apk update && \
apk add --no-cache \
gcc \
musl-dev \
linux-headers \
libffi-dev \
openssl-dev \
supervisor \
curl \
bash && \
rm -rf /var/cache/apk/*
# Set the working directory
WORKDIR /app
# Copy the application code
COPY . .
# Install Python dependencies
RUN pip install --upgrade pip setuptools wheel && \
pip install --no-cache-dir -r requirements.txt
# Create the supervisor configuration directory
RUN mkdir -p /etc/supervisor/conf.d
COPY supervisord.conf /etc/supervisor/conf.d/
# Create required directories
RUN mkdir -p /var/log/supervisor /app/logs /app/test_reports /app/uploads
# Set environment variables
ENV PYTHONPATH=/app
ENV FLASK_ENV=production
ENV PYTHONUNBUFFERED=1
# Expose ports
EXPOSE 5050 5051
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
CMD curl -f http://localhost:5050/ || exit 1
# Start supervisor
CMD ["/usr/bin/supervisord", "-c", "/etc/supervisor/conf.d/supervisord.conf"]
EOF
@@ -217,34 +290,70 @@ if [ "$USE_BUILDX" = true ]; then
# Check whether this is a multi-architecture build
if [[ "$TARGET_PLATFORMS" == *","* ]]; then
echo "[INFO] Running multi-architecture build, this may take a few minutes..."
echo "[NOTE] The multi-architecture build pushes to a local registry and then exports"
echo "[NOTE] The multi-architecture build builds each architecture image separately"
# A multi-architecture build has to push to a registry before it can be exported
# Here we use a temporary local registry
echo "[INFO] Starting temporary local registry..."
docker run -d --rm --name temp-registry -p 5555:5000 registry:2 2>/dev/null || true
sleep 2
# Build an image for each architecture separately
IFS=',' read -ra PLATFORMS <<< "$TARGET_PLATFORMS"
BUILD_SUCCESS=true
# Build and push to the temporary registry
docker buildx build --platform "$TARGET_PLATFORMS" \
--tag "localhost:5555/$IMAGE_NAME:latest" \
--push .
for platform in "${PLATFORMS[@]}"; do
platform_tag="${platform//\//-}" # Converts linux/amd64 to linux-amd64
echo "[INFO] Building image for $platform..."
# Pull from the registry and re-tag
docker pull "localhost:5555/$IMAGE_NAME:latest"
docker tag "localhost:5555/$IMAGE_NAME:latest" "$IMAGE_NAME:latest"
# Try the multi-stage build first
if docker buildx build --platform "$platform" --load -t "$IMAGE_NAME:$platform_tag" . 2>&1; then
echo "[OK] $platform build finished (multi-stage build)"
else
echo "[WARN] $platform multi-stage build failed, trying simplified build..."
# Try the simplified version
if docker buildx build --platform "$platform" --load -t "$IMAGE_NAME:$platform_tag" -f Dockerfile.simple . 2>&1; then
echo "[OK] $platform build finished (simplified build)"
else
echo "[ERROR] All build methods failed for $platform, skipping this architecture"
BUILD_SUCCESS=false
fi
fi
done
# Clean up the temporary registry
docker stop temp-registry 2>/dev/null || true
# Check that at least one architecture built successfully
if docker images "$IMAGE_NAME" --format "{{.Tag}}" | grep -q "linux-"; then
# Use the first successfully built architecture for the latest tag
first_successful=$(docker images "$IMAGE_NAME" --format "{{.Tag}}" | grep "linux-" | head -1)
docker tag "$IMAGE_NAME:$first_successful" "$IMAGE_NAME:latest"
echo "[INFO] Multi-architecture build finished, the latest tag points to the $first_successful architecture"
else
echo "[ERROR] All architecture builds failed"
exit 1
fi
else
# A single-architecture build can be loaded directly
echo "[INFO] Running single-architecture build..."
docker buildx build --platform "$TARGET_PLATFORMS" --load -t "$IMAGE_NAME:latest" .
if docker buildx build --platform "$TARGET_PLATFORMS" --load -t "$IMAGE_NAME:latest" .; then
echo "[OK] Single-architecture build finished (multi-stage build)"
else
echo "[WARN] Multi-stage build failed, trying simplified build..."
if docker buildx build --platform "$TARGET_PLATFORMS" --load -t "$IMAGE_NAME:latest" -f Dockerfile.simple .; then
echo "[OK] Single-architecture build finished (simplified build)"
else
echo "[ERROR] All build methods failed"
exit 1
fi
fi
fi
else
# Use the traditional build method
echo "[INFO] Using traditional Docker build..."
docker build --platform "$TARGET_PLATFORMS" -t "$IMAGE_NAME:latest" .
if docker build --platform "$TARGET_PLATFORMS" -t "$IMAGE_NAME:latest" .; then
echo "[OK] Traditional Docker build finished (multi-stage build)"
else
echo "[WARN] Multi-stage build failed, trying simplified build..."
if docker build --platform "$TARGET_PLATFORMS" -t "$IMAGE_NAME:latest" -f Dockerfile.simple .; then
echo "[OK] Traditional Docker build finished (simplified build)"
else
echo "[ERROR] All build methods failed"
exit 1
fi
fi
fi
cd - > /dev/null
@@ -422,7 +531,13 @@ echo ""
echo "=== Done ==="
echo "Deployment package: $ARCHIVE_NAME"
echo "Package size: $(du -sh "$ARCHIVE_NAME" | cut -f1)"
echo "Docker image size: $(docker images "$IMAGE_NAME:latest" --format "{{.Size}}" 2>/dev/null || echo "about 300MB")"
echo "Build architectures: $TARGET_PLATFORMS"
# Show all built images
echo ""
echo "Built Docker images:"
docker images "$IMAGE_NAME" --format "table {{.Repository}}:{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}" 2>/dev/null || echo "Failed to get image information"
echo ""
echo "Deployment steps:"
echo "1. Extract: tar -xzf $ARCHIVE_NAME"

View File

@@ -538,7 +538,7 @@ class InputParser:
self.logger.error(f"An unexpected error occurred while parsing Swagger spec {file_path}: {e}", exc_info=True)
return None
def parse_dms_spec(self, domain_mapping_path: str, base_url: str, headers: Optional[Dict[str, str]] = None, ignore_ssl: bool = False) -> Optional[ParsedDMSSpec]:
def parse_dms_spec(self, domain_mapping_path: str, base_url: str, headers: Optional[Dict[str, str]] = None, ignore_ssl: bool = False, page_size: int = 1000) -> Optional[Tuple[ParsedDMSSpec, Dict[str, Any]]]:
self.logger.info(f"Starting DMS spec parsing. Base URL: {base_url}, Domain Map: {domain_mapping_path}")
if ignore_ssl:
@@ -565,31 +565,75 @@
keyword_to_domain_id[keyword] = domain_id
self.logger.debug(f"Mapped keyword '{keyword}' -> domain ID '{domain_id}'")
list_url = urljoin(base_url, "/api/schema/manage/schema?pageNo=1&pageSize=100000")
self.logger.info(f"Fetching API list from: {list_url}")
# Fetch the API list page by page
self.logger.info(f"Fetching API list with pagination (page_size={page_size})")
api_records = []
page_no = 1
total_fetched = 0
pagination_info = {
"page_size": page_size,
"total_pages": 0,
"total_records": 0,
"pages_fetched": 0
}
try:
response = requests.get(list_url, headers=headers, verify=not ignore_ssl)
response.raise_for_status()
api_list_data = response.json()
while True:
list_url = urljoin(base_url, f"/api/schema/manage/schema?pageNo={page_no}&pageSize={page_size}")
self.logger.debug(f"Fetching page {page_no} from: {list_url}")
# Check whether the business-level code indicates success
if api_list_data.get("code") != 0:
self.logger.error(f"DMS API list endpoint returned a business error: {api_list_data.get('message')}")
return None
response = requests.get(list_url, headers=headers, verify=not ignore_ssl)
response.raise_for_status()
api_list_data = response.json()
# Check whether the business-level code indicates success
if api_list_data.get("code") != 0:
self.logger.error(f"DMS API list endpoint returned a business error: {api_list_data.get('message')}")
return None, {}
# Extract 'records' from the paginated structure
page_records = api_list_data.get("data", {}).get("records", [])
if not page_records:
self.logger.info(f"No more records found on page {page_no}, stopping pagination")
break
api_records.extend(page_records)
total_fetched += len(page_records)
self.logger.info(f"Fetched {len(page_records)} records from page {page_no}, total: {total_fetched}")
# Update the pagination info
data = api_list_data.get("data", {})
total_count = data.get("total", 0)
current_count = data.get("current", 0) * data.get("size", page_size)
# Record the totals on the first page only
if page_no == 1:
pagination_info["total_records"] = total_count
pagination_info["total_pages"] = (total_count + page_size - 1) // page_size # ceiling division
pagination_info["pages_fetched"] = page_no
if current_count >= total_count or len(page_records) < page_size:
self.logger.info(f"Reached end of data. Total records: {total_fetched}")
break
page_no += 1
# Safety check: prevent an infinite loop
if page_no > 1000: # at most 1000 pages
self.logger.warning("Reached maximum page limit (1000), stopping pagination")
break
# Extract 'records' from the paginated structure
api_records = api_list_data.get("data", {}).get("records", [])
if not api_records:
self.logger.warning("DMS API list is empty or 'records' key is missing in the response data.")
# Returning an empty spec is valid if the list is just empty.
return ParsedDMSSpec(endpoints=[], spec={"dms_api_list": []})
self.logger.warning("DMS API list is empty after pagination.")
return ParsedDMSSpec(endpoints=[], spec={"dms_api_list": []}), pagination_info
except requests.exceptions.RequestException as e:
self.logger.error(f"Failed to fetch API list from DMS: {e}")
return None
return None, {}
except json.JSONDecodeError:
self.logger.error("Failed to decode JSON response from DMS API list.")
return None
return None, {}
endpoints: List[DMSEndpoint] = []
@@ -756,7 +800,7 @@ class InputParser:
# The 'spec' for ParsedDMSSpec should represent the whole document.
# We can construct a dictionary holding all the raw data we fetched.
dms_full_spec_dict = {"dms_api_list": api_records}
return ParsedDMSSpec(endpoints=endpoints, spec=dms_full_spec_dict)
return ParsedDMSSpec(endpoints=endpoints, spec=dms_full_spec_dict), pagination_info
class DmsConfig:
def __init__(self, base_url: str, domain_map_file: str, headers: Optional[Dict[str, str]] = None):

View File

@@ -2681,8 +2681,9 @@ class APITestOrchestrator:
def run_tests_from_dms(self, domain_mapping_path: str,
categories: Optional[List[str]] = None,
custom_test_cases_dir: Optional[str] = None,
ignore_ssl: bool = False
) -> Tuple[TestSummary, Optional[ParsedAPISpec]]:
ignore_ssl: bool = False,
page_size: int = 1000
) -> Tuple[TestSummary, Optional[ParsedAPISpec], Dict[str, Any]]:
"""
Run tests via dynamic DMS service discovery.
"""
@@ -2692,20 +2693,22 @@
self.logger.info("Starting tests from dynamic DMS service discovery...")
# If ignore_ssl was not passed as a method argument, use the instance setting
actual_ignore_ssl = ignore_ssl if ignore_ssl else self.ignore_ssl
parsed_spec = parser.parse_dms_spec(domain_mapping_path, base_url=self.base_url, ignore_ssl=actual_ignore_ssl)
parse_result = parser.parse_dms_spec(domain_mapping_path, base_url=self.base_url, ignore_ssl=actual_ignore_ssl, page_size=page_size)
if not parsed_spec:
if not parse_result or parse_result[0] is None:
self.logger.error("Could not parse APIs from the DMS service; aborting tests.")
summary.add_error("Could not parse APIs from DMS service.")
summary.finalize_summary()
return summary, None
return summary, None, {}
parsed_spec, pagination_info = parse_result
# 🔧 Removed the duplicate run_stages_from_spec call
# Stage execution is now handled centrally in the main program
summary = self._execute_tests_from_parsed_spec(parsed_spec, summary, categories=categories, custom_test_cases_dir=custom_test_cases_dir)
summary.finalize_summary()
return summary, parsed_spec
return summary, parsed_spec, pagination_info
def _validate_status_code(self, actual_code: int, expected_codes: List[int]) -> ValidationResult:
"""Helper to validate the HTTP status code."""

View File

@@ -0,0 +1,228 @@
# DMS Pagination Implementation Notes
## Overview
To fix the out-of-memory errors that occurred when the number of nodes under test was very large, the DMS API list is now fetched with pagination. The previous implementation fetched all data in a single request (pageSize=100000); fetching page by page greatly reduces memory usage.
## Background
### Original problem
```
MemoryError
```
### Root cause
- The old code used `pageSize=100000` to fetch all API data in one request (see the sketch below)
- With a large number of APIs this caused out-of-memory errors
- Serializing the large JSON response also consumed a lot of memory
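A rough sketch of the change in request shape (the endpoint path matches the parser code in this commit; the base URL and variable names are illustrative):

```python
from urllib.parse import urljoin

base_url = "https://dms.example.com"  # illustrative base URL

# Before: one request for everything, which forces the whole record set into memory
old_url = urljoin(base_url, "/api/schema/manage/schema?pageNo=1&pageSize=100000")

# After: one request per page, so only page_size records are in flight at a time
page_no, page_size = 1, 1000
new_url = urljoin(base_url, f"/api/schema/manage/schema?pageNo={page_no}&pageSize={page_size}")
```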
## Solution
### 1. Fetch the API list with pagination
**Modified file**: `ddms_compliance_suite/input_parser/parser.py`
**Main changes**:
- Added a `page_size` parameter (default 1000)
- Implemented a paged fetch loop
- Collect and return pagination statistics
```python
def parse_dms_spec(self, domain_mapping_path: str, base_url: str,
headers: Optional[Dict[str, str]] = None,
ignore_ssl: bool = False,
page_size: int = 1000) -> Optional[Tuple[ParsedDMSSpec, Dict[str, Any]]]:
```
**Pagination loop**:
```python
while True:
list_url = urljoin(base_url, f"/api/schema/manage/schema?pageNo={page_no}&pageSize={page_size}")
# fetch the current page of records
# check whether there are more pages
# update the pagination statistics
```
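For reference, a condensed, self-contained sketch of what the loop in `parse_dms_spec` does (error handling and logging trimmed; the `data.records` / `data.total` response shape is the one this commit assumes):

```python
import requests
from urllib.parse import urljoin

def fetch_all_records(base_url, headers=None, page_size=1000, verify=True):
    """Fetch the DMS API list page by page instead of in one huge request."""
    records, page_no = [], 1
    pagination_info = {"page_size": page_size, "total_pages": 0,
                       "total_records": 0, "pages_fetched": 0}
    while True:
        url = urljoin(base_url, f"/api/schema/manage/schema?pageNo={page_no}&pageSize={page_size}")
        data = requests.get(url, headers=headers, verify=verify).json().get("data", {})
        page_records = data.get("records", [])
        if not page_records:              # empty page: nothing left to fetch
            break
        records.extend(page_records)
        if page_no == 1:                  # totals only need to be read once
            total = data.get("total", 0)
            pagination_info["total_records"] = total
            pagination_info["total_pages"] = (total + page_size - 1) // page_size
        pagination_info["pages_fetched"] = page_no
        if len(page_records) < page_size or page_no >= 1000:  # short page or safety cap
            break
        page_no += 1
    return records, pagination_info
```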
### 2. Pagination statistics
**Returned pagination info**:
```json
{
"page_size": 1000,
"total_pages": 15,
"total_records": 14523,
"pages_fetched": 15
}
```
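`total_pages` is derived by ceiling division, matching the parser change; for the numbers above:

```python
total_records, page_size = 14523, 1000
total_pages = (total_records + page_size - 1) // page_size  # ceiling division
assert total_pages == 15
```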
### 3. Command-line support
**Modified file**: `run_api_tests.py`
**New argument**:
```bash
--page-size 1000 # DMS API page size, default 1000
```
**Usage example**:
```bash
python run_api_tests.py \
--dms ./assets/doc/dms/domain.json \
--base-url https://www.dev.ideas.cnpc \
--page-size 500 \
--ignore-ssl
```
### 4. API server support
**Modified file**: `api_server.py`
**Configuration parameters**:
```json
{
"dms": "./assets/doc/dms/domain.json",
"base-url": "https://www.dev.ideas.cnpc",
"page-size": 500,
"ignore-ssl": true
}
```
**响应格式**:
```json
{
"status": "completed",
"message": "Tests finished.",
"report_directory": "/path/to/reports",
"summary": { ... },
"pagination": {
"page_size": 500,
"total_pages": 30,
"total_records": 14523,
"pages_fetched": 30
}
}
```
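A minimal client-side sketch for reading this field, assuming the API server is running locally on port 5050 and exposes the `/run` endpoint that `test_pagination.py` also uses:

```python
import requests

config = {
    "dms": "./assets/doc/dms/domain.json",
    "base-url": "https://www.dev.ideas.cnpc",
    "page-size": 500,
    "ignore-ssl": True,
}
resp = requests.post("http://localhost:5050/run", json=config, timeout=300)
result = resp.json()

# The pagination block is only present for DMS runs
for key, value in result.get("pagination", {}).items():
    print(f"{key}: {value}")
```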
## Performance
### Memory usage comparison
| Page size | Estimated memory | Requests | Recommended scenario |
|-----------|------------------|----------|----------------------|
| 100 | ~10MB | many | memory-constrained environments |
| 500 | ~50MB | moderate | balanced choice |
| 1000 | ~100MB | fewer | default recommendation |
| 5000 | ~500MB | very few | high-performance environments |
### Recommended settings
**Development**:
```bash
--page-size 100 # quick tests, lower memory usage
```
**Testing**:
```bash
--page-size 500 # balance performance and memory
```
**Production**:
```bash
--page-size 1000 # default setting, best throughput
```
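The memory figures above are rough estimates, not measurements; `test_pagination.py` approximates the footprint of a parsed spec from its string form, roughly like this:

```python
def estimate_spec_memory_mb(parsed_spec) -> float:
    """Crude estimate used in test_pagination.py: size of the spec's string form in MB."""
    return len(str(parsed_spec.spec)) / 1024 / 1024
```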
## Safety features
### Safeguards
1. **Maximum page limit**: prevents infinite loops
```python
if page_no > 1000: # at most 1000 pages
logger.warning("Reached maximum page limit (1000), stopping pagination")
break
```
2. **Empty page detection**: stops pagination automatically
```python
if not page_records:
logger.info(f"No more records found on page {page_no}, stopping pagination")
break
```
3. **Error handling**: graceful degradation on network errors
```python
except requests.exceptions.RequestException as e:
self.logger.error(f"Failed to fetch API list from DMS: {e}")
return None, {}
```
## Backward compatibility
- All existing call sites keep working
- The `page_size` parameter is optional and defaults to 1000
- Code that does not consume the pagination info is unaffected (callers that do consume it unpack the tuple, see the sketch below)
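Callers that want the pagination info unpack the returned tuple; a sketch based on the orchestrator change in this commit (the path and URL are the same example values used elsewhere in this document):

```python
from ddms_compliance_suite.input_parser.parser import InputParser

parser = InputParser()
# parse_dms_spec now returns (ParsedDMSSpec, pagination_info); page_size is optional
parse_result = parser.parse_dms_spec(
    domain_mapping_path="./assets/doc/dms/domain.json",
    base_url="https://www.dev.ideas.cnpc",
    page_size=1000,
)
if parse_result and parse_result[0] is not None:
    parsed_spec, pagination_info = parse_result
```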
## Testing
### Test script
```bash
python test_pagination.py # basic pagination test
python test_pagination.py --api-server # API server integration test
```
### What to verify
1. Paged fetches return the same total number of records as a single fetch (see the sketch after this list)
2. Memory usage drops significantly
3. The pagination info accurately reflects what was fetched
4. Network errors are handled correctly
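A sketch of how point 1 could be checked, following the pattern of `test_pagination.py` (the path and URL are example values that need adjusting; comparing two page sizes is used here as a stand-in for comparing against a single fetch):

```python
from ddms_compliance_suite.input_parser.parser import InputParser

def endpoint_count(page_size: int) -> int:
    parser = InputParser()
    parsed_spec, _ = parser.parse_dms_spec(
        domain_mapping_path="./assets/doc/dms/domain.json",
        base_url="https://www.dev.ideas.cnpc",
        ignore_ssl=True,
        page_size=page_size,
    )
    return len(parsed_spec.endpoints)

# Paged fetches should see exactly the same endpoints regardless of page size
assert endpoint_count(100) == endpoint_count(1000)
```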
## Monitoring and logging
### Example log output
```
[INFO] Fetching API list with pagination (page_size=1000)
[INFO] Fetched 1000 records from page 1, total: 1000
[INFO] Fetched 1000 records from page 2, total: 2000
[INFO] Fetched 523 records from page 15, total: 14523
[INFO] Reached end of data. Total records: 14523
[INFO] DMS pagination: total_records=14523, page_size=1000, pages_fetched=15/15
```
### Performance metrics
- Total record count
- Page size
- Pages fetched
- Estimated memory usage
- Number of network requests
## Troubleshooting
### Common problems
1. **Still running out of memory**
- Reduce `page_size` (e.g. 100 or 50)
- Check for other sources of memory consumption
2. **Network timeouts**
- Increase the request timeout (see the sketch after this list)
- Check that the network connection is stable
3. **Inaccurate pagination info**
- Check the DMS API response format
- Verify that the `total` field is accurate
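For the timeout case, the `requests.get` calls in `parser.py` could be given an explicit timeout; the current code does not set one, so the values below are an assumption:

```python
import requests

# Assumed example URL; in parser.py this comes from parse_dms_spec's arguments
list_url = "https://www.dev.ideas.cnpc/api/schema/manage/schema?pageNo=1&pageSize=1000"
response = requests.get(list_url, timeout=(10, 60))  # (connect, read) timeout in seconds
response.raise_for_status()
```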
### Debugging tips
```bash
# enable verbose logging
python run_api_tests.py --dms ... --verbose
# test with a small page size
python run_api_tests.py --dms ... --page-size 10
```
## Future improvements
1. **Parallel pagination**: fetch several pages at the same time (see the sketch below)
2. **Caching**: cache pages that have already been fetched
3. **Resumable fetches**: continue from the point of interruption
4. **Compressed transfer**: reduce the amount of data sent over the network
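Parallel pagination is not implemented; a rough illustration of the idea, assuming the total page count has already been read from the first page:

```python
from concurrent.futures import ThreadPoolExecutor
import requests

def fetch_page(base_url: str, page_no: int, page_size: int = 1000) -> list:
    url = f"{base_url}/api/schema/manage/schema?pageNo={page_no}&pageSize={page_size}"
    return requests.get(url, timeout=60).json().get("data", {}).get("records", [])

def fetch_pages_in_parallel(base_url: str, total_pages: int, workers: int = 4) -> list:
    # Fetch all pages concurrently and flatten the results in page order
    with ThreadPoolExecutor(max_workers=workers) as pool:
        pages = pool.map(lambda n: fetch_page(base_url, n), range(1, total_pages + 1))
    return [record for page in pages for record in page]
```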

View File

@@ -63,6 +63,8 @@ def parse_args():
api_group.add_argument('--yapi', help='Path to the YAPI definition file')
api_group.add_argument('--swagger', help='Path to the Swagger definition file')
api_group.add_argument('--dms', help='Path to the domain mapping file for DMS service discovery')
api_group.add_argument('--page-size', type=int, default=1000,
help='DMS API page size (default 1000). Smaller values reduce memory usage but increase the number of requests')
# Filter arguments
filter_group = parser.add_argument_group('Filter options')
@@ -941,16 +943,25 @@ def main():
elif args.dms:
logger.info(f"Running tests from DMS service discovery: {args.dms}")
test_summary, parsed_spec_for_scenarios = orchestrator.run_tests_from_dms(
test_summary, parsed_spec_for_scenarios, pagination_info = orchestrator.run_tests_from_dms(
domain_mapping_path=args.dms,
categories=categories,
custom_test_cases_dir=args.custom_test_cases_dir,
ignore_ssl=args.ignore_ssl
ignore_ssl=args.ignore_ssl,
page_size=args.page_size
)
if not parsed_spec_for_scenarios: # check whether parsing succeeded
logger.error(f"Parsing from DMS service '{args.dms}' failed (reported by the orchestrator). Exiting.")
sys.exit(1)
# Log the pagination info
if pagination_info:
logger.info(f"DMS pagination: total_records={pagination_info.get('total_records', 0)}, "
f"page_size={pagination_info.get('page_size', 0)}, "
f"pages_fetched={pagination_info.get('pages_fetched', 0)}/{pagination_info.get('total_pages', 0)}")
else:
logger.warning("No pagination info was returned")
except Exception as e:
logger.error(f"An unexpected error occurred while running test cases: {e}", exc_info=True)
sys.exit(1)

test_pagination.py (new file, 135 lines)
View File

@@ -0,0 +1,135 @@
#!/usr/bin/env python3
"""
Script for testing the DMS pagination feature.
"""
import sys
import json
import logging
from pathlib import Path
# Add the project root to the import path
sys.path.insert(0, str(Path(__file__).parent))
from ddms_compliance_suite.input_parser.parser import InputParser
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
def test_pagination():
"""Test the DMS pagination feature."""
# Test parameters
domain_mapping_path = "./assets/doc/dms/domain.json" # adjust to the actual path
base_url = "https://www.dev.ideas.cnpc" # adjust to the actual URL
# Try several page sizes
page_sizes = [10, 50, 100, 1000]
parser = InputParser()
for page_size in page_sizes:
logger.info(f"\n=== Testing page size: {page_size} ===")
try:
result = parser.parse_dms_spec(
domain_mapping_path=domain_mapping_path,
base_url=base_url,
ignore_ssl=True, # ignore SSL in the test environment
page_size=page_size
)
if result and len(result) == 2:
parsed_spec, pagination_info = result
if parsed_spec:
logger.info(f"Successfully parsed {len(parsed_spec.endpoints)} API endpoints")
# Show the pagination info
logger.info("Pagination info:")
logger.info(f" page size: {pagination_info.get('page_size', 'N/A')}")
logger.info(f" total records: {pagination_info.get('total_records', 'N/A')}")
logger.info(f" total pages: {pagination_info.get('total_pages', 'N/A')}")
logger.info(f" pages fetched: {pagination_info.get('pages_fetched', 'N/A')}")
# Rough memory estimate
estimated_memory = len(str(parsed_spec.spec)) / 1024 / 1024 # MB
logger.info(f" estimated memory usage: {estimated_memory:.2f} MB")
else:
logger.error("Parsing failed: the returned parsed_spec is None")
else:
logger.error("Parsing failed: unexpected return format")
except Exception as e:
logger.error(f"Error while testing page size {page_size}: {e}")
logger.info("-" * 50)
def test_api_server_integration():
"""Test the API server integration."""
import requests
logger.info("\n=== Testing API server integration ===")
# API server configuration
api_url = "http://localhost:5050/run"
test_config = {
"dms": "./assets/doc/dms/domain.json",
"base-url": "https://www.dev.ideas.cnpc",
"page-size": 50, # test a smaller page size
"ignore-ssl": True,
"strictness-level": "CRITICAL",
"output": "./test_reports"
}
try:
logger.info("Sending test request to the API server...")
response = requests.post(api_url, json=test_config, timeout=300)
if response.status_code == 200:
result = response.json()
logger.info("API server responded successfully")
# Check the pagination info
if "pagination" in result:
pagination = result["pagination"]
logger.info("Pagination info:")
logger.info(f" page size: {pagination.get('page_size', 'N/A')}")
logger.info(f" total records: {pagination.get('total_records', 'N/A')}")
logger.info(f" total pages: {pagination.get('total_pages', 'N/A')}")
logger.info(f" pages fetched: {pagination.get('pages_fetched', 'N/A')}")
else:
logger.warning("Response did not include pagination info")
# Show the test summary
if "summary" in result:
summary = result["summary"]
logger.info(f"Test summary: total endpoints={summary.get('endpoints_total', 0)}, "
f"passed={summary.get('endpoints_passed', 0)}, "
f"failed={summary.get('endpoints_failed', 0)}")
else:
logger.error(f"API server returned an error: {response.status_code}")
logger.error(f"Response body: {response.text}")
except requests.exceptions.ConnectionError:
logger.warning("Could not connect to the API server; make sure it is running")
except Exception as e:
logger.error(f"Error while testing the API server: {e}")
if __name__ == "__main__":
logger.info("Starting DMS pagination tests")
# Basic pagination test
test_pagination()
# API server integration test (optional)
if len(sys.argv) > 1 and sys.argv[1] == "--api-server":
test_api_server_integration()
logger.info("Tests finished")