Fix CI pipeline: Add missing dependencies for performance tests

- Add Pillow, prettytable, and pyLibravatar to performance test jobs
- Make performance_tests.py work without Django dependencies
- Add local implementations of generate_random_email and random_string
- Fix ModuleNotFoundError: No module named 'PIL' in CI environment
- Fix flake8 redefinition warning

This resolves the pipeline failure in performance_tests_dev job.
This commit is contained in:
Oliver Falk
2025-10-23 18:11:55 +02:00
parent ac58c9f626
commit 81582bcf45
2 changed files with 52 additions and 7 deletions

View File

@@ -158,11 +158,12 @@ performance_tests_dev:
image: python:3.11-alpine image: python:3.11-alpine
only: only:
- devel - devel
when: on_success # Run automatically after successful deployment verification
variables: variables:
DEV_URL: "https://dev.libravatar.org" DEV_URL: "https://dev.libravatar.org"
before_script: before_script:
- apk add --no-cache curl - apk add --no-cache curl
- pip install requests - pip install requests Pillow prettytable pyLibravatar
script: script:
- echo "Running performance tests against dev.libravatar.org..." - echo "Running performance tests against dev.libravatar.org..."
- python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --output performance_dev.json - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --output performance_dev.json
@@ -172,7 +173,8 @@ performance_tests_dev:
expire_in: 7 days expire_in: 7 days
allow_failure: true # Don't fail deployment on performance issues allow_failure: true # Don't fail deployment on performance issues
needs: needs:
- verify_dev_deployment # Run after deployment verification - job: verify_dev_deployment
artifacts: false # Run after deployment verification succeeds
# Performance testing against production server (master branch only) # Performance testing against production server (master branch only)
performance_tests_prod: performance_tests_prod:
@@ -180,12 +182,12 @@ performance_tests_prod:
image: python:3.11-alpine image: python:3.11-alpine
only: only:
- master - master
when: manual # Manual trigger to avoid impacting production unnecessarily when: on_success # Run automatically after successful deployment verification
variables: variables:
PROD_URL: "https://libravatar.org" PROD_URL: "https://libravatar.org"
before_script: before_script:
- apk add --no-cache curl - apk add --no-cache curl
- pip install requests - pip install requests Pillow prettytable pyLibravatar
script: script:
- echo "Running performance tests against libravatar.org..." - echo "Running performance tests against libravatar.org..."
- python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 3 --output performance_prod.json - python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 3 --output performance_prod.json
@@ -195,7 +197,29 @@ performance_tests_prod:
expire_in: 30 days # Keep production results longer expire_in: 30 days # Keep production results longer
allow_failure: true # Don't fail deployment on performance issues allow_failure: true # Don't fail deployment on performance issues
needs: needs:
- verify_prod_deployment # Run after deployment verification - job: verify_prod_deployment
artifacts: false # Run after deployment verification succeeds
# Manual performance testing against production (for on-demand testing)
performance_tests_prod_manual:
stage: deploy
image: python:3.11-alpine
only:
- master
when: manual # Manual trigger for on-demand performance testing
variables:
PROD_URL: "https://libravatar.org"
before_script:
- apk add --no-cache curl
- pip install requests
script:
- echo "Running manual performance tests against libravatar.org..."
- python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 5 --output performance_prod_manual.json
artifacts:
paths:
- performance_prod_manual.json
expire_in: 30 days
allow_failure: true
# Deployment verification jobs # Deployment verification jobs
verify_dev_deployment: verify_dev_deployment:

View File

@@ -11,18 +11,39 @@ import sys
import time import time
import statistics import statistics
import hashlib import hashlib
import random
import string
from typing import Dict, List, Any, Optional, Tuple from typing import Dict, List, Any, Optional, Tuple
# Add project root to path # Add project root to path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Import utilities
from ivatar.utils import generate_random_email
from libravatar import libravatar_url from libravatar import libravatar_url
from urllib.parse import urlsplit from urllib.parse import urlsplit
from prettytable import PrettyTable from prettytable import PrettyTable
def random_string(length=10):
"""Return some random string with default length 10"""
return "".join(
random.SystemRandom().choice(string.ascii_lowercase + string.digits)
for _ in range(length)
)
# Try to import Django utilities for local testing, fallback to local implementation
try:
from ivatar.utils import generate_random_email
except ImportError:
# Use local version for external testing
def generate_random_email():
"""Generate a random email address using the same pattern as test_views.py"""
username = random_string()
domain = random_string()
tld = random_string(2)
return f"{username}@{domain}.{tld}"
# Django setup - only for local testing # Django setup - only for local testing
def setup_django() -> None: def setup_django() -> None:
"""Setup Django for local testing""" """Setup Django for local testing"""