Enhance performance tests with comprehensive improvements

Major enhancements to scripts/performance_tests.py:

🚀 Features Added:
- Complete avatar style coverage (identicon, monsterid, robohash, pagan, retro, wavatar, mm, mmng)
- Both sizes (80px and 256px) tested for each style
- Cache hit/miss tracking and display
- Random email generation for realistic testing
- Full libravatar URL generation using official library
- Professional table output with PrettyTable

📊 Display Improvements:
- Perfect alignment with the PrettyTable library (column setup sketched after this list)
- Visual dividers between avatar styles
- Status icons (✅ success, ⚠️ mixed, ❌ failed)
- Cache status indicators (hit/miss/mixed/error)
- Email address and example URL display
- Grouped results by avatar style with averages
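
For reference, the column layout uses the standard prettytable API roughly as
shown here; the field names and alignments match the diff below, while the
sample rows are invented purely to illustrate the grouping (one average line
per style, then per-size lines):

    from prettytable import PrettyTable

    table = PrettyTable()
    table.field_names = ["Avatar Style", "Size", "Time (ms)", "Status", "Cache"]
    table.align["Avatar Style"] = "l"   # left-align style names
    table.align["Size"] = "r"           # right-align numeric columns
    table.align["Time (ms)"] = "r"
    table.align["Status"] = "c"         # center the icon columns
    table.align["Cache"] = "c"

    # Hypothetical rows, only to show the shape of the output
    table.add_row(["identicon (avg)", "", "12.34", "✅", "hit"])
    table.add_row(["", "80", "11.90", "✅", "hit"])
    table.add_row(["", "256", "12.78", "✅", "hit"])
    print(table)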

🔧 Technical Improvements:
- Integrated libravatar library for URL generation
- Replaced manual URL construction with proper library calls (before/after sketch follows this list)
- Enhanced error handling and reporting
- Added prettytable dependency to requirements.txt
- Improved code organization and maintainability
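
Before and after, the URL construction looks roughly like this; a sketch only,
with a placeholder address, but the calls mirror the diff below (libravatar_url
comes from the pyLibravatar package, urlsplit from the standard library):

    import hashlib
    from urllib.parse import urlsplit
    from libravatar import libravatar_url

    email = "someone@example.com"  # placeholder address

    # Before: hash computed by hand, default/size passed as separate params
    email_hash = hashlib.md5(email.encode()).hexdigest()
    old_path = f"/avatar/{email_hash}"
    old_params = {"d": "identicon", "s": 80}

    # After: the library builds the full URL, including the hash and query
    full_url = libravatar_url(email=email, size=80, default="identicon")
    urlobj = urlsplit(full_url)
    new_path = f"{urlobj.path}?{urlobj.query}"  # reusable against any base URL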

🎯 Testing Coverage:
- 8 avatar styles × 2 sizes = 16 test combinations (cross product sketched after this list)
- Cache performance testing with hit/miss analysis
- Concurrent load testing with cache statistics
- Both local and remote testing modes supported
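
The 16 combinations are just the cross product of styles and sizes; a minimal
sketch (the style list is taken from this commit message, and the append
mirrors _generate_test_cases() in the diff):

    AVATAR_STYLES = ["identicon", "monsterid", "robohash", "pagan",
                     "retro", "wavatar", "mm", "mmng"]
    SIZES = [80, 256]

    test_cases = []
    for style in AVATAR_STYLES:
        for size in SIZES:
            test_cases.append({"default": style, "size": size})

    assert len(test_cases) == 16  # 8 styles x 2 sizes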

The performance tests now provide comprehensive, professional output
that's easy to read and analyze, with complete coverage of all
avatar generation functionality.
Author: Oliver Falk
Date: 2025-10-23 15:26:38 +02:00
Parent: 63dd743dca
Commit: 202ae44346
2 changed files with 124 additions and 49 deletions

requirements.txt

@@ -35,6 +35,7 @@ opentelemetry-instrumentation-urllib3>=0.42b0
 opentelemetry-sdk>=1.20.0
 Pillow
 pip
+prettytable
 prometheus-client>=0.20.0
 psycopg2-binary
 py3dns

scripts/performance_tests.py

@@ -15,6 +15,12 @@ import hashlib
 # Add project root to path
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

+# Import utilities
+from ivatar.utils import generate_random_email
+from libravatar import libravatar_url
+from urllib.parse import urlsplit
+from prettytable import PrettyTable
+
 # Django setup - only for local testing
 def setup_django():
@@ -99,10 +105,16 @@ class PerformanceTestRunner:
                 test_cases.append({"default": style, "size": size})
         return test_cases

-    def _test_single_avatar_request(self, case, email_hash, use_requests=False):
+    def _test_single_avatar_request(self, case, email, use_requests=False):
         """Test a single avatar request - shared logic for local and remote testing"""
-        url_path = f"/avatar/{email_hash}"
-        params = {"d": case["default"], "s": case["size"]}
+        # Use libravatar library to generate the URL
+        full_url = libravatar_url(
+            email=email, size=case["size"], default=case["default"]
+        )
+
+        # Extract path and query from the full URL
+        urlobj = urlsplit(full_url)
+        url_path = f"{urlobj.path}?{urlobj.query}"

         start_time = time.time()
@@ -112,7 +124,7 @@ class PerformanceTestRunner:
             url = f"{self.base_url}{url_path}"

             try:
-                response = requests.get(url, params=params, timeout=10)
+                response = requests.get(url, timeout=10)
                 end_time = time.time()
                 duration = (end_time - start_time) * 1000
@@ -135,6 +147,8 @@ class PerformanceTestRunner:
                     "cache_status": cache_status,
                     "cache_detail": cache_detail,
                     "age": age,
+                    "full_url": full_url,
+                    "email": email,
                 }
             except Exception as e:
                 end_time = time.time()
@@ -146,10 +160,12 @@ class PerformanceTestRunner:
                     "success": False,
                     "error": str(e),
                     "cache_status": "error",
+                    "full_url": full_url,
+                    "email": email,
                 }
         else:
             # Local testing with Django test client
-            response = self.client.get(url_path, params)
+            response = self.client.get(url_path)
             end_time = time.time()
             duration = (end_time - start_time) * 1000
@@ -172,10 +188,12 @@ class PerformanceTestRunner:
                 "content_length": len(response.content) if response.content else 0,
                 "cache_status": cache_status,
                 "success": response.status_code == 200,
+                "full_url": full_url,
+                "email": email,
             }

     def _display_avatar_results(self, results):
-        """Display avatar test results grouped by style with improved formatting"""
+        """Display avatar test results using prettytable for perfect alignment"""
         # Group results by avatar style
         style_results = {}
         for result in results:
@@ -184,21 +202,32 @@ class PerformanceTestRunner:
                 style_results[style] = []
             style_results[style].append(result)

-        # Display results grouped by style
-        for style in self.AVATAR_STYLES:
-            if style not in style_results:
-                continue
-
+        # Create table
+        table = PrettyTable()
+        table.field_names = ["Avatar Style", "Size", "Time (ms)", "Status", "Cache"]
+        table.align["Avatar Style"] = "l"
+        table.align["Size"] = "r"
+        table.align["Time (ms)"] = "r"
+        table.align["Status"] = "c"
+        table.align["Cache"] = "c"
+
+        # Add data to table
+        styles_with_data = [
+            style for style in self.AVATAR_STYLES if style in style_results
+        ]
+        for i, style in enumerate(styles_with_data):
             style_data = style_results[style]
-
-            # Calculate style averages and cache status summary
             successful_results = [r for r in style_data if r.get("success", True)]
+            failed_results = [r for r in style_data if not r.get("success", True)]

             if successful_results:
+                # Calculate average
                 avg_duration = statistics.mean(
                     [r["duration_ms"] for r in successful_results]
                 )

-                # Determine overall cache status for this style
+                # Determine overall cache status
                 cache_statuses = [
                     r["cache_status"]
                     for r in successful_results
@@ -213,27 +242,57 @@ class PerformanceTestRunner:
                 else:
                     cache_summary = "mixed"

-                print(f" {style} - {avg_duration:.2f}ms ({cache_summary})")
+                # Determine status icon for average line
+                if len(failed_results) == 0:
+                    avg_status_icon = "✅"  # All successful
+                elif len(successful_results) == 0:
+                    avg_status_icon = "❌"  # All failed
+                else:
+                    avg_status_icon = "⚠️"  # Mixed results

-                # Display individual size results with indentation
+                # Add average row
+                table.add_row(
+                    [
+                        f"{style} (avg)",
+                        "",
+                        f"{avg_duration:.2f}",
+                        avg_status_icon,
+                        cache_summary,
+                    ]
+                )
+
+                # Add individual size rows
                 for result in style_data:
                     size = result["test"].split("_")[1]  # Extract size from test name
                     status_icon = "✅" if result.get("success", True) else "❌"
                     cache_status = result["cache_status"]

                     if result.get("success", True):
-                        print(
-                            f" {size}: {result['duration_ms']:.2f}ms {status_icon} ({cache_status})"
+                        table.add_row(
+                            [
+                                "",
+                                size,
+                                f"{result['duration_ms']:.2f}",
+                                status_icon,
+                                cache_status,
+                            ]
                         )
                     else:
                         error_msg = result.get("error", "Failed")
-                        print(f" {size}: ❌ {error_msg} ({cache_status})")
+                        table.add_row(["", size, error_msg, status_icon, cache_status])
             else:
-                print(f" {style} - Failed")
+                # All requests failed
+                table.add_row([f"{style} (avg)", "", "Failed", "❌", "error"])
                 for result in style_data:
                     size = result["test"].split("_")[1]
                     error_msg = result.get("error", "Failed")
-                    print(f" {size}: ❌ {error_msg}")
+                    table.add_row(["", size, error_msg, "❌", "error"])
+
+            # Add divider line between styles (except after the last style)
+            if i < len(styles_with_data) - 1:
+                table.add_row(["-" * 15, "-" * 5, "-" * 9, "-" * 6, "-" * 5])
+
+        print(table)

     def test_avatar_generation_performance(self):
         """Test avatar generation performance"""
@@ -243,16 +302,20 @@ class PerformanceTestRunner:
         test_cases = self._generate_test_cases()
         results = []

-        # Generate test hash
-        test_email = "perftest@example.com"
-        email_hash = hashlib.md5(test_email.encode()).hexdigest()
+        # Generate random email for testing
+        test_email = generate_random_email()
+        print(f" Testing with email: {test_email}")

         for case in test_cases:
             result = self._test_single_avatar_request(
-                case, email_hash, use_requests=False
+                case, test_email, use_requests=False
             )
             results.append(result)

+        # Show example URL from first result
+        if results:
+            print(f" Example URL: {results[0]['full_url']}")
+
         # Display results grouped by style
         self._display_avatar_results(results)
@@ -387,14 +450,17 @@ class PerformanceTestRunner:
         from concurrent.futures import ThreadPoolExecutor, as_completed

         def make_remote_request(thread_id):
-            test_email = f"perftest{thread_id % 10}@example.com"
-            email_hash = hashlib.md5(test_email.encode()).hexdigest()
-            url = f"{self.base_url}/avatar/{email_hash}"
-            params = {"d": "identicon", "s": 80}
+            test_email = generate_random_email()
+
+            # Use libravatar library to generate the URL
+            full_url = libravatar_url(email=test_email, size=80, default="identicon")
+            urlobj = urlsplit(full_url)
+            url_path = f"{urlobj.path}?{urlobj.query}"
+            url = f"{self.base_url}{url_path}"

             start_time = time.time()
             try:
-                response = requests.get(url, params=params, timeout=10)
+                response = requests.get(url, timeout=10)
                 end_time = time.time()

                 # Determine cache status
@@ -448,7 +514,7 @@ class PerformanceTestRunner:
         import Identicon

         for i in range(num_requests):
-            test_email = f"perftest{i % 10}@example.com"
+            test_email = generate_random_email()
             email_hash = hashlib.md5(test_email.encode()).hexdigest()

             request_start = time.time()
@@ -570,18 +636,17 @@ class PerformanceTestRunner:
         print("\n=== Cache Performance Test ===")

-        # Use an actual email address that exists in the system
-        test_email = "dev@libravatar.org"
-        email_hash = hashlib.md5(test_email.encode()).hexdigest()
+        # Generate a random email address for cache testing
+        test_email = generate_random_email()

         print(f" Testing with: {test_email}")

         if self.remote_testing:
             first_duration, second_duration = self._test_remote_cache_performance(
-                email_hash
+                test_email
             )
         else:
             first_duration, second_duration = self._test_local_cache_performance(
-                email_hash
+                test_email
             )

         print(f" First request: {first_duration:.2f}ms")
@@ -636,16 +701,19 @@ class PerformanceTestRunner:
             "cache_headers": getattr(self, "cache_info", {}),
         }

-    def _test_remote_cache_performance(self, email_hash):
+    def _test_remote_cache_performance(self, email):
         """Test cache performance against remote server"""
         import requests

-        url = f"{self.base_url}/avatar/{email_hash}"
-        params = {"d": "identicon", "s": 80}
+        # Use libravatar library to generate the URL
+        full_url = libravatar_url(email=email, size=80, default="identicon")
+        urlobj = urlsplit(full_url)
+        url_path = f"{urlobj.path}?{urlobj.query}"
+        url = f"{self.base_url}{url_path}"

         # First request (should be cache miss or fresh)
         start_time = time.time()
-        response1 = requests.get(url, params=params, timeout=10)
+        response1 = requests.get(url, timeout=10)
         first_duration = (time.time() - start_time) * 1000

         # Check first request headers
@@ -663,7 +731,7 @@ class PerformanceTestRunner:
         # Second request (should be cache hit)
         start_time = time.time()
-        response2 = requests.get(url, params=params, timeout=10)
+        response2 = requests.get(url, timeout=10)
         second_duration = (time.time() - start_time) * 1000

         # Check second request headers
@@ -708,19 +776,21 @@ class PerformanceTestRunner:
         return first_duration, second_duration

-    def _test_local_cache_performance(self, email_hash):
+    def _test_local_cache_performance(self, email):
         """Test cache performance locally"""
-        url = f"/avatar/{email_hash}"
-        params = {"d": "identicon", "s": 80}
+        # Use libravatar library to generate the URL
+        full_url = libravatar_url(email=email, size=80, default="identicon")
+        urlobj = urlsplit(full_url)
+        url_path = f"{urlobj.path}?{urlobj.query}"

         # First request (cache miss)
         start_time = time.time()
-        self.client.get(url, params)
+        self.client.get(url_path)
         first_duration = (time.time() - start_time) * 1000

         # Second request (should be cache hit)
         start_time = time.time()
-        self.client.get(url, params)
+        self.client.get(url_path)
         second_duration = (time.time() - start_time) * 1000

         return first_duration, second_duration
@@ -775,16 +845,20 @@ class PerformanceTestRunner:
         test_cases = self._generate_test_cases()
         results = []

-        # Generate test hash
-        test_email = "perftest@example.com"
-        email_hash = hashlib.md5(test_email.encode()).hexdigest()
+        # Generate random email for testing
+        test_email = generate_random_email()
+        print(f" Testing with email: {test_email}")

         for case in test_cases:
             result = self._test_single_avatar_request(
-                case, email_hash, use_requests=True
+                case, test_email, use_requests=True
             )
             results.append(result)

+        # Show example URL from first result
+        if results:
+            print(f" Example URL: {results[0]['full_url']}")
+
         # Display results grouped by style
         self._display_avatar_results(results)