Enhance the performance tests

Oliver Falk
2025-10-22 14:00:23 +02:00
parent 671ebbdae2
commit b4f224cd4d
3 changed files with 227 additions and 88 deletions


@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 Libravatar Deployment Verification Script
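
Note: the only change to this file is dropping the "# -*- coding: utf-8 -*-" cookie. On Python 3, UTF-8 is already the default source encoding (PEP 3120), so the declaration is redundant. A quick sanity check (hypothetical snippet, not part of the repository):

    #!/usr/bin/env python3
    # No coding cookie: Python 3 source files default to UTF-8 (PEP 3120).
    greeting = "Grüße von Libravatar"  # non-ASCII literal parses fine
    print(greeting)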


@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 Performance testing script for Libravatar CI/CD pipeline
@@ -29,6 +28,19 @@ def setup_django():
 class PerformanceTestRunner:
     """Main performance test runner"""

+    # Define all avatar styles and sizes to test
+    AVATAR_STYLES = [
+        "identicon",
+        "monsterid",
+        "robohash",
+        "pagan",
+        "retro",
+        "wavatar",
+        "mm",
+        "mmng",
+    ]
+    AVATAR_SIZES = [80, 256]
+
     def __init__(
         self,
         base_url="http://localhost:8000",
@@ -79,52 +91,180 @@ class PerformanceTestRunner:
print(f"Created {len(test_emails)} test users and emails") print(f"Created {len(test_emails)} test users and emails")
def test_avatar_generation_performance(self): def _generate_test_cases(self):
"""Test avatar generation performance""" """Generate test cases for all avatar styles and sizes"""
print("\n=== Avatar Generation Performance Test ===") test_cases = []
for style in self.AVATAR_STYLES:
for size in self.AVATAR_SIZES:
test_cases.append({"default": style, "size": size})
return test_cases
# Test different avatar types and sizes def _test_single_avatar_request(self, case, email_hash, use_requests=False):
test_cases = [ """Test a single avatar request - shared logic for local and remote testing"""
{"default": "identicon", "size": 80}, url_path = f"/avatar/{email_hash}"
{"default": "monsterid", "size": 80},
{"default": "robohash", "size": 80},
{"default": "identicon", "size": 256},
{"default": "monsterid", "size": 256},
]
results = []
for case in test_cases:
# Generate test hash
test_email = "perftest@example.com"
email_hash = hashlib.md5(test_email.encode()).hexdigest()
# Build URL
url = f"/avatar/{email_hash}"
params = {"d": case["default"], "s": case["size"]} params = {"d": case["default"], "s": case["size"]}
# Time the request
start_time = time.time() start_time = time.time()
response = self.client.get(url, params)
if use_requests:
# Remote testing with requests
import requests
url = f"{self.base_url}{url_path}"
try:
response = requests.get(url, params=params, timeout=10)
end_time = time.time() end_time = time.time()
duration = (end_time - start_time) * 1000
duration = (end_time - start_time) * 1000 # Convert to ms # Determine cache status from response headers
cache_detail = response.headers.get("x-cache-detail", "").lower()
age = response.headers.get("age", "0")
cache_status = "unknown"
results.append( if "cache hit" in cache_detail or int(age) > 0:
{ cache_status = "hit"
elif "cache miss" in cache_detail or age == "0":
cache_status = "miss"
return {
"test": f"{case['default']}_{case['size']}px", "test": f"{case['default']}_{case['size']}px",
"duration_ms": duration, "duration_ms": duration,
"status_code": response.status_code, "status_code": response.status_code,
"content_length": len(response.content) if response.content else 0, "content_length": len(response.content) if response.content else 0,
"success": response.status_code == 200,
"cache_status": cache_status,
"cache_detail": cache_detail,
"age": age,
} }
except Exception as e:
end_time = time.time()
duration = (end_time - start_time) * 1000
return {
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": 0,
"success": False,
"error": str(e),
"cache_status": "error",
}
else:
# Local testing with Django test client
response = self.client.get(url_path, params)
end_time = time.time()
duration = (end_time - start_time) * 1000
# Check for cache information in response headers
cache_status = "unknown"
if hasattr(response, "get") and callable(response.get):
cache_control = response.get("Cache-Control", "")
age = response.get("Age", "0")
if age and int(age) > 0:
cache_status = "hit"
elif "no-cache" in cache_control:
cache_status = "miss"
else:
cache_status = "miss" # Default assumption for first generation
return {
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": response.status_code,
"content_length": len(response.content) if response.content else 0,
"cache_status": cache_status,
"success": response.status_code == 200,
}
def _display_avatar_results(self, results):
"""Display avatar test results grouped by style with improved formatting"""
# Group results by avatar style
style_results = {}
for result in results:
style = result["test"].split("_")[0] # Extract style from test name
if style not in style_results:
style_results[style] = []
style_results[style].append(result)
# Display results grouped by style
for style in self.AVATAR_STYLES:
if style not in style_results:
continue
style_data = style_results[style]
# Calculate style averages and cache status summary
successful_results = [r for r in style_data if r.get("success", True)]
if successful_results:
avg_duration = statistics.mean(
[r["duration_ms"] for r in successful_results]
) )
print(f" {case['default']} ({case['size']}px): {duration:.2f}ms") # Determine overall cache status for this style
cache_statuses = [
r["cache_status"]
for r in successful_results
if r["cache_status"] != "unknown"
]
if not cache_statuses:
cache_summary = "unknown"
elif all(status == "hit" for status in cache_statuses):
cache_summary = "hit"
elif all(status == "miss" for status in cache_statuses):
cache_summary = "miss"
else:
cache_summary = "mixed"
print(f" {style} - {avg_duration:.2f}ms ({cache_summary})")
# Display individual size results with indentation
for result in style_data:
size = result["test"].split("_")[1] # Extract size from test name
status_icon = "" if result.get("success", True) else ""
cache_status = result["cache_status"]
if result.get("success", True):
print(
f" {size}: {result['duration_ms']:.2f}ms {status_icon} ({cache_status})"
)
else:
error_msg = result.get("error", "Failed")
print(f" {size}: ❌ {error_msg} ({cache_status})")
else:
print(f" {style} - Failed")
for result in style_data:
size = result["test"].split("_")[1]
error_msg = result.get("error", "Failed")
print(f" {size}: ❌ {error_msg}")
def test_avatar_generation_performance(self):
"""Test avatar generation performance"""
print("\n=== Avatar Generation Performance Test ===")
# Generate test cases for all avatar styles and sizes
test_cases = self._generate_test_cases()
results = []
# Generate test hash
test_email = "perftest@example.com"
email_hash = hashlib.md5(test_email.encode()).hexdigest()
for case in test_cases:
result = self._test_single_avatar_request(
case, email_hash, use_requests=False
)
results.append(result)
# Display results grouped by style
self._display_avatar_results(results)
# Calculate statistics # Calculate statistics
durations = [r["duration_ms"] for r in results] successful_results = [r for r in results if r.get("success", True)]
if successful_results:
durations = [r["duration_ms"] for r in successful_results]
avg_duration = statistics.mean(durations) avg_duration = statistics.mean(durations)
max_duration = max(durations) max_duration = max(durations)
else:
avg_duration = 0
max_duration = 0
print(f"\n Average: {avg_duration:.2f}ms") print(f"\n Average: {avg_duration:.2f}ms")
print(f" Maximum: {max_duration:.2f}ms") print(f" Maximum: {max_duration:.2f}ms")
@@ -160,6 +300,11 @@ class PerformanceTestRunner:
         successful_requests = [r for r in results if r["success"]]
         failed_requests = [r for r in results if not r["success"]]

+        # Analyze cache performance
+        cache_hits = [r for r in results if r.get("cache_status") == "hit"]
+        cache_misses = [r for r in results if r.get("cache_status") == "miss"]
+        cache_errors = [r for r in results if r.get("cache_status") == "error"]
+
         total_duration = (
             sum(r["duration_ms"] for r in results) / 1000
         )  # Convert to seconds
@@ -168,6 +313,20 @@ class PerformanceTestRunner:
print(f" Successful requests: {len(successful_requests)}/{num_requests}") print(f" Successful requests: {len(successful_requests)}/{num_requests}")
print(f" Failed requests: {len(failed_requests)}") print(f" Failed requests: {len(failed_requests)}")
# Show cache statistics if available
if cache_hits or cache_misses:
print(f" Cache hits: {len(cache_hits)}")
print(f" Cache misses: {len(cache_misses)}")
if cache_errors:
print(f" Cache errors: {len(cache_errors)}")
cache_hit_rate = (
len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
if (cache_hits or cache_misses)
else 0
)
print(f" Cache hit rate: {cache_hit_rate:.1f}%")
if successful_requests: if successful_requests:
durations = [r["duration_ms"] for r in successful_requests] durations = [r["duration_ms"] for r in successful_requests]
avg_duration = statistics.mean(durations) avg_duration = statistics.mean(durations)
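
Note: the hit rate added here is plain percentage arithmetic over the classified results; 12 hits and 4 misses, for example, give 12 / (12 + 4) * 100 = 75.0%. The same expression in isolation (the counts are hypothetical):

    cache_hits, cache_misses = [{}] * 12, [{}] * 4  # stand-ins for result dicts
    cache_hit_rate = (
        len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
        if (cache_hits or cache_misses)
        else 0
    )
    print(f"Cache hit rate: {cache_hit_rate:.1f}%")  # -> Cache hit rate: 75.0%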
@@ -212,6 +371,14 @@ class PerformanceTestRunner:
"requests_per_second": ( "requests_per_second": (
len(successful_requests) / total_duration if total_duration > 0 else 0 len(successful_requests) / total_duration if total_duration > 0 else 0
), ),
"cache_hits": len(cache_hits),
"cache_misses": len(cache_misses),
"cache_errors": len(cache_errors),
"cache_hit_rate": (
len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
if (cache_hits or cache_misses)
else 0
),
} }
def _test_remote_concurrent_load(self, num_requests): def _test_remote_concurrent_load(self, num_requests):
@@ -230,11 +397,22 @@ class PerformanceTestRunner:
                 response = requests.get(url, params=params, timeout=10)
                 end_time = time.time()

+                # Determine cache status
+                cache_detail = response.headers.get("x-cache-detail", "").lower()
+                age = response.headers.get("age", "0")
+                cache_status = "unknown"
+                if "cache hit" in cache_detail or int(age) > 0:
+                    cache_status = "hit"
+                elif "cache miss" in cache_detail or age == "0":
+                    cache_status = "miss"
+
                 return {
                     "thread_id": thread_id,
                     "duration_ms": (end_time - start_time) * 1000,
                     "status_code": response.status_code,
                     "success": response.status_code == 200,
+                    "cache_status": cache_status,
                 }
             except Exception as e:
                 end_time = time.time()
@@ -243,6 +421,7 @@ class PerformanceTestRunner:
"duration_ms": (end_time - start_time) * 1000, "duration_ms": (end_time - start_time) * 1000,
"success": False, "success": False,
"error": str(e), "error": str(e),
"cache_status": "error",
} }
results = [] results = []
@@ -283,6 +462,7 @@ class PerformanceTestRunner:
"thread_id": i, "thread_id": i,
"duration_ms": (request_end - request_start) * 1000, "duration_ms": (request_end - request_start) * 1000,
"success": len(identicon_data) > 0, "success": len(identicon_data) > 0,
"cache_status": "miss", # Direct generation is always a cache miss
} }
) )
except Exception as e: except Exception as e:
@@ -293,6 +473,7 @@ class PerformanceTestRunner:
"duration_ms": (request_end - request_start) * 1000, "duration_ms": (request_end - request_start) * 1000,
"success": False, "success": False,
"error": str(e), "error": str(e),
"cache_status": "error",
} }
) )
@@ -314,6 +495,7 @@ class PerformanceTestRunner:
"thread_id": i, "thread_id": i,
"duration_ms": (request_end - request_start) * 1000, "duration_ms": (request_end - request_start) * 1000,
"success": True, "success": True,
"cache_status": "n/a", # Database queries don't use image cache
} }
) )
except Exception as e: except Exception as e:
@@ -324,6 +506,7 @@ class PerformanceTestRunner:
"duration_ms": (request_end - request_start) * 1000, "duration_ms": (request_end - request_start) * 1000,
"success": False, "success": False,
"error": str(e), "error": str(e),
"cache_status": "error",
} }
) )
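
Note: each concurrent-load result carries a thread_id, so every worker reports its own timing and cache status. The dispatch code itself is outside this diff; a minimal sketch of how such a load could be driven with concurrent.futures (the worker body is a placeholder, not the project's code):

    import time
    from concurrent.futures import ThreadPoolExecutor

    def make_request(thread_id):
        # Placeholder worker: the real test issues an HTTP request here.
        start = time.time()
        time.sleep(0.01)  # simulate request latency
        return {
            "thread_id": thread_id,
            "duration_ms": (time.time() - start) * 1000,
            "success": True,
            "cache_status": "unknown",
        }

    with ThreadPoolExecutor(max_workers=10) as pool:
        results = list(pool.map(make_request, range(50)))

    print(f"{sum(r['success'] for r in results)}/{len(results)} succeeded")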
@@ -588,64 +771,22 @@ class PerformanceTestRunner:
"""Test avatar generation performance on remote server""" """Test avatar generation performance on remote server"""
print("\n=== Remote Avatar Performance Test ===") print("\n=== Remote Avatar Performance Test ===")
import requests # Generate test cases for all avatar styles and sizes
test_cases = self._generate_test_cases()
# Test different avatar types and sizes
test_cases = [
{"default": "identicon", "size": 80},
{"default": "monsterid", "size": 80},
{"default": "robohash", "size": 80},
{"default": "identicon", "size": 256},
{"default": "monsterid", "size": 256},
]
results = [] results = []
for case in test_cases:
# Generate test hash # Generate test hash
test_email = "perftest@example.com" test_email = "perftest@example.com"
email_hash = hashlib.md5(test_email.encode()).hexdigest() email_hash = hashlib.md5(test_email.encode()).hexdigest()
# Build URL for case in test_cases:
url = f"{self.base_url}/avatar/{email_hash}" result = self._test_single_avatar_request(
params = {"d": case["default"], "s": case["size"]} case, email_hash, use_requests=True
# Time the request
start_time = time.time()
try:
response = requests.get(url, params=params, timeout=10)
end_time = time.time()
duration = (end_time - start_time) * 1000 # Convert to ms
results.append(
{
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": response.status_code,
"content_length": (
len(response.content) if response.content else 0
),
"success": response.status_code == 200,
}
) )
results.append(result)
status = "" if response.status_code == 200 else "" # Display results grouped by style
print( self._display_avatar_results(results)
f" {case['default']} ({case['size']}px): {duration:.2f}ms {status}"
)
except Exception as e:
print(f" {case['default']} ({case['size']}px): ❌ Failed - {e}")
results.append(
{
"test": f"{case['default']}_{case['size']}px",
"duration_ms": 0,
"status_code": 0,
"success": False,
"error": str(e),
}
)
# Calculate statistics for successful requests # Calculate statistics for successful requests
successful_results = [r for r in results if r["success"]] successful_results = [r for r in results if r["success"]]
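
Note: the URL shape exercised by both the local and remote tests follows the Gravatar convention: the hex MD5 of the email address under /avatar/, plus d (default style) and s (size) query parameters. A minimal sketch of the construction:

    import hashlib

    def avatar_url(base_url, email, default="identicon", size=80):
        # The tests hash the address as-is; the Gravatar convention also
        # trims whitespace and lowercases it before hashing.
        email_hash = hashlib.md5(email.encode()).hexdigest()
        return f"{base_url}/avatar/{email_hash}?d={default}&s={size}"

    print(avatar_url("http://localhost:8000", "perftest@example.com"))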


@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 Run tests with OpenTelemetry instrumentation and export enabled, plus coverage measurement.
 This script is designed to be used with 'coverage run' command.