diff --git a/scripts/check_deployment.py b/scripts/check_deployment.py
index d632a55..6fcbe4e 100755
--- a/scripts/check_deployment.py
+++ b/scripts/check_deployment.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 Libravatar Deployment Verification Script
diff --git a/scripts/performance_tests.py b/scripts/performance_tests.py
index 485543e..3c025e0 100644
--- a/scripts/performance_tests.py
+++ b/scripts/performance_tests.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 Performance testing script for Libravatar CI/CD pipeline
@@ -29,6 +28,19 @@ def setup_django():
 class PerformanceTestRunner:
     """Main performance test runner"""
 
+    # Define all avatar styles and sizes to test
+    AVATAR_STYLES = [
+        "identicon",
+        "monsterid",
+        "robohash",
+        "pagan",
+        "retro",
+        "wavatar",
+        "mm",
+        "mmng",
+    ]
+    AVATAR_SIZES = [80, 256]
+
     def __init__(
         self,
         base_url="http://localhost:8000",
@@ -79,52 +91,180 @@ class PerformanceTestRunner:
         print(f"Created {len(test_emails)} test users and emails")
 
-    def test_avatar_generation_performance(self):
-        """Test avatar generation performance"""
-        print("\n=== Avatar Generation Performance Test ===")
+    def _generate_test_cases(self):
+        """Generate test cases for all avatar styles and sizes"""
+        test_cases = []
+        for style in self.AVATAR_STYLES:
+            for size in self.AVATAR_SIZES:
+                test_cases.append({"default": style, "size": size})
+        return test_cases
 
-        # Test different avatar types and sizes
-        test_cases = [
-            {"default": "identicon", "size": 80},
-            {"default": "monsterid", "size": 80},
-            {"default": "robohash", "size": 80},
-            {"default": "identicon", "size": 256},
-            {"default": "monsterid", "size": 256},
-        ]
+    def _test_single_avatar_request(self, case, email_hash, use_requests=False):
+        """Test a single avatar request - shared logic for local and remote testing"""
+        url_path = f"/avatar/{email_hash}"
+        params = {"d": case["default"], "s": case["size"]}
 
-        results = []
+        start_time = time.time()
 
-        for case in test_cases:
-            # Generate test hash
-            test_email = "perftest@example.com"
-            email_hash = hashlib.md5(test_email.encode()).hexdigest()
+        if use_requests:
+            # Remote testing with requests
+            import requests
 
-            # Build URL
-            url = f"/avatar/{email_hash}"
-            params = {"d": case["default"], "s": case["size"]}
+            url = f"{self.base_url}{url_path}"
+            try:
+                response = requests.get(url, params=params, timeout=10)
+                end_time = time.time()
+                duration = (end_time - start_time) * 1000
 
-            # Time the request
-            start_time = time.time()
-            response = self.client.get(url, params)
-            end_time = time.time()
+                # Determine cache status from response headers
+                cache_detail = response.headers.get("x-cache-detail", "").lower()
+                age = response.headers.get("age", "0")
+                cache_status = "unknown"
 
-            duration = (end_time - start_time) * 1000  # Convert to ms
+                if "cache hit" in cache_detail or int(age) > 0:
+                    cache_status = "hit"
+                elif "cache miss" in cache_detail or age == "0":
+                    cache_status = "miss"
 
-            results.append(
-                {
+                return {
                     "test": f"{case['default']}_{case['size']}px",
                     "duration_ms": duration,
                     "status_code": response.status_code,
                     "content_length": len(response.content) if response.content else 0,
+                    "success": response.status_code == 200,
+                    "cache_status": cache_status,
+                    "cache_detail": cache_detail,
+                    "age": age,
                 }
-            )
+            except Exception as e:
+                end_time = time.time()
+                duration = (end_time - start_time) * 1000
+                return {
+                    "test": f"{case['default']}_{case['size']}px",
+                    "duration_ms": duration,
+                    "status_code": 0,
+                    "success": False,
+                    "error": str(e),
+                    "cache_status": "error",
+                }
+        else:
+            # Local testing with Django test client
+            response = self.client.get(url_path, params)
+            end_time = time.time()
+            duration = (end_time - start_time) * 1000
 
-            print(f"  {case['default']} ({case['size']}px): {duration:.2f}ms")
+            # Check for cache information in response headers
+            cache_status = "unknown"
+            if hasattr(response, "get") and callable(response.get):
+                cache_control = response.get("Cache-Control", "")
+                age = response.get("Age", "0")
+                if age and int(age) > 0:
+                    cache_status = "hit"
+                elif "no-cache" in cache_control:
+                    cache_status = "miss"
+                else:
+                    cache_status = "miss"  # Default assumption for first generation
+
+            return {
+                "test": f"{case['default']}_{case['size']}px",
+                "duration_ms": duration,
+                "status_code": response.status_code,
+                "content_length": len(response.content) if response.content else 0,
+                "cache_status": cache_status,
+                "success": response.status_code == 200,
+            }
+
+    def _display_avatar_results(self, results):
+        """Display avatar test results grouped by style with improved formatting"""
+        # Group results by avatar style
+        style_results = {}
+        for result in results:
+            style = result["test"].split("_")[0]  # Extract style from test name
+            if style not in style_results:
+                style_results[style] = []
+            style_results[style].append(result)
+
+        # Display results grouped by style
+        for style in self.AVATAR_STYLES:
+            if style not in style_results:
+                continue
+
+            style_data = style_results[style]
+
+            # Calculate style averages and cache status summary
+            successful_results = [r for r in style_data if r.get("success", True)]
+            if successful_results:
+                avg_duration = statistics.mean(
+                    [r["duration_ms"] for r in successful_results]
+                )
+
+                # Determine overall cache status for this style
+                cache_statuses = [
+                    r["cache_status"]
+                    for r in successful_results
+                    if r["cache_status"] != "unknown"
+                ]
+                if not cache_statuses:
+                    cache_summary = "unknown"
+                elif all(status == "hit" for status in cache_statuses):
+                    cache_summary = "hit"
+                elif all(status == "miss" for status in cache_statuses):
+                    cache_summary = "miss"
+                else:
+                    cache_summary = "mixed"
+
+                print(f"  {style} - {avg_duration:.2f}ms ({cache_summary})")
+
+                # Display individual size results with indentation
+                for result in style_data:
+                    size = result["test"].split("_")[1]  # Extract size from test name
+                    status_icon = "✅" if result.get("success", True) else "❌"
+                    cache_status = result["cache_status"]
+
+                    if result.get("success", True):
+                        print(
+                            f"    {size}: {result['duration_ms']:.2f}ms {status_icon} ({cache_status})"
+                        )
+                    else:
+                        error_msg = result.get("error", "Failed")
+                        print(f"    {size}: ❌ {error_msg} ({cache_status})")
+            else:
+                print(f"  {style} - Failed")
+                for result in style_data:
+                    size = result["test"].split("_")[1]
+                    error_msg = result.get("error", "Failed")
+                    print(f"    {size}: ❌ {error_msg}")
+
+    def test_avatar_generation_performance(self):
+        """Test avatar generation performance"""
+        print("\n=== Avatar Generation Performance Test ===")
+
+        # Generate test cases for all avatar styles and sizes
+        test_cases = self._generate_test_cases()
+        results = []
+
+        # Generate test hash
+        test_email = "perftest@example.com"
+        email_hash = hashlib.md5(test_email.encode()).hexdigest()
+
+        for case in test_cases:
+            result = self._test_single_avatar_request(
+                case, email_hash, use_requests=False
+            )
+            results.append(result)
+
+        # Display results grouped by style
+        self._display_avatar_results(results)
 
         # Calculate statistics
-        durations = [r["duration_ms"] for r in results]
-        avg_duration = statistics.mean(durations)
-        max_duration = max(durations)
+        successful_results = [r for r in results if r.get("success", True)]
+        if successful_results:
+            durations = [r["duration_ms"] for r in successful_results]
+            avg_duration = statistics.mean(durations)
+            max_duration = max(durations)
+        else:
+            avg_duration = 0
+            max_duration = 0
 
         print(f"\n  Average: {avg_duration:.2f}ms")
         print(f"  Maximum: {max_duration:.2f}ms")
@@ -160,6 +300,11 @@ class PerformanceTestRunner:
         successful_requests = [r for r in results if r["success"]]
         failed_requests = [r for r in results if not r["success"]]
 
+        # Analyze cache performance
+        cache_hits = [r for r in results if r.get("cache_status") == "hit"]
+        cache_misses = [r for r in results if r.get("cache_status") == "miss"]
+        cache_errors = [r for r in results if r.get("cache_status") == "error"]
+
         total_duration = (
             sum(r["duration_ms"] for r in results) / 1000
         )  # Convert to seconds
@@ -168,6 +313,20 @@ class PerformanceTestRunner:
         print(f"  Successful requests: {len(successful_requests)}/{num_requests}")
         print(f"  Failed requests: {len(failed_requests)}")
 
+        # Show cache statistics if available
+        if cache_hits or cache_misses:
+            print(f"  Cache hits: {len(cache_hits)}")
+            print(f"  Cache misses: {len(cache_misses)}")
+            if cache_errors:
+                print(f"  Cache errors: {len(cache_errors)}")
+
+            cache_hit_rate = (
+                len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
+                if (cache_hits or cache_misses)
+                else 0
+            )
+            print(f"  Cache hit rate: {cache_hit_rate:.1f}%")
+
         if successful_requests:
             durations = [r["duration_ms"] for r in successful_requests]
             avg_duration = statistics.mean(durations)
@@ -212,6 +371,14 @@ class PerformanceTestRunner:
             "requests_per_second": (
                 len(successful_requests) / total_duration if total_duration > 0 else 0
            ),
+            "cache_hits": len(cache_hits),
+            "cache_misses": len(cache_misses),
+            "cache_errors": len(cache_errors),
+            "cache_hit_rate": (
+                len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
+                if (cache_hits or cache_misses)
+                else 0
+            ),
         }
 
     def _test_remote_concurrent_load(self, num_requests):
@@ -230,11 +397,22 @@
                 response = requests.get(url, params=params, timeout=10)
                 end_time = time.time()
 
+                # Determine cache status
+                cache_detail = response.headers.get("x-cache-detail", "").lower()
+                age = response.headers.get("age", "0")
+                cache_status = "unknown"
+
+                if "cache hit" in cache_detail or int(age) > 0:
+                    cache_status = "hit"
+                elif "cache miss" in cache_detail or age == "0":
+                    cache_status = "miss"
+
                 return {
                     "thread_id": thread_id,
                     "duration_ms": (end_time - start_time) * 1000,
                     "status_code": response.status_code,
                     "success": response.status_code == 200,
+                    "cache_status": cache_status,
                 }
             except Exception as e:
                 end_time = time.time()
@@ -243,6 +421,7 @@
                     "duration_ms": (end_time - start_time) * 1000,
                     "success": False,
                     "error": str(e),
+                    "cache_status": "error",
                 }
 
         results = []
@@ -283,6 +462,7 @@
                         "thread_id": i,
                         "duration_ms": (request_end - request_start) * 1000,
                         "success": len(identicon_data) > 0,
+                        "cache_status": "miss",  # Direct generation is always a cache miss
                     }
                 )
             except Exception as e:
@@ -293,6 +473,7 @@
                         "duration_ms": (request_end - request_start) * 1000,
                         "success": False,
                         "error": str(e),
+                        "cache_status": "error",
                     }
                 )
 
@@ -314,6 +495,7 @@
                         "thread_id": i,
                         "duration_ms": (request_end - request_start) * 1000,
                         "success": True,
+                        "cache_status": "n/a",  # Database queries don't use image cache
                     }
                 )
             except Exception as e:
@@ -324,6 +506,7 @@
                         "duration_ms": (request_end - request_start) * 1000,
                         "success": False,
                         "error": str(e),
+                        "cache_status": "error",
                     }
                 )
 
@@ -588,64 +771,22 @@ class PerformanceTestRunner:
         """Test avatar generation performance on remote server"""
         print("\n=== Remote Avatar Performance Test ===")
 
-        import requests
-
-        # Test different avatar types and sizes
-        test_cases = [
-            {"default": "identicon", "size": 80},
-            {"default": "monsterid", "size": 80},
-            {"default": "robohash", "size": 80},
-            {"default": "identicon", "size": 256},
-            {"default": "monsterid", "size": 256},
-        ]
-
+        # Generate test cases for all avatar styles and sizes
+        test_cases = self._generate_test_cases()
         results = []
 
+        # Generate test hash
+        test_email = "perftest@example.com"
+        email_hash = hashlib.md5(test_email.encode()).hexdigest()
+
         for case in test_cases:
-            # Generate test hash
-            test_email = "perftest@example.com"
-            email_hash = hashlib.md5(test_email.encode()).hexdigest()
+            result = self._test_single_avatar_request(
+                case, email_hash, use_requests=True
+            )
+            results.append(result)
 
-            # Build URL
-            url = f"{self.base_url}/avatar/{email_hash}"
-            params = {"d": case["default"], "s": case["size"]}
-
-            # Time the request
-            start_time = time.time()
-            try:
-                response = requests.get(url, params=params, timeout=10)
-                end_time = time.time()
-
-                duration = (end_time - start_time) * 1000  # Convert to ms
-
-                results.append(
-                    {
-                        "test": f"{case['default']}_{case['size']}px",
-                        "duration_ms": duration,
-                        "status_code": response.status_code,
-                        "content_length": (
-                            len(response.content) if response.content else 0
-                        ),
-                        "success": response.status_code == 200,
-                    }
-                )
-
-                status = "✅" if response.status_code == 200 else "❌"
-                print(
-                    f"  {case['default']} ({case['size']}px): {duration:.2f}ms {status}"
-                )
-
-            except Exception as e:
-                print(f"  {case['default']} ({case['size']}px): ❌ Failed - {e}")
-                results.append(
-                    {
-                        "test": f"{case['default']}_{case['size']}px",
-                        "duration_ms": 0,
-                        "status_code": 0,
-                        "success": False,
-                        "error": str(e),
-                    }
-                )
+        # Display results grouped by style
+        self._display_avatar_results(results)
 
         # Calculate statistics for successful requests
         successful_results = [r for r in results if r["success"]]
diff --git a/scripts/run_tests_with_coverage.py b/scripts/run_tests_with_coverage.py
index 73210c5..40e1308 100755
--- a/scripts/run_tests_with_coverage.py
+++ b/scripts/run_tests_with_coverage.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 """
 Run tests with OpenTelemetry instrumentation and export enabled, plus coverage measurement.
 This script is designed to be used with 'coverage run' command.