diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d1cda47..0f5cb7b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -166,7 +166,7 @@ performance_tests_dev:
     - pip install requests Pillow prettytable pyLibravatar dnspython py3dns
   script:
     - echo "Running performance tests against dev.libravatar.org..."
-    - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --avatar-threshold 2500 --response-threshold 2500 --p95-threshold 5000 --output performance_dev.json
+    - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --avatar-threshold 2500 --response-threshold 2500 --p95-threshold 5000 --ignore-cache-warnings --output performance_dev.json
   artifacts:
     paths:
       - performance_dev.json
diff --git a/scripts/performance_tests.py b/scripts/performance_tests.py
index cab52aa..0f14696 100644
--- a/scripts/performance_tests.py
+++ b/scripts/performance_tests.py
@@ -831,7 +831,7 @@ class PerformanceTestRunner:
 
         return first_duration, second_duration
 
-    def run_all_tests(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000) -> Optional[Dict[str, Any]]:
+    def run_all_tests(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000, ignore_cache_warnings: bool = False) -> Optional[Dict[str, Any]]:
         """Run all performance tests"""
         print("Starting Libravatar Performance Tests")
         print("=" * 50)
@@ -865,7 +865,7 @@ class PerformanceTestRunner:
         print(f"Performance tests completed in {total_duration:.2f}s")
 
         # Overall assessment
-        self.assess_overall_performance(avatar_threshold, response_threshold, p95_threshold)
+        self.assess_overall_performance(avatar_threshold, response_threshold, p95_threshold, ignore_cache_warnings)
 
         return self.results
 
@@ -928,7 +928,7 @@ class PerformanceTestRunner:
             "success_rate": len(successful_results) / len(results) if results else 0,
         }
 
-    def assess_overall_performance(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000) -> bool:
+    def assess_overall_performance(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000, ignore_cache_warnings: bool = False) -> bool:
         """Provide overall performance assessment"""
         print("\n=== OVERALL PERFORMANCE ASSESSMENT ===")
 
@@ -947,7 +947,7 @@ class PerformanceTestRunner:
             warnings.append(f"{failed} requests failed under concurrent load")
 
         # Check cache performance
-        if "cache_performance" in self.results:
+        if "cache_performance" in self.results and not ignore_cache_warnings:
             cache_working = self.results["cache_performance"].get(
                 "cache_working", False
             )
@@ -1018,6 +1018,11 @@ def main() -> Optional[Dict[str, Any]]:
         default=2000,
         help="95th percentile threshold in ms (default: 2000ms, use 5000 for dev environments)",
     )
+    parser.add_argument(
+        "--ignore-cache-warnings",
+        action="store_true",
+        help="Don't fail on cache performance warnings (useful for dev environments)",
+    )
 
     args = parser.parse_args()
 
@@ -1034,7 +1039,7 @@ def main() -> Optional[Dict[str, Any]]:
         remote_testing=remote_testing,
     )
 
-    results = runner.run_all_tests(args.avatar_threshold, args.response_threshold, args.p95_threshold)
+    results = runner.run_all_tests(args.avatar_threshold, args.response_threshold, args.p95_threshold, args.ignore_cache_warnings)
 
     if args.output and results:
         import json