Mirror of https://git.linux-kernel.at/oliver/ivatar.git
Make all performance thresholds configurable for dev environment
- Add --response-threshold and --p95-threshold parameters
- Dev environment now uses relaxed thresholds:
  * Avatar generation: 2500ms (vs 1000ms prod)
  * Response time: 2500ms (vs 1000ms prod)
  * 95th percentile: 5000ms (vs 2000ms prod)
- Fixes CI failures due to dev environment being slower than production
- Production maintains strict performance standards
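For context, here is a minimal, self-contained sketch of how the new flags are meant to flow from the command line into the test runner. This is an illustrative stand-in for scripts/performance_tests.py, not the actual implementation; the class is a stub and the measured durations are made-up placeholders.

    import argparse

    class PerformanceTestRunner:
        """Illustrative stub; the real runner measures live requests."""

        def test_concurrent_load(self, response_threshold: int = 1000, p95_threshold: int = 2000) -> None:
            # Placeholder measurements in ms; the real test records actual request timings.
            avg_duration, p95_duration = 1800, 4200
            if p95_duration > p95_threshold:
                print(f" ⚠️ WARNING: 95th percentile response time exceeds {p95_threshold}ms")
            elif avg_duration > response_threshold:
                print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms under load")
            else:
                print(" ✅ Load handling is good")

        def run_all_tests(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000) -> None:
            # Thresholds only change the pass/fail limits, not the measurements themselves.
            self.test_concurrent_load(response_threshold, p95_threshold)

    def main() -> None:
        parser = argparse.ArgumentParser()
        parser.add_argument("--avatar-threshold", type=int, default=1000)
        parser.add_argument("--response-threshold", type=int, default=1000)  # dev CI passes 2500
        parser.add_argument("--p95-threshold", type=int, default=2000)       # dev CI passes 5000
        args = parser.parse_args()
        PerformanceTestRunner().run_all_tests(
            args.avatar_threshold, args.response_threshold, args.p95_threshold
        )

    if __name__ == "__main__":
        main()

With this wiring the dev CI job can invoke the script with --response-threshold 2500 --p95-threshold 5000 while production keeps the stricter defaults.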
@@ -166,7 +166,7 @@ performance_tests_dev:
     - pip install requests Pillow prettytable pyLibravatar dnspython py3dns
   script:
     - echo "Running performance tests against dev.libravatar.org..."
-    - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --avatar-threshold 2500 --output performance_dev.json
+    - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --avatar-threshold 2500 --response-threshold 2500 --p95-threshold 5000 --output performance_dev.json
   artifacts:
     paths:
       - performance_dev.json
@@ -380,7 +380,7 @@ class PerformanceTestRunner:
             "results": results,
         }

-    def test_concurrent_load(self) -> None:
+    def test_concurrent_load(self, response_threshold: int = 1000, p95_threshold: int = 2000) -> None:
         """Test concurrent load handling"""
         print("\n=== Concurrent Load Test ===")

@@ -448,10 +448,10 @@ class PerformanceTestRunner:
             # Performance evaluation
             if len(failed_requests) > 0:
                 print(" ⚠️ WARNING: Some operations failed under load")
-            elif p95_duration > 2000: # 2 seconds
-                print(" ⚠️ WARNING: 95th percentile response time exceeds 2s")
-            elif avg_duration > 1000: # 1 second
-                print(" ⚠️ CAUTION: Average response time exceeds 1s under load")
+            elif p95_duration > p95_threshold:
+                print(f" ⚠️ WARNING: 95th percentile response time exceeds {p95_threshold}ms")
+            elif avg_duration > response_threshold:
+                print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms under load")
             else:
                 print(" ✅ Load handling is good")
         else:
@@ -831,7 +831,7 @@ class PerformanceTestRunner:

         return first_duration, second_duration

-    def run_all_tests(self, avatar_threshold: int = 1000) -> Optional[Dict[str, Any]]:
+    def run_all_tests(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000) -> Optional[Dict[str, Any]]:
         """Run all performance tests"""
         print("Starting Libravatar Performance Tests")
         print("=" * 50)
@@ -846,14 +846,14 @@ class PerformanceTestRunner:
             # Run tests based on mode
             if self.remote_testing:
                 print("🌐 Running remote server tests...")
-                self.test_remote_avatar_performance()
+                self.test_remote_avatar_performance(response_threshold)
             else:
                 print("🏠 Running local tests...")
                 self.test_avatar_generation_performance()
                 self.test_database_performance()

             # Always test concurrent load
-            self.test_concurrent_load()
+            self.test_concurrent_load(response_threshold, p95_threshold)

             # Test cache performance if enabled
             self.test_cache_performance()
@@ -865,7 +865,7 @@ class PerformanceTestRunner:
             print(f"Performance tests completed in {total_duration:.2f}s")

             # Overall assessment
-            self.assess_overall_performance(avatar_threshold)
+            self.assess_overall_performance(avatar_threshold, response_threshold, p95_threshold)

             return self.results

@@ -873,7 +873,7 @@ class PerformanceTestRunner:
             print(f"Performance test failed: {e}")
             return None

-    def test_remote_avatar_performance(self) -> None:
+    def test_remote_avatar_performance(self, response_threshold: int = 1000) -> None:
         """Test avatar generation performance on remote server"""
         print("\n=== Remote Avatar Performance Test ===")

@@ -910,10 +910,10 @@ class PerformanceTestRunner:
             print(f" Success rate: {len(successful_results)}/{len(results)}")

             # Performance thresholds for remote testing
-            if avg_duration > 2000: # 2 seconds
-                print(" ⚠️ WARNING: Average response time exceeds 2s")
-            elif avg_duration > 1000: # 1 second
-                print(" ⚠️ CAUTION: Average response time exceeds 1s")
+            if avg_duration > (response_threshold * 2): # 2x threshold for warning
+                print(f" ⚠️ WARNING: Average response time exceeds {response_threshold * 2}ms")
+            elif avg_duration > response_threshold:
+                print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms")
             else:
                 print(" ✅ Remote avatar performance is good")
         else:
@@ -928,7 +928,7 @@ class PerformanceTestRunner:
             "success_rate": len(successful_results) / len(results) if results else 0,
         }

-    def assess_overall_performance(self, avatar_threshold: int = 1000) -> bool:
+    def assess_overall_performance(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000) -> bool:
         """Provide overall performance assessment"""
         print("\n=== OVERALL PERFORMANCE ASSESSMENT ===")

@@ -1006,6 +1006,18 @@ def main() -> Optional[Dict[str, Any]]:
         default=1000,
         help="Avatar generation threshold in ms (default: 1000ms, use 2500 for dev environments)",
     )
+    parser.add_argument(
+        "--response-threshold",
+        type=int,
+        default=1000,
+        help="Response time threshold in ms (default: 1000ms, use 2500 for dev environments)",
+    )
+    parser.add_argument(
+        "--p95-threshold",
+        type=int,
+        default=2000,
+        help="95th percentile threshold in ms (default: 2000ms, use 5000 for dev environments)",
+    )

     args = parser.parse_args()

@@ -1022,7 +1034,7 @@ def main() -> Optional[Dict[str, Any]]:
         remote_testing=remote_testing,
     )

-    results = runner.run_all_tests(args.avatar_threshold)
+    results = runner.run_all_tests(args.avatar_threshold, args.response_threshold, args.p95_threshold)

     if args.output and results:
         import json