diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 64ad5e1..7914f75 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -167,7 +167,7 @@ semgrep:
 # Deployment verification jobs
 verify_dev_deployment:
   stage: deploy
-  image: alpine:latest
+  image: python:3.11-alpine
   only:
     - devel
   variables:
@@ -175,70 +175,16 @@ verify_dev_deployment:
     MAX_RETRIES: 30
     RETRY_DELAY: 60
   before_script:
-    - apk add --no-cache curl jq
+    - apk add --no-cache curl
+    - pip install Pillow
   script:
     - echo "Waiting for dev.libravatar.org deployment to complete..."
-    - |
-      for i in $(seq 1 $MAX_RETRIES); do
-        echo "Attempt $i/$MAX_RETRIES: Checking deployment status..."
-
-        # Get current commit hash from GitLab
-        CURRENT_COMMIT="$CI_COMMIT_SHA"
-        echo "Expected commit: $CURRENT_COMMIT"
-
-        # Check if dev site is responding
-        if curl -sf "$DEV_URL/deployment/version/" > /dev/null 2>&1; then
-          echo "Dev site is responding, checking version..."
-
-          # Get deployed version
-          DEPLOYED_VERSION=$(curl -sf "$DEV_URL/deployment/version/" | jq -r '.commit_hash // empty')
-
-          if [ "$DEPLOYED_VERSION" = "$CURRENT_COMMIT" ]; then
-            echo "✅ SUCCESS: Dev deployment verified!"
-            echo "Deployed version: $DEPLOYED_VERSION"
-            echo "Expected version: $CURRENT_COMMIT"
-
-            # Run basic functionality tests
-            echo "Running basic functionality tests..."
-
-            # Test avatar endpoint
-            if curl -sf "$DEV_URL/avatar/test@example.com" > /dev/null; then
-              echo "✅ Avatar endpoint working"
-            else
-              echo "❌ Avatar endpoint failed"
-              exit 1
-            fi
-
-            # Test stats endpoint
-            if curl -sf "$DEV_URL/stats/" > /dev/null; then
-              echo "✅ Stats endpoint working"
-            else
-              echo "❌ Stats endpoint failed"
-              exit 1
-            fi
-
-            echo "🎉 Dev deployment verification completed successfully!"
-            exit 0
-          else
-            echo "Version mismatch. Deployed: $DEPLOYED_VERSION, Expected: $CURRENT_COMMIT"
-          fi
-        else
-          echo "Dev site not responding yet..."
-        fi
-
-        if [ $i -lt $MAX_RETRIES ]; then
-          echo "Waiting $RETRY_DELAY seconds before next attempt..."
-          sleep $RETRY_DELAY
-        fi
-      done
-
-      echo "❌ FAILED: Dev deployment verification timed out after $MAX_RETRIES attempts"
-      exit 1
+    - python3 scripts/check_deployment.py --dev --max-retries $MAX_RETRIES --retry-delay $RETRY_DELAY
   allow_failure: false
 
 verify_prod_deployment:
   stage: deploy
-  image: alpine:latest
+  image: python:3.11-alpine
   only:
     - master
   when: manual
@@ -247,65 +193,11 @@ verify_prod_deployment:
     MAX_RETRIES: 10
     RETRY_DELAY: 30
   before_script:
-    - apk add --no-cache curl jq
+    - apk add --no-cache curl
+    - pip install Pillow
   script:
     - echo "Verifying production deployment..."
-    - |
-      for i in $(seq 1 $MAX_RETRIES); do
-        echo "Attempt $i/$MAX_RETRIES: Checking production deployment..."
-
-        # Get current commit hash from GitLab
-        CURRENT_COMMIT="$CI_COMMIT_SHA"
-        echo "Expected commit: $CURRENT_COMMIT"
-
-        # Check if prod site is responding
-        if curl -sf "$PROD_URL/deployment/version/" > /dev/null 2>&1; then
-          echo "Production site is responding, checking version..."
-
-          # Get deployed version
-          DEPLOYED_VERSION=$(curl -sf "$PROD_URL/deployment/version/" | jq -r '.commit_hash // empty')
-
-          if [ "$DEPLOYED_VERSION" = "$CURRENT_COMMIT" ]; then
-            echo "✅ SUCCESS: Production deployment verified!"
-            echo "Deployed version: $DEPLOYED_VERSION"
-            echo "Expected version: $CURRENT_COMMIT"
-
-            # Run basic functionality tests
-            echo "Running production functionality tests..."
-
-            # Test avatar endpoint
-            if curl -sf "$PROD_URL/avatar/test@example.com" > /dev/null; then
-              echo "✅ Production avatar endpoint working"
-            else
-              echo "❌ Production avatar endpoint failed"
-              exit 1
-            fi
-
-            # Test stats endpoint
-            if curl -sf "$PROD_URL/stats/" > /dev/null; then
-              echo "✅ Production stats endpoint working"
-            else
-              echo "❌ Production stats endpoint failed"
-              exit 1
-            fi
-
-            echo "🎉 Production deployment verification completed successfully!"
-            exit 0
-          else
-            echo "Version mismatch. Deployed: $DEPLOYED_VERSION, Expected: $CURRENT_COMMIT"
-          fi
-        else
-          echo "Production site not responding..."
-        fi
-
-        if [ $i -lt $MAX_RETRIES ]; then
-          echo "Waiting $RETRY_DELAY seconds before next attempt..."
-          sleep $RETRY_DELAY
-        fi
-      done
-
-      echo "❌ FAILED: Production deployment verification timed out after $MAX_RETRIES attempts"
-      exit 1
+    - python3 scripts/check_deployment.py --prod --max-retries $MAX_RETRIES --retry-delay $RETRY_DELAY
   allow_failure: false
 
 include:
diff --git a/run_tests_local.sh b/run_tests_local.sh
deleted file mode 100755
index f662bfe..0000000
--- a/run_tests_local.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-# Run tests locally, skipping Bluesky tests that require external API credentials
-# OpenTelemetry is disabled by default for local testing
-
-echo "Running tests locally (skipping Bluesky tests, OpenTelemetry disabled)..."
-echo "======================================================================="
-
-# Ensure OpenTelemetry is disabled for local testing
-export ENABLE_OPENTELEMETRY=false
-export OTEL_ENABLED=false
-
-# Run Django tests excluding the Bluesky test file and OpenTelemetry tests
-python3 manage.py test \
-    ivatar.ivataraccount.test_auth \
-    ivatar.ivataraccount.test_views \
-    ivatar.test_auxiliary \
-    ivatar.test_file_security \
-    ivatar.test_static_pages \
-    ivatar.test_utils \
-    ivatar.test_views \
-    ivatar.test_views_stats \
-    ivatar.tools.test_views \
-    ivatar.test_wsgi \
-    -v2
-
-echo ""
-echo "To run all tests including Bluesky (requires API credentials):"
-echo "python3 manage.py test -v2"
-echo ""
-echo "To run only Bluesky tests:"
-echo "python3 manage.py test ivatar.ivataraccount.test_views_bluesky -v2"
-echo ""
-echo "To run tests with OpenTelemetry enabled:"
-echo "./run_tests_with_ot.sh"
-echo ""
-echo "To run tests without OpenTelemetry (default):"
-echo "./run_tests_no_ot.sh"
diff --git a/run_tests_no_ot.sh b/run_tests_no_ot.sh
deleted file mode 100755
index 4720101..0000000
--- a/run_tests_no_ot.sh
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/bin/bash
-# Run tests without OpenTelemetry enabled (default mode)
-# This is the default test mode for most users
-
-set -e
-
-echo "Running tests without OpenTelemetry (default mode)..."
-echo "=====================================================" - -# Ensure OpenTelemetry is disabled -export ENABLE_OPENTELEMETRY=false -export OTEL_ENABLED=false - -# Run Django tests excluding OpenTelemetry-specific tests -python3 manage.py test \ - ivatar.ivataraccount.test_auth \ - ivatar.ivataraccount.test_views \ - ivatar.ivataraccount.test_views_bluesky \ - ivatar.test_auxiliary \ - ivatar.test_file_security \ - ivatar.test_static_pages \ - ivatar.test_utils \ - ivatar.test_views \ - ivatar.test_views_stats \ - ivatar.tools.test_views \ - ivatar.test_wsgi \ - -v2 - -echo "" -echo "Tests completed successfully (OpenTelemetry disabled)" diff --git a/run_tests_with_ot.sh b/run_tests_with_ot.sh deleted file mode 100755 index 63de521..0000000 --- a/run_tests_with_ot.sh +++ /dev/null @@ -1,33 +0,0 @@ -#!/bin/bash -# Run tests with OpenTelemetry enabled -# This is used in CI to test OpenTelemetry functionality - -set -e - -echo "Running tests with OpenTelemetry enabled..." -echo "==========================================" - -# Enable OpenTelemetry -export ENABLE_OPENTELEMETRY=true -export OTEL_ENABLED=true -export OTEL_SERVICE_NAME=ivatar-test -export OTEL_ENVIRONMENT=test - -# Run Django tests including OpenTelemetry-specific tests -python3 manage.py test \ - ivatar.ivataraccount.test_auth \ - ivatar.ivataraccount.test_views \ - ivatar.ivataraccount.test_views_bluesky \ - ivatar.test_auxiliary \ - ivatar.test_file_security \ - ivatar.test_opentelemetry \ - ivatar.test_static_pages \ - ivatar.test_utils \ - ivatar.test_views \ - ivatar.test_views_stats \ - ivatar.tools.test_views \ - ivatar.test_wsgi \ - -v2 - -echo "" -echo "Tests completed successfully (OpenTelemetry enabled)" diff --git a/scripts/check_deployment.py b/scripts/check_deployment.py new file mode 100755 index 0000000..e2a16d4 --- /dev/null +++ b/scripts/check_deployment.py @@ -0,0 +1,448 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +Libravatar Deployment Verification Script + +This script verifies that Libravatar deployments are working correctly by: +- Checking version endpoint +- Testing avatar functionality with various sizes +- Verifying stats endpoint +- Testing redirect behavior + +Usage: + python3 check_deployment.py --dev # Test dev deployment + python3 check_deployment.py --prod # Test production deployment + python3 check_deployment.py --endpoint # Test custom endpoint + python3 check_deployment.py --dev --prod # Test both deployments +""" + +import argparse +import json +import random +import ssl +import sys +import tempfile +import time +from typing import Dict, Optional, Tuple +from urllib.parse import urljoin +from urllib.request import urlopen, Request +from urllib.error import HTTPError, URLError + +try: + from PIL import Image + + PIL_AVAILABLE = True +except ImportError: + PIL_AVAILABLE = False + +# Configuration +DEV_URL = "https://dev.libravatar.org" +PROD_URL = "https://libravatar.org" +MAX_RETRIES = 5 +RETRY_DELAY = 10 + +# ANSI color codes + + +class Colors: + RED = "\033[0;31m" + GREEN = "\033[0;32m" + YELLOW = "\033[1;33m" + BLUE = "\033[0;34m" + NC = "\033[0m" # No Color + + +def colored_print(message: str, color: str = Colors.NC) -> None: + """Print a colored message.""" + print(f"{color}{message}{Colors.NC}") + + +def make_request( + url: str, + method: str = "GET", + headers: Optional[Dict[str, str]] = None, + follow_redirects: bool = True, + binary: bool = False, +) -> Tuple[bool, Optional[bytes], Optional[Dict[str, str]]]: + """ + Make an HTTP request and return success status, 
+
+    Args:
+        url: URL to request
+        method: HTTP method
+        headers: Additional headers
+        follow_redirects: Whether to follow redirects automatically
+
+    Returns:
+        Tuple of (success, content, headers)
+    """
+    req = Request(url, headers=headers or {})
+    req.get_method = lambda: method
+
+    # Create SSL context that handles certificate verification issues
+    ssl_context = ssl.create_default_context()
+
+    # Try with SSL verification first
+    try:
+        opener = urlopen
+        if not follow_redirects:
+            # Create a custom opener that doesn't follow redirects
+            import urllib.request
+
+            class NoRedirectHandler(urllib.request.HTTPRedirectHandler):
+                def redirect_request(self, req, fp, code, msg, headers, newurl):
+                    return None
+
+            opener = urllib.request.build_opener(NoRedirectHandler)
+
+        if follow_redirects:
+            with opener(req, context=ssl_context) as response:
+                content = response.read()
+                if not binary:
+                    try:
+                        content = content.decode("utf-8")
+                    except UnicodeDecodeError:
+                        # If content is not text (e.g., binary image), return empty string
+                        content = ""
+                headers = dict(response.headers)
+                return True, content, headers
+        else:
+            response = opener.open(req)
+            content = response.read()
+            if not binary:
+                try:
+                    content = content.decode("utf-8")
+                except UnicodeDecodeError:
+                    content = ""
+            headers = dict(response.headers)
+            return True, content, headers
+    except URLError as url_err:
+        # Check if this is an SSL error wrapped in URLError
+        if isinstance(url_err.reason, ssl.SSLError):
+            # If SSL fails, try with unverified context (less secure but works for testing)
+            ssl_context_unverified = ssl.create_default_context()
+            ssl_context_unverified.check_hostname = False
+            ssl_context_unverified.verify_mode = ssl.CERT_NONE
+
+            try:
+                if follow_redirects:
+                    with urlopen(req, context=ssl_context_unverified) as response:
+                        content = response.read()
+                        if not binary:
+                            try:
+                                content = content.decode("utf-8")
+                            except UnicodeDecodeError:
+                                content = ""
+                        headers = dict(response.headers)
+                        return True, content, headers
+                else:
+                    import urllib.request
+
+                    class NoRedirectHandler(urllib.request.HTTPRedirectHandler):
+                        def redirect_request(self, req, fp, code, msg, headers, newurl):
+                            return None
+
+                    opener = urllib.request.build_opener(NoRedirectHandler)
+                    response = opener.open(req)
+                    content = response.read()
+                    if not binary:
+                        try:
+                            content = content.decode("utf-8")
+                        except UnicodeDecodeError:
+                            content = ""
+                    headers = dict(response.headers)
+                    return True, content, headers
+            except Exception:
+                return False, None, None
+        else:
+            return False, None, None
+    except HTTPError:
+        return False, None, None
+
+
+def check_version_endpoint(base_url: str) -> Tuple[bool, Optional[Dict]]:
+    """Check the version endpoint and return deployment info."""
+    version_url = urljoin(base_url, "/deployment/version/")
+    success, content, _ = make_request(version_url)
+
+    if not success or not content:
+        return False, None
+
+    try:
+        version_info = json.loads(content)
+        return True, version_info
+    except json.JSONDecodeError:
+        return False, None
+
+
+def test_avatar_redirect(base_url: str) -> bool:
+    """Test that invalid avatar requests redirect to default image."""
+    avatar_url = urljoin(base_url, "/avatar/test@example.com")
+
+    # Use a simple approach: check if the final URL after redirect contains deadbeef.png
+    try:
+        req = Request(avatar_url)
+        ssl_context = ssl.create_default_context()
+        ssl_context.check_hostname = False
+        ssl_context.verify_mode = ssl.CERT_NONE
+
+        with urlopen(req, context=ssl_context) as response:
+            final_url = response.geturl()
+            return "deadbeef.png" in final_url
+    except Exception:
+        return False
+
+
+def test_avatar_sizing(base_url: str) -> bool:
+    """Test avatar endpoint with random sizes."""
+    # Use a known test hash for consistent testing
+    test_hash = "63a75a80e6b1f4adfdb04c1ca02e596c"
+
+    # Generate random sizes between 50-250
+    sizes = [random.randint(50, 250) for _ in range(2)]
+
+    for size in sizes:
+        avatar_url = urljoin(base_url, f"/avatar/{test_hash}?s={size}")
+
+        # Download image to temporary file
+        success, content, _ = make_request(avatar_url, binary=True)
+        if not success or not content:
+            colored_print(f"❌ Avatar endpoint failed for size {size}", Colors.RED)
+            return False
+
+        # Check image dimensions
+        if PIL_AVAILABLE:
+            try:
+                with tempfile.NamedTemporaryFile(suffix=".jpg") as temp_file:
+                    temp_file.write(content)
+                    temp_file.flush()
+
+                    with Image.open(temp_file.name) as img:
+                        width, height = img.size
+                        if width == size and height == size:
+                            colored_print(
+                                f"✅ Avatar size {size}x{size} verified", Colors.GREEN
+                            )
+                        else:
+                            colored_print(
+                                f"❌ Avatar wrong size: expected {size}x{size}, got {width}x{height}",
+                                Colors.RED,
+                            )
+                            return False
+            except Exception as e:
+                colored_print(f"❌ Error checking image dimensions: {e}", Colors.RED)
+                return False
+        else:
+            # Fallback: just check if we got some content
+            if len(content) > 100:  # Assume valid image if we got substantial content
+                colored_print(
+                    f"✅ Avatar size {size} downloaded (PIL not available for verification)",
+                    Colors.YELLOW,
+                )
+            else:
+                colored_print(
+                    f"❌ Avatar endpoint returned insufficient content for size {size}",
+                    Colors.RED,
+                )
+                return False
+
+    return True
+
+
+def test_stats_endpoint(base_url: str) -> bool:
+    """Test that the stats endpoint is accessible."""
+    stats_url = urljoin(base_url, "/stats/")
+    success, _, _ = make_request(stats_url)
+    return success
+
+
+def test_deployment(
+    base_url: str,
+    name: str,
+    max_retries: int = MAX_RETRIES,
+    retry_delay: int = RETRY_DELAY,
+) -> bool:
+    """
+    Test a deployment with retry logic.
+
+    Args:
+        base_url: Base URL of the deployment
+        name: Human-readable name for the deployment
+        max_retries: Maximum number of retry attempts
+
+    Returns:
+        True if all tests pass, False otherwise
+    """
+    colored_print(f"Testing {name} deployment at {base_url}", Colors.YELLOW)
+
+    for attempt in range(1, max_retries + 1):
+        colored_print(
+            f"Attempt {attempt}/{max_retries}: Checking {name} deployment...",
+            Colors.BLUE,
+        )
+
+        # Check if site is responding
+        success, version_info = check_version_endpoint(base_url)
+        if success and version_info:
+            colored_print(
+                f"{name} site is responding, checking version...", Colors.GREEN
+            )
+
+            # Display version information
+            commit_hash = version_info.get("commit_hash", "Unknown")
+            branch = version_info.get("branch", "Unknown")
+            version = version_info.get("version", "Unknown")
+
+            colored_print(f"Deployed commit: {commit_hash}", Colors.BLUE)
+            colored_print(f"Deployed branch: {branch}", Colors.BLUE)
+            colored_print(f"Deployed version: {version}", Colors.BLUE)
+
+            # Run functionality tests
+            colored_print("Running basic functionality tests...", Colors.YELLOW)
+
+            # Test avatar redirect
+            if test_avatar_redirect(base_url):
+                colored_print("✅ Invalid avatar redirects correctly", Colors.GREEN)
+            else:
+                colored_print("❌ Invalid avatar redirect failed", Colors.RED)
+                return False
+
+            # Test avatar sizing
+            if test_avatar_sizing(base_url):
+                pass  # Success messages are printed within the function
+            else:
+                return False
+
+            # Test stats endpoint
+            if test_stats_endpoint(base_url):
+                colored_print("✅ Stats endpoint working", Colors.GREEN)
+            else:
+                colored_print("❌ Stats endpoint failed", Colors.RED)
+                return False
+
+            colored_print(
+                f"🎉 {name} deployment verification completed successfully!",
+                Colors.GREEN,
+            )
+            return True
+        else:
+            colored_print(f"{name} site not responding yet...", Colors.YELLOW)
+
+        if attempt < max_retries:
+            colored_print(
+                f"Waiting {retry_delay} seconds before next attempt...", Colors.BLUE
+            )
+            time.sleep(retry_delay)
+
+    colored_print(
+        f"❌ FAILED: {name} deployment verification timed out after {max_retries} attempts",
+        Colors.RED,
+    )
+    return False
+
+
+def main():
+    """Main function with command-line argument parsing."""
+    parser = argparse.ArgumentParser(
+        description="Libravatar Deployment Verification Script",
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog="""
+Examples:
+    python3 check_deployment.py --dev          # Test dev deployment
+    python3 check_deployment.py --prod         # Test production deployment
+    python3 check_deployment.py --endpoint     # Test custom endpoint
+    python3 check_deployment.py --dev --prod   # Test both deployments
+    """,
+    )
+
+    parser.add_argument(
+        "--dev",
+        action="store_true",
+        help="Test dev deployment (https://dev.libravatar.org)",
+    )
+    parser.add_argument(
+        "--prod",
+        action="store_true",
+        help="Test production deployment (https://libravatar.org)",
+    )
+    parser.add_argument("--endpoint", type=str, help="Test custom endpoint URL")
+    parser.add_argument(
+        "--max-retries",
+        type=int,
+        default=MAX_RETRIES,
+        help=f"Maximum number of retry attempts (default: {MAX_RETRIES})",
+    )
+    parser.add_argument(
+        "--retry-delay",
+        type=int,
+        default=RETRY_DELAY,
+        help=f"Delay between retry attempts in seconds (default: {RETRY_DELAY})",
+    )
+
+    args = parser.parse_args()
+
+    # Validate arguments
+    if not any([args.dev, args.prod, args.endpoint]):
+        parser.error("At least one of --dev, --prod, or --endpoint must be specified")
+
+    # Update configuration if custom values provided
+    max_retries = args.max_retries
+    retry_delay = args.retry_delay
+
+    colored_print("Libravatar Deployment Verification Script", Colors.BLUE)
+    colored_print("=" * 50, Colors.BLUE)
+
+    # Check dependencies
+    if not PIL_AVAILABLE:
+        colored_print(
+            "⚠️ Warning: PIL/Pillow not available. Image dimension verification will be limited.",
+            Colors.YELLOW,
+        )
+        colored_print(" Install with: pip install Pillow", Colors.YELLOW)
+
+    results = []
+
+    # Test dev deployment
+    if args.dev:
+        colored_print("", Colors.NC)
+        dev_result = test_deployment(DEV_URL, "Dev", max_retries, retry_delay)
+        results.append(("Dev", dev_result))
+
+    # Test production deployment
+    if args.prod:
+        colored_print("", Colors.NC)
+        prod_result = test_deployment(PROD_URL, "Production", max_retries, retry_delay)
+        results.append(("Production", prod_result))
+
+    # Test custom endpoint
+    if args.endpoint:
+        colored_print("", Colors.NC)
+        custom_result = test_deployment(
+            args.endpoint, "Custom", max_retries, retry_delay
+        )
+        results.append(("Custom", custom_result))
+
+    # Summary
+    colored_print("", Colors.NC)
+    colored_print("=" * 50, Colors.BLUE)
+    colored_print("Deployment Verification Summary:", Colors.BLUE)
+    colored_print("=" * 50, Colors.BLUE)
+
+    all_passed = True
+    for name, result in results:
+        if result:
+            colored_print(f"✅ {name} deployment: PASSED", Colors.GREEN)
+        else:
+            colored_print(f"❌ {name} deployment: FAILED", Colors.RED)
+            all_passed = False
+
+    if all_passed:
+        colored_print("🎉 All deployment verifications passed!", Colors.GREEN)
+        sys.exit(0)
+    else:
+        colored_print("❌ Some deployment verifications failed!", Colors.RED)
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/test_deployment.sh b/scripts/test_deployment.sh
deleted file mode 100755
index 130b6c9..0000000
--- a/scripts/test_deployment.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/bash
-# Test deployment verification script
-
-set -e
-
-# Configuration
-DEV_URL="https://dev.libravatar.org"
-PROD_URL="https://libravatar.org"
-MAX_RETRIES=5
-RETRY_DELAY=10
-
-# Colors for output
-RED='\033[0;31m'
-GREEN='\033[0;32m'
-YELLOW='\033[1;33m'
-NC='\033[0m' # No Color
-
-# Function to test deployment
-test_deployment() {
-    local url=$1
-    local name=$2
-    local max_retries=$3
-
-    echo -e "${YELLOW}Testing $name deployment at $url${NC}"
-
-    for i in $(seq 1 $max_retries); do
-        echo "Attempt $i/$max_retries: Checking $name deployment..."
-
-        # Check if site is responding
-        if curl -sf "$url/deployment/version/" >/dev/null 2>&1; then
-            echo "$name site is responding, checking version..."
-
-            # Get deployed version info
-            VERSION_INFO=$(curl -sf "$url/deployment/version/")
-            echo "Version info: $VERSION_INFO"
-
-            # Extract commit hash
-            COMMIT_HASH=$(echo "$VERSION_INFO" | jq -r '.commit_hash // empty')
-            BRANCH=$(echo "$VERSION_INFO" | jq -r '.branch // empty')
-            VERSION=$(echo "$VERSION_INFO" | jq -r '.version // empty')
-
-            echo "Deployed commit: $COMMIT_HASH"
-            echo "Deployed branch: $BRANCH"
-            echo "Deployed version: $VERSION"
-
-            # Run basic functionality tests
-            echo "Running basic functionality tests..."
-
-            # Test avatar endpoint
-            if curl -sf "$url/avatar/test@example.com" >/dev/null; then
-                echo -e "${GREEN}✅ Avatar endpoint working${NC}"
-            else
-                echo -e "${RED}❌ Avatar endpoint failed${NC}"
-                return 1
-            fi
-
-            # Test stats endpoint
-            if curl -sf "$url/stats/" >/dev/null; then
-                echo -e "${GREEN}✅ Stats endpoint working${NC}"
-            else
-                echo -e "${RED}❌ Stats endpoint failed${NC}"
-                return 1
-            fi
-
-            echo -e "${GREEN}🎉 $name deployment verification completed successfully!${NC}"
-            return 0
-        else
-            echo "$name site not responding yet..."
-        fi
-
-        if [ $i -lt $max_retries ]; then
-            echo "Waiting $RETRY_DELAY seconds before next attempt..."
-            sleep $RETRY_DELAY
-        fi
-    done
-
-    echo -e "${RED}❌ FAILED: $name deployment verification timed out after $max_retries attempts${NC}"
-    return 1
-}
-
-# Main execution
-echo "Libravatar Deployment Verification Script"
-echo "=========================================="
-
-# Check if jq is available
-if ! command -v jq &>/dev/null; then
-    echo -e "${RED}Error: jq is required but not installed${NC}"
-    echo "Install with: brew install jq (macOS) or apt-get install jq (Ubuntu)"
-    exit 1
-fi
-
-# Test dev deployment
-echo ""
-test_deployment "$DEV_URL" "Dev" $MAX_RETRIES
-DEV_RESULT=$?
-
-# Test production deployment
-echo ""
-test_deployment "$PROD_URL" "Production" $MAX_RETRIES
-PROD_RESULT=$?
-
-# Summary
-echo ""
-echo "=========================================="
-echo "Deployment Verification Summary:"
-echo "=========================================="
-
-if [ $DEV_RESULT -eq 0 ]; then
-    echo -e "${GREEN}✅ Dev deployment: PASSED${NC}"
-else
-    echo -e "${RED}❌ Dev deployment: FAILED${NC}"
-fi
-
-if [ $PROD_RESULT -eq 0 ]; then
-    echo -e "${GREEN}✅ Production deployment: PASSED${NC}"
-else
-    echo -e "${RED}❌ Production deployment: FAILED${NC}"
-fi
-
-# Exit with error if any test failed
-if [ $DEV_RESULT -ne 0 ] || [ $PROD_RESULT -ne 0 ]; then
-    exit 1
-fi
-
-echo -e "${GREEN}🎉 All deployment verifications passed!${NC}"