Enhance performance tests

This commit is contained in:
Oliver Falk
2025-10-24 13:51:45 +02:00
parent f0c604a523
commit 9cf1cb4745
74 changed files with 965 additions and 578 deletions

View File

@@ -158,21 +158,24 @@ performance_tests_dev:
image: python:3.11-alpine
only:
- devel
when: on_success # Run automatically after successful deployment verification
variables:
DEV_URL: "https://dev.libravatar.org"
PYTHONUNBUFFERED: 1
before_script:
- apk add --no-cache curl
- pip install requests
- pip install requests Pillow prettytable pyLibravatar dnspython py3dns
script:
- echo "Running performance tests against dev.libravatar.org..."
- python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --output performance_dev.json
- python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --avatar-threshold 2500 --response-threshold 2500 --p95-threshold 5000 --ignore-cache-warnings --output performance_dev.json
artifacts:
paths:
- performance_dev.json
expire_in: 7 days
allow_failure: true # Don't fail deployment on performance issues
needs:
- verify_dev_deployment # Run after deployment verification
- job: verify_dev_deployment
artifacts: false # Run after deployment verification succeeds
# Performance testing against production server (master branch only)
performance_tests_prod:
@@ -180,12 +183,13 @@ performance_tests_prod:
image: python:3.11-alpine
only:
- master
when: manual # Manual trigger to avoid impacting production unnecessarily
when: on_success # Run automatically after successful deployment verification
variables:
PROD_URL: "https://libravatar.org"
PYTHONUNBUFFERED: 1
before_script:
- apk add --no-cache curl
- pip install requests
- pip install requests Pillow prettytable pyLibravatar dnspython py3dns
script:
- echo "Running performance tests against libravatar.org..."
- python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 3 --output performance_prod.json
@@ -195,7 +199,30 @@ performance_tests_prod:
expire_in: 30 days # Keep production results longer
allow_failure: true # Don't fail deployment on performance issues
needs:
- verify_prod_deployment # Run after deployment verification
- job: verify_prod_deployment
artifacts: false # Run after deployment verification succeeds
# Manual performance testing against production (for on-demand testing)
performance_tests_prod_manual:
stage: deploy
image: python:3.11-alpine
only:
- master
when: manual # Manual trigger for on-demand performance testing
variables:
PROD_URL: "https://libravatar.org"
PYTHONUNBUFFERED: 1
before_script:
- apk add --no-cache curl
- pip install requests Pillow prettytable pyLibravatar dnspython py3dns
script:
- echo "Running manual performance tests against libravatar.org..."
- python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 5 --output performance_prod_manual.json
artifacts:
paths:
- performance_prod_manual.json
expire_in: 30 days
allow_failure: true
# Deployment verification jobs
verify_dev_deployment:
@@ -207,8 +234,9 @@ verify_dev_deployment:
DEV_URL: "https://dev.libravatar.org"
MAX_RETRIES: 30
RETRY_DELAY: 60
PYTHONUNBUFFERED: 1
before_script:
- apk add --no-cache curl
- apk add --no-cache curl git
- pip install Pillow
script:
- echo "Waiting for dev.libravatar.org deployment to complete..."
@@ -225,8 +253,9 @@ verify_prod_deployment:
PROD_URL: "https://libravatar.org"
MAX_RETRIES: 10
RETRY_DELAY: 30
PYTHONUNBUFFERED: 1
before_script:
- apk add --no-cache curl
- apk add --no-cache curl git
- pip install Pillow
script:
- echo "Verifying production deployment..."

View File

@@ -4,16 +4,20 @@ repos:
hooks:
- id: check-useless-excludes
- repo: https://github.com/pre-commit/mirrors-prettier
rev: v3.0.0-alpha.4
rev: v4.0.0-alpha.8
hooks:
- id: prettier
files: \.(css|js|md|markdown|json)
- repo: https://github.com/python/black
rev: 22.12.0
rev: 25.9.0
hooks:
- id: black
- repo: https://github.com/asottile/pyupgrade
rev: v3.21.0
hooks:
- id: pyupgrade
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
rev: v6.0.0
hooks:
- id: check-added-large-files
- id: check-ast
@@ -28,7 +32,6 @@ repos:
args:
- --unsafe
- id: end-of-file-fixer
- id: fix-encoding-pragma
- id: forbid-new-submodules
- id: no-commit-to-branch
args:
@@ -38,7 +41,7 @@ repos:
- id: sort-simple-yaml
- id: trailing-whitespace
- repo: https://github.com/PyCQA/flake8
rev: 6.0.0
rev: 7.3.0
hooks:
- id: flake8
- repo: local
@@ -57,7 +60,7 @@ repos:
types:
- shell
- repo: https://github.com/asottile/blacken-docs
rev: v1.12.1
rev: 1.20.0
hooks:
- id: blacken-docs
# YASpeller does not seem to work anymore

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Configuration overrides for settings.py
"""

View File

@@ -1,3 +1,2 @@
# -*- coding: utf-8 -*-
# Test configuration to verify LOGS_DIR override
LOGS_DIR = "/tmp/ivatar_test_logs"

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Import the whole libravatar export
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Module init
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Default: useful variables for the base page templates.
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
File upload security utilities for ivatar
"""

View File

@@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
"""
Module init
"""
app_label = __name__ # pylint: disable=invalid-name

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Register models in admin
"""
from django.contrib import admin
from .models import Photo, ConfirmedEmail, UnconfirmedEmail

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
from social_core.backends.open_id_connect import OpenIdConnectAuth
from ivatar.ivataraccount.models import ConfirmedEmail, Photo

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Classes for our ivatar.ivataraccount.forms
"""
from urllib.parse import urlsplit, urlunsplit
from django import forms

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Helper method to fetch Gravatar image
"""
from ssl import SSLError
from urllib.request import HTTPError, URLError
from ivatar.utils import urlopen

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.5 on 2018-05-07 07:13
from django.conf import settings

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.5 on 2018-05-07 07:23
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.5 on 2018-05-08 06:37
import datetime

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.5 on 2018-05-08 07:42
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.5 on 2018-05-22 11:55
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.6 on 2018-06-26 14:45
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.6 on 2018-06-27 06:24
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name,missing-docstring
# Generated by Django 2.0.6 on 2018-07-04 12:32

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.6 on 2018-07-05 11:52
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.0.6 on 2018-07-05 12:01
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.1.3 on 2018-11-07 15:50
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.1.3 on 2018-11-07 17:32
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.1.3 on 2018-12-03 14:21
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 2.1.5 on 2019-02-18 16:02
from django.db import migrations

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 3.0.3 on 2020-02-25 09:34
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 3.1.7 on 2021-04-13 09:04
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 3.2.3 on 2021-05-28 13:14
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 5.0 on 2024-05-31 15:00
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 5.1.5 on 2025-01-27 10:54
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated by Django 5.1.5 on 2025-01-27 13:33
from django.db import migrations, models

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# Generated manually for performance optimization
from typing import Any, List, Tuple, Optional

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Our models for ivatar.ivataraccount
"""
@@ -404,7 +403,7 @@ class ConfirmedEmail(BaseAccountModel):
logger.debug("Successfully cleaned up cached page: %s" % cache_key)
except Exception as exc:
logger.warning(
"Failed to clean up cached page %s: %s" % (cache_key, exc)
"Failed to clean up cached page {}: {}".format(cache_key, exc)
)
# Invalidate Bluesky avatar URL cache if bluesky_handle changed
@@ -455,9 +454,7 @@ class UnconfirmedEmail(BaseAccountModel):
+ self.user.username.encode("utf-8") # pylint: disable=no-member
) # pylint: disable=no-member
self.verification_key = hash_object.hexdigest()
super(UnconfirmedEmail, self).save(
force_insert, force_update, using, update_fields
)
super().save(force_insert, force_update, using, update_fields)
def send_confirmation_mail(self, url=SECURE_BASE_URL):
"""
@@ -602,7 +599,7 @@ class ConfirmedOpenId(BaseAccountModel):
logger.debug("Successfully cleaned up cached page: %s" % cache_key)
except Exception as exc:
logger.warning(
"Failed to clean up cached page %s: %s" % (cache_key, exc)
"Failed to clean up cached page {}: {}".format(cache_key, exc)
)
# Invalidate Bluesky avatar URL cache if bluesky_handle exists

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Reading libravatar export
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
from unittest import mock
from django.test import TestCase

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Test our views in ivatar.ivataraccount.views and ivatar.views
"""
@@ -53,9 +52,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods
user = None
username = random_string()
password = random_string()
email = "%s@%s.org" % (username, random_string())
email = "{}@{}.org".format(username, random_string())
# Dunno why random tld doesn't work, but I'm too lazy now to investigate
openid = "http://%s.%s.%s/" % (username, random_string(), "org")
openid = "http://{}.{}.{}/".format(username, random_string(), "org")
first_name = random_string()
last_name = random_string()

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Test our views in ivatar.ivataraccount.views and ivatar.views
"""
@@ -37,9 +36,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods
user = None
username = random_string()
password = random_string()
email = "%s@%s.%s" % (username, random_string(), random_string(2))
email = "{}@{}.{}".format(username, random_string(), random_string(2))
# Dunno why random tld doesn't work, but I'm too lazy now to investigate
openid = "http://%s.%s.%s/" % (username, random_string(), "org")
openid = "http://{}.{}.{}/".format(username, random_string(), "org")
first_name = random_string()
last_name = random_string()
bsky_test_account = "libravatar.org"

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
URLs for ivatar.ivataraccount
"""
from django.urls import path, re_path
from django.contrib.auth.views import LogoutView

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
View classes for ivatar/ivataraccount/
"""
@@ -140,7 +139,7 @@ class PasswordSetView(SuccessMessageMixin, FormView):
success_url = reverse_lazy("profile")
def get_form_kwargs(self):
kwargs = super(PasswordSetView, self).get_form_kwargs()
kwargs = super().get_form_kwargs()
kwargs["user"] = self.request.user
return kwargs
@@ -712,7 +711,9 @@ class RemoveUnconfirmedOpenIDView(View):
)
openid.delete()
messages.success(request, _("ID removed"))
except self.model.DoesNotExist: # pragma: no cover pylint: disable=no-member,line-too-long
except (
self.model.DoesNotExist
): # pragma: no cover pylint: disable=no-member,line-too-long
messages.error(request, _("ID does not exist"))
return HttpResponseRedirect(reverse_lazy("profile"))
@@ -766,7 +767,9 @@ class RedirectOpenIDView(View):
unconfirmed = self.model.objects.get( # pylint: disable=no-member
user=request.user, id=kwargs["openid_id"]
)
except self.model.DoesNotExist: # pragma: no cover pylint: disable=no-member,line-too-long
except (
self.model.DoesNotExist
): # pragma: no cover pylint: disable=no-member,line-too-long
messages.error(request, _("ID does not exist"))
return HttpResponseRedirect(reverse_lazy("profile"))
@@ -1321,7 +1324,7 @@ class ExportView(SuccessMessageMixin, TemplateView):
def xml_account(user):
escaped_username = saxutils.quoteattr(user.username)
escaped_password = saxutils.quoteattr(user.password)
return " <account username=%s password=%s/>\n" % (
return " <account username={} password={}/>\n".format(
escaped_username,
escaped_password,
)
@@ -1387,8 +1390,8 @@ class ExportView(SuccessMessageMixin, TemplateView):
bytesobj.seek(0)
response = HttpResponse(content_type="application/gzip")
response[
"Content-Disposition"
] = f'attachment; filename="libravatar-export_{user.username}.xml.gz"'
response["Content-Disposition"] = (
f'attachment; filename="libravatar-export_{user.username}.xml.gz"'
)
response.write(bytesobj.read())
return response

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Middleware classes
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
OpenTelemetry configuration for ivatar project.

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
OpenTelemetry middleware and custom instrumentation for ivatar.
@@ -94,9 +93,9 @@ class OpenTelemetryMiddleware(MiddlewareMixin):
span.set_attributes(
{
"http.status_code": response.status_code,
"http.response_size": len(response.content)
if hasattr(response, "content")
else 0,
"http.response_size": (
len(response.content) if hasattr(response, "content") else 0
),
"http.request.duration": duration,
}
)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Django settings for ivatar project.
"""

View File

@@ -260,7 +260,8 @@ th {
@font-face {
font-family: "Glyphicons Halflings";
src: url(../fonts/glyphicons-halflings-regular.eot);
src: url(../fonts/glyphicons-halflings-regular.eot?#iefix)
src:
url(../fonts/glyphicons-halflings-regular.eot?#iefix)
format("embedded-opentype"),
url(../fonts/glyphicons-halflings-regular.woff2) format("woff2"),
url(../fonts/glyphicons-halflings-regular.woff) format("woff"),
@@ -2651,17 +2652,24 @@ output {
border-radius: 4px;
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075);
-webkit-transition: border-color ease-in-out 0.15s,
-webkit-transition:
border-color ease-in-out 0.15s,
-webkit-box-shadow ease-in-out 0.15s;
-o-transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s;
transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s;
-o-transition:
border-color ease-in-out 0.15s,
box-shadow ease-in-out 0.15s;
transition:
border-color ease-in-out 0.15s,
box-shadow ease-in-out 0.15s;
}
.form-control:focus {
border-color: #66afe9;
outline: 0;
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075),
-webkit-box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 8px rgba(102, 175, 233, 0.6);
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075),
box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 8px rgba(102, 175, 233, 0.6);
}
.form-control::-moz-placeholder {
@@ -2923,8 +2931,12 @@ textarea.input-lg {
}
.has-success .form-control:focus {
border-color: #2b542c;
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168;
-webkit-box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 6px #67b168;
box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 6px #67b168;
}
.has-success .input-group-addon {
color: #3c763d;
@@ -2953,8 +2965,12 @@ textarea.input-lg {
}
.has-warning .form-control:focus {
border-color: #66512c;
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b;
-webkit-box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 6px #c0a16b;
box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 6px #c0a16b;
}
.has-warning .input-group-addon {
color: #8a6d3b;
@@ -2983,8 +2999,12 @@ textarea.input-lg {
}
.has-error .form-control:focus {
border-color: #843534;
-webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;
box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483;
-webkit-box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 6px #ce8483;
box-shadow:
inset 0 1px 1px rgba(0, 0, 0, 0.075),
0 0 6px #ce8483;
}
.has-error .input-group-addon {
color: #a94442;
@@ -4470,9 +4490,11 @@ textarea.input-group-sm > .input-group-btn > .btn {
margin-left: -15px;
border-top: 1px solid transparent;
border-bottom: 1px solid transparent;
-webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1),
-webkit-box-shadow:
inset 0 1px 0 rgba(255, 255, 255, 0.1),
0 1px 0 rgba(255, 255, 255, 0.1);
box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1),
box-shadow:
inset 0 1px 0 rgba(255, 255, 255, 0.1),
0 1px 0 rgba(255, 255, 255, 0.1);
}
@media (min-width: 768px) {

View File

@@ -4,7 +4,9 @@
font-style: normal;
font-weight: 300;
src: url("../fonts/lato-v15-latin-300.eot"); /* IE9 Compat Modes */
src: local("Lato Light"), local("Lato-Light"),
src:
local("Lato Light"),
local("Lato-Light"),
url("../fonts/LatoLatin-Light.eot?#iefix") format("embedded-opentype"),
/* IE6-IE8 */ url("../fonts/LatoLatin-Light.woff2") format("woff2"),
/* Super Modern Browsers */ url("../fonts/LatoLatin-Light.woff")
@@ -19,7 +21,9 @@
font-style: normal;
font-weight: 400;
src: url("../fonts/lato-v15-latin-regular.eot"); /* IE9 Compat Modes */
src: local("Lato Regular"), local("Lato-Regular"),
src:
local("Lato Regular"),
local("Lato-Regular"),
url("../fonts/LatoLatin-Regular.eot?#iefix") format("embedded-opentype"),
/* IE6-IE8 */ url("../fonts/LatoLatin-Regular.woff2") format("woff2"),
/* Super Modern Browsers */ url("../fonts/LatoLatin-Regular.woff")
@@ -35,7 +39,9 @@
font-style: normal;
font-weight: 700;
src: url("../fonts/lato-v15-latin-700.eot"); /* IE9 Compat Modes */
src: local("Lato Bold"), local("Lato-Bold"),
src:
local("Lato Bold"),
local("Lato-Bold"),
url("../fonts/LatoLatin-Bold.eot?#iefix") format("embedded-opentype"),
/* IE6-IE8 */ url("../fonts/LatoLatin-Bold.woff2") format("woff2"),
/* Super Modern Browsers */ url("../fonts/LatoLatin-Bold.woff")
@@ -50,7 +56,9 @@
font-style: normal;
font-weight: 400;
src: url("../fonts/open-sans-v16-latin-regular.eot"); /* IE9 Compat Modes */
src: local("Open Sans Regular"), local("OpenSans-Regular"),
src:
local("Open Sans Regular"),
local("OpenSans-Regular"),
url("../fonts/open-sans-v16-latin-regular.eot?#iefix")
format("embedded-opentype"),
/* IE6-IE8 */ url("../fonts/open-sans-v16-latin-regular.woff2")

View File

@@ -12,7 +12,8 @@
? (module.exports = e())
: "function" == typeof define && define.amd
? define(e)
: ((t = "undefined" != typeof globalThis ? globalThis : t || self).Cropper =
: ((t =
"undefined" != typeof globalThis ? globalThis : t || self).Cropper =
e());
})(this, function () {
"use strict";
@@ -50,7 +51,11 @@
: Object.getOwnPropertyDescriptors
? Object.defineProperties(a, Object.getOwnPropertyDescriptors(n))
: C(Object(n)).forEach(function (t) {
Object.defineProperty(a, t, Object.getOwnPropertyDescriptor(n, t));
Object.defineProperty(
a,
t,
Object.getOwnPropertyDescriptor(n, t),
);
});
}
return a;

View File

@@ -821,9 +821,11 @@
: "$=" === r
? i && t.slice(-i.length) === i
: "~=" === r
? -1 < (" " + t.replace(v, " ") + " ").indexOf(i)
? -1 <
(" " + t.replace(v, " ") + " ").indexOf(i)
: "|=" === r &&
(t === i || t.slice(0, i.length + 1) === i + "-"));
(t === i ||
t.slice(0, i.length + 1) === i + "-"));
};
},
CHILD: function (d, e, t, h, g) {
@@ -4119,7 +4121,8 @@
: (e = kt(t)).length
? this.each(function () {
if (
((r = Ct(this)), (n = 1 === this.nodeType && " " + Tt(r) + " "))
((r = Ct(this)),
(n = 1 === this.nodeType && " " + Tt(r) + " "))
) {
for (o = 0; o < e.length; o++)
(i = e[o]), n.indexOf(" " + i + " ") < 0 && (n += i + " ");
@@ -4171,7 +4174,8 @@
this.each(function () {
if (s)
for (o = ce(this), i = 0; i < e.length; i++)
(r = e[i]), o.hasClass(r) ? o.removeClass(r) : o.addClass(r);
(r = e[i]),
o.hasClass(r) ? o.removeClass(r) : o.addClass(r);
else
(void 0 !== t && "boolean" !== a) ||
((r = Ct(this)) && _.set(this, "__className__", r),
@@ -4221,7 +4225,8 @@
(this.value = t));
}))
: t
? (r = ce.valHooks[t.type] || ce.valHooks[t.nodeName.toLowerCase()]) &&
? (r =
ce.valHooks[t.type] || ce.valHooks[t.nodeName.toLowerCase()]) &&
"get" in r &&
void 0 !== (e = r.get(t, "value"))
? e

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Test various other parts of ivatar/libravatar in order
to increase the overall test coverage. Test in here, didn't

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Tests for file upload security enhancements
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Tests for OpenTelemetry integration in ivatar.

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Test our views in ivatar.ivataraccount.views and ivatar.views
"""
# pylint: disable=too-many-lines
import os
import django
@@ -25,9 +25,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods
user = None
username = random_string()
password = random_string()
email = "%s@%s.%s" % (username, random_string(), random_string(2))
email = "{}@{}.{}".format(username, random_string(), random_string(2))
# Dunno why random tld doesn't work, but I'm too lazy now to investigate
openid = "http://%s.%s.%s/" % (username, random_string(), "org")
openid = "http://{}.{}.{}/".format(username, random_string(), "org")
def login(self):
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Test our utils from ivatar.utils
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Test our views in ivatar.ivataraccount.views and ivatar.views
"""
@@ -31,9 +30,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods
user = None
username = random_string()
password = random_string()
email = "%s@%s.%s" % (username, random_string(), random_string(2))
email = "{}@{}.{}".format(username, random_string(), random_string(2))
# Dunno why random tld doesn't work, but I'm too lazy now to investigate
openid = "http://%s.%s.%s/" % (username, random_string(), "org")
openid = "http://{}.{}.{}/".format(username, random_string(), "org")
def login(self):
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Test our StatsView in ivatar.views
"""

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Unit tests for WSGI
"""
import unittest
import os

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Classes for our ivatar.tools.forms
"""
from django import forms
from django.utils.translation import gettext_lazy as _
from django.core.exceptions import ValidationError

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
Test our views in ivatar.ivataraccount.views and ivatar.views
"""
# pylint: disable=too-many-lines
import os
import django
@@ -28,9 +28,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods
user = None
username = random_string()
password = random_string()
email = "%s@%s.%s" % (username, random_string(), random_string(2))
email = "{}@{}.{}".format(username, random_string(), random_string(2))
# Dunno why random tld doesn't work, but I'm too lazy now to investigate
openid = "http://%s.%s.%s/" % (username, random_string(), "org")
openid = "http://{}.{}.{}/".format(username, random_string(), "org")
def login(self):
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
ivatar/tools URL configuration
"""

View File

@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
View classes for ivatar/tools/
"""
from socket import inet_ntop, AF_INET6
import hashlib
import random

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
ivatar URL configuration
"""

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
Simple module providing reusable random_string function
"""
@@ -179,6 +178,16 @@ def random_string(length=10):
)
def generate_random_email():
"""
Generate a random email address using the same pattern as test_views.py
"""
username = random_string()
domain = random_string()
tld = random_string(2)
return f"{username}@{domain}.{tld}"
def random_ip_address():
"""
Return a random IP address (IPv4)

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
views under /
"""
@@ -762,9 +761,9 @@ class StatsView(TemplateView, JsonResponse):
)
retval["photo_size_stats"] = {
"average_size_bytes": round(avg_size_bytes, 2)
if avg_size_bytes
else 0,
"average_size_bytes": (
round(avg_size_bytes, 2) if avg_size_bytes else 0
),
"average_size_kb": avg_size_kb,
"average_size_mb": avg_size_mb,
"total_photos_analyzed": photo_count,
@@ -839,7 +838,7 @@ def _get_git_info_from_files():
if not path.exists(head_file):
return None
with open(head_file, "r") as f:
with open(head_file) as f:
head_content = f.read().strip()
# Parse HEAD content
@@ -851,7 +850,7 @@ def _get_git_info_from_files():
# Read the commit hash from the ref
ref_file = path.join(git_dir, branch_ref)
if path.exists(ref_file):
with open(ref_file, "r") as f:
with open(ref_file) as f:
commit_hash = f.read().strip()
else:
return None

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
"""
WSGI config for ivatar project.

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import urllib.request
import sys

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

View File

@@ -35,6 +35,7 @@ opentelemetry-instrumentation-urllib3>=0.42b0
opentelemetry-sdk>=1.20.0
Pillow
pip
prettytable
prometheus-client>=0.20.0
psycopg2-binary
py3dns

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Libravatar Deployment Verification Script
@@ -18,6 +17,7 @@ Usage:
import argparse
import json
import os
import random
import ssl
import subprocess
@@ -54,12 +54,19 @@ class Colors:
def colored_print(message: str, color: str = Colors.NC) -> None:
"""Print a colored message."""
print(f"{color}{message}{Colors.NC}")
"""Print a colored message with immediate flush."""
print(f"{color}{message}{Colors.NC}", flush=True)
def get_current_commit_hash() -> Optional[str]:
"""Get the current commit hash from git."""
"""Get the current commit hash from git or CI environment."""
# First try GitLab CI environment variable (most reliable in CI)
ci_commit = os.environ.get("CI_COMMIT_SHA")
if ci_commit:
colored_print(f"Using CI commit hash: {ci_commit}", Colors.BLUE)
return ci_commit
# Fallback to git command
try:
result = subprocess.run(
["git", "rev-parse", "HEAD"],
@@ -67,8 +74,11 @@ def get_current_commit_hash() -> Optional[str]:
text=True,
check=True,
)
return result.stdout.strip()
commit_hash = result.stdout.strip()
colored_print(f"Using git commit hash: {commit_hash}", Colors.BLUE)
return commit_hash
except (subprocess.CalledProcessError, FileNotFoundError):
colored_print("Could not determine current commit hash", Colors.RED)
return None
@@ -82,16 +92,44 @@ def is_commit_newer_or_equal(commit1: str, commit2: str) -> Optional[bool]:
None if comparison fails
"""
try:
# Use git merge-base to check if commit1 is reachable from commit2
# If commit1 is newer or equal, it should be reachable from commit2
# First try to get commit timestamps for comparison
try:
result1 = subprocess.run(
["git", "show", "-s", "--format=%ct", commit1],
capture_output=True,
text=True,
check=True,
)
result2 = subprocess.run(
["git", "show", "-s", "--format=%ct", commit2],
capture_output=True,
text=True,
check=True,
)
timestamp1 = int(result1.stdout.strip())
timestamp2 = int(result2.stdout.strip())
colored_print(f"Commit {commit1[:8]} timestamp: {timestamp1}", Colors.BLUE)
colored_print(f"Commit {commit2[:8]} timestamp: {timestamp2}", Colors.BLUE)
# commit1 is newer if it has a later timestamp
return timestamp1 >= timestamp2
except (subprocess.CalledProcessError, ValueError):
# Fallback to merge-base if timestamp comparison fails
colored_print("Timestamp comparison failed, trying merge-base", Colors.YELLOW)
# Use git merge-base to check if commit2 is ancestor of commit1
subprocess.run(
["git", "merge-base", "--is-ancestor", commit2, commit1],
capture_output=True,
check=True,
)
return True
except subprocess.CalledProcessError:
# If the above fails, try the reverse - check if commit2 is newer
# If the above fails, try the reverse
try:
subprocess.run(
["git", "merge-base", "--is-ancestor", commit1, commit2],
@@ -100,7 +138,10 @@ def is_commit_newer_or_equal(commit1: str, commit2: str) -> Optional[bool]:
)
return False
except subprocess.CalledProcessError:
# If both fail, we can't determine the relationship
colored_print("Git comparison failed - shallow clone or missing commits", Colors.YELLOW)
return None
except Exception as e:
colored_print(f"Git comparison error: {e}", Colors.RED)
return None
@@ -346,17 +387,30 @@ def test_deployment(
# Check if we're looking for a specific version and compare
current_commit = get_current_commit_hash()
version_ok = True
if current_commit and deployed_commit != "Unknown":
colored_print(f"Expected commit: {current_commit[:8]}...", Colors.BLUE)
colored_print(f"Deployed commit: {deployed_commit[:8]}...", Colors.BLUE)
if deployed_commit == current_commit:
colored_print(
"✅ Exact version match - deployment is up to date!",
Colors.GREEN,
)
elif deployed_commit.startswith(current_commit[:8]) or current_commit.startswith(deployed_commit[:8]):
# Handle case where we have short vs long commit hashes
colored_print(
"✅ Version match (short hash) - deployment is up to date!",
Colors.GREEN,
)
else:
# Check if deployed version is newer
# Check if deployed version is newer using git
comparison = is_commit_newer_or_equal(
deployed_commit, current_commit
)
colored_print(f"Commit comparison result: {comparison}", Colors.BLUE)
if comparison is True:
colored_print(
" Note: A newer version is already deployed (this is fine!)",
@@ -364,15 +418,38 @@ def test_deployment(
)
elif comparison is False:
colored_print(
"⚠️ Warning: Deployed version appears to be older than expected",
f"⚠️ Deployed version ({deployed_commit[:8]}) is older than expected ({current_commit[:8]})",
Colors.YELLOW,
)
else:
colored_print(
"⚠️ Warning: Could not determine version relationship",
f"Waiting for deployment to update... (attempt {attempt}/{max_retries})",
Colors.BLUE,
)
version_ok = False
else:
# Git comparison failed - use simple string comparison as fallback
colored_print(
"⚠️ Git comparison failed - using string comparison fallback",
Colors.YELLOW,
)
# If commits are different, assume we need to wait
# This is safer than proceeding with wrong version
colored_print(
f"⚠️ Deployed version ({deployed_commit[:8]}) differs from expected ({current_commit[:8]})",
Colors.YELLOW,
)
colored_print(
f"Waiting for deployment to update... (attempt {attempt}/{max_retries})",
Colors.BLUE,
)
version_ok = False
# Only proceed with functionality tests if version is correct
if not version_ok:
# Version is not correct, skip tests and retry
pass # Will continue to retry logic below
else:
# Run functionality tests
colored_print("Running basic functionality tests...", Colors.YELLOW)
@@ -408,7 +485,11 @@ def test_deployment(
colored_print(
f"Waiting {retry_delay} seconds before next attempt...", Colors.BLUE
)
time.sleep(retry_delay)
# Show progress during wait
for remaining in range(retry_delay, 0, -1):
print(f"\r⏳ Retrying in {remaining:2d} seconds...", end="", flush=True)
time.sleep(1)
print("\r" + " " * 30 + "\r", end="", flush=True) # Clear the line
colored_print(
f"❌ FAILED: {name} deployment verification timed out after {max_retries} attempts",

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Performance testing script for Libravatar CI/CD pipeline
@@ -12,13 +11,41 @@ import sys
import time
import statistics
import hashlib
import random
import string
from typing import Dict, List, Any, Optional, Tuple
# Add project root to path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from libravatar import libravatar_url
from urllib.parse import urlsplit
from prettytable import PrettyTable
def random_string(length=10):
    """Return a random lowercase-alphanumeric string (default length 10).

    Uses random.SystemRandom (OS entropy) rather than the default PRNG.
    """
    alphabet = string.ascii_lowercase + string.digits
    rng = random.SystemRandom()
    return "".join(rng.choices(alphabet, k=length))
# Prefer the shared ivatar helper when the Django project is importable;
# fall back to a self-contained implementation for external/remote testing.
try:
    from ivatar.utils import generate_random_email
except ImportError:

    def generate_random_email():
        """Generate a random email address using the same pattern as test_views.py"""
        local_part = random_string()
        host = random_string()
        suffix = random_string(2)
        return "{}@{}.{}".format(local_part, host, suffix)
# Django setup - only for local testing
def setup_django():
def setup_django() -> None:
"""Setup Django for local testing"""
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ivatar.settings")
import django
@@ -29,19 +56,32 @@ def setup_django():
class PerformanceTestRunner:
"""Main performance test runner"""
# Define all avatar styles and sizes to test
AVATAR_STYLES: List[str] = [
"identicon",
"monsterid",
"robohash",
"pagan",
"retro",
"wavatar",
"mm",
"mmng",
]
AVATAR_SIZES: List[int] = [80, 256]
def __init__(
self,
base_url="http://localhost:8000",
concurrent_users=10,
test_cache=True,
remote_testing=False,
):
self.base_url = base_url
self.concurrent_users = concurrent_users
self.test_cache = test_cache
self.remote_testing = remote_testing
self.client = None
self.results = {}
base_url: str = "http://localhost:8000",
concurrent_users: int = 10,
test_cache: bool = True,
remote_testing: bool = False,
) -> None:
self.base_url: str = base_url
self.concurrent_users: int = concurrent_users
self.test_cache: bool = test_cache
self.remote_testing: bool = remote_testing
self.client: Optional[Any] = None # Django test client
self.results: Dict[str, Any] = {}
# Determine if we're testing locally or remotely
if remote_testing or not base_url.startswith("http://localhost"):
@@ -55,7 +95,7 @@ class PerformanceTestRunner:
self.client = Client()
def setup_test_data(self):
def setup_test_data(self) -> None:
"""Create test data for performance tests"""
print("Setting up test data...")
@@ -79,52 +119,249 @@ class PerformanceTestRunner:
print(f"Created {len(test_emails)} test users and emails")
def test_avatar_generation_performance(self):
"""Test avatar generation performance"""
print("\n=== Avatar Generation Performance Test ===")
def _generate_test_cases(self) -> List[Dict[str, Any]]:
"""Generate test cases for all avatar styles and sizes"""
test_cases = []
for style in self.AVATAR_STYLES:
for size in self.AVATAR_SIZES:
test_cases.append({"default": style, "size": size})
return test_cases
# Test different avatar types and sizes
test_cases = [
{"default": "identicon", "size": 80},
{"default": "monsterid", "size": 80},
{"default": "robohash", "size": 80},
{"default": "identicon", "size": 256},
{"default": "monsterid", "size": 256},
]
def _test_single_avatar_request(
self, case: Dict[str, Any], email: str, use_requests: bool = False
) -> Dict[str, Any]:
"""Test a single avatar request - shared logic for local and remote testing"""
# Use libravatar library to generate the URL
full_url = libravatar_url(
email=email, size=case["size"], default=case["default"]
)
results = []
# Extract path and query from the full URL
urlobj = urlsplit(full_url)
url_path = f"{urlobj.path}?{urlobj.query}"
for case in test_cases:
# Generate test hash
test_email = "perftest@example.com"
email_hash = hashlib.md5(test_email.encode()).hexdigest()
# Build URL
url = f"/avatar/{email_hash}"
params = {"d": case["default"], "s": case["size"]}
# Time the request
start_time = time.time()
response = self.client.get(url, params)
if use_requests:
# Remote testing with requests
import requests
url = f"{self.base_url}{url_path}"
try:
response = requests.get(url, timeout=10)
end_time = time.time()
duration = (end_time - start_time) * 1000
duration = (end_time - start_time) * 1000 # Convert to ms
# Determine cache status from response headers
cache_detail = response.headers.get("x-cache-detail", "").lower()
age = response.headers.get("age", "0")
cache_status = "unknown"
results.append(
{
if "cache hit" in cache_detail or int(age) > 0:
cache_status = "hit"
elif "cache miss" in cache_detail or age == "0":
cache_status = "miss"
return {
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": response.status_code,
"content_length": len(response.content) if response.content else 0,
"success": response.status_code == 200,
"cache_status": cache_status,
"cache_detail": cache_detail,
"age": age,
"full_url": full_url,
"email": email,
}
except Exception as e:
end_time = time.time()
duration = (end_time - start_time) * 1000
return {
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": 0,
"success": False,
"error": str(e),
"cache_status": "error",
"full_url": full_url,
"email": email,
}
else:
# Local testing with Django test client
if self.client is None:
raise RuntimeError("Django test client not initialized")
response = self.client.get(url_path, follow=True)
end_time = time.time()
duration = (end_time - start_time) * 1000
# Check for cache information in response headers
cache_status = "unknown"
if hasattr(response, "get") and callable(getattr(response, "get", None)):
cache_control = response.get("Cache-Control", "")
age = response.get("Age", "0")
if age and int(age) > 0:
cache_status = "hit"
elif "no-cache" in cache_control:
cache_status = "miss"
else:
cache_status = "miss" # Default assumption for first generation
# Handle content length for different response types
content_length = 0
if hasattr(response, "content"):
content_length = len(response.content) if response.content else 0
elif hasattr(response, "streaming_content"):
# For FileResponse, we can't easily get content length without consuming the stream
content_length = 1 # Just indicate there's content
return {
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": response.status_code,
"content_length": content_length,
"cache_status": cache_status,
"success": response.status_code == 200,
"full_url": full_url,
"email": email,
}
def _display_avatar_results(self, results: List[Dict[str, Any]]) -> None:
    """Display avatar test results using prettytable for perfect alignment.

    Groups per-request results by avatar style (test names look like
    "<style>_<size>px"), prints one average row per style followed by one
    row per tested size, and inserts a divider row between styles.

    Args:
        results: Result dicts with keys "test", "duration_ms",
            "cache_status", "success" and optionally "error".
    """
    # Fix: removed a stray leftover line that referenced undefined
    # `case`/`duration` names inside this method (diff/merge artifact).
    # Group results by avatar style
    style_results: Dict[str, List[Dict[str, Any]]] = {}
    for result in results:
        style = result["test"].split("_")[0]  # Extract style from test name
        if style not in style_results:
            style_results[style] = []
        style_results[style].append(result)

    # Create table
    table = PrettyTable()
    table.field_names = ["Avatar Style", "Size", "Time (ms)", "Status", "Cache"]
    table.align["Avatar Style"] = "l"
    table.align["Size"] = "r"
    table.align["Time (ms)"] = "r"
    table.align["Status"] = "c"
    table.align["Cache"] = "c"

    # Only styles that actually produced results, in canonical order
    styles_with_data = [
        style for style in self.AVATAR_STYLES if style in style_results
    ]
    for i, style in enumerate(styles_with_data):
        style_data = style_results[style]
        successful_results = [r for r in style_data if r.get("success", True)]
        failed_results = [r for r in style_data if not r.get("success", True)]

        if successful_results:
            # Calculate average over successful requests only
            avg_duration = statistics.mean(
                [r["duration_ms"] for r in successful_results]
            )

            # Determine overall cache status for the style
            cache_statuses = [
                r["cache_status"]
                for r in successful_results
                if r["cache_status"] != "unknown"
            ]
            if not cache_statuses:
                cache_summary = "unknown"
            elif all(status == "hit" for status in cache_statuses):
                cache_summary = "hit"
            elif all(status == "miss" for status in cache_statuses):
                cache_summary = "miss"
            else:
                cache_summary = "mixed"

            # Determine status icon for average line.
            # NOTE(review): these icon strings appear empty in the source;
            # they look like stripped ✅/❌ glyphs — confirm against repo.
            if len(failed_results) == 0:
                avg_status_icon = ""  # All successful
            elif len(successful_results) == 0:
                avg_status_icon = ""  # All failed
            else:
                avg_status_icon = "⚠️"  # Mixed results

            # Add average row
            table.add_row(
                [
                    f"{style} (avg)",
                    "",
                    f"{avg_duration:.2f}",
                    avg_status_icon,
                    cache_summary,
                ]
            )

            # Add individual size rows
            for result in style_data:
                size = result["test"].split("_")[1]  # Extract size from test name
                status_icon = "" if result.get("success", True) else ""
                cache_status = result["cache_status"]
                if result.get("success", True):
                    table.add_row(
                        [
                            "",
                            size,
                            f"{result['duration_ms']:.2f}",
                            status_icon,
                            cache_status,
                        ]
                    )
                else:
                    error_msg = result.get("error", "Failed")
                    table.add_row(["", size, error_msg, status_icon, cache_status])
        else:
            # All requests failed for this style
            table.add_row([f"{style} (avg)", "", "Failed", "", "error"])
            for result in style_data:
                size = result["test"].split("_")[1]
                error_msg = result.get("error", "Failed")
                table.add_row(["", size, error_msg, "", "error"])

        # Add divider line between styles (except after the last style)
        if i < len(styles_with_data) - 1:
            table.add_row(["-" * 15, "-" * 5, "-" * 9, "-" * 6, "-" * 5])

    print(table)
def test_avatar_generation_performance(self) -> None:
"""Test avatar generation performance"""
print("\n=== Avatar Generation Performance Test ===")
# Generate test cases for all avatar styles and sizes
test_cases = self._generate_test_cases()
results = []
# Generate random email for testing
test_email = generate_random_email()
print(f" Testing with email: {test_email}")
for case in test_cases:
result = self._test_single_avatar_request(
case, test_email, use_requests=False
)
results.append(result)
# Show example URL from first result
if results:
print(f" Example URL: {results[0]['full_url']}")
# Display results grouped by style
self._display_avatar_results(results)
# Calculate statistics
durations = [r["duration_ms"] for r in results]
successful_results = [r for r in results if r.get("success", True)]
if successful_results:
durations = [r["duration_ms"] for r in successful_results]
avg_duration = statistics.mean(durations)
max_duration = max(durations)
else:
avg_duration = 0
max_duration = 0
print(f"\n Average: {avg_duration:.2f}ms")
print(f" Maximum: {max_duration:.2f}ms")
@@ -143,7 +380,7 @@ class PerformanceTestRunner:
"results": results,
}
def test_concurrent_load(self):
def test_concurrent_load(self, response_threshold: int = 1000, p95_threshold: int = 2000) -> None:
"""Test concurrent load handling"""
print("\n=== Concurrent Load Test ===")
@@ -160,6 +397,11 @@ class PerformanceTestRunner:
successful_requests = [r for r in results if r["success"]]
failed_requests = [r for r in results if not r["success"]]
# Analyze cache performance
cache_hits = [r for r in results if r.get("cache_status") == "hit"]
cache_misses = [r for r in results if r.get("cache_status") == "miss"]
cache_errors = [r for r in results if r.get("cache_status") == "error"]
total_duration = (
sum(r["duration_ms"] for r in results) / 1000
) # Convert to seconds
@@ -168,6 +410,20 @@ class PerformanceTestRunner:
print(f" Successful requests: {len(successful_requests)}/{num_requests}")
print(f" Failed requests: {len(failed_requests)}")
# Show cache statistics if available
if cache_hits or cache_misses:
print(f" Cache hits: {len(cache_hits)}")
print(f" Cache misses: {len(cache_misses)}")
if cache_errors:
print(f" Cache errors: {len(cache_errors)}")
cache_hit_rate = (
len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
if (cache_hits or cache_misses)
else 0
)
print(f" Cache hit rate: {cache_hit_rate:.1f}%")
if successful_requests:
durations = [r["duration_ms"] for r in successful_requests]
avg_duration = statistics.mean(durations)
@@ -192,10 +448,10 @@ class PerformanceTestRunner:
# Performance evaluation
if len(failed_requests) > 0:
print(" ⚠️ WARNING: Some operations failed under load")
elif p95_duration > 2000: # 2 seconds
print(" ⚠️ WARNING: 95th percentile response time exceeds 2s")
elif avg_duration > 1000: # 1 second
print(" ⚠️ CAUTION: Average response time exceeds 1s under load")
elif p95_duration > p95_threshold:
print(f" ⚠️ WARNING: 95th percentile response time exceeds {p95_threshold}ms")
elif avg_duration > response_threshold:
print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms under load")
else:
print(" ✅ Load handling is good")
else:
@@ -212,29 +468,51 @@ class PerformanceTestRunner:
"requests_per_second": (
len(successful_requests) / total_duration if total_duration > 0 else 0
),
"cache_hits": len(cache_hits),
"cache_misses": len(cache_misses),
"cache_errors": len(cache_errors),
"cache_hit_rate": (
len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100
if (cache_hits or cache_misses)
else 0
),
}
def _test_remote_concurrent_load(self, num_requests):
def _test_remote_concurrent_load(self, num_requests: int) -> List[Dict[str, Any]]:
"""Test concurrent load against remote server"""
import requests # noqa: F401
from concurrent.futures import ThreadPoolExecutor, as_completed
def make_remote_request(thread_id):
test_email = f"perftest{thread_id % 10}@example.com"
email_hash = hashlib.md5(test_email.encode()).hexdigest()
url = f"{self.base_url}/avatar/{email_hash}"
params = {"d": "identicon", "s": 80}
test_email = generate_random_email()
# Use libravatar library to generate the URL
full_url = libravatar_url(email=test_email, size=80, default="identicon")
urlobj = urlsplit(full_url)
url_path = f"{urlobj.path}?{urlobj.query}"
url = f"{self.base_url}{url_path}"
start_time = time.time()
try:
response = requests.get(url, params=params, timeout=10)
response = requests.get(url, timeout=10)
end_time = time.time()
# Determine cache status
cache_detail = response.headers.get("x-cache-detail", "").lower()
age = response.headers.get("age", "0")
cache_status = "unknown"
if "cache hit" in cache_detail or int(age) > 0:
cache_status = "hit"
elif "cache miss" in cache_detail or age == "0":
cache_status = "miss"
return {
"thread_id": thread_id,
"duration_ms": (end_time - start_time) * 1000,
"status_code": response.status_code,
"success": response.status_code == 200,
"cache_status": cache_status,
}
except Exception as e:
end_time = time.time()
@@ -243,6 +521,7 @@ class PerformanceTestRunner:
"duration_ms": (end_time - start_time) * 1000,
"success": False,
"error": str(e),
"cache_status": "error",
}
results = []
@@ -260,7 +539,7 @@ class PerformanceTestRunner:
return results
def _test_local_concurrent_load(self, num_requests):
def _test_local_concurrent_load(self, num_requests: int) -> List[Dict[str, Any]]:
"""Test concurrent load locally using avatar generation functions"""
results = []
@@ -269,7 +548,7 @@ class PerformanceTestRunner:
import Identicon
for i in range(num_requests):
test_email = f"perftest{i % 10}@example.com"
test_email = generate_random_email()
email_hash = hashlib.md5(test_email.encode()).hexdigest()
request_start = time.time()
@@ -283,6 +562,7 @@ class PerformanceTestRunner:
"thread_id": i,
"duration_ms": (request_end - request_start) * 1000,
"success": len(identicon_data) > 0,
"cache_status": "miss", # Direct generation is always a cache miss
}
)
except Exception as e:
@@ -293,6 +573,7 @@ class PerformanceTestRunner:
"duration_ms": (request_end - request_start) * 1000,
"success": False,
"error": str(e),
"cache_status": "error",
}
)
@@ -314,6 +595,7 @@ class PerformanceTestRunner:
"thread_id": i,
"duration_ms": (request_end - request_start) * 1000,
"success": True,
"cache_status": "n/a", # Database queries don't use image cache
}
)
except Exception as e:
@@ -324,12 +606,13 @@ class PerformanceTestRunner:
"duration_ms": (request_end - request_start) * 1000,
"success": False,
"error": str(e),
"cache_status": "error",
}
)
return results
def test_database_performance(self):
def test_database_performance(self) -> None:
"""Test database query performance"""
print("\n=== Database Performance Test ===")
@@ -378,7 +661,7 @@ class PerformanceTestRunner:
else:
print(f" ✅ Database query count is reasonable ({query_count} queries)")
def test_cache_performance(self):
def test_cache_performance(self) -> None:
"""Test caching effectiveness"""
if not self.test_cache:
print("\n=== Cache Performance Test ===")
@@ -387,18 +670,17 @@ class PerformanceTestRunner:
print("\n=== Cache Performance Test ===")
# Use an actual email address that exists in the system
test_email = "dev@libravatar.org"
email_hash = hashlib.md5(test_email.encode()).hexdigest()
# Generate a random email address for cache testing
test_email = generate_random_email()
print(f" Testing with: {test_email}")
if self.remote_testing:
first_duration, second_duration = self._test_remote_cache_performance(
email_hash
test_email
)
else:
first_duration, second_duration = self._test_local_cache_performance(
email_hash
test_email
)
print(f" First request: {first_duration:.2f}ms")
@@ -453,16 +735,19 @@ class PerformanceTestRunner:
"cache_headers": getattr(self, "cache_info", {}),
}
def _test_remote_cache_performance(self, email_hash):
def _test_remote_cache_performance(self, email: str) -> Tuple[float, float]:
"""Test cache performance against remote server"""
import requests
url = f"{self.base_url}/avatar/{email_hash}"
params = {"d": "identicon", "s": 80}
# Use libravatar library to generate the URL
full_url = libravatar_url(email=email, size=80, default="identicon")
urlobj = urlsplit(full_url)
url_path = f"{urlobj.path}?{urlobj.query}"
url = f"{self.base_url}{url_path}"
# First request (should be cache miss or fresh)
start_time = time.time()
response1 = requests.get(url, params=params, timeout=10)
response1 = requests.get(url, timeout=10)
first_duration = (time.time() - start_time) * 1000
# Check first request headers
@@ -480,7 +765,7 @@ class PerformanceTestRunner:
# Second request (should be cache hit)
start_time = time.time()
response2 = requests.get(url, params=params, timeout=10)
response2 = requests.get(url, timeout=10)
second_duration = (time.time() - start_time) * 1000
# Check second request headers
@@ -525,24 +810,28 @@ class PerformanceTestRunner:
return first_duration, second_duration
def _test_local_cache_performance(self, email_hash):
def _test_local_cache_performance(self, email: str) -> Tuple[float, float]:
"""Test cache performance locally"""
url = f"/avatar/{email_hash}"
params = {"d": "identicon", "s": 80}
# Use libravatar library to generate the URL
full_url = libravatar_url(email=email, size=80, default="identicon")
urlobj = urlsplit(full_url)
url_path = f"{urlobj.path}?{urlobj.query}"
# First request (cache miss)
start_time = time.time()
self.client.get(url, params)
if self.client:
self.client.get(url_path)
first_duration = (time.time() - start_time) * 1000
# Second request (should be cache hit)
start_time = time.time()
self.client.get(url, params)
if self.client:
self.client.get(url_path)
second_duration = (time.time() - start_time) * 1000
return first_duration, second_duration
def run_all_tests(self):
def run_all_tests(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000, ignore_cache_warnings: bool = False) -> Optional[Dict[str, Any]]:
"""Run all performance tests"""
print("Starting Libravatar Performance Tests")
print("=" * 50)
@@ -557,14 +846,14 @@ class PerformanceTestRunner:
# Run tests based on mode
if self.remote_testing:
print("🌐 Running remote server tests...")
self.test_remote_avatar_performance()
self.test_remote_avatar_performance(response_threshold)
else:
print("🏠 Running local tests...")
self.test_avatar_generation_performance()
self.test_database_performance()
# Always test concurrent load
self.test_concurrent_load()
self.test_concurrent_load(response_threshold, p95_threshold)
# Test cache performance if enabled
self.test_cache_performance()
@@ -576,7 +865,7 @@ class PerformanceTestRunner:
print(f"Performance tests completed in {total_duration:.2f}s")
# Overall assessment
self.assess_overall_performance()
self.assess_overall_performance(avatar_threshold, response_threshold, p95_threshold, ignore_cache_warnings)
return self.results
@@ -584,68 +873,30 @@ class PerformanceTestRunner:
print(f"Performance test failed: {e}")
return None
def test_remote_avatar_performance(self):
def test_remote_avatar_performance(self, response_threshold: int = 1000) -> None:
"""Test avatar generation performance on remote server"""
print("\n=== Remote Avatar Performance Test ===")
import requests
# Test different avatar types and sizes
test_cases = [
{"default": "identicon", "size": 80},
{"default": "monsterid", "size": 80},
{"default": "robohash", "size": 80},
{"default": "identicon", "size": 256},
{"default": "monsterid", "size": 256},
]
# Generate test cases for all avatar styles and sizes
test_cases = self._generate_test_cases()
results = []
# Generate random email for testing
test_email = generate_random_email()
print(f" Testing with email: {test_email}")
for case in test_cases:
# Generate test hash
test_email = "perftest@example.com"
email_hash = hashlib.md5(test_email.encode()).hexdigest()
# Build URL
url = f"{self.base_url}/avatar/{email_hash}"
params = {"d": case["default"], "s": case["size"]}
# Time the request
start_time = time.time()
try:
response = requests.get(url, params=params, timeout=10)
end_time = time.time()
duration = (end_time - start_time) * 1000 # Convert to ms
results.append(
{
"test": f"{case['default']}_{case['size']}px",
"duration_ms": duration,
"status_code": response.status_code,
"content_length": (
len(response.content) if response.content else 0
),
"success": response.status_code == 200,
}
result = self._test_single_avatar_request(
case, test_email, use_requests=True
)
results.append(result)
status = "" if response.status_code == 200 else ""
print(
f" {case['default']} ({case['size']}px): {duration:.2f}ms {status}"
)
# Show example URL from first result
if results:
print(f" Example URL: {results[0]['full_url']}")
except Exception as e:
print(f" {case['default']} ({case['size']}px): ❌ Failed - {e}")
results.append(
{
"test": f"{case['default']}_{case['size']}px",
"duration_ms": 0,
"status_code": 0,
"success": False,
"error": str(e),
}
)
# Display results grouped by style
self._display_avatar_results(results)
# Calculate statistics for successful requests
successful_results = [r for r in results if r["success"]]
@@ -659,10 +910,10 @@ class PerformanceTestRunner:
print(f" Success rate: {len(successful_results)}/{len(results)}")
# Performance thresholds for remote testing
if avg_duration > 2000: # 2 seconds
print(" ⚠️ WARNING: Average response time exceeds 2s")
elif avg_duration > 1000: # 1 second
print(" ⚠️ CAUTION: Average response time exceeds 1s")
if avg_duration > (response_threshold * 2): # 2x threshold for warning
print(f" ⚠️ WARNING: Average response time exceeds {response_threshold * 2}ms")
elif avg_duration > response_threshold:
print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms")
else:
print(" ✅ Remote avatar performance is good")
else:
@@ -677,7 +928,7 @@ class PerformanceTestRunner:
"success_rate": len(successful_results) / len(results) if results else 0,
}
def assess_overall_performance(self):
def assess_overall_performance(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000, ignore_cache_warnings: bool = False) -> bool:
"""Provide overall performance assessment"""
print("\n=== OVERALL PERFORMANCE ASSESSMENT ===")
@@ -686,8 +937,8 @@ class PerformanceTestRunner:
# Check avatar generation
if "avatar_generation" in self.results:
avg_gen = self.results["avatar_generation"]["average_ms"]
if avg_gen > 1000:
warnings.append(f"Avatar generation is slow ({avg_gen:.0f}ms average)")
if avg_gen > avatar_threshold:
warnings.append(f"Avatar generation is slow ({avg_gen:.0f}ms average, threshold: {avatar_threshold}ms)")
# Check concurrent load
if "concurrent_load" in self.results:
@@ -696,7 +947,7 @@ class PerformanceTestRunner:
warnings.append(f"{failed} requests failed under concurrent load")
# Check cache performance
if "cache_performance" in self.results:
if "cache_performance" in self.results and not ignore_cache_warnings:
cache_working = self.results["cache_performance"].get(
"cache_working", False
)
@@ -722,7 +973,7 @@ class PerformanceTestRunner:
return len(warnings) > 0
def main():
def main() -> Optional[Dict[str, Any]]:
"""Main entry point"""
import argparse
@@ -749,6 +1000,29 @@ def main():
action="store_true",
help="Force remote testing mode (auto-detected for non-localhost URLs)",
)
parser.add_argument(
"--avatar-threshold",
type=int,
default=1000,
help="Avatar generation threshold in ms (default: 1000ms, use 2500 for dev environments)",
)
parser.add_argument(
"--response-threshold",
type=int,
default=1000,
help="Response time threshold in ms (default: 1000ms, use 2500 for dev environments)",
)
parser.add_argument(
"--p95-threshold",
type=int,
default=2000,
help="95th percentile threshold in ms (default: 2000ms, use 5000 for dev environments)",
)
parser.add_argument(
"--ignore-cache-warnings",
action="store_true",
help="Don't fail on cache performance warnings (useful for dev environments)",
)
args = parser.parse_args()
@@ -765,7 +1039,7 @@ def main():
remote_testing=remote_testing,
)
results = runner.run_all_tests()
results = runner.run_all_tests(args.avatar_threshold, args.response_threshold, args.p95_threshold, args.ignore_cache_warnings)
if args.output and results:
import json

View File

@@ -1,5 +1,4 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Run tests with OpenTelemetry instrumentation and export enabled, plus coverage measurement.
This script is designed to be used with 'coverage run' command.

View File

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(