From 9cf1cb47457555229c06001314a19b808354a120 Mon Sep 17 00:00:00 2001 From: Oliver Falk Date: Fri, 24 Oct 2025 13:51:45 +0200 Subject: [PATCH] Enhance performance tests --- .gitlab-ci.yml | 45 +- .pre-commit-config.yaml | 15 +- config.py | 1 - config_local_test.py | 1 - import_libravatar.py | 1 - ivatar/__init__.py | 1 - ivatar/context_processors.py | 1 - ivatar/file_security.py | 1 - ivatar/ivataraccount/__init__.py | 2 +- ivatar/ivataraccount/admin.py | 2 +- ivatar/ivataraccount/auth.py | 1 - ivatar/ivataraccount/forms.py | 2 +- ivatar/ivataraccount/gravatar.py | 2 +- .../ivataraccount/migrations/0001_initial.py | 1 - .../0002_openidassociation_openidnonce.py | 1 - .../migrations/0003_auto_20180508_0637.py | 1 - .../migrations/0004_auto_20180508_0742.py | 1 - .../migrations/0005_auto_20180522_1155.py | 1 - .../migrations/0006_auto_20180626_1445.py | 1 - .../migrations/0007_auto_20180627_0624.py | 1 - .../migrations/0008_userpreference.py | 1 - .../migrations/0009_auto_20180705_1152.py | 1 - .../migrations/0010_auto_20180705_1201.py | 1 - .../migrations/0011_auto_20181107_1550.py | 1 - .../migrations/0012_auto_20181107_1732.py | 1 - .../migrations/0013_auto_20181203_1421.py | 1 - .../migrations/0014_auto_20190218_1602.py | 1 - .../migrations/0015_auto_20200225_0934.py | 1 - .../migrations/0016_auto_20210413_0904.py | 1 - .../migrations/0017_auto_20210528_1314.py | 1 - .../migrations/0018_alter_photo_format.py | 1 - .../0019_confirmedemail_bluesky_handle.py | 1 - .../0020_confirmedopenid_bluesky_handle.py | 1 - .../0021_add_performance_indexes.py | 1 - ivatar/ivataraccount/models.py | 9 +- .../ivataraccount/read_libravatar_export.py | 1 - ivatar/ivataraccount/test_auth.py | 1 - ivatar/ivataraccount/test_views.py | 5 +- ivatar/ivataraccount/test_views_bluesky.py | 5 +- ivatar/ivataraccount/urls.py | 2 +- ivatar/ivataraccount/views.py | 19 +- ivatar/middleware.py | 1 - ivatar/opentelemetry_config.py | 1 - ivatar/opentelemetry_middleware.py | 7 +- ivatar/settings.py | 1 - ivatar/static/css/bootstrap.min.css | 50 +- ivatar/static/css/libravatar_base.css | 16 +- ivatar/static/js/bootstrap.min.js | 32 +- ivatar/static/js/cropper.min.js | 131 ++-- ivatar/static/js/jcrop.js | 28 +- ivatar/static/js/jquery-3.7.1.min.js | 349 +++++------ ivatar/test_auxiliary.py | 1 - ivatar/test_file_security.py | 1 - ivatar/test_opentelemetry.py | 1 - ivatar/test_static_pages.py | 6 +- ivatar/test_utils.py | 1 - ivatar/test_views.py | 5 +- ivatar/test_views_stats.py | 1 - ivatar/test_wsgi.py | 2 +- ivatar/tools/forms.py | 2 +- ivatar/tools/test_views.py | 6 +- ivatar/tools/urls.py | 1 - ivatar/tools/views.py | 2 +- ivatar/urls.py | 1 - ivatar/utils.py | 11 +- ivatar/views.py | 11 +- ivatar/wsgi.py | 1 - libravatarproxy.py | 1 - manage.py | 1 - requirements.txt | 1 + scripts/check_deployment.py | 171 ++++-- scripts/performance_tests.py | 560 +++++++++++++----- scripts/run_tests_with_coverage.py | 1 - setup.py | 1 - 74 files changed, 965 insertions(+), 578 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 47fa8ae..3f1db78 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -158,21 +158,24 @@ performance_tests_dev: image: python:3.11-alpine only: - devel + when: on_success # Run automatically after successful deployment verification variables: DEV_URL: "https://dev.libravatar.org" + PYTHONUNBUFFERED: 1 before_script: - apk add --no-cache curl - - pip install requests + - pip install requests Pillow prettytable pyLibravatar dnspython py3dns script: - echo "Running performance tests against 
dev.libravatar.org..." - - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --output performance_dev.json + - python3 scripts/performance_tests.py --base-url $DEV_URL --concurrent-users 5 --avatar-threshold 2500 --response-threshold 2500 --p95-threshold 5000 --ignore-cache-warnings --output performance_dev.json artifacts: paths: - performance_dev.json expire_in: 7 days allow_failure: true # Don't fail deployment on performance issues needs: - - verify_dev_deployment # Run after deployment verification + - job: verify_dev_deployment + artifacts: false # Run after deployment verification succeeds # Performance testing against production server (master branch only) performance_tests_prod: @@ -180,12 +183,13 @@ performance_tests_prod: image: python:3.11-alpine only: - master - when: manual # Manual trigger to avoid impacting production unnecessarily + when: on_success # Run automatically after successful deployment verification variables: PROD_URL: "https://libravatar.org" + PYTHONUNBUFFERED: 1 before_script: - apk add --no-cache curl - - pip install requests + - pip install requests Pillow prettytable pyLibravatar dnspython py3dns script: - echo "Running performance tests against libravatar.org..." - python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 3 --output performance_prod.json @@ -195,7 +199,30 @@ performance_tests_prod: expire_in: 30 days # Keep production results longer allow_failure: true # Don't fail deployment on performance issues needs: - - verify_prod_deployment # Run after deployment verification + - job: verify_prod_deployment + artifacts: false # Run after deployment verification succeeds + +# Manual performance testing against production (for on-demand testing) +performance_tests_prod_manual: + stage: deploy + image: python:3.11-alpine + only: + - master + when: manual # Manual trigger for on-demand performance testing + variables: + PROD_URL: "https://libravatar.org" + PYTHONUNBUFFERED: 1 + before_script: + - apk add --no-cache curl + - pip install requests Pillow prettytable pyLibravatar dnspython py3dns + script: + - echo "Running manual performance tests against libravatar.org..." + - python3 scripts/performance_tests.py --base-url $PROD_URL --concurrent-users 5 --output performance_prod_manual.json + artifacts: + paths: + - performance_prod_manual.json + expire_in: 30 days + allow_failure: true # Deployment verification jobs verify_dev_deployment: @@ -207,8 +234,9 @@ verify_dev_deployment: DEV_URL: "https://dev.libravatar.org" MAX_RETRIES: 30 RETRY_DELAY: 60 + PYTHONUNBUFFERED: 1 before_script: - - apk add --no-cache curl + - apk add --no-cache curl git - pip install Pillow script: - echo "Waiting for dev.libravatar.org deployment to complete..." @@ -225,8 +253,9 @@ verify_prod_deployment: PROD_URL: "https://libravatar.org" MAX_RETRIES: 10 RETRY_DELAY: 30 + PYTHONUNBUFFERED: 1 before_script: - - apk add --no-cache curl + - apk add --no-cache curl git - pip install Pillow script: - echo "Verifying production deployment..." 
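A quick way to sanity-check these jobs outside of CI is to run the same command the performance_tests_dev job invokes. The sketch below is an assumed local run, not part of the patch: the dependency list and all flags are copied from the job definition above, while running it from a workstation and the exact output filename are illustrative assumptions (threshold units are whatever scripts/performance_tests.py defines).

    # Local run against the dev instance, mirroring the performance_tests_dev job above.
    pip install requests Pillow prettytable pyLibravatar dnspython py3dns
    python3 scripts/performance_tests.py \
        --base-url https://dev.libravatar.org \
        --concurrent-users 5 \
        --avatar-threshold 2500 \
        --response-threshold 2500 \
        --p95-threshold 5000 \
        --ignore-cache-warnings \
        --output performance_dev.json

Running the script by hand this way makes it easier to tell whether a red pipeline reflects the service or the thresholds, which is also why both CI jobs keep allow_failure: true.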
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fec9586..ca6d859 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,16 +4,20 @@ repos: hooks: - id: check-useless-excludes - repo: https://github.com/pre-commit/mirrors-prettier - rev: v3.0.0-alpha.4 + rev: v4.0.0-alpha.8 hooks: - id: prettier files: \.(css|js|md|markdown|json) - repo: https://github.com/python/black - rev: 22.12.0 + rev: 25.9.0 hooks: - id: black +- repo: https://github.com/asottile/pyupgrade + rev: v3.21.0 + hooks: + - id: pyupgrade - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v6.0.0 hooks: - id: check-added-large-files - id: check-ast @@ -28,7 +32,6 @@ repos: args: - --unsafe - id: end-of-file-fixer - - id: fix-encoding-pragma - id: forbid-new-submodules - id: no-commit-to-branch args: @@ -38,7 +41,7 @@ repos: - id: sort-simple-yaml - id: trailing-whitespace - repo: https://github.com/PyCQA/flake8 - rev: 6.0.0 + rev: 7.3.0 hooks: - id: flake8 - repo: local @@ -57,7 +60,7 @@ repos: types: - shell - repo: https://github.com/asottile/blacken-docs - rev: v1.12.1 + rev: 1.20.0 hooks: - id: blacken-docs # YASpeller does not seem to work anymore diff --git a/config.py b/config.py index 0bb7add..556e594 100644 --- a/config.py +++ b/config.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Configuration overrides for settings.py """ diff --git a/config_local_test.py b/config_local_test.py index 2c7d906..534b7c0 100644 --- a/config_local_test.py +++ b/config_local_test.py @@ -1,3 +1,2 @@ -# -*- coding: utf-8 -*- # Test configuration to verify LOGS_DIR override LOGS_DIR = "/tmp/ivatar_test_logs" diff --git a/import_libravatar.py b/import_libravatar.py index 424850b..b14c8d8 100644 --- a/import_libravatar.py +++ b/import_libravatar.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ Import the whole libravatar export """ diff --git a/ivatar/__init__.py b/ivatar/__init__.py index 0649992..2aaa3f8 100644 --- a/ivatar/__init__.py +++ b/ivatar/__init__.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Module init """ diff --git a/ivatar/context_processors.py b/ivatar/context_processors.py index 4add854..92457b8 100644 --- a/ivatar/context_processors.py +++ b/ivatar/context_processors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Default: useful variables for the base page templates. 
""" diff --git a/ivatar/file_security.py b/ivatar/file_security.py index 0a486b5..0160566 100644 --- a/ivatar/file_security.py +++ b/ivatar/file_security.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ File upload security utilities for ivatar """ diff --git a/ivatar/ivataraccount/__init__.py b/ivatar/ivataraccount/__init__.py index a8a5815..2aaa3f8 100644 --- a/ivatar/ivataraccount/__init__.py +++ b/ivatar/ivataraccount/__init__.py @@ -1,5 +1,5 @@ -# -*- coding: utf-8 -*- """ Module init """ + app_label = __name__ # pylint: disable=invalid-name diff --git a/ivatar/ivataraccount/admin.py b/ivatar/ivataraccount/admin.py index adc28cd..5816ec9 100644 --- a/ivatar/ivataraccount/admin.py +++ b/ivatar/ivataraccount/admin.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Register models in admin """ + from django.contrib import admin from .models import Photo, ConfirmedEmail, UnconfirmedEmail diff --git a/ivatar/ivataraccount/auth.py b/ivatar/ivataraccount/auth.py index a54e579..a5667aa 100644 --- a/ivatar/ivataraccount/auth.py +++ b/ivatar/ivataraccount/auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from social_core.backends.open_id_connect import OpenIdConnectAuth from ivatar.ivataraccount.models import ConfirmedEmail, Photo diff --git a/ivatar/ivataraccount/forms.py b/ivatar/ivataraccount/forms.py index b5f686a..0bf2370 100644 --- a/ivatar/ivataraccount/forms.py +++ b/ivatar/ivataraccount/forms.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Classes for our ivatar.ivataraccount.forms """ + from urllib.parse import urlsplit, urlunsplit from django import forms diff --git a/ivatar/ivataraccount/gravatar.py b/ivatar/ivataraccount/gravatar.py index 23c3703..0097e18 100644 --- a/ivatar/ivataraccount/gravatar.py +++ b/ivatar/ivataraccount/gravatar.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Helper method to fetch Gravatar image """ + from ssl import SSLError from urllib.request import HTTPError, URLError from ivatar.utils import urlopen diff --git a/ivatar/ivataraccount/migrations/0001_initial.py b/ivatar/ivataraccount/migrations/0001_initial.py index 34bca35..919f7f3 100644 --- a/ivatar/ivataraccount/migrations/0001_initial.py +++ b/ivatar/ivataraccount/migrations/0001_initial.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.5 on 2018-05-07 07:13 from django.conf import settings diff --git a/ivatar/ivataraccount/migrations/0002_openidassociation_openidnonce.py b/ivatar/ivataraccount/migrations/0002_openidassociation_openidnonce.py index 25dbf39..c87c883 100644 --- a/ivatar/ivataraccount/migrations/0002_openidassociation_openidnonce.py +++ b/ivatar/ivataraccount/migrations/0002_openidassociation_openidnonce.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.5 on 2018-05-07 07:23 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0003_auto_20180508_0637.py b/ivatar/ivataraccount/migrations/0003_auto_20180508_0637.py index e27a8b3..ddb361c 100644 --- a/ivatar/ivataraccount/migrations/0003_auto_20180508_0637.py +++ b/ivatar/ivataraccount/migrations/0003_auto_20180508_0637.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.5 on 2018-05-08 06:37 import datetime diff --git a/ivatar/ivataraccount/migrations/0004_auto_20180508_0742.py b/ivatar/ivataraccount/migrations/0004_auto_20180508_0742.py index ca41b29..7b8aeed 100644 --- a/ivatar/ivataraccount/migrations/0004_auto_20180508_0742.py +++ b/ivatar/ivataraccount/migrations/0004_auto_20180508_0742.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by 
Django 2.0.5 on 2018-05-08 07:42 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0005_auto_20180522_1155.py b/ivatar/ivataraccount/migrations/0005_auto_20180522_1155.py index 6db266b..d5aca65 100644 --- a/ivatar/ivataraccount/migrations/0005_auto_20180522_1155.py +++ b/ivatar/ivataraccount/migrations/0005_auto_20180522_1155.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.5 on 2018-05-22 11:55 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0006_auto_20180626_1445.py b/ivatar/ivataraccount/migrations/0006_auto_20180626_1445.py index b69780d..afd85d6 100644 --- a/ivatar/ivataraccount/migrations/0006_auto_20180626_1445.py +++ b/ivatar/ivataraccount/migrations/0006_auto_20180626_1445.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.6 on 2018-06-26 14:45 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0007_auto_20180627_0624.py b/ivatar/ivataraccount/migrations/0007_auto_20180627_0624.py index 1c157bc..eade4d4 100644 --- a/ivatar/ivataraccount/migrations/0007_auto_20180627_0624.py +++ b/ivatar/ivataraccount/migrations/0007_auto_20180627_0624.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.6 on 2018-06-27 06:24 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0008_userpreference.py b/ivatar/ivataraccount/migrations/0008_userpreference.py index d06a7bc..dbc9538 100644 --- a/ivatar/ivataraccount/migrations/0008_userpreference.py +++ b/ivatar/ivataraccount/migrations/0008_userpreference.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # pylint: disable=invalid-name,missing-docstring # Generated by Django 2.0.6 on 2018-07-04 12:32 diff --git a/ivatar/ivataraccount/migrations/0009_auto_20180705_1152.py b/ivatar/ivataraccount/migrations/0009_auto_20180705_1152.py index 32242a3..5568399 100644 --- a/ivatar/ivataraccount/migrations/0009_auto_20180705_1152.py +++ b/ivatar/ivataraccount/migrations/0009_auto_20180705_1152.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.6 on 2018-07-05 11:52 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0010_auto_20180705_1201.py b/ivatar/ivataraccount/migrations/0010_auto_20180705_1201.py index 3987825..07af7fd 100644 --- a/ivatar/ivataraccount/migrations/0010_auto_20180705_1201.py +++ b/ivatar/ivataraccount/migrations/0010_auto_20180705_1201.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.0.6 on 2018-07-05 12:01 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0011_auto_20181107_1550.py b/ivatar/ivataraccount/migrations/0011_auto_20181107_1550.py index f053437..2308c20 100644 --- a/ivatar/ivataraccount/migrations/0011_auto_20181107_1550.py +++ b/ivatar/ivataraccount/migrations/0011_auto_20181107_1550.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.1.3 on 2018-11-07 15:50 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0012_auto_20181107_1732.py b/ivatar/ivataraccount/migrations/0012_auto_20181107_1732.py index a6bd44c..5506f94 100644 --- a/ivatar/ivataraccount/migrations/0012_auto_20181107_1732.py +++ b/ivatar/ivataraccount/migrations/0012_auto_20181107_1732.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.1.3 on 2018-11-07 17:32 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0013_auto_20181203_1421.py 
b/ivatar/ivataraccount/migrations/0013_auto_20181203_1421.py index 38641bd..af9b522 100644 --- a/ivatar/ivataraccount/migrations/0013_auto_20181203_1421.py +++ b/ivatar/ivataraccount/migrations/0013_auto_20181203_1421.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.1.3 on 2018-12-03 14:21 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0014_auto_20190218_1602.py b/ivatar/ivataraccount/migrations/0014_auto_20190218_1602.py index ca4c0c9..862bf90 100644 --- a/ivatar/ivataraccount/migrations/0014_auto_20190218_1602.py +++ b/ivatar/ivataraccount/migrations/0014_auto_20190218_1602.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 2.1.5 on 2019-02-18 16:02 from django.db import migrations diff --git a/ivatar/ivataraccount/migrations/0015_auto_20200225_0934.py b/ivatar/ivataraccount/migrations/0015_auto_20200225_0934.py index 3446758..5046b13 100644 --- a/ivatar/ivataraccount/migrations/0015_auto_20200225_0934.py +++ b/ivatar/ivataraccount/migrations/0015_auto_20200225_0934.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 3.0.3 on 2020-02-25 09:34 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0016_auto_20210413_0904.py b/ivatar/ivataraccount/migrations/0016_auto_20210413_0904.py index 654c92b..fb90c61 100644 --- a/ivatar/ivataraccount/migrations/0016_auto_20210413_0904.py +++ b/ivatar/ivataraccount/migrations/0016_auto_20210413_0904.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 3.1.7 on 2021-04-13 09:04 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0017_auto_20210528_1314.py b/ivatar/ivataraccount/migrations/0017_auto_20210528_1314.py index 2be8ad1..bfe8111 100644 --- a/ivatar/ivataraccount/migrations/0017_auto_20210528_1314.py +++ b/ivatar/ivataraccount/migrations/0017_auto_20210528_1314.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 3.2.3 on 2021-05-28 13:14 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0018_alter_photo_format.py b/ivatar/ivataraccount/migrations/0018_alter_photo_format.py index da24866..41d1d7e 100644 --- a/ivatar/ivataraccount/migrations/0018_alter_photo_format.py +++ b/ivatar/ivataraccount/migrations/0018_alter_photo_format.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 5.0 on 2024-05-31 15:00 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0019_confirmedemail_bluesky_handle.py b/ivatar/ivataraccount/migrations/0019_confirmedemail_bluesky_handle.py index 61a3083..cae028f 100644 --- a/ivatar/ivataraccount/migrations/0019_confirmedemail_bluesky_handle.py +++ b/ivatar/ivataraccount/migrations/0019_confirmedemail_bluesky_handle.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 5.1.5 on 2025-01-27 10:54 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0020_confirmedopenid_bluesky_handle.py b/ivatar/ivataraccount/migrations/0020_confirmedopenid_bluesky_handle.py index 888fcdd..1d6b75f 100644 --- a/ivatar/ivataraccount/migrations/0020_confirmedopenid_bluesky_handle.py +++ b/ivatar/ivataraccount/migrations/0020_confirmedopenid_bluesky_handle.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated by Django 5.1.5 on 2025-01-27 13:33 from django.db import migrations, models diff --git a/ivatar/ivataraccount/migrations/0021_add_performance_indexes.py b/ivatar/ivataraccount/migrations/0021_add_performance_indexes.py index 
dae5501..7b5a3a5 100644 --- a/ivatar/ivataraccount/migrations/0021_add_performance_indexes.py +++ b/ivatar/ivataraccount/migrations/0021_add_performance_indexes.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Generated manually for performance optimization from typing import Any, List, Tuple, Optional diff --git a/ivatar/ivataraccount/models.py b/ivatar/ivataraccount/models.py index 61a6487..f88a248 100644 --- a/ivatar/ivataraccount/models.py +++ b/ivatar/ivataraccount/models.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Our models for ivatar.ivataraccount """ @@ -404,7 +403,7 @@ class ConfirmedEmail(BaseAccountModel): logger.debug("Successfully cleaned up cached page: %s" % cache_key) except Exception as exc: logger.warning( - "Failed to clean up cached page %s: %s" % (cache_key, exc) + "Failed to clean up cached page {}: {}".format(cache_key, exc) ) # Invalidate Bluesky avatar URL cache if bluesky_handle changed @@ -455,9 +454,7 @@ class UnconfirmedEmail(BaseAccountModel): + self.user.username.encode("utf-8") # pylint: disable=no-member ) # pylint: disable=no-member self.verification_key = hash_object.hexdigest() - super(UnconfirmedEmail, self).save( - force_insert, force_update, using, update_fields - ) + super().save(force_insert, force_update, using, update_fields) def send_confirmation_mail(self, url=SECURE_BASE_URL): """ @@ -602,7 +599,7 @@ class ConfirmedOpenId(BaseAccountModel): logger.debug("Successfully cleaned up cached page: %s" % cache_key) except Exception as exc: logger.warning( - "Failed to clean up cached page %s: %s" % (cache_key, exc) + "Failed to clean up cached page {}: {}".format(cache_key, exc) ) # Invalidate Bluesky avatar URL cache if bluesky_handle exists diff --git a/ivatar/ivataraccount/read_libravatar_export.py b/ivatar/ivataraccount/read_libravatar_export.py index 71ff8fb..78de23b 100644 --- a/ivatar/ivataraccount/read_libravatar_export.py +++ b/ivatar/ivataraccount/read_libravatar_export.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Reading libravatar export """ diff --git a/ivatar/ivataraccount/test_auth.py b/ivatar/ivataraccount/test_auth.py index fc78e9f..2f59a45 100644 --- a/ivatar/ivataraccount/test_auth.py +++ b/ivatar/ivataraccount/test_auth.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from unittest import mock from django.test import TestCase diff --git a/ivatar/ivataraccount/test_views.py b/ivatar/ivataraccount/test_views.py index a3fc2b9..ca79849 100644 --- a/ivatar/ivataraccount/test_views.py +++ b/ivatar/ivataraccount/test_views.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test our views in ivatar.ivataraccount.views and ivatar.views """ @@ -53,9 +52,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods user = None username = random_string() password = random_string() - email = "%s@%s.org" % (username, random_string()) + email = "{}@{}.org".format(username, random_string()) # Dunno why random tld doesn't work, but I'm too lazy now to investigate - openid = "http://%s.%s.%s/" % (username, random_string(), "org") + openid = "http://{}.{}.{}/".format(username, random_string(), "org") first_name = random_string() last_name = random_string() diff --git a/ivatar/ivataraccount/test_views_bluesky.py b/ivatar/ivataraccount/test_views_bluesky.py index 0011737..92aea62 100644 --- a/ivatar/ivataraccount/test_views_bluesky.py +++ b/ivatar/ivataraccount/test_views_bluesky.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test our views in ivatar.ivataraccount.views and ivatar.views """ @@ -37,9 +36,9 @@ class Tester(TestCase): # pylint: 
disable=too-many-public-methods user = None username = random_string() password = random_string() - email = "%s@%s.%s" % (username, random_string(), random_string(2)) + email = "{}@{}.{}".format(username, random_string(), random_string(2)) # Dunno why random tld doesn't work, but I'm too lazy now to investigate - openid = "http://%s.%s.%s/" % (username, random_string(), "org") + openid = "http://{}.{}.{}/".format(username, random_string(), "org") first_name = random_string() last_name = random_string() bsky_test_account = "libravatar.org" diff --git a/ivatar/ivataraccount/urls.py b/ivatar/ivataraccount/urls.py index ae6a1c4..2163a04 100644 --- a/ivatar/ivataraccount/urls.py +++ b/ivatar/ivataraccount/urls.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ URLs for ivatar.ivataraccount """ + from django.urls import path, re_path from django.contrib.auth.views import LogoutView diff --git a/ivatar/ivataraccount/views.py b/ivatar/ivataraccount/views.py index 3178b86..b664e27 100644 --- a/ivatar/ivataraccount/views.py +++ b/ivatar/ivataraccount/views.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ View classes for ivatar/ivataraccount/ """ @@ -140,7 +139,7 @@ class PasswordSetView(SuccessMessageMixin, FormView): success_url = reverse_lazy("profile") def get_form_kwargs(self): - kwargs = super(PasswordSetView, self).get_form_kwargs() + kwargs = super().get_form_kwargs() kwargs["user"] = self.request.user return kwargs @@ -712,7 +711,9 @@ class RemoveUnconfirmedOpenIDView(View): ) openid.delete() messages.success(request, _("ID removed")) - except self.model.DoesNotExist: # pragma: no cover pylint: disable=no-member,line-too-long + except ( + self.model.DoesNotExist + ): # pragma: no cover pylint: disable=no-member,line-too-long messages.error(request, _("ID does not exist")) return HttpResponseRedirect(reverse_lazy("profile")) @@ -766,7 +767,9 @@ class RedirectOpenIDView(View): unconfirmed = self.model.objects.get( # pylint: disable=no-member user=request.user, id=kwargs["openid_id"] ) - except self.model.DoesNotExist: # pragma: no cover pylint: disable=no-member,line-too-long + except ( + self.model.DoesNotExist + ): # pragma: no cover pylint: disable=no-member,line-too-long messages.error(request, _("ID does not exist")) return HttpResponseRedirect(reverse_lazy("profile")) @@ -1321,7 +1324,7 @@ class ExportView(SuccessMessageMixin, TemplateView): def xml_account(user): escaped_username = saxutils.quoteattr(user.username) escaped_password = saxutils.quoteattr(user.password) - return " \n" % ( + return " \n".format( escaped_username, escaped_password, ) @@ -1387,8 +1390,8 @@ class ExportView(SuccessMessageMixin, TemplateView): bytesobj.seek(0) response = HttpResponse(content_type="application/gzip") - response[ - "Content-Disposition" - ] = f'attachment; filename="libravatar-export_{user.username}.xml.gz"' + response["Content-Disposition"] = ( + f'attachment; filename="libravatar-export_{user.username}.xml.gz"' + ) response.write(bytesobj.read()) return response diff --git a/ivatar/middleware.py b/ivatar/middleware.py index ed1017e..86dcd6b 100644 --- a/ivatar/middleware.py +++ b/ivatar/middleware.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Middleware classes """ diff --git a/ivatar/opentelemetry_config.py b/ivatar/opentelemetry_config.py index a803f8f..1c290e7 100644 --- a/ivatar/opentelemetry_config.py +++ b/ivatar/opentelemetry_config.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ OpenTelemetry configuration for ivatar project. 
diff --git a/ivatar/opentelemetry_middleware.py b/ivatar/opentelemetry_middleware.py index 18fe01f..26d07a1 100644 --- a/ivatar/opentelemetry_middleware.py +++ b/ivatar/opentelemetry_middleware.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ OpenTelemetry middleware and custom instrumentation for ivatar. @@ -94,9 +93,9 @@ class OpenTelemetryMiddleware(MiddlewareMixin): span.set_attributes( { "http.status_code": response.status_code, - "http.response_size": len(response.content) - if hasattr(response, "content") - else 0, + "http.response_size": ( + len(response.content) if hasattr(response, "content") else 0 + ), "http.request.duration": duration, } ) diff --git a/ivatar/settings.py b/ivatar/settings.py index 3c28237..a6c843c 100644 --- a/ivatar/settings.py +++ b/ivatar/settings.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Django settings for ivatar project. """ diff --git a/ivatar/static/css/bootstrap.min.css b/ivatar/static/css/bootstrap.min.css index fcaded2..4980bbf 100644 --- a/ivatar/static/css/bootstrap.min.css +++ b/ivatar/static/css/bootstrap.min.css @@ -260,7 +260,8 @@ th { @font-face { font-family: "Glyphicons Halflings"; src: url(../fonts/glyphicons-halflings-regular.eot); - src: url(../fonts/glyphicons-halflings-regular.eot?#iefix) + src: + url(../fonts/glyphicons-halflings-regular.eot?#iefix) format("embedded-opentype"), url(../fonts/glyphicons-halflings-regular.woff2) format("woff2"), url(../fonts/glyphicons-halflings-regular.woff) format("woff"), @@ -2651,17 +2652,24 @@ output { border-radius: 4px; -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075); - -webkit-transition: border-color ease-in-out 0.15s, + -webkit-transition: + border-color ease-in-out 0.15s, -webkit-box-shadow ease-in-out 0.15s; - -o-transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s; - transition: border-color ease-in-out 0.15s, box-shadow ease-in-out 0.15s; + -o-transition: + border-color ease-in-out 0.15s, + box-shadow ease-in-out 0.15s; + transition: + border-color ease-in-out 0.15s, + box-shadow ease-in-out 0.15s; } .form-control:focus { border-color: #66afe9; outline: 0; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), + -webkit-box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), + box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 8px rgba(102, 175, 233, 0.6); } .form-control::-moz-placeholder { @@ -2923,8 +2931,12 @@ textarea.input-lg { } .has-success .form-control:focus { border-color: #2b542c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #67b168; + -webkit-box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), + 0 0 6px #67b168; + box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), + 0 0 6px #67b168; } .has-success .input-group-addon { color: #3c763d; @@ -2953,8 +2965,12 @@ textarea.input-lg { } .has-warning .form-control:focus { border-color: #66512c; - -webkit-box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #c0a16b; + -webkit-box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), + 0 0 6px #c0a16b; + box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), + 0 0 6px #c0a16b; } .has-warning .input-group-addon { color: #8a6d3b; @@ -2983,8 +2999,12 @@ textarea.input-lg { } .has-error .form-control:focus { border-color: #843534; - -webkit-box-shadow: inset 0 1px 
1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483; - box-shadow: inset 0 1px 1px rgba(0, 0, 0, 0.075), 0 0 6px #ce8483; + -webkit-box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), + 0 0 6px #ce8483; + box-shadow: + inset 0 1px 1px rgba(0, 0, 0, 0.075), + 0 0 6px #ce8483; } .has-error .input-group-addon { color: #a94442; @@ -4470,9 +4490,11 @@ textarea.input-group-sm > .input-group-btn > .btn { margin-left: -15px; border-top: 1px solid transparent; border-bottom: 1px solid transparent; - -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), + -webkit-box-shadow: + inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); - box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.1), + box-shadow: + inset 0 1px 0 rgba(255, 255, 255, 0.1), 0 1px 0 rgba(255, 255, 255, 0.1); } @media (min-width: 768px) { diff --git a/ivatar/static/css/libravatar_base.css b/ivatar/static/css/libravatar_base.css index ee98454..c522a3a 100644 --- a/ivatar/static/css/libravatar_base.css +++ b/ivatar/static/css/libravatar_base.css @@ -4,7 +4,9 @@ font-style: normal; font-weight: 300; src: url("../fonts/lato-v15-latin-300.eot"); /* IE9 Compat Modes */ - src: local("Lato Light"), local("Lato-Light"), + src: + local("Lato Light"), + local("Lato-Light"), url("../fonts/LatoLatin-Light.eot?#iefix") format("embedded-opentype"), /* IE6-IE8 */ url("../fonts/LatoLatin-Light.woff2") format("woff2"), /* Super Modern Browsers */ url("../fonts/LatoLatin-Light.woff") @@ -19,7 +21,9 @@ font-style: normal; font-weight: 400; src: url("../fonts/lato-v15-latin-regular.eot"); /* IE9 Compat Modes */ - src: local("Lato Regular"), local("Lato-Regular"), + src: + local("Lato Regular"), + local("Lato-Regular"), url("../fonts/LatoLatin-Regular.eot?#iefix") format("embedded-opentype"), /* IE6-IE8 */ url("../fonts/LatoLatin-Regular.woff2") format("woff2"), /* Super Modern Browsers */ url("../fonts/LatoLatin-Regular.woff") @@ -35,7 +39,9 @@ font-style: normal; font-weight: 700; src: url("../fonts/lato-v15-latin-700.eot"); /* IE9 Compat Modes */ - src: local("Lato Bold"), local("Lato-Bold"), + src: + local("Lato Bold"), + local("Lato-Bold"), url("../fonts/LatoLatin-Bold.eot?#iefix") format("embedded-opentype"), /* IE6-IE8 */ url("../fonts/LatoLatin-Bold.woff2") format("woff2"), /* Super Modern Browsers */ url("../fonts/LatoLatin-Bold.woff") @@ -50,7 +56,9 @@ font-style: normal; font-weight: 400; src: url("../fonts/open-sans-v16-latin-regular.eot"); /* IE9 Compat Modes */ - src: local("Open Sans Regular"), local("OpenSans-Regular"), + src: + local("Open Sans Regular"), + local("OpenSans-Regular"), url("../fonts/open-sans-v16-latin-regular.eot?#iefix") format("embedded-opentype"), /* IE6-IE8 */ url("../fonts/open-sans-v16-latin-regular.woff2") diff --git a/ivatar/static/js/bootstrap.min.js b/ivatar/static/js/bootstrap.min.js index a8ddadc..34f3b04 100644 --- a/ivatar/static/js/bootstrap.min.js +++ b/ivatar/static/js/bootstrap.min.js @@ -199,8 +199,8 @@ if ("undefined" == typeof jQuery) "number" == typeof b ? e.to(b) : g - ? e[g]() - : f.interval && e.pause().cycle(); + ? e[g]() + : f.interval && e.pause().cycle(); }); } var c = function (b, c) { @@ -278,8 +278,8 @@ if ("undefined" == typeof jQuery) b.to(a); }) : c == a - ? this.pause().cycle() - : this.slide(a > c ? "next" : "prev", this.$items.eq(a)); + ? this.pause().cycle() + : this.slide(a > c ? "next" : "prev", this.$items.eq(a)); }), (c.prototype.pause = function (b) { return ( @@ -1106,12 +1106,12 @@ if ("undefined" == typeof jQuery) "bottom" == h && k.bottom + m > o.bottom ? 
"top" : "top" == h && k.top - m < o.top - ? "bottom" - : "right" == h && k.right + l > o.width - ? "left" - : "left" == h && k.left - l < o.left - ? "right" - : h), + ? "bottom" + : "right" == h && k.right + l > o.width + ? "left" + : "left" == h && k.left - l < o.left + ? "right" + : h), f.removeClass(n).addClass(h); } var p = this.getCalculatedOffset(h, k, l, m); @@ -1233,10 +1233,10 @@ if ("undefined" == typeof jQuery) return "bottom" == a ? { top: b.top + b.height, left: b.left + b.width / 2 - c / 2 } : "top" == a - ? { top: b.top - d, left: b.left + b.width / 2 - c / 2 } - : "left" == a - ? { top: b.top + b.height / 2 - d / 2, left: b.left - c } - : { top: b.top + b.height / 2 - d / 2, left: b.left + b.width }; + ? { top: b.top - d, left: b.left + b.width / 2 - c / 2 } + : "left" == a + ? { top: b.top + b.height / 2 - d / 2, left: b.left - c } + : { top: b.top + b.height / 2 - d / 2, left: b.left + b.width }; }), (c.prototype.getViewportAdjustedDelta = function (a, b, c, d) { var e = { top: 0, left: 0 }; @@ -1308,8 +1308,8 @@ if ("undefined" == typeof jQuery) ? ((c.inState.click = !c.inState.click), c.isInStateTrue() ? c.enter(c) : c.leave(c)) : c.tip().hasClass("in") - ? c.leave(c) - : c.enter(c); + ? c.leave(c) + : c.enter(c); }), (c.prototype.destroy = function () { var a = this; diff --git a/ivatar/static/js/cropper.min.js b/ivatar/static/js/cropper.min.js index 6f5400c..cc8550d 100644 --- a/ivatar/static/js/cropper.min.js +++ b/ivatar/static/js/cropper.min.js @@ -11,9 +11,10 @@ "object" == typeof exports && "undefined" != typeof module ? (module.exports = e()) : "function" == typeof define && define.amd - ? define(e) - : ((t = "undefined" != typeof globalThis ? globalThis : t || self).Cropper = - e()); + ? define(e) + : ((t = + "undefined" != typeof globalThis ? globalThis : t || self).Cropper = + e()); })(this, function () { "use strict"; function C(e, t) { @@ -48,10 +49,14 @@ : (e[t] = i); }) : Object.getOwnPropertyDescriptors - ? Object.defineProperties(a, Object.getOwnPropertyDescriptors(n)) - : C(Object(n)).forEach(function (t) { - Object.defineProperty(a, t, Object.getOwnPropertyDescriptor(n, t)); - }); + ? Object.defineProperties(a, Object.getOwnPropertyDescriptors(n)) + : C(Object(n)).forEach(function (t) { + Object.defineProperty( + a, + t, + Object.getOwnPropertyDescriptor(n, t), + ); + }); } return a; } @@ -107,17 +112,17 @@ return "string" == typeof t ? a(t, e) : "Map" === - (i = - "Object" === - (i = Object.prototype.toString.call(t).slice(8, -1)) && - t.constructor - ? t.constructor.name - : i) || "Set" === i - ? Array.from(t) - : "Arguments" === i || - /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(i) - ? a(t, e) - : void 0; + (i = + "Object" === + (i = Object.prototype.toString.call(t).slice(8, -1)) && + t.constructor + ? t.constructor.name + : i) || "Set" === i + ? Array.from(t) + : "Arguments" === i || + /^(?:Ui|I)nt(?:8|16|32)(?:Clamped)?Array$/.test(i) + ? a(t, e) + : void 0; })(t) || (function () { throw new TypeError( @@ -304,10 +309,10 @@ v(t, e); }) : t.classList - ? t.classList.add(e) - : (i = t.className.trim()) - ? i.indexOf(e) < 0 && (t.className = "".concat(i, " ").concat(e)) - : (t.className = e)); + ? t.classList.add(e) + : (i = t.className.trim()) + ? i.indexOf(e) < 0 && (t.className = "".concat(i, " ").concat(e)) + : (t.className = e)); } function X(t, e) { e && @@ -316,9 +321,9 @@ X(t, e); }) : t.classList - ? t.classList.remove(e) - : 0 <= t.className.indexOf(e) && - (t.className = t.className.replace(e, ""))); + ? 
t.classList.remove(e) + : 0 <= t.className.indexOf(e) && + (t.className = t.className.replace(e, ""))); } function r(t, e, i) { e && @@ -336,15 +341,15 @@ return o(t[e]) ? t[e] : t.dataset - ? t.dataset[e] - : t.getAttribute("data-".concat(Dt(e))); + ? t.dataset[e] + : t.getAttribute("data-".concat(Dt(e))); } function w(t, e, i) { o(i) ? (t[e] = i) : t.dataset - ? (t.dataset[e] = i) - : t.setAttribute("data-".concat(Dt(e)), i); + ? (t.dataset[e] = i) + : t.setAttribute("data-".concat(Dt(e)), i); } var kt, Ot, @@ -478,8 +483,8 @@ ? (n = t / a) : (t = n * a)) : o - ? (n = t / a) - : h && (t = n * a), + ? (n = t / a) + : h && (t = n * a), { width: t, height: n } ); } @@ -572,8 +577,8 @@ ? (o = t.height * e) : (h = t.width / e) : 3 === i - ? (h = t.width / e) - : (o = t.height * e), + ? (h = t.width / e) + : (o = t.height * e), { aspectRatio: e, naturalWidth: n, @@ -611,11 +616,11 @@ (t ? (t = Math.max(t, s ? o.width : 0)) : i - ? (i = Math.max(i, s ? o.height : 0)) - : s && - ((t = o.width) < (i = o.height) * r - ? (t = i * r) - : (i = t / r))), + ? (i = Math.max(i, s ? o.height : 0)) + : s && + ((t = o.width) < (i = o.height) * r + ? (t = i * r) + : (i = t / r))), (t = (r = R({ aspectRatio: r, width: t, height: i })).width), (i = r.height), (n.minWidth = t), @@ -770,8 +775,8 @@ ? (n = t / c) : (t = n * c) : t - ? (n = t / c) - : n && (t = n * c), + ? (n = t / c) + : n && (t = n * c), i < a * c ? (a = i / c) : (i = a * c)), (r.minWidth = Math.min(t, i)), (r.minHeight = Math.min(n, a)), @@ -1011,8 +1016,8 @@ t.deltaY ? (a = 0 < t.deltaY ? 1 : -1) : t.wheelDelta - ? (a = -t.wheelDelta / 120) - : t.detail && (a = 0 < t.detail ? 1 : -1), + ? (a = -t.wheelDelta / 120) + : t.detail && (a = 0 < t.detail ? 1 : -1), this.zoom(-a * i, t)); }, cropStart: function (t) { @@ -1176,8 +1181,8 @@ u < 0 && g < 0 ? ((d = N), (m -= g = -g), (p -= u = -u)) : u < 0 - ? ((d = W), (p -= u = -u)) - : g < 0 && ((d = H), (m -= g = -g)); + ? ((d = W), (p -= u = -u)) + : g < 0 && ((d = H), (m -= g = -g)); break; case W: if (l) { @@ -1196,8 +1201,8 @@ u < 0 && g < 0 ? ((d = H), (m -= g = -g), (p -= u = -u)) : u < 0 - ? ((d = E), (p -= u = -u)) - : g < 0 && ((d = N), (m -= g = -g)); + ? ((d = E), (p -= u = -u)) + : g < 0 && ((d = N), (m -= g = -g)); break; case N: if (l) { @@ -1216,8 +1221,8 @@ u < 0 && g < 0 ? ((d = E), (m -= g = -g), (p -= u = -u)) : u < 0 - ? ((d = H), (p -= u = -u)) - : g < 0 && ((d = W), (m -= g = -g)); + ? ((d = H), (p -= u = -u)) + : g < 0 && ((d = W), (m -= g = -g)); break; case H: if (l) { @@ -1236,8 +1241,8 @@ u < 0 && g < 0 ? ((d = W), (m -= g = -g), (p -= u = -u)) : u < 0 - ? ((d = N), (p -= u = -u)) - : g < 0 && ((d = E), (m -= g = -g)); + ? ((d = N), (p -= u = -u)) + : g < 0 && ((d = E), (m -= g = -g)); break; case q: this.move(D.x, D.y), (M = !1); @@ -1458,9 +1463,9 @@ (r.left -= (h - s) * ((t.pageX - d.left - r.left) / s)), (r.top -= (l - c) * ((t.pageY - d.top - r.top) / c))) : u(e) && p(e.x) && p(e.y) - ? ((r.left -= (h - s) * ((e.x - r.left) / s)), - (r.top -= (l - c) * ((e.y - r.top) / c))) - : ((r.left -= (h - s) / 2), (r.top -= (l - c) / 2)), + ? ((r.left -= (h - s) * ((e.x - r.left) / s)), + (r.top -= (l - c) * ((e.y - r.top) / c))) + : ((r.left -= (h - s) / 2), (r.top -= (l - c) / 2)), (r.width = h), (r.height = l), this.renderCanvas(!0); @@ -1777,13 +1782,13 @@ (v = e) <= -a || g < v ? (C = x = b = v = 0) : v <= 0 - ? ((x = -v), (v = 0), (C = b = Math.min(g, a + v))) - : v <= g && ((x = 0), (C = b = Math.min(a, g - v))), + ? 
((x = -v), (v = 0), (C = b = Math.min(g, a + v))) + : v <= g && ((x = 0), (C = b = Math.min(a, g - v))), b <= 0 || w <= -n || f < w ? (D = M = y = w = 0) : w <= 0 - ? ((M = -w), (w = 0), (D = y = Math.min(f, n + w))) - : w <= f && ((M = 0), (D = y = Math.min(n, f - w))), + ? ((M = -w), (w = 0), (D = y = Math.min(f, n + w))) + : w <= f && ((M = 0), (D = y = Math.min(n, f - w))), (B = [v, w, b, y]), 0 < C && 0 < D && B.push(x * (k = c / a), M * k, C * k, D * k), p.drawImage.apply( @@ -2151,12 +2156,12 @@ this.ready ? (this.unbuild(), (this.ready = !1), (this.cropped = !1)) : this.sizing - ? ((this.sizingImage.onload = null), - (this.sizing = !1), - (this.sized = !1)) - : this.reloading - ? ((this.xhr.onabort = null), this.xhr.abort()) - : this.image && this.stop(); + ? ((this.sizingImage.onload = null), + (this.sizing = !1), + (this.sized = !1)) + : this.reloading + ? ((this.xhr.onabort = null), this.xhr.abort()) + : this.image && this.stop(); }, }, ]) && A(t.prototype, e), diff --git a/ivatar/static/js/jcrop.js b/ivatar/static/js/jcrop.js index b775957..6acf70a 100644 --- a/ivatar/static/js/jcrop.js +++ b/ivatar/static/js/jcrop.js @@ -115,13 +115,13 @@ return options.disabled ? !1 : "move" !== ord || options.allowMove - ? ((docOffset = getPos($img)), - (btndown = !0), - startDragMode(ord, mouseAbs(e)), - e.stopPropagation(), - e.preventDefault(), - !1) - : !1; + ? ((docOffset = getPos($img)), + (btndown = !0), + startDragMode(ord, mouseAbs(e)), + e.stopPropagation(), + e.preventDefault(), + !1) + : !1; }; } function presize($obj, w, h) { @@ -463,13 +463,13 @@ return options.disabled ? !1 : "move" !== ord || options.allowMove - ? ((docOffset = getPos($img)), - (btndown = !0), - startDragMode(ord, mouseAbs(Touch.cfilter(e)), !0), - e.stopPropagation(), - e.preventDefault(), - !1) - : !1; + ? ((docOffset = getPos($img)), + (btndown = !0), + startDragMode(ord, mouseAbs(Touch.cfilter(e)), !0), + e.stopPropagation(), + e.preventDefault(), + !1) + : !1; }; }, newSelection: function (e) { diff --git a/ivatar/static/js/jquery-3.7.1.min.js b/ivatar/static/js/jquery-3.7.1.min.js index 223228e..8a2f71f 100644 --- a/ivatar/static/js/jquery-3.7.1.min.js +++ b/ivatar/static/js/jquery-3.7.1.min.js @@ -56,8 +56,8 @@ return null == e ? e + "" : "object" == typeof e || "function" == typeof e - ? n[i.call(e)] || "object" - : typeof e; + ? n[i.call(e)] || "object" + : typeof e; } var t = "3.7.1", l = /HTML$/i, @@ -90,8 +90,8 @@ return null == e ? ae.call(this) : e < 0 - ? this[e + this.length] - : this[e]; + ? this[e + this.length] + : this[e]; }, pushStack: function (e) { var t = ce.merge(this.constructor(), e); @@ -172,8 +172,8 @@ i && !Array.isArray(n) ? [] : i || ce.isPlainObject(n) - ? n - : {}), + ? n + : {}), (i = !1), (a[t] = ce.extend(l, o, r))) : void 0 !== r && (a[t] = r)); @@ -222,10 +222,10 @@ return 1 === i || 11 === i ? e.textContent : 9 === i - ? e.documentElement.textContent - : 3 === i || 4 === i - ? e.nodeValue - : n; + ? e.documentElement.textContent + : 3 === i || 4 === i + ? e.nodeValue + : n; }, makeArray: function (e, t) { var n = t || []; @@ -666,13 +666,13 @@ ? e === T || (e.ownerDocument == ye && I.contains(ye, e)) ? -1 : t === T || (t.ownerDocument == ye && I.contains(ye, t)) - ? 1 - : o - ? se.call(o, e) - se.call(o, t) - : 0 + ? 1 + : o + ? se.call(o, e) - se.call(o, t) + : 0 : 4 & n - ? -1 - : 1) + ? -1 + : 1) ); })), T @@ -813,17 +813,19 @@ "=" === r ? t === i : "!=" === r - ? t !== i - : "^=" === r - ? i && 0 === t.indexOf(i) - : "*=" === r - ? 
i && -1 < t.indexOf(i) - : "$=" === r - ? i && t.slice(-i.length) === i - : "~=" === r - ? -1 < (" " + t.replace(v, " ") + " ").indexOf(i) - : "|=" === r && - (t === i || t.slice(0, i.length + 1) === i + "-")); + ? t !== i + : "^=" === r + ? i && 0 === t.indexOf(i) + : "*=" === r + ? i && -1 < t.indexOf(i) + : "$=" === r + ? i && t.slice(-i.length) === i + : "~=" === r + ? -1 < + (" " + t.replace(v, " ") + " ").indexOf(i) + : "|=" === r && + (t === i || + t.slice(0, i.length + 1) === i + "-")); }; }, CHILD: function (d, e, t, h, g) { @@ -894,18 +896,18 @@ return a[S] ? a(o) : 1 < a.length - ? ((t = [e, e, "", o]), - b.setFilters.hasOwnProperty(e.toLowerCase()) - ? F(function (e, t) { - var n, - r = a(e, o), - i = r.length; - while (i--) e[(n = se.call(e, r[i]))] = !(t[n] = r[i]); - }) - : function (e) { - return a(e, 0, t); - }) - : a; + ? ((t = [e, e, "", o]), + b.setFilters.hasOwnProperty(e.toLowerCase()) + ? F(function (e, t) { + var n, + r = a(e, o), + i = r.length; + while (i--) e[(n = se.call(e, r[i]))] = !(t[n] = r[i]); + }) + : function (e) { + return a(e, 0, t); + }) + : a; }, }, pseudos: { @@ -1372,14 +1374,14 @@ return !!n.call(e, t, e) !== r; }) : n.nodeType - ? ce.grep(e, function (e) { - return (e === n) !== r; - }) - : "string" != typeof n - ? ce.grep(e, function (e) { - return -1 < se.call(n, e) !== r; - }) - : ce.filter(n, e, r); + ? ce.grep(e, function (e) { + return (e === n) !== r; + }) + : "string" != typeof n + ? ce.grep(e, function (e) { + return -1 < se.call(n, e) !== r; + }) + : ce.filter(n, e, r); } (ce.filter = function (e, t, n) { var r = t[0]; @@ -1459,10 +1461,10 @@ return e.nodeType ? ((this[0] = e), (this.length = 1), this) : v(e) - ? void 0 !== n.ready - ? n.ready(e) - : e(ce) - : ce.makeArray(e, this); + ? void 0 !== n.ready + ? n.ready(e) + : e(ce) + : ce.makeArray(e, this); }).prototype = ce.fn), (k = ce(C)); var E = /^(?:parents|prev(?:Until|All))/, @@ -1505,8 +1507,8 @@ ? se.call(ce(e), this[0]) : se.call(this, e.jquery ? e[0] : e) : this[0] && this[0].parentNode - ? this.first().prevAll().length - : -1; + ? this.first().prevAll().length + : -1; }, add: function (e, t) { return this.pushStack(ce.uniqueSort(ce.merge(this.get(), ce(e, t)))); @@ -1584,8 +1586,8 @@ e && v((i = e.promise)) ? i.call(e).done(t).fail(n) : e && v((i = e.then)) - ? i.call(e, t, n) - : t.apply(void 0, [e].slice(r)); + ? i.call(e, t, n) + : t.apply(void 0, [e].slice(r)); } catch (e) { n.apply(void 0, [e]); } @@ -1974,8 +1976,8 @@ n = (t = Array.isArray(t) ? t.map(F) : (t = F(t)) in r - ? [t] - : t.match(D) || []).length; + ? [t] + : t.match(D) || []).length; while (n--) delete r[t[n]]; } (void 0 === t || ce.isEmptyObject(r)) && @@ -2005,10 +2007,10 @@ ("null" === i ? null : i === +i + "" - ? +i - : X.test(i) - ? JSON.parse(i) - : i)); + ? +i + : X.test(i) + ? JSON.parse(i) + : i)); } catch (e) {} z.set(e, t, n); } else n = void 0; @@ -2064,8 +2066,8 @@ return void 0 !== (t = z.get(o, n)) ? t : void 0 !== (t = V(o, n)) - ? t - : void 0; + ? t + : void 0; this.each(function () { z.set(this, n, e); }); @@ -2136,12 +2138,12 @@ arguments.length < e ? ce.queue(this[0], t) : void 0 === n - ? this - : this.each(function () { - var e = ce.queue(this, t, n); - ce._queueHooks(this, t), - "fx" === t && "inprogress" !== e[0] && ce.dequeue(this, t); - }) + ? 
this + : this.each(function () { + var e = ce.queue(this, t, n); + ce._queueHooks(this, t), + "fx" === t && "inprogress" !== e[0] && ce.dequeue(this, t); + }) ); }, dequeue: function (e) { @@ -2293,8 +2295,8 @@ "undefined" != typeof e.getElementsByTagName ? e.getElementsByTagName(t || "*") : "undefined" != typeof e.querySelectorAll - ? e.querySelectorAll(t || "*") - : []), + ? e.querySelectorAll(t || "*") + : []), void 0 === t || (t && fe(e, t)) ? ce.merge([e], n) : n ); } @@ -3522,16 +3524,16 @@ (null != e.elem[e.prop] && null == e.elem.style[e.prop]) ? e.elem[e.prop] : (t = ce.css(e.elem, e.prop, "")) && "auto" !== t - ? t - : 0; + ? t + : 0; }, set: function (e) { ce.fx.step[e.prop] ? ce.fx.step[e.prop](e) : 1 !== e.elem.nodeType || - (!ce.cssHooks[e.prop] && null == e.elem.style[Ze(e.prop)]) - ? (e.elem[e.prop] = e.now) - : ce.style(e.elem, e.prop, e.now + e.unit); + (!ce.cssHooks[e.prop] && null == e.elem.style[Ze(e.prop)]) + ? (e.elem[e.prop] = e.now) + : ce.style(e.elem, e.prop, e.now + e.unit); }, }, }).scrollTop = at.propHooks.scrollLeft = @@ -3982,13 +3984,13 @@ ? null === n ? void ce.removeAttr(e, t) : i && "set" in i && void 0 !== (r = i.set(e, n, t)) - ? r - : (e.setAttribute(t, n + ""), n) + ? r + : (e.setAttribute(t, n + ""), n) : i && "get" in i && null !== (r = i.get(e, t)) - ? r - : null == (r = ce.find.attr(e, t)) - ? void 0 - : r); + ? r + : null == (r = ce.find.attr(e, t)) + ? void 0 + : r); }, attrHooks: { type: { @@ -4063,8 +4065,8 @@ ? r : (e[t] = n) : i && "get" in i && null !== (r = i.get(e, t)) - ? r - : e[t] + ? r + : e[t] ); }, propHooks: { @@ -4074,8 +4076,8 @@ return t ? parseInt(t, 10) : bt.test(e.nodeName) || (wt.test(e.nodeName) && e.href) - ? 0 - : -1; + ? 0 + : -1; }, }, }, @@ -4117,16 +4119,17 @@ ce(this).addClass(t.call(this, e, Ct(this))); }) : (e = kt(t)).length - ? this.each(function () { - if ( - ((r = Ct(this)), (n = 1 === this.nodeType && " " + Tt(r) + " ")) - ) { - for (o = 0; o < e.length; o++) - (i = e[o]), n.indexOf(" " + i + " ") < 0 && (n += i + " "); - (a = Tt(n)), r !== a && this.setAttribute("class", a); - } - }) - : this; + ? this.each(function () { + if ( + ((r = Ct(this)), + (n = 1 === this.nodeType && " " + Tt(r) + " ")) + ) { + for (o = 0; o < e.length; o++) + (i = e[o]), n.indexOf(" " + i + " ") < 0 && (n += i + " "); + (a = Tt(n)), r !== a && this.setAttribute("class", a); + } + }) + : this; }, removeClass: function (t) { var e, n, r, i, o, a; @@ -4135,22 +4138,22 @@ ce(this).removeClass(t.call(this, e, Ct(this))); }) : arguments.length - ? (e = kt(t)).length - ? this.each(function () { - if ( - ((r = Ct(this)), - (n = 1 === this.nodeType && " " + Tt(r) + " ")) - ) { - for (o = 0; o < e.length; o++) { - i = e[o]; - while (-1 < n.indexOf(" " + i + " ")) - n = n.replace(" " + i + " ", " "); + ? (e = kt(t)).length + ? this.each(function () { + if ( + ((r = Ct(this)), + (n = 1 === this.nodeType && " " + Tt(r) + " ")) + ) { + for (o = 0; o < e.length; o++) { + i = e[o]; + while (-1 < n.indexOf(" " + i + " ")) + n = n.replace(" " + i + " ", " "); + } + (a = Tt(n)), r !== a && this.setAttribute("class", a); } - (a = Tt(n)), r !== a && this.setAttribute("class", a); - } - }) - : this - : this.attr("class", ""); + }) + : this + : this.attr("class", ""); }, toggleClass: function (t, n) { var e, @@ -4164,23 +4167,24 @@ ce(this).toggleClass(t.call(this, e, Ct(this), n), n); }) : "boolean" == typeof n && s - ? n - ? 
this.addClass(t) - : this.removeClass(t) - : ((e = kt(t)), - this.each(function () { - if (s) - for (o = ce(this), i = 0; i < e.length; i++) - (r = e[i]), o.hasClass(r) ? o.removeClass(r) : o.addClass(r); - else - (void 0 !== t && "boolean" !== a) || - ((r = Ct(this)) && _.set(this, "__className__", r), - this.setAttribute && - this.setAttribute( - "class", - r || !1 === t ? "" : _.get(this, "__className__") || "", - )); - })); + ? n + ? this.addClass(t) + : this.removeClass(t) + : ((e = kt(t)), + this.each(function () { + if (s) + for (o = ce(this), i = 0; i < e.length; i++) + (r = e[i]), + o.hasClass(r) ? o.removeClass(r) : o.addClass(r); + else + (void 0 !== t && "boolean" !== a) || + ((r = Ct(this)) && _.set(this, "__className__", r), + this.setAttribute && + this.setAttribute( + "class", + r || !1 === t ? "" : _.get(this, "__className__") || "", + )); + })); }, hasClass: function (e) { var t, @@ -4208,11 +4212,11 @@ (null == (t = i ? n.call(this, e, ce(this).val()) : n) ? (t = "") : "number" == typeof t - ? (t += "") - : Array.isArray(t) && - (t = ce.map(t, function (e) { - return null == e ? "" : e + ""; - })), + ? (t += "") + : Array.isArray(t) && + (t = ce.map(t, function (e) { + return null == e ? "" : e + ""; + })), ((r = ce.valHooks[this.type] || ce.valHooks[this.nodeName.toLowerCase()]) && @@ -4221,16 +4225,17 @@ (this.value = t)); })) : t - ? (r = ce.valHooks[t.type] || ce.valHooks[t.nodeName.toLowerCase()]) && - "get" in r && - void 0 !== (e = r.get(t, "value")) - ? e - : "string" == typeof (e = t.value) - ? e.replace(St, "") - : null == e - ? "" - : e - : void 0; + ? (r = + ce.valHooks[t.type] || ce.valHooks[t.nodeName.toLowerCase()]) && + "get" in r && + void 0 !== (e = r.get(t, "value")) + ? e + : "string" == typeof (e = t.value) + ? e.replace(St, "") + : null == e + ? "" + : e + : void 0; }, }), ce.extend({ @@ -4470,10 +4475,10 @@ return null == n ? null : Array.isArray(n) - ? ce.map(n, function (e) { - return { name: t.name, value: e.replace(Lt, "\r\n") }; - }) - : { name: t.name, value: n.replace(Lt, "\r\n") }; + ? ce.map(n, function (e) { + return { name: t.name, value: e.replace(Lt, "\r\n") }; + }) + : { name: t.name, value: n.replace(Lt, "\r\n") }; }) .get(); }, @@ -4826,8 +4831,8 @@ 204 === e || "HEAD" === v.type ? (l = "nocontent") : 304 === e - ? (l = "notmodified") - : ((l = s.state), (o = s.data), (i = !(a = s.error)))) + ? (l = "notmodified") + : ((l = s.state), (o = s.data), (i = !(a = s.error)))) : ((a = l), (!e && l) || ((l = "error"), e < 0 && (e = 0))), (T.status = e), (T.statusText = (t || l) + ""), @@ -4977,18 +4982,18 @@ "abort" === e ? r.abort() : "error" === e - ? "number" != typeof r.status - ? t(0, "error") - : t(r.status, r.statusText) - : t( - Yt[r.status] || r.status, - r.statusText, - "text" !== (r.responseType || "text") || - "string" != typeof r.responseText - ? { binary: r.response } - : { text: r.responseText }, - r.getAllResponseHeaders(), - )); + ? "number" != typeof r.status + ? t(0, "error") + : t(r.status, r.statusText) + : t( + Yt[r.status] || r.status, + r.statusText, + "text" !== (r.responseType || "text") || + "string" != typeof r.responseText + ? { binary: r.response } + : { text: r.responseText }, + r.getAllResponseHeaders(), + )); }; }), (r.onload = o()), @@ -5290,17 +5295,17 @@ ? e["inner" + a] : e.document.documentElement["client" + a] : 9 === e.nodeType - ? ((r = e.documentElement), - Math.max( - e.body["scroll" + a], - r["scroll" + a], - e.body["offset" + a], - r["offset" + a], - r["client" + a], - )) - : void 0 === n - ? 
ce.css(e, t, i) - : ce.style(e, t, n, i); + ? ((r = e.documentElement), + Math.max( + e.body["scroll" + a], + r["scroll" + a], + e.body["offset" + a], + r["offset" + a], + r["client" + a], + )) + : void 0 === n + ? ce.css(e, t, i) + : ce.style(e, t, n, i); }, s, n ? e : void 0, diff --git a/ivatar/test_auxiliary.py b/ivatar/test_auxiliary.py index b531196..2b4b570 100644 --- a/ivatar/test_auxiliary.py +++ b/ivatar/test_auxiliary.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test various other parts of ivatar/libravatar in order to increase the overall test coverage. Test in here, didn't diff --git a/ivatar/test_file_security.py b/ivatar/test_file_security.py index a4acad7..1df6189 100644 --- a/ivatar/test_file_security.py +++ b/ivatar/test_file_security.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Tests for file upload security enhancements """ diff --git a/ivatar/test_opentelemetry.py b/ivatar/test_opentelemetry.py index bc14c7a..6f3c34a 100644 --- a/ivatar/test_opentelemetry.py +++ b/ivatar/test_opentelemetry.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Tests for OpenTelemetry integration in ivatar. diff --git a/ivatar/test_static_pages.py b/ivatar/test_static_pages.py index 280679d..45a61eb 100644 --- a/ivatar/test_static_pages.py +++ b/ivatar/test_static_pages.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Test our views in ivatar.ivataraccount.views and ivatar.views """ + # pylint: disable=too-many-lines import os import django @@ -25,9 +25,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods user = None username = random_string() password = random_string() - email = "%s@%s.%s" % (username, random_string(), random_string(2)) + email = "{}@{}.{}".format(username, random_string(), random_string(2)) # Dunno why random tld doesn't work, but I'm too lazy now to investigate - openid = "http://%s.%s.%s/" % (username, random_string(), "org") + openid = "http://{}.{}.{}/".format(username, random_string(), "org") def login(self): """ diff --git a/ivatar/test_utils.py b/ivatar/test_utils.py index 30b017b..a3f2eb7 100644 --- a/ivatar/test_utils.py +++ b/ivatar/test_utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test our utils from ivatar.utils """ diff --git a/ivatar/test_views.py b/ivatar/test_views.py index 2049858..73d32ce 100644 --- a/ivatar/test_views.py +++ b/ivatar/test_views.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test our views in ivatar.ivataraccount.views and ivatar.views """ @@ -31,9 +30,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods user = None username = random_string() password = random_string() - email = "%s@%s.%s" % (username, random_string(), random_string(2)) + email = "{}@{}.{}".format(username, random_string(), random_string(2)) # Dunno why random tld doesn't work, but I'm too lazy now to investigate - openid = "http://%s.%s.%s/" % (username, random_string(), "org") + openid = "http://{}.{}.{}/".format(username, random_string(), "org") def login(self): """ diff --git a/ivatar/test_views_stats.py b/ivatar/test_views_stats.py index 630efbb..49877e6 100644 --- a/ivatar/test_views_stats.py +++ b/ivatar/test_views_stats.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Test our StatsView in ivatar.views """ diff --git a/ivatar/test_wsgi.py b/ivatar/test_wsgi.py index 73a33e8..a8b0705 100644 --- a/ivatar/test_wsgi.py +++ b/ivatar/test_wsgi.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Unit tests for WSGI """ + import unittest import os diff --git a/ivatar/tools/forms.py b/ivatar/tools/forms.py index 5d06894..245b51d 
100644 --- a/ivatar/tools/forms.py +++ b/ivatar/tools/forms.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Classes for our ivatar.tools.forms """ + from django import forms from django.utils.translation import gettext_lazy as _ from django.core.exceptions import ValidationError diff --git a/ivatar/tools/test_views.py b/ivatar/tools/test_views.py index d26da7c..f7a5d8d 100644 --- a/ivatar/tools/test_views.py +++ b/ivatar/tools/test_views.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ Test our views in ivatar.ivataraccount.views and ivatar.views """ + # pylint: disable=too-many-lines import os import django @@ -28,9 +28,9 @@ class Tester(TestCase): # pylint: disable=too-many-public-methods user = None username = random_string() password = random_string() - email = "%s@%s.%s" % (username, random_string(), random_string(2)) + email = "{}@{}.{}".format(username, random_string(), random_string(2)) # Dunno why random tld doesn't work, but I'm too lazy now to investigate - openid = "http://%s.%s.%s/" % (username, random_string(), "org") + openid = "http://{}.{}.{}/".format(username, random_string(), "org") def login(self): """ diff --git a/ivatar/tools/urls.py b/ivatar/tools/urls.py index c8f629f..b0a5381 100644 --- a/ivatar/tools/urls.py +++ b/ivatar/tools/urls.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ ivatar/tools URL configuration """ diff --git a/ivatar/tools/views.py b/ivatar/tools/views.py index 2522d0e..2276a4f 100644 --- a/ivatar/tools/views.py +++ b/ivatar/tools/views.py @@ -1,7 +1,7 @@ -# -*- coding: utf-8 -*- """ View classes for ivatar/tools/ """ + from socket import inet_ntop, AF_INET6 import hashlib import random diff --git a/ivatar/urls.py b/ivatar/urls.py index 0457c35..c1ecfe5 100644 --- a/ivatar/urls.py +++ b/ivatar/urls.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ ivatar URL configuration """ diff --git a/ivatar/utils.py b/ivatar/utils.py index dc950d2..bbfd797 100644 --- a/ivatar/utils.py +++ b/ivatar/utils.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ Simple module providing reusable random_string function """ @@ -179,6 +178,16 @@ def random_string(length=10): ) +def generate_random_email(): + """ + Generate a random email address using the same pattern as test_views.py + """ + username = random_string() + domain = random_string() + tld = random_string(2) + return f"{username}@{domain}.{tld}" + + def random_ip_address(): """ Return a random IP address (IPv4) diff --git a/ivatar/views.py b/ivatar/views.py index bea9cbe..319281f 100644 --- a/ivatar/views.py +++ b/ivatar/views.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ views under / """ @@ -762,9 +761,9 @@ class StatsView(TemplateView, JsonResponse): ) retval["photo_size_stats"] = { - "average_size_bytes": round(avg_size_bytes, 2) - if avg_size_bytes - else 0, + "average_size_bytes": ( + round(avg_size_bytes, 2) if avg_size_bytes else 0 + ), "average_size_kb": avg_size_kb, "average_size_mb": avg_size_mb, "total_photos_analyzed": photo_count, @@ -839,7 +838,7 @@ def _get_git_info_from_files(): if not path.exists(head_file): return None - with open(head_file, "r") as f: + with open(head_file) as f: head_content = f.read().strip() # Parse HEAD content @@ -851,7 +850,7 @@ def _get_git_info_from_files(): # Read the commit hash from the ref ref_file = path.join(git_dir, branch_ref) if path.exists(ref_file): - with open(ref_file, "r") as f: + with open(ref_file) as f: commit_hash = f.read().strip() else: return None diff --git a/ivatar/wsgi.py b/ivatar/wsgi.py index 883517b..18866fb 100644 --- a/ivatar/wsgi.py +++ 
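The generate_random_email() helper added to ivatar/utils.py simply glues three random_string() calls together, which lets the performance tests request avatars for email hashes that cannot already be cached. A self-contained sketch of the same pattern:

    import random
    import string

    def random_string(length=10):
        # Lowercase letters and digits, as in ivatar.utils.random_string
        return "".join(
            random.SystemRandom().choice(string.ascii_lowercase + string.digits)
            for _ in range(length)
        )

    def generate_random_email():
        # username@domain.tld with a two-character pseudo-TLD
        return f"{random_string()}@{random_string()}.{random_string(2)}"

    print(generate_random_email())  # e.g. q3xw81kd0a@ls77hj2m4e.bq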
b/ivatar/wsgi.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """ WSGI config for ivatar project. diff --git a/libravatarproxy.py b/libravatarproxy.py index c9dcca6..248902e 100755 --- a/libravatarproxy.py +++ b/libravatarproxy.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- import urllib.request import sys diff --git a/manage.py b/manage.py index 21b3133..fcd61ac 100755 --- a/manage.py +++ b/manage.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- import os import sys diff --git a/requirements.txt b/requirements.txt index f4cfbe9..ca357b4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -35,6 +35,7 @@ opentelemetry-instrumentation-urllib3>=0.42b0 opentelemetry-sdk>=1.20.0 Pillow pip +prettytable prometheus-client>=0.20.0 psycopg2-binary py3dns diff --git a/scripts/check_deployment.py b/scripts/check_deployment.py index d632a55..9b98cff 100755 --- a/scripts/check_deployment.py +++ b/scripts/check_deployment.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Libravatar Deployment Verification Script @@ -18,6 +17,7 @@ Usage: import argparse import json +import os import random import ssl import subprocess @@ -54,12 +54,19 @@ class Colors: def colored_print(message: str, color: str = Colors.NC) -> None: - """Print a colored message.""" - print(f"{color}{message}{Colors.NC}") + """Print a colored message with immediate flush.""" + print(f"{color}{message}{Colors.NC}", flush=True) def get_current_commit_hash() -> Optional[str]: - """Get the current commit hash from git.""" + """Get the current commit hash from git or CI environment.""" + # First try GitLab CI environment variable (most reliable in CI) + ci_commit = os.environ.get("CI_COMMIT_SHA") + if ci_commit: + colored_print(f"Using CI commit hash: {ci_commit}", Colors.BLUE) + return ci_commit + + # Fallback to git command try: result = subprocess.run( ["git", "rev-parse", "HEAD"], @@ -67,8 +74,11 @@ def get_current_commit_hash() -> Optional[str]: text=True, check=True, ) - return result.stdout.strip() + commit_hash = result.stdout.strip() + colored_print(f"Using git commit hash: {commit_hash}", Colors.BLUE) + return commit_hash except (subprocess.CalledProcessError, FileNotFoundError): + colored_print("Could not determine current commit hash", Colors.RED) return None @@ -82,16 +92,44 @@ def is_commit_newer_or_equal(commit1: str, commit2: str) -> Optional[bool]: None if comparison fails """ try: - # Use git merge-base to check if commit1 is reachable from commit2 - # If commit1 is newer or equal, it should be reachable from commit2 - subprocess.run( - ["git", "merge-base", "--is-ancestor", commit2, commit1], - capture_output=True, - check=True, - ) - return True + # First try to get commit timestamps for comparison + try: + result1 = subprocess.run( + ["git", "show", "-s", "--format=%ct", commit1], + capture_output=True, + text=True, + check=True, + ) + result2 = subprocess.run( + ["git", "show", "-s", "--format=%ct", commit2], + capture_output=True, + text=True, + check=True, + ) + + timestamp1 = int(result1.stdout.strip()) + timestamp2 = int(result2.stdout.strip()) + + colored_print(f"Commit {commit1[:8]} timestamp: {timestamp1}", Colors.BLUE) + colored_print(f"Commit {commit2[:8]} timestamp: {timestamp2}", Colors.BLUE) + + # commit1 is newer if it has a later timestamp + return timestamp1 >= timestamp2 + + except (subprocess.CalledProcessError, ValueError): + # Fallback to merge-base if timestamp comparison fails + colored_print("Timestamp comparison failed, trying merge-base", 
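The commit-recency check in check_deployment.py now prefers comparing committer timestamps and only falls back to merge-base ancestry when that fails. Reduced to its core, and assuming both commits are reachable in the local clone, the timestamp path looks like this:

    import subprocess

    def commit_timestamp(commit: str) -> int:
        # %ct prints the committer date as a UNIX timestamp
        result = subprocess.run(
            ["git", "show", "-s", "--format=%ct", commit],
            capture_output=True,
            text=True,
            check=True,
        )
        return int(result.stdout.strip())

    def is_newer_or_equal(commit1: str, commit2: str) -> bool:
        # commit1 counts as newer when it was committed at or after commit2
        return commit_timestamp(commit1) >= commit_timestamp(commit2)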
Colors.YELLOW) + + # Use git merge-base to check if commit2 is ancestor of commit1 + subprocess.run( + ["git", "merge-base", "--is-ancestor", commit2, commit1], + capture_output=True, + check=True, + ) + return True + except subprocess.CalledProcessError: - # If the above fails, try the reverse - check if commit2 is newer + # If the above fails, try the reverse try: subprocess.run( ["git", "merge-base", "--is-ancestor", commit1, commit2], @@ -100,8 +138,11 @@ def is_commit_newer_or_equal(commit1: str, commit2: str) -> Optional[bool]: ) return False except subprocess.CalledProcessError: - # If both fail, we can't determine the relationship + colored_print("Git comparison failed - shallow clone or missing commits", Colors.YELLOW) return None + except Exception as e: + colored_print(f"Git comparison error: {e}", Colors.RED) + return None def make_request( @@ -346,17 +387,30 @@ def test_deployment( # Check if we're looking for a specific version and compare current_commit = get_current_commit_hash() + version_ok = True + if current_commit and deployed_commit != "Unknown": + colored_print(f"Expected commit: {current_commit[:8]}...", Colors.BLUE) + colored_print(f"Deployed commit: {deployed_commit[:8]}...", Colors.BLUE) + if deployed_commit == current_commit: colored_print( "✅ Exact version match - deployment is up to date!", Colors.GREEN, ) + elif deployed_commit.startswith(current_commit[:8]) or current_commit.startswith(deployed_commit[:8]): + # Handle case where we have short vs long commit hashes + colored_print( + "✅ Version match (short hash) - deployment is up to date!", + Colors.GREEN, + ) else: - # Check if deployed version is newer + # Check if deployed version is newer using git comparison = is_commit_newer_or_equal( deployed_commit, current_commit ) + colored_print(f"Commit comparison result: {comparison}", Colors.BLUE) + if comparison is True: colored_print( "ℹ️ Note: A newer version is already deployed (this is fine!)", @@ -364,43 +418,66 @@ def test_deployment( ) elif comparison is False: colored_print( - "⚠️ Warning: Deployed version appears to be older than expected", + f"⚠️ Deployed version ({deployed_commit[:8]}) is older than expected ({current_commit[:8]})", Colors.YELLOW, ) - else: colored_print( - "⚠️ Warning: Could not determine version relationship", + f"Waiting for deployment to update... (attempt {attempt}/{max_retries})", + Colors.BLUE, + ) + version_ok = False + else: + # Git comparison failed - use simple string comparison as fallback + colored_print( + "⚠️ Git comparison failed - using string comparison fallback", Colors.YELLOW, ) - - # Run functionality tests - colored_print("Running basic functionality tests...", Colors.YELLOW) - - # Test avatar redirect - if test_avatar_redirect(base_url): - colored_print("✅ Invalid avatar redirects correctly", Colors.GREEN) + + # If commits are different, assume we need to wait + # This is safer than proceeding with wrong version + colored_print( + f"⚠️ Deployed version ({deployed_commit[:8]}) differs from expected ({current_commit[:8]})", + Colors.YELLOW, + ) + colored_print( + f"Waiting for deployment to update... 
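The version comparison also tolerates short hashes on either side by matching eight-character prefixes in both directions, so a truncated hash reported by the server still matches the full CI_COMMIT_SHA. The rule in isolation, with example values only:

    def commits_match(expected: str, deployed: str) -> bool:
        # Accept a match when either hash is an 8+ character prefix of the other
        return deployed.startswith(expected[:8]) or expected.startswith(deployed[:8])

    assert commits_match("0123456789abcdef0123456789abcdef01234567", "01234567")
    assert not commits_match("0123456789abcdef0123456789abcdef01234567", "fedcba98")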
(attempt {attempt}/{max_retries})", + Colors.BLUE, + ) + version_ok = False + + # Only proceed with functionality tests if version is correct + if not version_ok: + # Version is not correct, skip tests and retry + pass # Will continue to retry logic below else: - colored_print("❌ Invalid avatar redirect failed", Colors.RED) - return False + # Run functionality tests + colored_print("Running basic functionality tests...", Colors.YELLOW) - # Test avatar sizing - if test_avatar_sizing(base_url): - pass # Success messages are printed within the function - else: - return False + # Test avatar redirect + if test_avatar_redirect(base_url): + colored_print("✅ Invalid avatar redirects correctly", Colors.GREEN) + else: + colored_print("❌ Invalid avatar redirect failed", Colors.RED) + return False - # Test stats endpoint - if test_stats_endpoint(base_url): - colored_print("✅ Stats endpoint working", Colors.GREEN) - else: - colored_print("❌ Stats endpoint failed", Colors.RED) - return False + # Test avatar sizing + if test_avatar_sizing(base_url): + pass # Success messages are printed within the function + else: + return False - colored_print( - f"🎉 {name} deployment verification completed successfully!", - Colors.GREEN, - ) - return True + # Test stats endpoint + if test_stats_endpoint(base_url): + colored_print("✅ Stats endpoint working", Colors.GREEN) + else: + colored_print("❌ Stats endpoint failed", Colors.RED) + return False + + colored_print( + f"🎉 {name} deployment verification completed successfully!", + Colors.GREEN, + ) + return True else: colored_print(f"{name} site not responding yet...", Colors.YELLOW) @@ -408,7 +485,11 @@ def test_deployment( colored_print( f"Waiting {retry_delay} seconds before next attempt...", Colors.BLUE ) - time.sleep(retry_delay) + # Show progress during wait + for remaining in range(retry_delay, 0, -1): + print(f"\r⏳ Retrying in {remaining:2d} seconds...", end="", flush=True) + time.sleep(1) + print("\r" + " " * 30 + "\r", end="", flush=True) # Clear the line colored_print( f"❌ FAILED: {name} deployment verification timed out after {max_retries} attempts", diff --git a/scripts/performance_tests.py b/scripts/performance_tests.py index 485543e..0f14696 100644 --- a/scripts/performance_tests.py +++ b/scripts/performance_tests.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Performance testing script for Libravatar CI/CD pipeline @@ -12,13 +11,41 @@ import sys import time import statistics import hashlib +import random +import string +from typing import Dict, List, Any, Optional, Tuple # Add project root to path sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +from libravatar import libravatar_url +from urllib.parse import urlsplit +from prettytable import PrettyTable + + +def random_string(length=10): + """Return some random string with default length 10""" + return "".join( + random.SystemRandom().choice(string.ascii_lowercase + string.digits) + for _ in range(length) + ) + + +# Try to import Django utilities for local testing, fallback to local implementation +try: + from ivatar.utils import generate_random_email +except ImportError: + # Use local version for external testing + def generate_random_email(): + """Generate a random email address using the same pattern as test_views.py""" + username = random_string() + domain = random_string() + tld = random_string(2) + return f"{username}@{domain}.{tld}" + # Django setup - only for local testing -def setup_django(): +def setup_django() -> None: """Setup Django for 
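The retry wait in check_deployment.py is now visible in CI logs: instead of sleeping silently, the script rewrites a single line once per second, relying on flush=True to keep the output live. The same idea in isolation:

    import time

    def countdown(seconds: int) -> None:
        # Overwrite one line per tick instead of emitting a new log line each second
        for remaining in range(seconds, 0, -1):
            print(f"\rRetrying in {remaining:2d} seconds...", end="", flush=True)
            time.sleep(1)
        print("\r" + " " * 30 + "\r", end="", flush=True)  # clear the line

    countdown(3)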
local testing""" os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ivatar.settings") import django @@ -29,19 +56,32 @@ def setup_django(): class PerformanceTestRunner: """Main performance test runner""" + # Define all avatar styles and sizes to test + AVATAR_STYLES: List[str] = [ + "identicon", + "monsterid", + "robohash", + "pagan", + "retro", + "wavatar", + "mm", + "mmng", + ] + AVATAR_SIZES: List[int] = [80, 256] + def __init__( self, - base_url="http://localhost:8000", - concurrent_users=10, - test_cache=True, - remote_testing=False, - ): - self.base_url = base_url - self.concurrent_users = concurrent_users - self.test_cache = test_cache - self.remote_testing = remote_testing - self.client = None - self.results = {} + base_url: str = "http://localhost:8000", + concurrent_users: int = 10, + test_cache: bool = True, + remote_testing: bool = False, + ) -> None: + self.base_url: str = base_url + self.concurrent_users: int = concurrent_users + self.test_cache: bool = test_cache + self.remote_testing: bool = remote_testing + self.client: Optional[Any] = None # Django test client + self.results: Dict[str, Any] = {} # Determine if we're testing locally or remotely if remote_testing or not base_url.startswith("http://localhost"): @@ -55,7 +95,7 @@ class PerformanceTestRunner: self.client = Client() - def setup_test_data(self): + def setup_test_data(self) -> None: """Create test data for performance tests""" print("Setting up test data...") @@ -79,52 +119,249 @@ class PerformanceTestRunner: print(f"Created {len(test_emails)} test users and emails") - def test_avatar_generation_performance(self): - """Test avatar generation performance""" - print("\n=== Avatar Generation Performance Test ===") + def _generate_test_cases(self) -> List[Dict[str, Any]]: + """Generate test cases for all avatar styles and sizes""" + test_cases = [] + for style in self.AVATAR_STYLES: + for size in self.AVATAR_SIZES: + test_cases.append({"default": style, "size": size}) + return test_cases - # Test different avatar types and sizes - test_cases = [ - {"default": "identicon", "size": 80}, - {"default": "monsterid", "size": 80}, - {"default": "robohash", "size": 80}, - {"default": "identicon", "size": 256}, - {"default": "monsterid", "size": 256}, - ] + def _test_single_avatar_request( + self, case: Dict[str, Any], email: str, use_requests: bool = False + ) -> Dict[str, Any]: + """Test a single avatar request - shared logic for local and remote testing""" + # Use libravatar library to generate the URL + full_url = libravatar_url( + email=email, size=case["size"], default=case["default"] + ) - results = [] + # Extract path and query from the full URL + urlobj = urlsplit(full_url) + url_path = f"{urlobj.path}?{urlobj.query}" - for case in test_cases: - # Generate test hash - test_email = "perftest@example.com" - email_hash = hashlib.md5(test_email.encode()).hexdigest() + start_time = time.time() - # Build URL - url = f"/avatar/{email_hash}" - params = {"d": case["default"], "s": case["size"]} + if use_requests: + # Remote testing with requests + import requests - # Time the request - start_time = time.time() - response = self.client.get(url, params) - end_time = time.time() + url = f"{self.base_url}{url_path}" + try: + response = requests.get(url, timeout=10) + end_time = time.time() + duration = (end_time - start_time) * 1000 - duration = (end_time - start_time) * 1000 # Convert to ms + # Determine cache status from response headers + cache_detail = response.headers.get("x-cache-detail", "").lower() + age = 
response.headers.get("age", "0") + cache_status = "unknown" - results.append( - { + if "cache hit" in cache_detail or int(age) > 0: + cache_status = "hit" + elif "cache miss" in cache_detail or age == "0": + cache_status = "miss" + + return { "test": f"{case['default']}_{case['size']}px", "duration_ms": duration, "status_code": response.status_code, "content_length": len(response.content) if response.content else 0, + "success": response.status_code == 200, + "cache_status": cache_status, + "cache_detail": cache_detail, + "age": age, + "full_url": full_url, + "email": email, } - ) + except Exception as e: + end_time = time.time() + duration = (end_time - start_time) * 1000 + return { + "test": f"{case['default']}_{case['size']}px", + "duration_ms": duration, + "status_code": 0, + "success": False, + "error": str(e), + "cache_status": "error", + "full_url": full_url, + "email": email, + } + else: + # Local testing with Django test client + if self.client is None: + raise RuntimeError("Django test client not initialized") + response = self.client.get(url_path, follow=True) + end_time = time.time() + duration = (end_time - start_time) * 1000 - print(f" {case['default']} ({case['size']}px): {duration:.2f}ms") + # Check for cache information in response headers + cache_status = "unknown" + if hasattr(response, "get") and callable(getattr(response, "get", None)): + cache_control = response.get("Cache-Control", "") + age = response.get("Age", "0") + if age and int(age) > 0: + cache_status = "hit" + elif "no-cache" in cache_control: + cache_status = "miss" + else: + cache_status = "miss" # Default assumption for first generation + + # Handle content length for different response types + content_length = 0 + if hasattr(response, "content"): + content_length = len(response.content) if response.content else 0 + elif hasattr(response, "streaming_content"): + # For FileResponse, we can't easily get content length without consuming the stream + content_length = 1 # Just indicate there's content + + return { + "test": f"{case['default']}_{case['size']}px", + "duration_ms": duration, + "status_code": response.status_code, + "content_length": content_length, + "cache_status": cache_status, + "success": response.status_code == 200, + "full_url": full_url, + "email": email, + } + + def _display_avatar_results(self, results: List[Dict[str, Any]]) -> None: + """Display avatar test results using prettytable for perfect alignment""" + # Group results by avatar style + style_results: Dict[str, List[Dict[str, Any]]] = {} + for result in results: + style = result["test"].split("_")[0] # Extract style from test name + if style not in style_results: + style_results[style] = [] + style_results[style].append(result) + + # Create table + table = PrettyTable() + table.field_names = ["Avatar Style", "Size", "Time (ms)", "Status", "Cache"] + table.align["Avatar Style"] = "l" + table.align["Size"] = "r" + table.align["Time (ms)"] = "r" + table.align["Status"] = "c" + table.align["Cache"] = "c" + + # Add data to table + styles_with_data = [ + style for style in self.AVATAR_STYLES if style in style_results + ] + + for i, style in enumerate(styles_with_data): + style_data = style_results[style] + successful_results = [r for r in style_data if r.get("success", True)] + failed_results = [r for r in style_data if not r.get("success", True)] + + if successful_results: + # Calculate average + avg_duration = statistics.mean( + [r["duration_ms"] for r in successful_results] + ) + + # Determine overall cache status + cache_statuses = 
[ + r["cache_status"] + for r in successful_results + if r["cache_status"] != "unknown" + ] + if not cache_statuses: + cache_summary = "unknown" + elif all(status == "hit" for status in cache_statuses): + cache_summary = "hit" + elif all(status == "miss" for status in cache_statuses): + cache_summary = "miss" + else: + cache_summary = "mixed" + + # Determine status icon for average line + if len(failed_results) == 0: + avg_status_icon = "✅" # All successful + elif len(successful_results) == 0: + avg_status_icon = "❌" # All failed + else: + avg_status_icon = "⚠️" # Mixed results + + # Add average row + table.add_row( + [ + f"{style} (avg)", + "", + f"{avg_duration:.2f}", + avg_status_icon, + cache_summary, + ] + ) + + # Add individual size rows + for result in style_data: + size = result["test"].split("_")[1] # Extract size from test name + status_icon = "✅" if result.get("success", True) else "❌" + cache_status = result["cache_status"] + + if result.get("success", True): + table.add_row( + [ + "", + size, + f"{result['duration_ms']:.2f}", + status_icon, + cache_status, + ] + ) + else: + error_msg = result.get("error", "Failed") + table.add_row(["", size, error_msg, status_icon, cache_status]) + else: + # All requests failed + table.add_row([f"{style} (avg)", "", "Failed", "❌", "error"]) + for result in style_data: + size = result["test"].split("_")[1] + error_msg = result.get("error", "Failed") + table.add_row(["", size, error_msg, "❌", "error"]) + + # Add divider line between styles (except after the last style) + if i < len(styles_with_data) - 1: + table.add_row(["-" * 15, "-" * 5, "-" * 9, "-" * 6, "-" * 5]) + + print(table) + + def test_avatar_generation_performance(self) -> None: + """Test avatar generation performance""" + print("\n=== Avatar Generation Performance Test ===") + + # Generate test cases for all avatar styles and sizes + test_cases = self._generate_test_cases() + results = [] + + # Generate random email for testing + test_email = generate_random_email() + print(f" Testing with email: {test_email}") + + for case in test_cases: + result = self._test_single_avatar_request( + case, test_email, use_requests=False + ) + results.append(result) + + # Show example URL from first result + if results: + print(f" Example URL: {results[0]['full_url']}") + + # Display results grouped by style + self._display_avatar_results(results) # Calculate statistics - durations = [r["duration_ms"] for r in results] - avg_duration = statistics.mean(durations) - max_duration = max(durations) + successful_results = [r for r in results if r.get("success", True)] + if successful_results: + durations = [r["duration_ms"] for r in successful_results] + avg_duration = statistics.mean(durations) + max_duration = max(durations) + else: + avg_duration = 0 + max_duration = 0 print(f"\n Average: {avg_duration:.2f}ms") print(f" Maximum: {max_duration:.2f}ms") @@ -143,7 +380,7 @@ class PerformanceTestRunner: "results": results, } - def test_concurrent_load(self): + def test_concurrent_load(self, response_threshold: int = 1000, p95_threshold: int = 2000) -> None: """Test concurrent load handling""" print("\n=== Concurrent Load Test ===") @@ -160,6 +397,11 @@ class PerformanceTestRunner: successful_requests = [r for r in results if r["success"]] failed_requests = [r for r in results if not r["success"]] + # Analyze cache performance + cache_hits = [r for r in results if r.get("cache_status") == "hit"] + cache_misses = [r for r in results if r.get("cache_status") == "miss"] + cache_errors = [r for r in results if 
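Result rendering now goes through prettytable instead of manually padded columns, with an average row per style followed by the individual sizes. A reduced example with placeholder timings (not measured results):

    import statistics
    from prettytable import PrettyTable

    table = PrettyTable()
    table.field_names = ["Avatar Style", "Size", "Time (ms)", "Status", "Cache"]
    table.align["Avatar Style"] = "l"
    table.align["Size"] = "r"
    table.align["Time (ms)"] = "r"

    durations = {80: 112.4, 256: 158.9}  # placeholder values
    table.add_row(["identicon (avg)", "", f"{statistics.mean(durations.values()):.2f}", "✅", "miss"])
    for size, ms in durations.items():
        table.add_row(["", size, f"{ms:.2f}", "✅", "miss"])
    print(table)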
r.get("cache_status") == "error"] + total_duration = ( sum(r["duration_ms"] for r in results) / 1000 ) # Convert to seconds @@ -168,6 +410,20 @@ class PerformanceTestRunner: print(f" Successful requests: {len(successful_requests)}/{num_requests}") print(f" Failed requests: {len(failed_requests)}") + # Show cache statistics if available + if cache_hits or cache_misses: + print(f" Cache hits: {len(cache_hits)}") + print(f" Cache misses: {len(cache_misses)}") + if cache_errors: + print(f" Cache errors: {len(cache_errors)}") + + cache_hit_rate = ( + len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100 + if (cache_hits or cache_misses) + else 0 + ) + print(f" Cache hit rate: {cache_hit_rate:.1f}%") + if successful_requests: durations = [r["duration_ms"] for r in successful_requests] avg_duration = statistics.mean(durations) @@ -192,10 +448,10 @@ class PerformanceTestRunner: # Performance evaluation if len(failed_requests) > 0: print(" ⚠️ WARNING: Some operations failed under load") - elif p95_duration > 2000: # 2 seconds - print(" ⚠️ WARNING: 95th percentile response time exceeds 2s") - elif avg_duration > 1000: # 1 second - print(" ⚠️ CAUTION: Average response time exceeds 1s under load") + elif p95_duration > p95_threshold: + print(f" ⚠️ WARNING: 95th percentile response time exceeds {p95_threshold}ms") + elif avg_duration > response_threshold: + print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms under load") else: print(" ✅ Load handling is good") else: @@ -212,29 +468,51 @@ class PerformanceTestRunner: "requests_per_second": ( len(successful_requests) / total_duration if total_duration > 0 else 0 ), + "cache_hits": len(cache_hits), + "cache_misses": len(cache_misses), + "cache_errors": len(cache_errors), + "cache_hit_rate": ( + len(cache_hits) / (len(cache_hits) + len(cache_misses)) * 100 + if (cache_hits or cache_misses) + else 0 + ), } - def _test_remote_concurrent_load(self, num_requests): + def _test_remote_concurrent_load(self, num_requests: int) -> List[Dict[str, Any]]: """Test concurrent load against remote server""" import requests # noqa: F401 from concurrent.futures import ThreadPoolExecutor, as_completed def make_remote_request(thread_id): - test_email = f"perftest{thread_id % 10}@example.com" - email_hash = hashlib.md5(test_email.encode()).hexdigest() - url = f"{self.base_url}/avatar/{email_hash}" - params = {"d": "identicon", "s": 80} + test_email = generate_random_email() + + # Use libravatar library to generate the URL + full_url = libravatar_url(email=test_email, size=80, default="identicon") + urlobj = urlsplit(full_url) + url_path = f"{urlobj.path}?{urlobj.query}" + url = f"{self.base_url}{url_path}" start_time = time.time() try: - response = requests.get(url, params=params, timeout=10) + response = requests.get(url, timeout=10) end_time = time.time() + # Determine cache status + cache_detail = response.headers.get("x-cache-detail", "").lower() + age = response.headers.get("age", "0") + cache_status = "unknown" + + if "cache hit" in cache_detail or int(age) > 0: + cache_status = "hit" + elif "cache miss" in cache_detail or age == "0": + cache_status = "miss" + return { "thread_id": thread_id, "duration_ms": (end_time - start_time) * 1000, "status_code": response.status_code, "success": response.status_code == 200, + "cache_status": cache_status, } except Exception as e: end_time = time.time() @@ -243,6 +521,7 @@ class PerformanceTestRunner: "duration_ms": (end_time - start_time) * 1000, "success": False, "error": str(e), + "cache_status": 
"error", } results = [] @@ -260,7 +539,7 @@ class PerformanceTestRunner: return results - def _test_local_concurrent_load(self, num_requests): + def _test_local_concurrent_load(self, num_requests: int) -> List[Dict[str, Any]]: """Test concurrent load locally using avatar generation functions""" results = [] @@ -269,7 +548,7 @@ class PerformanceTestRunner: import Identicon for i in range(num_requests): - test_email = f"perftest{i % 10}@example.com" + test_email = generate_random_email() email_hash = hashlib.md5(test_email.encode()).hexdigest() request_start = time.time() @@ -283,6 +562,7 @@ class PerformanceTestRunner: "thread_id": i, "duration_ms": (request_end - request_start) * 1000, "success": len(identicon_data) > 0, + "cache_status": "miss", # Direct generation is always a cache miss } ) except Exception as e: @@ -293,6 +573,7 @@ class PerformanceTestRunner: "duration_ms": (request_end - request_start) * 1000, "success": False, "error": str(e), + "cache_status": "error", } ) @@ -314,6 +595,7 @@ class PerformanceTestRunner: "thread_id": i, "duration_ms": (request_end - request_start) * 1000, "success": True, + "cache_status": "n/a", # Database queries don't use image cache } ) except Exception as e: @@ -324,12 +606,13 @@ class PerformanceTestRunner: "duration_ms": (request_end - request_start) * 1000, "success": False, "error": str(e), + "cache_status": "error", } ) return results - def test_database_performance(self): + def test_database_performance(self) -> None: """Test database query performance""" print("\n=== Database Performance Test ===") @@ -378,7 +661,7 @@ class PerformanceTestRunner: else: print(f" ✅ Database query count is reasonable ({query_count} queries)") - def test_cache_performance(self): + def test_cache_performance(self) -> None: """Test caching effectiveness""" if not self.test_cache: print("\n=== Cache Performance Test ===") @@ -387,18 +670,17 @@ class PerformanceTestRunner: print("\n=== Cache Performance Test ===") - # Use an actual email address that exists in the system - test_email = "dev@libravatar.org" - email_hash = hashlib.md5(test_email.encode()).hexdigest() + # Generate a random email address for cache testing + test_email = generate_random_email() print(f" Testing with: {test_email}") if self.remote_testing: first_duration, second_duration = self._test_remote_cache_performance( - email_hash + test_email ) else: first_duration, second_duration = self._test_local_cache_performance( - email_hash + test_email ) print(f" First request: {first_duration:.2f}ms") @@ -453,16 +735,19 @@ class PerformanceTestRunner: "cache_headers": getattr(self, "cache_info", {}), } - def _test_remote_cache_performance(self, email_hash): + def _test_remote_cache_performance(self, email: str) -> Tuple[float, float]: """Test cache performance against remote server""" import requests - url = f"{self.base_url}/avatar/{email_hash}" - params = {"d": "identicon", "s": 80} + # Use libravatar library to generate the URL + full_url = libravatar_url(email=email, size=80, default="identicon") + urlobj = urlsplit(full_url) + url_path = f"{urlobj.path}?{urlobj.query}" + url = f"{self.base_url}{url_path}" # First request (should be cache miss or fresh) start_time = time.time() - response1 = requests.get(url, params=params, timeout=10) + response1 = requests.get(url, timeout=10) first_duration = (time.time() - start_time) * 1000 # Check first request headers @@ -480,7 +765,7 @@ class PerformanceTestRunner: # Second request (should be cache hit) start_time = time.time() - response2 = 
requests.get(url, params=params, timeout=10) + response2 = requests.get(url, timeout=10) second_duration = (time.time() - start_time) * 1000 # Check second request headers @@ -525,24 +810,28 @@ class PerformanceTestRunner: return first_duration, second_duration - def _test_local_cache_performance(self, email_hash): + def _test_local_cache_performance(self, email: str) -> Tuple[float, float]: """Test cache performance locally""" - url = f"/avatar/{email_hash}" - params = {"d": "identicon", "s": 80} + # Use libravatar library to generate the URL + full_url = libravatar_url(email=email, size=80, default="identicon") + urlobj = urlsplit(full_url) + url_path = f"{urlobj.path}?{urlobj.query}" # First request (cache miss) start_time = time.time() - self.client.get(url, params) + if self.client: + self.client.get(url_path) first_duration = (time.time() - start_time) * 1000 # Second request (should be cache hit) start_time = time.time() - self.client.get(url, params) + if self.client: + self.client.get(url_path) second_duration = (time.time() - start_time) * 1000 return first_duration, second_duration - def run_all_tests(self): + def run_all_tests(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000, ignore_cache_warnings: bool = False) -> Optional[Dict[str, Any]]: """Run all performance tests""" print("Starting Libravatar Performance Tests") print("=" * 50) @@ -557,14 +846,14 @@ class PerformanceTestRunner: # Run tests based on mode if self.remote_testing: print("🌐 Running remote server tests...") - self.test_remote_avatar_performance() + self.test_remote_avatar_performance(response_threshold) else: print("🏠 Running local tests...") self.test_avatar_generation_performance() self.test_database_performance() # Always test concurrent load - self.test_concurrent_load() + self.test_concurrent_load(response_threshold, p95_threshold) # Test cache performance if enabled self.test_cache_performance() @@ -576,7 +865,7 @@ class PerformanceTestRunner: print(f"Performance tests completed in {total_duration:.2f}s") # Overall assessment - self.assess_overall_performance() + self.assess_overall_performance(avatar_threshold, response_threshold, p95_threshold, ignore_cache_warnings) return self.results @@ -584,68 +873,30 @@ class PerformanceTestRunner: print(f"Performance test failed: {e}") return None - def test_remote_avatar_performance(self): + def test_remote_avatar_performance(self, response_threshold: int = 1000) -> None: """Test avatar generation performance on remote server""" print("\n=== Remote Avatar Performance Test ===") - import requests - - # Test different avatar types and sizes - test_cases = [ - {"default": "identicon", "size": 80}, - {"default": "monsterid", "size": 80}, - {"default": "robohash", "size": 80}, - {"default": "identicon", "size": 256}, - {"default": "monsterid", "size": 256}, - ] - + # Generate test cases for all avatar styles and sizes + test_cases = self._generate_test_cases() results = [] + # Generate random email for testing + test_email = generate_random_email() + print(f" Testing with email: {test_email}") + for case in test_cases: - # Generate test hash - test_email = "perftest@example.com" - email_hash = hashlib.md5(test_email.encode()).hexdigest() + result = self._test_single_avatar_request( + case, test_email, use_requests=True + ) + results.append(result) - # Build URL - url = f"{self.base_url}/avatar/{email_hash}" - params = {"d": case["default"], "s": case["size"]} + # Show example URL from first result + if results: + print(f" 
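The warnings compare average and 95th-percentile latencies against the new configurable thresholds. One straightforward way to compute such a percentile from the collected durations (not necessarily the exact method the script uses):

    import statistics

    durations = [120.0, 95.0, 210.0, 3050.0, 180.0, 140.0]  # placeholder samples in ms
    avg_duration = statistics.mean(durations)
    p95_duration = statistics.quantiles(durations, n=100)[94]  # 95th percentile estimate

    response_threshold = 1000  # ms, default for --response-threshold
    p95_threshold = 2000       # ms, default for --p95-threshold
    if p95_duration > p95_threshold:
        print(f"WARNING: 95th percentile response time exceeds {p95_threshold}ms")
    elif avg_duration > response_threshold:
        print(f"CAUTION: average response time exceeds {response_threshold}ms")
    else:
        print("Load handling is good")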
Example URL: {results[0]['full_url']}") - # Time the request - start_time = time.time() - try: - response = requests.get(url, params=params, timeout=10) - end_time = time.time() - - duration = (end_time - start_time) * 1000 # Convert to ms - - results.append( - { - "test": f"{case['default']}_{case['size']}px", - "duration_ms": duration, - "status_code": response.status_code, - "content_length": ( - len(response.content) if response.content else 0 - ), - "success": response.status_code == 200, - } - ) - - status = "✅" if response.status_code == 200 else "❌" - print( - f" {case['default']} ({case['size']}px): {duration:.2f}ms {status}" - ) - - except Exception as e: - print(f" {case['default']} ({case['size']}px): ❌ Failed - {e}") - results.append( - { - "test": f"{case['default']}_{case['size']}px", - "duration_ms": 0, - "status_code": 0, - "success": False, - "error": str(e), - } - ) + # Display results grouped by style + self._display_avatar_results(results) # Calculate statistics for successful requests successful_results = [r for r in results if r["success"]] @@ -659,10 +910,10 @@ class PerformanceTestRunner: print(f" Success rate: {len(successful_results)}/{len(results)}") # Performance thresholds for remote testing - if avg_duration > 2000: # 2 seconds - print(" ⚠️ WARNING: Average response time exceeds 2s") - elif avg_duration > 1000: # 1 second - print(" ⚠️ CAUTION: Average response time exceeds 1s") + if avg_duration > (response_threshold * 2): # 2x threshold for warning + print(f" ⚠️ WARNING: Average response time exceeds {response_threshold * 2}ms") + elif avg_duration > response_threshold: + print(f" ⚠️ CAUTION: Average response time exceeds {response_threshold}ms") else: print(" ✅ Remote avatar performance is good") else: @@ -677,7 +928,7 @@ class PerformanceTestRunner: "success_rate": len(successful_results) / len(results) if results else 0, } - def assess_overall_performance(self): + def assess_overall_performance(self, avatar_threshold: int = 1000, response_threshold: int = 1000, p95_threshold: int = 2000, ignore_cache_warnings: bool = False) -> bool: """Provide overall performance assessment""" print("\n=== OVERALL PERFORMANCE ASSESSMENT ===") @@ -686,8 +937,8 @@ class PerformanceTestRunner: # Check avatar generation if "avatar_generation" in self.results: avg_gen = self.results["avatar_generation"]["average_ms"] - if avg_gen > 1000: - warnings.append(f"Avatar generation is slow ({avg_gen:.0f}ms average)") + if avg_gen > avatar_threshold: + warnings.append(f"Avatar generation is slow ({avg_gen:.0f}ms average, threshold: {avatar_threshold}ms)") # Check concurrent load if "concurrent_load" in self.results: @@ -696,7 +947,7 @@ class PerformanceTestRunner: warnings.append(f"{failed} requests failed under concurrent load") # Check cache performance - if "cache_performance" in self.results: + if "cache_performance" in self.results and not ignore_cache_warnings: cache_working = self.results["cache_performance"].get( "cache_working", False ) @@ -722,7 +973,7 @@ class PerformanceTestRunner: return len(warnings) > 0 -def main(): +def main() -> Optional[Dict[str, Any]]: """Main entry point""" import argparse @@ -749,6 +1000,29 @@ def main(): action="store_true", help="Force remote testing mode (auto-detected for non-localhost URLs)", ) + parser.add_argument( + "--avatar-threshold", + type=int, + default=1000, + help="Avatar generation threshold in ms (default: 1000ms, use 2500 for dev environments)", + ) + parser.add_argument( + "--response-threshold", + type=int, + default=1000, + 
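The four new options end up as arguments to run_all_tests(), so the thresholds govern the assessment rather than being hard-coded. Driving the runner directly would look roughly like this, using the relaxed values the help text suggests for dev environments and assuming scripts/ is on sys.path:

    from performance_tests import PerformanceTestRunner  # assumes scripts/ is importable

    runner = PerformanceTestRunner(base_url="http://localhost:8000")
    results = runner.run_all_tests(
        avatar_threshold=2500,     # ms
        response_threshold=2500,   # ms
        p95_threshold=5000,        # ms
        ignore_cache_warnings=True,
    )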
help="Response time threshold in ms (default: 1000ms, use 2500 for dev environments)", + ) + parser.add_argument( + "--p95-threshold", + type=int, + default=2000, + help="95th percentile threshold in ms (default: 2000ms, use 5000 for dev environments)", + ) + parser.add_argument( + "--ignore-cache-warnings", + action="store_true", + help="Don't fail on cache performance warnings (useful for dev environments)", + ) args = parser.parse_args() @@ -765,7 +1039,7 @@ def main(): remote_testing=remote_testing, ) - results = runner.run_all_tests() + results = runner.run_all_tests(args.avatar_threshold, args.response_threshold, args.p95_threshold, args.ignore_cache_warnings) if args.output and results: import json diff --git a/scripts/run_tests_with_coverage.py b/scripts/run_tests_with_coverage.py index 73210c5..40e1308 100755 --- a/scripts/run_tests_with_coverage.py +++ b/scripts/run_tests_with_coverage.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Run tests with OpenTelemetry instrumentation and export enabled, plus coverage measurement. This script is designed to be used with 'coverage run' command. diff --git a/setup.py b/setup.py index 3524089..5b94644 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- from setuptools import setup, find_packages setup(