diff --git a/config.py b/config.py
index 1cd2228..22ecdde 100644
--- a/config.py
+++ b/config.py
@@ -296,6 +296,33 @@ TRUSTED_DEFAULT_URLS = list(map(map_legacy_config, TRUSTED_DEFAULT_URLS))
BLUESKY_IDENTIFIER = os.environ.get("BLUESKY_IDENTIFIER", None)
BLUESKY_APP_PASSWORD = os.environ.get("BLUESKY_APP_PASSWORD", None)
+# Celery Configuration
+# Try Redis first; fall back to the in-memory broker for development
+try:
+ import redis
+
+ redis.Redis(host="localhost", port=6379, db=0).ping()
+ CELERY_BROKER_URL = "redis://localhost:6379/0"
+except Exception: # pylint: disable=broad-except
+ # Fall back to the in-memory broker (per-process; a separately started worker will not see queued tasks)
+ CELERY_BROKER_URL = "memory://"
+ print("Warning: Redis not available, using memory broker for development")
+
+CELERY_RESULT_BACKEND = "django-db"
+CELERY_ACCEPT_CONTENT = ["json"]
+CELERY_TASK_SERIALIZER = "json"
+CELERY_RESULT_SERIALIZER = "json"
+CELERY_TIMEZONE = "UTC"
+CELERY_TASK_TRACK_STARTED = True
+CELERY_TASK_TIME_LIMIT = 300 # 5 minutes
+CELERY_TASK_SOFT_TIME_LIMIT = 240 # 4 minutes
+CELERY_WORKER_PREFETCH_MULTIPLIER = 1
+CELERY_TASK_ACKS_LATE = True
+CELERY_RESULT_EXPIRES = 3600 # 1 hour
+CELERY_WORKER_CONCURRENCY = (
+ 1 # Max 1 parallel avatar generation task for local development
+)
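+
+# Illustrative: a development worker for these settings can be started with
+#   celery -A ivatar worker --loglevel=info
+# Any CELERY_* value above can be overridden in config_local.py (imported below).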
+
# This MUST BE THE LAST!
if os.path.isfile(os.path.join(BASE_DIR, "config_local.py")):
from config_local import * # noqa # flake8: noqa # NOQA # pragma: no cover
diff --git a/ivatar/__init__.py b/ivatar/__init__.py
index 0649992..13dc53f 100644
--- a/ivatar/__init__.py
+++ b/ivatar/__init__.py
@@ -3,4 +3,10 @@
Module init
"""
+# This will make sure the app is always imported when
+# Django starts so that shared_task will use this app.
+from .celery import app as celery_app
+
+__all__ = ("celery_app",)
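+
+# Illustrative: a task declared elsewhere in the project as
+#   from celery import shared_task
+#   @shared_task
+#   def example_task():
+#       ...
+# is registered against celery_app once Django imports this package.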
+
app_label = __name__ # pylint: disable=invalid-name
diff --git a/ivatar/ai_service.py b/ivatar/ai_service.py
new file mode 100644
index 0000000..bdb0acc
--- /dev/null
+++ b/ivatar/ai_service.py
@@ -0,0 +1,461 @@
+# -*- coding: utf-8 -*-
+"""
+AI service module for text-to-image avatar generation
+Supports Stable Diffusion (local and API) for professional avatar generation
+"""
+
+import logging
+import requests
+import base64
+from io import BytesIO
+from PIL import Image
+from django.conf import settings
+
+logger = logging.getLogger(__name__)
+
+
+class AIServiceError(Exception):
+ """Custom exception for AI service errors"""
+
+ pass
+
+
+class StableDiffusionService:
+ """
+ Service for generating images using Stable Diffusion (local or API)
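+
+ Expected Django settings (illustrative values; all optional: without an API
+ URL and key the service tries a local diffusers pipeline, then a drawn
+ placeholder):
+ STABLE_DIFFUSION_API_URL = "https://sd.example.invalid/generate"
+ STABLE_DIFFUSION_API_KEY = "..."
+ STABLE_DIFFUSION_TIMEOUT = 60 # seconds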
+ """
+
+ # Model-specific token limits
+ TOKEN_LIMITS = {
+ "stable_diffusion": 77, # CLIP tokenizer limit
+ "stable_diffusion_v2": 77,
+ "stable_diffusion_xl": 77,
+ }
+
+ def __init__(self):
+ self.api_url = getattr(settings, "STABLE_DIFFUSION_API_URL", None)
+ self.api_key = getattr(settings, "STABLE_DIFFUSION_API_KEY", None)
+ self.timeout = getattr(settings, "STABLE_DIFFUSION_TIMEOUT", 60)
+ self._pipe = None # Cache for local model
+ self._tokenizer = None # Cache for tokenizer
+
+ def generate_image(
+ self, prompt, size=(512, 512), quality="medium", allow_nsfw=False
+ ):
+ """
+ Generate an image from text prompt using Stable Diffusion
+
+ Args:
+ prompt (str): Text description of the desired image
+ size (tuple): Image dimensions (width, height)
+ quality (str): Generation quality ('low', 'medium', 'high')
+ allow_nsfw (bool): Whether to allow potentially NSFW content
+
+ Returns:
+ PIL.Image: Generated image
+
+ Raises:
+ AIServiceError: If generation fails or prompt is too long
+ """
+ # Validate prompt length first
+ validation = self.validate_prompt(prompt)
+ if not validation["valid"]:
+ raise AIServiceError(validation["warning"])
+
+ try:
+ if self.api_url and self.api_key:
+ return self._generate_via_api(prompt, size, quality, allow_nsfw)
+ else:
+ return self._generate_locally(prompt, size, quality, allow_nsfw)
+ except AIServiceError:
+ # Re-raise specific service errors (e.g. content warnings) without re-wrapping
+ raise
+ except Exception as e:
+ logger.error(f"Failed to generate image: {e}")
+ raise AIServiceError(f"Image generation failed: {str(e)}")
+
+ def validate_prompt(self, prompt, model="stable_diffusion"):
+ """
+ Validate prompt length against model token limits
+
+ Args:
+ prompt (str): Text prompt to validate
+ model (str): Model name to check limits for
+
+ Returns:
+ dict: Validation result with 'valid', 'token_count', 'limit', 'warning'
+ """
+ try:
+ token_count = self._count_tokens(prompt)
+ limit = self.TOKEN_LIMITS.get(model, 77)
+
+ is_valid = token_count <= limit
+ warning = None
+
+ if not is_valid:
+ warning = f"Prompt too long: {token_count} tokens (limit: {limit}). Please shorten your prompt."
+
+ return {
+ "valid": is_valid,
+ "token_count": token_count,
+ "limit": limit,
+ "warning": warning,
+ }
+
+ except Exception as e:
+ logger.warning(f"Token counting failed: {e}")
+ return {
+ "valid": True, # Allow generation if counting fails
+ "token_count": 0,
+ "limit": 77,
+ "warning": None,
+ }
+
+ def _count_tokens(self, prompt):
+ """
+ Count tokens in a prompt using CLIP tokenizer
+ """
+ try:
+ if self._tokenizer is None:
+ from transformers import CLIPTokenizer
+
+ self._tokenizer = CLIPTokenizer.from_pretrained(
+ "openai/clip-vit-base-patch32"
+ )
+
+ tokens = self._tokenizer(prompt, return_tensors="pt", truncation=False)[
+ "input_ids"
+ ]
+ return tokens.shape[1]
+
+ except ImportError:
+ # Fallback: rough word-based estimate (CLIP typically yields ~1.3 tokens per English word)
+ words = len(prompt.split())
+ return int(words * 1.3)
+ except Exception as e:
+ logger.warning(f"Token counting error: {e}")
+ # Fallback: rough word-based estimate
+ words = len(prompt.split())
+ return int(words * 1.3)
+
+ def _is_black_image(self, image):
+ """
+ Check if an image is completely black (common NSFW response from APIs)
+
+ Args:
+ image (PIL.Image): Image to check
+
+ Returns:
+ bool: True if image is completely black
+ """
+ # Convert to RGB if necessary
+ if image.mode != "RGB":
+ image = image.convert("RGB")
+
+ # Get image data
+ pixels = list(image.getdata())
+
+ # Check if all pixels are black (0, 0, 0)
+ black_pixels = sum(1 for r, g, b in pixels if r == 0 and g == 0 and b == 0)
+ total_pixels = len(pixels)
+
+ # Consider it a black image if more than 95% of pixels are black
+ return (black_pixels / total_pixels) > 0.95
+
+ def _generate_via_api(self, prompt, size, quality, allow_nsfw=False):
+ """
+ Generate image via Stable Diffusion API (Replicate, Hugging Face, etc.)
+ """
+ # Enhanced prompt for avatar generation
+ enhanced_prompt = f"""professional avatar portrait, {prompt}, high quality, detailed, clean background, centered composition, profile picture style, photorealistic"""
+
+ headers = {
+ "Authorization": f"Bearer {self.api_key}",
+ "Content-Type": "application/json",
+ }
+
+ payload = {
+ "prompt": enhanced_prompt,
+ "width": size[0],
+ "height": size[1],
+ "num_inference_steps": 25
+ if quality == "high"
+ else (20 if quality == "medium" else 15),
+ "guidance_scale": 7.5, # Balanced for quality and speed
+ "negative_prompt": "blurry, low quality, distorted, ugly, deformed, bad anatomy",
+ }
+
+ # Add NSFW safety setting if supported by the API
+ if allow_nsfw:
+ payload["safety_tolerance"] = 2 # Some APIs support this
+ payload["nsfw"] = True # Some APIs support this
+
+ response = requests.post(
+ self.api_url, json=payload, headers=headers, timeout=self.timeout
+ )
+
+ if response.status_code != 200:
+ error_msg = f"Stable Diffusion API request failed: {response.status_code}"
+ try:
+ error_detail = response.json()
+ error_msg += f" - {error_detail}"
+
+ # Check for NSFW content detection
+ if isinstance(error_detail, dict):
+ error_text = str(error_detail).lower()
+ if (
+ "nsfw" in error_text
+ or "inappropriate" in error_text
+ or "black image" in error_text
+ ):
+ if allow_nsfw:
+ # If user allowed NSFW but still got blocked, provide a different message
+ raise AIServiceError(
+ "Content warning: The AI service still detected inappropriate content even with relaxed settings. Please try a different prompt or contact support if you believe this is an error."
+ )
+ else:
+ raise AIServiceError(
+ "Content warning: The AI service detected potentially inappropriate content in your prompt. Please modify your description to be more appropriate for all ages and try again."
+ )
+ elif isinstance(error_detail, str):
+ if (
+ "nsfw" in error_detail.lower()
+ or "inappropriate" in error_detail.lower()
+ or "black image" in error_detail.lower()
+ ):
+ if allow_nsfw:
+ raise AIServiceError(
+ "Content warning: The AI service still detected inappropriate content even with relaxed settings. Please try a different prompt or contact support if you believe this is an error."
+ )
+ else:
+ raise AIServiceError(
+ "Content warning: The AI service detected potentially inappropriate content in your prompt. Please modify your description to be more appropriate for all ages and try again."
+ )
+
+ except AIServiceError:
+ # Re-raise our custom NSFW error
+ raise
+ except Exception: # pylint: disable=broad-except
+ error_msg += f" - {response.text}"
+
+ # Also check response text for NSFW warnings
+ if (
+ "nsfw" in response.text.lower()
+ or "inappropriate" in response.text.lower()
+ or "black image" in response.text.lower()
+ ):
+ if allow_nsfw:
+ raise AIServiceError(
+ "Content warning: The AI service still detected inappropriate content even with relaxed settings. Please try a different prompt or contact support if you believe this is an error."
+ )
+ else:
+ raise AIServiceError(
+ "Content warning: The AI service detected potentially inappropriate content in your prompt. Please modify your description to be more appropriate for all ages and try again."
+ )
+
+ raise AIServiceError(error_msg)
+
+ result = response.json()
+
+ if "image" in result:
+ # Decode base64 image
+ image_data = base64.b64decode(result["image"])
+ image = Image.open(BytesIO(image_data))
+
+ # Check if the image is completely black (common NSFW response)
+ if not allow_nsfw and self._is_black_image(image):
+ raise AIServiceError(
+ "Content warning: The AI service detected potentially inappropriate content in your prompt and returned a black image. Please modify your description to be more appropriate for all ages and try again."
+ )
+
+ return image
+ else:
+ raise AIServiceError("No image data in API response")
+
+ def _generate_locally(self, prompt, size, quality, allow_nsfw=False):
+ """
+ Generate image using local Stable Diffusion installation
+ This requires diffusers library and a local model
+ """
+ try:
+ from diffusers import StableDiffusionPipeline
+ import torch
+
+ # Enhanced prompt for avatar generation
+ enhanced_prompt = f"""professional avatar portrait, {prompt}, high quality, detailed, clean background, centered composition, profile picture style, photorealistic"""
+
+ # Use cached model if available, otherwise load it
+ if self._pipe is None:
+ logger.info("Loading Stable Diffusion model (first time or cache miss)")
+ self._pipe = StableDiffusionPipeline.from_pretrained(
+ "runwayml/stable-diffusion-v1-5",
+ torch_dtype=torch.float16
+ if torch.cuda.is_available()
+ else torch.float32,
+ )
+
+ if torch.cuda.is_available():
+ self._pipe = self._pipe.to("cuda")
+ else:
+ logger.info("Using cached Stable Diffusion model")
+
+ pipe = self._pipe
+
+ # Disable safety checker if NSFW override is enabled
+ if allow_nsfw:
+ pipe.safety_checker = None
+ pipe.requires_safety_checker = False
+
+ # Generate image with optimized settings for speed
+ image = pipe(
+ enhanced_prompt,
+ height=size[1],
+ width=size[0],
+ num_inference_steps=25
+ if quality == "high"
+ else (20 if quality == "medium" else 15),
+ guidance_scale=7.5, # Balanced for quality and speed
+ negative_prompt="blurry, low quality, distorted, ugly, deformed, bad anatomy",
+ ).images[0]
+
+ return image
+
+ except ImportError:
+ logger.warning(
+ "diffusers library not installed, falling back to placeholder"
+ )
+ return self._generate_placeholder(prompt, size)
+ except Exception as e:
+ logger.error(f"Local Stable Diffusion generation failed: {e}")
+ return self._generate_placeholder(prompt, size)
+
+ def _generate_placeholder(self, prompt, size):
+ """
+ Generate a placeholder image when Stable Diffusion is not available
+ """
+ logger.info("Generating placeholder image")
+
+ # Create a more sophisticated placeholder
+ img = Image.new("RGBA", size, color=(240, 248, 255, 255))
+
+ from PIL import ImageDraw, ImageFont
+
+ draw = ImageDraw.Draw(img)
+
+ try:
+ font = ImageFont.load_default()
+ except Exception: # pylint: disable=broad-except
+ font = None
+
+ # Add title
+ title = "AI Avatar (Stable Diffusion)"
+ draw.text((10, 10), title, fill="darkblue", font=font)
+
+ # Add prompt
+ prompt_text = f"Prompt: {prompt[:50]}..."
+ draw.text((10, 40), prompt_text, fill="black", font=font)
+
+ # Add note
+ note = "Install Stable Diffusion for real generation"
+ draw.text((10, 70), note, fill="darkgreen", font=font)
+
+ # Create a more sophisticated avatar placeholder
+ center_x, center_y = size[0] // 2, size[1] // 2 + 20
+ radius = min(size) // 4
+
+ # Face circle
+ draw.ellipse(
+ [
+ center_x - radius,
+ center_y - radius,
+ center_x + radius,
+ center_y + radius,
+ ],
+ outline="purple",
+ width=3,
+ fill=(255, 240, 245),
+ )
+
+ # Eyes
+ eye_radius = radius // 4
+ draw.ellipse(
+ [
+ center_x - radius // 2 - eye_radius,
+ center_y - radius // 2 - eye_radius,
+ center_x - radius // 2 + eye_radius,
+ center_y - radius // 2 + eye_radius,
+ ],
+ fill="blue",
+ )
+ draw.ellipse(
+ [
+ center_x + radius // 2 - eye_radius,
+ center_y - radius // 2 - eye_radius,
+ center_x + radius // 2 + eye_radius,
+ center_y - radius // 2 + eye_radius,
+ ],
+ fill="blue",
+ )
+
+ # Smile
+ smile_y = center_y + radius // 3
+ draw.arc(
+ [
+ center_x - radius // 2,
+ smile_y - radius // 4,
+ center_x + radius // 2,
+ smile_y + radius // 4,
+ ],
+ 0,
+ 180,
+ fill="red",
+ width=3,
+ )
+
+ return img
+
+
+def validate_avatar_prompt(prompt, model="stable_diffusion"):
+ """
+ Convenience function to validate avatar prompts
+
+ Args:
+ prompt (str): Text description of the avatar
+ model (str): AI model to use
+
+ Returns:
+ dict: Validation result with 'valid', 'token_count', 'limit', 'warning'
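+
+ Example (illustrative; token counting falls back to a word-based
+ estimate if the transformers CLIP tokenizer is not installed):
+ result = validate_avatar_prompt("A friendly robot with blue eyes")
+ if not result["valid"]:
+ print(result["warning"])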
+ """
+ if model == "stable_diffusion":
+ service = StableDiffusionService()
+ return service.validate_prompt(prompt, model)
+ else:
+ # For other models, assume they're valid
+ return {"valid": True, "token_count": 0, "limit": 0, "warning": None}
+
+
+def generate_avatar_image(
+ prompt,
+ model="stable_diffusion",
+ size=(512, 512),
+ quality="medium",
+ allow_nsfw=False,
+):
+ """
+ Convenience function to generate avatar images
+
+ Args:
+ prompt (str): Text description of the avatar
+ model (str): AI model to use (currently only 'stable_diffusion')
+ size (tuple): Image dimensions
+ quality (str): Generation quality ('low', 'medium', 'high')
+ allow_nsfw (bool): Whether to allow potentially NSFW content
+
+ Returns:
+ PIL.Image: Generated avatar image
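+
+ Example (illustrative; without a configured API or a local diffusers
+ install this falls back to a drawn placeholder image):
+ image = generate_avatar_image("A friendly robot with blue eyes", quality="low")
+ image.save("avatar.png")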
+ """
+ if model == "stable_diffusion":
+ service = StableDiffusionService()
+ return service.generate_image(prompt, size, quality, allow_nsfw)
+ else:
+ raise AIServiceError(
+ f"Unsupported model: {model}. Only 'stable_diffusion' is currently supported."
+ )
diff --git a/ivatar/celery.py b/ivatar/celery.py
new file mode 100644
index 0000000..0a6ea92
--- /dev/null
+++ b/ivatar/celery.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+"""
+Celery configuration for ivatar
+"""
+
+import os
+from celery import Celery
+from django.conf import settings
+
+# Set the default Django settings module for the 'celery' program.
+os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ivatar.settings")
+
+app = Celery("ivatar")
+
+# Using a string here means the worker doesn't have to serialize
+# the configuration object to child processes.
+app.config_from_object("django.conf:settings", namespace="CELERY")
+
+# Load task modules from all registered Django apps.
+app.autodiscover_tasks()
+
+# Celery configuration
+app.conf.update(
+ # Task routing - use default queue for simplicity
+ task_default_queue="default",
+ task_routes={
+ "ivatar.tasks.generate_avatar_task": {"queue": "default"},
+ "ivatar.tasks.update_queue_positions": {"queue": "default"},
+ "ivatar.tasks.cleanup_old_tasks": {"queue": "default"},
+ },
+ # Worker configuration
+ worker_prefetch_multiplier=1,
+ task_acks_late=True,
+ # Result backend
+ result_backend="django-db",
+ result_expires=3600, # 1 hour
+ # Task time limits
+ task_time_limit=300, # 5 minutes
+ task_soft_time_limit=240, # 4 minutes
+ # Task serialization
+ task_serializer="json",
+ accept_content=["json"],
+ result_serializer="json",
+ # Timezone
+ timezone="UTC",
+ enable_utc=True,
+)
+
+# Set worker concurrency from Django settings
+if hasattr(settings, "CELERY_WORKER_CONCURRENCY"):
+ app.conf.worker_concurrency = settings.CELERY_WORKER_CONCURRENCY
+
+
+@app.task(bind=True)
+def debug_task(self):
+ print(f"Request: {self.request!r}")
diff --git a/ivatar/ivataraccount/forms.py b/ivatar/ivataraccount/forms.py
index a2f4dcd..f06e9be 100644
--- a/ivatar/ivataraccount/forms.py
+++ b/ivatar/ivataraccount/forms.py
@@ -217,6 +217,89 @@ class UploadLibravatarExportForm(forms.Form):
)
+class GenerateAvatarForm(forms.Form):
+ """
+ Form for generating avatars using AI text-to-image
+ """
+
+ MODEL_CHOICES = [
+ ("stable_diffusion", "Stable Diffusion"),
+ # Future models can be added here
+ ]
+
+ prompt = forms.CharField(
+ label=_("Avatar Description"),
+ max_length=500,
+ widget=forms.Textarea(
+ attrs={
+ "rows": 3,
+ "placeholder": _(
+ 'Describe the avatar you want to create, e.g., "A friendly robot with blue eyes"'
+ ),
+ "id": "id_prompt",
+ "data-token-limit": "77",
+ "data-model": "stable_diffusion",
+ }
+ ),
+ help_text=_(
+ "Describe the avatar you want to generate. Be specific about appearance, style, and mood.
Stable Diffusion has a 77-token limit. Keep your description concise for best results."
+ ),
+ )
+
+ model = forms.ChoiceField(
+ label=_("AI Model"),
+ choices=MODEL_CHOICES,
+ initial="stable_diffusion",
+ help_text=_("Select the AI model to use for generation."),
+ )
+
+ quality = forms.ChoiceField(
+ label=_("Generation Quality"),
+ choices=[
+ ("low", _("Low (faster, lower quality)")),
+ ("medium", _("Medium (balanced)")),
+ ("high", _("High (slower, better quality)")),
+ ],
+ initial="medium",
+ help_text=_("Higher quality takes longer but produces better results."),
+ )
+
+ not_porn = forms.BooleanField(
+ label=_("Suitable for all ages (no offensive content)"),
+ required=True,
+ error_messages={
+ "required": _(
+ 'We only host "G-rated" images and so this field must be checked.'
+ )
+ },
+ )
+
+ can_distribute = forms.BooleanField(
+ label=_("Can be freely copied"),
+ required=True,
+ error_messages={
+ "required": _(
+ "This field must be checked since we need to be able to distribute photos to third parties."
+ )
+ },
+ )
+
+ def clean_prompt(self):
+ """Validate prompt length against token limits"""
+ prompt = self.cleaned_data.get("prompt", "")
+ model = self.cleaned_data.get("model", "stable_diffusion")
+
+ if prompt:
+ from ivatar.ai_service import validate_avatar_prompt
+
+ validation = validate_avatar_prompt(prompt, model)
+
+ if not validation["valid"]:
+ raise forms.ValidationError(validation["warning"])
+
+ return prompt
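+
+ # Illustrative view-side usage of this form:
+ # form = GenerateAvatarForm(request.POST)
+ # if form.is_valid():
+ # prompt = form.cleaned_data["prompt"]
+ # quality = form.cleaned_data["quality"]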
+
+
class DeleteAccountForm(forms.Form):
password = forms.CharField(
label=_("Password"), required=False, widget=forms.PasswordInput()
diff --git a/ivatar/ivataraccount/migrations/0021_add_ai_generation_fields.py b/ivatar/ivataraccount/migrations/0021_add_ai_generation_fields.py
new file mode 100644
index 0000000..6559c15
--- /dev/null
+++ b/ivatar/ivataraccount/migrations/0021_add_ai_generation_fields.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 5.2.1 on 2025-09-17 10:06
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("ivataraccount", "0020_confirmedopenid_bluesky_handle"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="photo",
+ name="ai_generated",
+ field=models.BooleanField(
+ default=False, help_text="Whether this photo was generated by AI"
+ ),
+ ),
+ migrations.AddField(
+ model_name="photo",
+ name="ai_model",
+ field=models.CharField(
+ blank=True,
+ help_text="The AI model used for generation",
+ max_length=50,
+ null=True,
+ ),
+ ),
+ migrations.AddField(
+ model_name="photo",
+ name="ai_prompt",
+ field=models.TextField(
+ blank=True,
+ help_text="The prompt used to generate this image",
+ null=True,
+ ),
+ ),
+ migrations.AddField(
+ model_name="photo",
+ name="ai_quality",
+ field=models.CharField(
+ blank=True,
+ help_text="The quality setting used",
+ max_length=20,
+ null=True,
+ ),
+ ),
+ ]
diff --git a/ivatar/ivataraccount/migrations/0022_add_generation_task.py b/ivatar/ivataraccount/migrations/0022_add_generation_task.py
new file mode 100644
index 0000000..dfc67f7
--- /dev/null
+++ b/ivatar/ivataraccount/migrations/0022_add_generation_task.py
@@ -0,0 +1,80 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 5.2.1 on 2025-09-17 10:25
+
+import django.db.models.deletion
+import django.utils.timezone
+from django.conf import settings
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("ivataraccount", "0021_add_ai_generation_fields"),
+ migrations.swappable_dependency(settings.AUTH_USER_MODEL),
+ ]
+
+ operations = [
+ migrations.CreateModel(
+ name="GenerationTask",
+ fields=[
+ (
+ "id",
+ models.BigAutoField(
+ auto_created=True,
+ primary_key=True,
+ serialize=False,
+ verbose_name="ID",
+ ),
+ ),
+ (
+ "ip_address",
+ models.GenericIPAddressField(null=True, unpack_ipv4=True),
+ ),
+ ("add_date", models.DateTimeField(default=django.utils.timezone.now)),
+ ("prompt", models.TextField()),
+ ("model", models.CharField(max_length=50)),
+ ("quality", models.CharField(max_length=20)),
+ ("allow_nsfw", models.BooleanField(default=False)),
+ (
+ "status",
+ models.CharField(
+ choices=[
+ ("pending", "Pending"),
+ ("processing", "Processing"),
+ ("completed", "Completed"),
+ ("failed", "Failed"),
+ ("cancelled", "Cancelled"),
+ ],
+ default="pending",
+ max_length=20,
+ ),
+ ),
+ ("progress", models.IntegerField(default=0)),
+ ("queue_position", models.IntegerField(default=0)),
+ ("task_id", models.CharField(blank=True, max_length=255, null=True)),
+ ("error_message", models.TextField(blank=True, null=True)),
+ (
+ "generated_photo",
+ models.ForeignKey(
+ blank=True,
+ null=True,
+ on_delete=django.db.models.deletion.SET_NULL,
+ to="ivataraccount.photo",
+ ),
+ ),
+ (
+ "user",
+ models.ForeignKey(
+ on_delete=django.db.models.deletion.CASCADE,
+ to=settings.AUTH_USER_MODEL,
+ ),
+ ),
+ ],
+ options={
+ "verbose_name": "Generation Task",
+ "verbose_name_plural": "Generation Tasks",
+ "ordering": ["-add_date"],
+ },
+ ),
+ ]
diff --git a/ivatar/ivataraccount/migrations/0023_add_ai_invalid_field.py b/ivatar/ivataraccount/migrations/0023_add_ai_invalid_field.py
new file mode 100644
index 0000000..ddfe845
--- /dev/null
+++ b/ivatar/ivataraccount/migrations/0023_add_ai_invalid_field.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 5.2.1 on 2025-09-17 10:41
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("ivataraccount", "0022_add_generation_task"),
+ ]
+
+ operations = [
+ migrations.AddField(
+ model_name="photo",
+ name="ai_invalid",
+ field=models.BooleanField(
+ default=False,
+ help_text="Whether this AI-generated image is invalid (black, etc.)",
+ ),
+ ),
+ ]
diff --git a/ivatar/ivataraccount/models.py b/ivatar/ivataraccount/models.py
index 25a4d86..9f6ebbb 100644
--- a/ivatar/ivataraccount/models.py
+++ b/ivatar/ivataraccount/models.py
@@ -118,6 +118,42 @@ class BaseAccountModel(models.Model):
abstract = True
+class GenerationTask(BaseAccountModel):
+ """
+ Model to track avatar generation tasks in the queue
+ """
+
+ STATUS_CHOICES = [
+ ("pending", _("Pending")),
+ ("processing", _("Processing")),
+ ("completed", _("Completed")),
+ ("failed", _("Failed")),
+ ("cancelled", _("Cancelled")),
+ ]
+
+ user = models.ForeignKey(User, on_delete=models.CASCADE)
+ prompt = models.TextField()
+ model = models.CharField(max_length=50)
+ quality = models.CharField(max_length=20)
+ allow_nsfw = models.BooleanField(default=False)
+ status = models.CharField(max_length=20, choices=STATUS_CHOICES, default="pending")
+ progress = models.IntegerField(default=0) # 0-100
+ queue_position = models.IntegerField(default=0)
+ task_id = models.CharField(max_length=255, blank=True, null=True) # Celery task ID
+ error_message = models.TextField(blank=True, null=True)
+ generated_photo = models.ForeignKey(
+ "Photo", on_delete=models.SET_NULL, blank=True, null=True
+ )
+
+ class Meta:
+ ordering = ["-add_date"]
+ verbose_name = _("Generation Task")
+ verbose_name_plural = _("Generation Tasks")
+
+ def __str__(self):
+ return f"Task {self.pk}: {self.prompt[:50]}... ({self.status})"
+
+
class Photo(BaseAccountModel):
"""
Model holding the photos and information about them
@@ -128,6 +164,27 @@ class Photo(BaseAccountModel):
format = models.CharField(max_length=4)
access_count = models.BigIntegerField(default=0, editable=False)
+ # AI Generation metadata
+ ai_generated = models.BooleanField(
+ default=False, help_text=_("Whether this photo was generated by AI")
+ )
+ ai_prompt = models.TextField(
+ blank=True, null=True, help_text=_("The prompt used to generate this image")
+ )
+ ai_model = models.CharField(
+ max_length=50,
+ blank=True,
+ null=True,
+ help_text=_("The AI model used for generation"),
+ )
+ ai_quality = models.CharField(
+ max_length=20, blank=True, null=True, help_text=_("The quality setting used")
+ )
+ ai_invalid = models.BooleanField(
+ default=False,
+ help_text=_("Whether this AI-generated image is invalid (black, etc.)"),
+ )
+
class Meta: # pylint: disable=too-few-public-methods
"""
Class attributes
@@ -136,6 +193,49 @@ class Photo(BaseAccountModel):
verbose_name = _("photo")
verbose_name_plural = _("photos")
+ def is_valid_avatar(self):
+ """
+ Check if this photo is a valid avatar (not black/invalid)
+ """
+ if not self.ai_generated:
+ return True # Non-AI photos are assumed valid
+
+ # If we've already marked it as invalid, return False
+ if self.ai_invalid:
+ return False
+
+ try:
+ from PIL import Image
+ import io
+
+ # Load the image data
+ image_data = io.BytesIO(self.data)
+ image = Image.open(image_data)
+
+ # Convert to RGB if needed
+ if image.mode != "RGB":
+ image = image.convert("RGB")
+
+ # Check if image is predominantly black (common NSFW response)
+ pixels = list(image.getdata())
+ black_pixels = sum(1 for r, g, b in pixels if r == 0 and g == 0 and b == 0)
+ total_pixels = len(pixels)
+
+ # If more than 95% black pixels, consider it invalid
+ black_ratio = black_pixels / total_pixels
+ is_valid = black_ratio < 0.95
+
+ # Cache the result
+ if not is_valid:
+ self.ai_invalid = True
+ self.save(update_fields=["ai_invalid"])
+
+ return is_valid
+
+ except Exception: # pylint: disable=broad-except
+ # If we can't analyze the image, assume it's valid
+ return True
+
def import_image(self, service_name, email_address):
"""
Allow to import image from other (eg. Gravatar) service
diff --git a/ivatar/ivataraccount/templates/assign_photo_email.html b/ivatar/ivataraccount/templates/assign_photo_email.html
index 32c13e8..6cdc2ff 100644
--- a/ivatar/ivataraccount/templates/assign_photo_email.html
+++ b/ivatar/ivataraccount/templates/assign_photo_email.html
@@ -78,7 +78,7 @@
{% endif %}
-
+
{% endblock content %}
diff --git a/ivatar/ivataraccount/templates/assign_photo_openid.html b/ivatar/ivataraccount/templates/assign_photo_openid.html
index a062cac..094d10d 100644
--- a/ivatar/ivataraccount/templates/assign_photo_openid.html
+++ b/ivatar/ivataraccount/templates/assign_photo_openid.html
@@ -75,7 +75,7 @@
{% endif %}
-
+
{% endblock content %}
diff --git a/ivatar/ivataraccount/templates/avatar_gallery.html b/ivatar/ivataraccount/templates/avatar_gallery.html
new file mode 100644
index 0000000..dd4bce4
--- /dev/null
+++ b/ivatar/ivataraccount/templates/avatar_gallery.html
@@ -0,0 +1,318 @@
+{% extends 'base.html' %}
+{% load i18n %}
+{% load static %}
+{% load bootstrap4 %}
+
+{% block title %}{% trans 'Avatar Gallery' %}{% endblock %}
+
+{% block extra_css %}
+
+{% endblock %}
+
+{% block content %}
+
+
+ {% trans 'Browse recently generated avatars for inspiration. Click on any avatar to reuse its prompt.' %} +
+ + + {% if user_avatars %} ++ {% trans 'Recently generated avatars from all users (last 30)' %} +
+ + {% if avatars %} ++ {% trans 'No AI-generated avatars have been created yet. Be the first to generate one!' %} +
+ + {% trans 'Generate Avatar' %} + ++ {% trans 'Here\'s your generated avatar. You can refine it or assign it to your email addresses.' %} +
+ + ++ {% trans 'Generated on' %} {{ photo.add_date|date:"F d, Y \a\t H:i" }} +
++ {% trans 'Not satisfied with the result? Modify your description and generate a new avatar.' %} +
+ + ++ {% trans 'Assign this avatar to one or more of your confirmed email addresses.' %} +
+ ++ {% trans 'You need to add and confirm email addresses before you can assign avatars to them.' %} +
+ + {% trans 'Add Email Address' %} + ++ {% trans 'Create a unique avatar using artificial intelligence. Describe what you want and our AI will generate it for you.' %} +
+ ++ "A friendly robot with blue eyes and a silver body, cartoon style" +
++ "Colorful geometric shapes forming a face, modern art style" +
+{% trans 'Prompt:' %}
+{{ task.prompt }}
+{% trans 'Model:' %} {{ task.model|title }}
+{% trans 'Quality:' %} {{ task.quality|title }}
+{% trans 'Status:' %} + + {{ task.get_status_display }} + {% if task.status == 'processing' %} + + {% elif task.status == 'pending' %} + + {% endif %} + + + + Live + +
+{% trans 'Currently Processing' %}
+{% trans 'Pending Tasks' %}
+{% trans 'Max Parallel Jobs' %}
+| {% trans 'Prompt' %} | +{% trans 'Status' %} | +{% trans 'Created' %} | +{% trans 'Actions' %} | +
|---|---|---|---|
| + + {{ user_task.prompt }} + + | ++ + {{ user_task.get_status_display }} + + | +{{ user_task.add_date|date:"M d, H:i" }} | ++ {% if user_task.status == 'completed' and user_task.generated_photo %} + + + + {% elif user_task.status == 'failed' %} + + + + {% elif user_task.status == 'pending' or user_task.status == 'processing' %} + + + + {% endif %} + | +