From 4937eb2d18e3cae60c729b1893fb7debfcb26845 Mon Sep 17 00:00:00 2001 From: AlanRockefeller Date: Sun, 25 Jan 2026 22:30:27 -0800 Subject: [PATCH 1/6] Release 1.5.2 --- ChangeLog.md | 38 + faststack/app.py | 219 +-- faststack/check_scipy.py | 6 + faststack/config.py | 11 +- faststack/error_log.txt | 10 + faststack/imaging/editor.py | 1172 +++++++++++------ faststack/imaging/math_utils.py | 266 ++++ faststack/imaging/optional_deps.py | 8 + faststack/imaging/orientation.py | 81 ++ faststack/imaging/prefetch.py | 94 +- faststack/qml/ImageEditorDialog.qml | 149 ++- faststack/qml/SingleChannelHistogram.qml | 40 +- faststack/result.txt | Bin 0 -> 1548 bytes faststack/test_err.txt | Bin 0 -> 616 bytes faststack/test_manual_output.txt | Bin 0 -> 370 bytes faststack/test_pil_blur.py | 28 + faststack/test_traceback.txt | Bin 0 -> 4990 bytes faststack/tests/manual_test_error_handling.py | 90 ++ faststack/tests/mini_test.py | 47 + faststack/tests/reproduce_exif_bug.py | 58 + faststack/tests/result.txt | Bin 0 -> 1186 bytes faststack/tests/test_cache_invalidation.py | 75 ++ faststack/tests/test_editor.py | 8 + faststack/tests/test_editor_error_handling.py | 60 + faststack/tests/test_exif_compat.py | 214 +-- faststack/tests/test_exif_display_rotation.py | 173 +++ faststack/tests/test_exif_orientation.py | 41 +- faststack/tests/test_fallback_blur.py | 47 + .../tests/test_generation_aware_preview.py | 111 ++ faststack/tests/test_headroom_semantics.py | 61 + faststack/tests/test_highlight_recovery.py | 212 +++ .../test_highlight_state_normalization.py | 63 + .../tests/test_highlights_responsiveness.py | 46 + faststack/tests/test_highlights_v2.py | 93 ++ faststack/tests/test_prefetch_logic.py | 8 +- faststack/tests/test_rolloff.py | 138 +- faststack/tests/test_sensitivity.py | 52 + faststack/tests/test_version_sort.py | 58 + faststack/ui/provider.py | 74 +- pyproject.toml | 2 +- tests/test_highlights_v2.py | 67 + 41 files changed, 3206 insertions(+), 714 deletions(-) create mode 
100644 faststack/check_scipy.py create mode 100644 faststack/error_log.txt create mode 100644 faststack/imaging/math_utils.py create mode 100644 faststack/imaging/optional_deps.py create mode 100644 faststack/imaging/orientation.py create mode 100644 faststack/result.txt create mode 100644 faststack/test_err.txt create mode 100644 faststack/test_manual_output.txt create mode 100644 faststack/test_pil_blur.py create mode 100644 faststack/test_traceback.txt create mode 100644 faststack/tests/manual_test_error_handling.py create mode 100644 faststack/tests/mini_test.py create mode 100644 faststack/tests/reproduce_exif_bug.py create mode 100644 faststack/tests/result.txt create mode 100644 faststack/tests/test_cache_invalidation.py create mode 100644 faststack/tests/test_editor_error_handling.py create mode 100644 faststack/tests/test_exif_display_rotation.py create mode 100644 faststack/tests/test_fallback_blur.py create mode 100644 faststack/tests/test_generation_aware_preview.py create mode 100644 faststack/tests/test_headroom_semantics.py create mode 100644 faststack/tests/test_highlight_recovery.py create mode 100644 faststack/tests/test_highlight_state_normalization.py create mode 100644 faststack/tests/test_highlights_responsiveness.py create mode 100644 faststack/tests/test_highlights_v2.py create mode 100644 faststack/tests/test_sensitivity.py create mode 100644 faststack/tests/test_version_sort.py create mode 100644 tests/test_highlights_v2.py diff --git a/ChangeLog.md b/ChangeLog.md index 633c664..b730572 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -2,6 +2,44 @@ Todo: Make it work on Linux / Mac. Create Windows .exe. Write better documentation / help. Add splash screen / icon. + +## 1.5.2 (2026-01-25) + +#### Added +- **Highlight recovery telemetry + UI indicators** + - New highlight state analysis in the editor pipeline (headroom/clipping/near-white metrics) and a UI signal (`highlightStateChanged`) to keep it live. 
+ - Image editor dialog now shows **Headroom** and **Clipped** indicators under the histogram when relevant. +- **Unified EXIF orientation handling** + - Editor now **bakes EXIF orientation into pixels on load** (Pillow original + float master buffer), and saving **sanitizes Orientation to 1** to prevent double-rotation in viewers. + - Prefetcher now applies EXIF orientation in a **single unified post-decode block**, reusing pre-read EXIF when available. +- **Optional OpenCV dependency support** + - Centralized optional OpenCV usage (`optional_deps`) with fallbacks (e.g., Pillow-based Gaussian blur fallback when OpenCV is unavailable). + - Tests updated to skip/patch appropriately when OpenCV isn’t installed. + +#### Changed +- **Save flow restored to “old behavior”** + - Saving now: **closes editor → clears editor state → refreshes image list → reselects saved image → clears cache/prefetches → syncs UI**. + - Save errors now surface as user-visible status messages with safer exception handling. +- **Histogram rendering and layout improvements** + - Histogram panel height increased; channel labels expanded (Red/Green/Blue); non-minimal histogram display enabled. + - Single-channel histogram drawing now downsamples using **max-pooling** when canvas width is smaller than bin count to reduce aliasing/spikes. +- **Slider double-click reset robustness** + - Reworked double-click-to-reset behavior with better drag-vs-click detection, an explicit reset path, a failsafe timer, and a binding to force value to 0 while resetting. +- **Color/edit pipeline tuning** + - Contrast and saturation slider sensitivity reduced (scaled effect). + - Headroom “safety” shoulder moved to linear space (`_apply_headroom_shoulder`) replacing the old sRGB-side shoulder. + - Auto-levels now kicks the preview worker for immediate visual feedback; histogram updates are guarded by visibility in more places. 
+ +#### Fixed +- **EXIF orientation “double rotation” bugs** + - Saving now consistently drops/sanitizes EXIF when orientation can’t be safely serialized, preventing incorrect viewer rotations. + - Developed JPG sidecar EXIF from a paired JPEG is sanitized for Orientation as well. +- **Prefetch stability under rapid scrolling** + - ImageProvider keepalive queue increased (32 → 128) to reduce crashes from QML/texture lifetime mismatches during thrash. +- **RawTherapee Windows path detection** + - Improved version selection by sorting detected installs via a version-aware path component key (e.g., `5.10` > `5.9`). + + ## [1.5.1] - 2026-01-23 ### Added diff --git a/faststack/app.py b/faststack/app.py index b5b2d0b..8110b40 100644 --- a/faststack/app.py +++ b/faststack/app.py @@ -712,6 +712,7 @@ def sync_ui_state(self): # tell QML that index and image changed self.ui_state.currentIndexChanged.emit() self.ui_state.currentImageSourceChanged.emit() + self.ui_state.highlightStateChanged.emit() # Notify UI of new highlight stats # this is the one your footer needs self.ui_state.metadataChanged.emit() @@ -734,34 +735,19 @@ def sync_ui_state(self): # --- Image Editor Integration --- - @Slot() - def rotate_image_cw(self): - if self.image_editor: - self.image_editor.rotate_image_cw() - self.ui_refresh_generation += 1 - self.ui_state.currentImageSourceChanged.emit() - self.update_histogram() - - @Slot() - def rotate_image_ccw(self): - if self.image_editor: - self.image_editor.rotate_image_ccw() - self.ui_refresh_generation += 1 - self.ui_state.currentImageSourceChanged.emit() - self.update_histogram() - @Slot() - def reset_edit_parameters(self): - if self.image_editor: - self.image_editor.reset_edits() - self.ui_refresh_generation += 1 - self.ui_state.currentImageSourceChanged.emit() - self.update_histogram() - self.update_status_message("Edits reset") @Slot() def save_edited_image(self): - """Saves functionality delegating to ImageEditor.""" + """Saves functionality delegating 
to ImageEditor. + + Restores "Old" behavior: + - Save image + - Close Editor + - Clear Editor State + - Refresh List + - Re-select saved image + """ if not self.image_editor.original_image: return @@ -771,60 +757,67 @@ def save_edited_image(self): if write_sidecar and 0 <= self.current_index < len(self.image_files): dev_path = self.image_files[self.current_index].developed_jpg_path - result = self.image_editor.save_image(write_developed_jpg=write_sidecar, developed_path=dev_path) + try: + result = self.image_editor.save_image(write_developed_jpg=write_sidecar, developed_path=dev_path) + except RuntimeError as e: + self.update_status_message(str(e)) + return + except Exception as e: + log.exception(f"Unexpected error during save: {e}") + self.update_status_message("Failed to save image") + return + if result: saved_path, backup_path = result - # If we overwrote the current file, we need to refresh checks - # But usually we save as a new file or overwrite. - # Use get_active_edit_path logic? ImageEditor handles correct path logic? - # ImageEditor.save_image saves to self.current_filepath + # --- Restore Old Behavior --- - # Invalidate cache for this file - self.display_generation += 1 - self.image_cache.clear() # Brute force clear to ensure fresh load + # 1. Close Editor UI + self.ui_state.isEditorOpen = False + + # 2. Clear Editor State (release memory) + self.image_editor.clear() + + # 3. Refresh List (to see new file or updated timestamp) + self.refresh_image_list() + + # 4. 
Find and Select the saved image + new_index = self.current_index # Default to keeping selection if not found + + # Try to find by exact path match + found_index = -1 + if saved_path: + try: + target_resolve = saved_path.resolve() + for i, img in enumerate(self.image_files): + try: + # Robust path comparison + if img.path.resolve() == target_resolve: + new_index = i + found_index = i + break + except (OSError, RuntimeError): + # Fallback to string compare + if str(img.path) == str(saved_path): + new_index = i + found_index = i + break + except (OSError, RuntimeError): + pass # Keep current selection if resolution fails + + self.current_index = new_index + + # 5. Force UI Sync / Prefetch + self.image_cache.clear() # Clear cache to ensure we reload valid image self.prefetcher.cancel_all() self.prefetcher.update_prefetch(self.current_index) - self.sync_ui_state() + self.update_status_message(f"Image saved") else: self.update_status_message("Failed to save image") - @Slot() - def auto_levels(self): - if not self.image_editor.original_image: - return - - blacks, whites, p_low, p_high = self.image_editor.auto_levels(self.auto_level_threshold) - - # Apply strength if needed (editor auto_levels returns values, doesn't apply them automatically to params?) - # Wait, ImageEditor.auto_levels just CALCULATES. We need to APPLY. - # Let's check ImageEditor.auto_levels signature in editor.py... - # It returns (blacks, whites, p_low, p_high). - - # We need to set them: - range_ = whites - blacks - if range_ < 0.001: range_ = 0.001 - - # Apply strict auto-levels (ignoring strength for now as per simple impl, or use strength?) - # The QML just calls auto_levels() and increments updatePulse. - - # Setup parameters - # Note: 'blacks' and 'whites' in editor params usually map to exposure/contrast or specific black/white point? - # Standard Lightroom-style: - # Blacks: shifts black point. Whites: shifts white point. - # But our ImageEditor might use specific params. 
- # Looking at ImageEditor... it has 'blacks', 'whites' in _apply_edits. - - # We just set the params: - self.image_editor.set_edit_param("blacks", blacks * self.auto_level_strength) - self.image_editor.set_edit_param("whites", whites * self.auto_level_strength) - - self.ui_refresh_generation += 1 - self.ui_state.currentImageSourceChanged.emit() - self.update_histogram() - self.update_status_message("Auto levels applied") + @@ -1746,10 +1739,9 @@ def set_straighten_angle(self, angle: float, target_aspect_ratio: float = -1.0): self.image_editor.set_crop_box((left, top, right, bottom)) log.debug(f"AppController.set_straighten_angle: {angle}") - # Invert angle because QML rotation is CW but PIL rotation (used in editor) handles direction logic internally - # (ImageEditor._apply_edits uses negative angle for PIL). - # We pass the raw angle from QML (degrees CW for UI rotation) to the editor. - # Editor takes care of sign. + # Pass the angle as-is (degrees CW). + # QML rotation is CW-positive. + # ImageEditor expects CW-positive and handles the inversion for PIL internally. self.image_editor.set_edit_param("straighten_angle", angle) # Trigger refresh. Since we are editing, we are viewing the preview. 
@@ -3119,44 +3111,15 @@ def reset_edit_parameters(self): self.image_editor.reset_edits() if hasattr(self.ui_state, 'reset_editor_state'): self.ui_state.reset_editor_state() + + self.update_status_message("Edits reset") # Trigger a refresh to show the reset image self.ui_refresh_generation += 1 self._kick_preview_worker() - - @Slot() - def save_edited_image(self): - """Saves the edited image.""" - save_result = self.image_editor.save_image() - if not save_result: - self.update_status_message("Failed to save image") - log.error("Failed to save edited image") - return - - saved_path, _ = save_result - self.update_status_message(f"Edits saved to {saved_path.name}") - # Clear the image editor state so it will reload fresh next time - self.image_editor.clear() - - # Reset all edit parameters in the controller/UI - self.reset_edit_parameters() - - # Refresh the view - need to refresh image list since backup file was created - original_path = saved_path - self.refresh_image_list() - - # Find the edited image (not the backup) in the refreshed list - for i, img_file in enumerate(self.image_files): - if img_file.path == original_path: - self.current_index = i - break - # Invalidate cache and refresh display - self.display_generation += 1 - self.image_cache.clear() - self.prefetcher.cancel_all() - self.prefetcher.update_prefetch(self.current_index) - self.sync_ui_state() + if self.ui_state.isHistogramVisible: + self.update_histogram() @Slot() @@ -3165,6 +3128,8 @@ def rotate_image_cw(self): current = self.image_editor.current_edits.get('rotation', 0) new_rotation = (current - 90) % 360 self.set_edit_parameter('rotation', new_rotation) + if self.ui_state.isHistogramVisible: + self.update_histogram() @Slot() def rotate_image_ccw(self): @@ -3172,6 +3137,8 @@ def rotate_image_ccw(self): current = self.image_editor.current_edits.get('rotation', 0) new_rotation = (current + 90) % 360 self.set_edit_parameter('rotation', new_rotation) + if self.ui_state.isHistogramVisible: + 
self.update_histogram() @Slot() def toggle_histogram(self): @@ -3360,6 +3327,7 @@ def _apply_histogram_result(self, payload): if hist is not None: if token == self._hist_token: self.ui_state.histogramData = hist + self.ui_state.highlightStateChanged.emit() # If more updates arrived while we computed, run again soon pending = self._hist_pending is not None @@ -3430,7 +3398,13 @@ def _apply_preview_result(self, payload): self._last_rendered_preview = decoded # Ensure QML/provider URL changes so we don't get a cached frame self.ui_refresh_generation += 1 + + # Track exactly what generation/index this preview corresponds to + self._last_rendered_preview_index = self.current_index + self._last_rendered_preview_gen = self.ui_refresh_generation + self.ui_state.currentImageSourceChanged.emit() + self.ui_state.highlightStateChanged.emit() # Trigger histogram update (always call, let update_histogram handle visibility guards) self.update_histogram() @@ -3707,7 +3681,16 @@ def execute_crop(self): self.image_editor.set_edit_param('straighten_angle', current_rotation) # Save via ImageEditor (handles rotation + crop correctly) - save_result = self.image_editor.save_image() + try: + save_result = self.image_editor.save_image() + except RuntimeError as e: + log.warning(f"execute_crop: Save failed: {e}") + self.update_status_message(f"Failed to save cropped image: {e}") + return + except Exception as e: + log.exception(f"execute_crop: Unexpected error during save: {e}") + self.update_status_message("Failed to save cropped image") + return if save_result: saved_path, backup_path = save_result @@ -3851,6 +3834,8 @@ def auto_levels(self): msg = "Auto levels: highlights already clipped; only adjusting shadows" elif p_low <= 0.0: msg = "Auto levels: shadows already clipped; only adjusting highlights" + + self._kick_preview_worker() self.update_status_message(f"{msg} (preview only)") log.info("Auto levels preview applied to %s (clip %.2f%%, str %.2f). 
Msg: %s", @@ -3874,7 +3859,17 @@ def quick_auto_levels(self): # Save import time - save_result = self.image_editor.save_image() + try: + save_result = self.image_editor.save_image() + except RuntimeError as e: + log.warning(f"quick_auto_levels: Save failed: {e}") + self.update_status_message(f"Failed to save image: {e}") + return + except Exception as e: + log.exception(f"quick_auto_levels: Unexpected error during save: {e}") + self.update_status_message("Failed to save image") + return + if save_result: saved_path, backup_path = save_result timestamp = time.time() @@ -3941,7 +3936,17 @@ def quick_auto_white_balance(self): self.auto_white_balance() # Save the edited image (this creates a backup automatically) - save_result = self.image_editor.save_image() + try: + save_result = self.image_editor.save_image() + except RuntimeError as e: + log.warning(f"quick_auto_white_balance: Save failed: {e}") + self.update_status_message(f"Failed to save image: {e}") + return + except Exception as e: + log.exception(f"quick_auto_white_balance: Unexpected error during save: {e}") + self.update_status_message("Failed to save image") + return + if save_result: saved_path, backup_path = save_result # Track this action for undo diff --git a/faststack/check_scipy.py b/faststack/check_scipy.py new file mode 100644 index 0000000..b6a42a7 --- /dev/null +++ b/faststack/check_scipy.py @@ -0,0 +1,6 @@ + +try: + import scipy.ndimage + print("scipy available") +except ImportError: + print("scipy NOT available") diff --git a/faststack/config.py b/faststack/config.py index 6cff384..0fcda7a 100644 --- a/faststack/config.py +++ b/faststack/config.py @@ -6,7 +6,7 @@ import glob import os import re -from pathlib import Path +from pathlib import Path, PureWindowsPath from faststack.logging_setup import get_app_data_dir @@ -36,12 +36,15 @@ def detect_rawtherapee_path(): # Helper to extract version numbers for natural sorting # e.g., "5.10" -> [5, 10] - def natural_sort_key(path): - return [int(c) if 
c.isdigit() else c.lower() for c in re.split(r'(\d+)', path)] + def version_sort_key(path): + for part in reversed(PureWindowsPath(path).parts): + if re.fullmatch(r'\d+(?:\.\d+)*', part): + return [int(n) for n in part.split(".")] + return [0] # Sort matches to try and get the latest version (by path name) # 5.10 > 5.9 - matches.sort(key=natural_sort_key, reverse=True) + matches.sort(key=version_sort_key, reverse=True) return matches[0] except Exception as e: log.warning(f"Error detecting RawTherapee path: {e}") diff --git a/faststack/error_log.txt b/faststack/error_log.txt new file mode 100644 index 0000000..941b39e --- /dev/null +++ b/faststack/error_log.txt @@ -0,0 +1,10 @@ +ERROR in test_raw_mode_exif_preservation (tests.test_exif_orientation.TestExifOrientation.test_raw_mode_exif_preservation): +Traceback (most recent call last): + File "C:\code\faststack\faststack\tests\test_exif_orientation.py", line 129, in test_raw_mode_exif_preservation + with Image.open(developed_path) as dev: + ^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "C:\Users\alanr\AppData\Local\Programs\Python\Python312\Lib\site-packages\PIL\Image.py", line 3512, in open + fp = builtins.open(filename, "rb") + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\alanr\\AppData\\Local\\Temp\\tmph5g2del1\\working_source-developed.jpg' + diff --git a/faststack/imaging/editor.py b/faststack/imaging/editor.py index 7b04451..9f9a72e 100644 --- a/faststack/imaging/editor.py +++ b/faststack/imaging/editor.py @@ -7,19 +7,32 @@ from pathlib import Path from typing import Optional, Dict, Any, Tuple import numpy as np -from PIL import Image, ImageEnhance, ImageFilter +from PIL import Image, ImageEnhance, ImageFilter, ImageOps, ExifTags from io import BytesIO - from faststack.models import DecodedImage +from faststack.imaging.math_utils import ( + _srgb_to_linear, + _linear_to_srgb, + _smoothstep01, + _apply_headroom_shoulder, + _analyze_highlight_state, + _lerp, + 
_highlight_recover_linear, + _highlight_boost_linear, +) +from faststack.imaging.orientation import get_exif_orientation, apply_orientation_to_np + try: from PySide6.QtGui import QImage except ImportError: QImage = None +from faststack.imaging.optional_deps import cv2, HAS_OPENCV + import threading -import cv2 + log = logging.getLogger(__name__) @@ -32,31 +45,54 @@ "9:16 (Story)": (9, 16), } + +def sanitize_exif_orientation(exif_bytes: bytes | None) -> bytes | None: + """ + Parses EXIF bytes and resets Orientation to 1 (Normal). + Returns cleaned bytes or None if parsing/sanitizing fails. + """ + if not exif_bytes: + return None + try: + exif = Image.Exif() + exif.load(exif_bytes) + # Pillow 9.1.0+ has ExifTags.Base.Orientation, fallback to 0x0112 if needed + orientation_tag = getattr(ExifTags.Base, 'Orientation', 0x0112) + exif[orientation_tag] = 1 + return exif.tobytes() + except Exception: + # If we can't parse/sanitize, safest is to drop EXIF to avoid rotation bugs + return None + + + def create_backup_file(original_path: Path) -> Optional[Path]: """ Creates a backup of the original file with naming pattern: filename-backup.jpg, filename-backup2.jpg, etc. - + Returns: Path to the backup file on success, None on failure. """ if not original_path.exists(): return None - + # Extract base name without any existing -backup suffix stem = original_path.stem # Remove any existing -backup, -backup2, -backup-1, etc. (handles both old and new formats) - base_stem = re.sub(r'-backup(-?\d+)?$', '', stem) - + base_stem = re.sub(r"-backup(-?\d+)?$", "", stem) + # Try filename-backup.jpg first backup_path = original_path.parent / f"{base_stem}-backup{original_path.suffix}" - + # If that exists, try filename-backup2.jpg, filename-backup3.jpg, etc. 
i = 2 while backup_path.exists(): - backup_path = original_path.parent / f"{base_stem}-backup{i}{original_path.suffix}" + backup_path = ( + original_path.parent / f"{base_stem}-backup{i}{original_path.suffix}" + ) i += 1 - + try: # Perform the backup shutil.copy2(original_path, backup_path) @@ -65,82 +101,94 @@ def create_backup_file(original_path: Path) -> Optional[Path]: log.exception(f"Failed to create backup: {e}") return None + # ---------------------------- # sRGB ↔ Linear Conversion Helpers # ---------------------------- -def _srgb_to_linear(x: np.ndarray) -> np.ndarray: - """Convert sRGB values to linear light. - - Preserves headroom (values > 1.0) for highlight recovery. - Clamps negatives to 0 since the power function requires non-negative input. - """ - # Clamp negatives to 0, but preserve values > 1.0 for headroom - x = np.clip(x, 0.0, None) - a = 0.055 - # Apply the standard sRGB transfer function - works for values > 1.0 too - return np.where(x <= 0.04045, x / 12.92, ((x + a) / (1.0 + a)) ** 2.4) +# Constants for Highlight Recovery -def _linear_to_srgb(x: np.ndarray) -> np.ndarray: - """Convert linear light values to sRGB (0-1).""" - x = np.clip(x, 0.0, None) - a = 0.055 - return np.where(x <= 0.0031308, 12.92 * x, (1.0 + a) * (x ** (1.0 / 2.4)) - a) +# Highlight Compression Curve +HEADROOM_COMPRESSION_STEEPNESS = 2.0 +# Adaptive Parameters (tuned by image content analysis) +# Pivot: Brightness threshold where recovery starts +ADAPTIVE_PIVOT_MIN = 0.45 +ADAPTIVE_PIVOT_MAX = 0.65 -def _apply_soft_shoulder(x: np.ndarray, threshold: float = 0.9) -> np.ndarray: - """Applies a tone-mapping shoulder to roll off highlights above the threshold. - - This prevents hard clipping by compressing the range [threshold, inf) into [threshold, 1.0). - The function is monotonic and smooth. 
- """ - if threshold >= 1.0: - return x - - # We only apply the shoulder to values above the threshold - mask = x > threshold - if not np.any(mask): - return x - - # Scale and compress: 1 - exp(-(x - threshold) / (1 - threshold)) - # This maps [threshold, inf) to [threshold, threshold + (1 - threshold)) = [threshold, 1.0) - # The derivative at threshold is 1.0, matching the linear part. - scaled = (x[mask] - threshold) / (1.0 - threshold) - compressed = threshold + (1.0 - threshold) * (1.0 - np.exp(-scaled)) - - out = x.copy() - out[mask] = compressed - return out +# K Factor: Steepness of the compression shoulder +ADAPTIVE_K_BASE = 8.0 +ADAPTIVE_K_SCALING = 6.0 +ADAPTIVE_K_HEADROOM_BASE = 6.0 +ADAPTIVE_K_HEADROOM_SCALING = 8.0 +# Chroma Rolloff: Desaturation in extreme highlights +ADAPTIVE_ROLLOFF_MIN = 0.10 +ADAPTIVE_ROLLOFF_MAX = 0.30 -def _smoothstep01(x: np.ndarray) -> np.ndarray: - """Hermite smoothstep: 0 at x<=0, 1 at x>=1, smooth S-curve between.""" - x = np.clip(x, 0.0, 1.0) - return x * x * (3.0 - 2.0 * x) +# Analysis Safety +HEADROOM_MAX_BRIGHTNESS_PERCENTILE = 99.5 def _gaussian_blur_float(arr: np.ndarray, radius: float) -> np.ndarray: """Apply Gaussian Blur to a float32 array using OpenCV. - + Preserves values outside [0, 1] range. """ if radius <= 0: return arr - + + if cv2 is None: + # Fallback: Use Pillow's GaussianBlur in 'F' mode (float32) per channel + # This preserves values > 1.0 (headroom) which is critical for highlight recovery. 
+ try: + h, w, c = arr.shape + blurred_channels = [] + + # Process each channel independently + for i in range(c): + ch_data = arr[:, :, i] + # Scale float range to uint8 to allow Pillow filters (they don't support 'F' mode) + # We scale the actual range [min, max] (but at least [0, 1]) to [0, 255] + mx = max(1.0, float(ch_data.max())) + mn = min(0.0, float(ch_data.min())) + scale = mx - mn + + if scale > 0: + ch_u8 = ((ch_data - mn) / scale * 255).astype(np.uint8) + ch_img = Image.fromarray(ch_u8, mode='L') + # Pillow's GaussianBlur radius is roughly comparable to OpenCV sigma + blurred_ch_img = ch_img.filter(ImageFilter.GaussianBlur(radius=radius)) + # Scale back to original float range + blurred_ch = np.array(blurred_ch_img).astype(np.float32) / 255.0 * scale + mn + blurred_channels.append(blurred_ch) + else: + blurred_channels.append(ch_data.copy()) + + # Stack back into (H, W, C) + return np.stack(blurred_channels, axis=-1) + + except Exception as e: + log.warning(f"Fallback blur failed: {e}") + return arr + # Sigma calculation matching Pillow's radius-to-sigma # Radius in Pillow is the radius of the kernel, sigma is approx radius / 2 # OpenCV's GaussianBlur takes sigma. sigma = radius / 2.0 - + # We use (0, 0) for ksize to let OpenCV calculate it based on sigma - return cv2.GaussianBlur(arr, (0, 0), sigmaX=sigma, sigmaY=sigma, borderType=cv2.BORDER_REFLECT) + return cv2.GaussianBlur( + arr, (0, 0), sigmaX=sigma, sigmaY=sigma, borderType=cv2.BORDER_REFLECT + ) # ---------------------------- # Rotate + Autocrop helper # ---------------------------- + def _rotated_rect_with_max_area(w: int, h: int, angle_rad: float) -> tuple[int, int]: """ Largest axis-aligned rectangle within a w x h rectangle rotated by angle_rad. 
@@ -186,7 +234,9 @@ def _rotated_rect_with_max_area(w: int, h: int, angle_rad: float) -> tuple[int, return cw, ch -def rotate_autocrop_rgb(img: Image.Image, angle_deg: float, inset: int = 2) -> Image.Image: +def rotate_autocrop_rgb( + img: Image.Image, angle_deg: float, inset: int = 2 +) -> Image.Image: """ Rotate by any angle and then crop to the largest axis-aligned rectangle that contains ONLY valid pixels (no wedges). Works for large angles. @@ -243,6 +293,7 @@ def rotate_autocrop_rgb(img: Image.Image, angle_deg: float, inset: int = 2) -> I class ImageEditor: """Handles core image manipulation using PIL.""" + def __init__(self): # Stores the currently loaded PIL Image object (original) self.original_image: Optional[Image.Image] = None @@ -250,24 +301,44 @@ def __init__(self): self.float_image: Optional[np.ndarray] = None # Float32 normalized preview image self.float_preview: Optional[np.ndarray] = None - + # Stores the currently applied edits (used for preview) self.current_edits: Dict[str, Any] = self._initial_edits() self.current_filepath: Optional[Path] = None - + # Caching support for smooth updates self._lock = threading.RLock() self._edits_rev = 0 self._cached_rev = -1 self._cached_preview = None - + # Bit depth of the loaded image (8 or 16) self.bit_depth: int = 8 - + # Cached EXIF bytes from original source (e.g., paired JPEG for RAW mode) # Used to preserve camera metadata when saving developed JPGs self._source_exif_bytes: Optional[bytes] = None - + + # Last computed highlight state for UI display (thread-safe read via property) + self._last_highlight_state: Optional[Dict[str, float]] = None + + # Timestamp of the currently loaded file (for cache invalidation) + self.current_mtime: float = 0.0 + + # Caching for expensive percentile calculation in highlight recovery + # Stores: {'rev': int, 'max_brightness': float} + # We rely on _edits_rev to invalidate, but strictly we also need to check if + # edits that affect 'upstream' data (exposure, wb, crop) 
have changed vs just 'highlights' slider. + # For simplicity/robustness, we just cache per full edit revision + a check on upstream params? + # Actually, simpler: just cache the result for a given (image_id/path) + (upstream_params_hash). + # But wait, self._edits_rev increments on ANY edit. + # If I change "highlights" slider, _edits_rev increments. + # But input to _apply_highlights_shadows depends on Exposure, WB, etc. + # So if I only change Highlights, the input ARR is largely same (ignoring previous stages being re-run). + # We need to cache the 'max_brightness' of 'arr' entering the function. + self._cached_max_brightness_state: Optional[Dict[str, Any]] = None + self._cached_highlight_analysis: Optional[Dict[str, Any]] = None + def clear(self): """Clear all editor state so the next edit starts from a clean slate.""" with self._lock: @@ -280,12 +351,14 @@ def clear(self): self._cached_rev = -1 self.bit_depth = 8 self._source_exif_bytes = None + self._last_highlight_state = None # Explicit reset + self._cached_highlight_analysis = None # Optionally also reset edits if that matches your mental model: # self.current_edits = self._initial_edits() def set_source_exif(self, exif_bytes: Optional[bytes]): """Store EXIF bytes from the original source (e.g., paired JPEG). - + Call this when switching to RAW mode to preserve camera metadata in the developed JPG output. 
""" @@ -299,29 +372,34 @@ def reset_edits(self): def _initial_edits(self) -> Dict[str, Any]: return { - 'brightness': 0.0, - 'contrast': 0.0, - 'saturation': 0.0, - 'white_balance_by': 0.0, # Blue/Yellow (Cool/Warm) - 'white_balance_mg': 0.0, # Magenta/Green (Tint) - 'crop_box': None, # (left, top, right, bottom) normalized to 0-1000 - 'sharpness': 0.0, - 'rotation': 0, - 'exposure': 0.0, - 'highlights': 0.0, - 'shadows': 0.0, - 'vibrance': 0.0, - 'vignette': 0.0, - 'blacks': 0.0, - 'whites': 0.0, - 'clarity': 0.0, - 'texture': 0.0, - 'straighten_angle': 0.0, + "brightness": 0.0, + "contrast": 0.0, + "saturation": 0.0, + "white_balance_by": 0.0, # Blue/Yellow (Cool/Warm) + "white_balance_mg": 0.0, # Magenta/Green (Tint) + "crop_box": None, # (left, top, right, bottom) normalized to 0-1000 + "sharpness": 0.0, + "rotation": 0, + "exposure": 0.0, + "highlights": 0.0, + "shadows": 0.0, + "vibrance": 0.0, + "vignette": 0.0, + "blacks": 0.0, + "whites": 0.0, + "clarity": 0.0, + "texture": 0.0, + "straighten_angle": 0.0, } - def load_image(self, filepath: str, cached_preview: Optional[DecodedImage] = None, source_exif: Optional[bytes] = None): + def load_image( + self, + filepath: str, + cached_preview: Optional[DecodedImage] = None, + source_exif: Optional[bytes] = None, + ): """Load a new image for editing. 
- + Args: filepath: Path to the image file cached_preview: Optional byte-buffer for faster initial display @@ -337,61 +415,73 @@ def load_image(self, filepath: str, cached_preview: Optional[DecodedImage] = Non self._edits_rev += 1 self._cached_preview = None self._cached_rev = -1 + log.error(f"Image file not found: {filepath}") return False - + load_filepath = Path(filepath) - + try: + new_mtime = load_filepath.stat().st_mtime + except OSError: + new_mtime = 0.0 + with self._lock: # Clear previous cached EXIF and set new one if provided + self.current_mtime = new_mtime self._source_exif_bytes = source_exif - + try: # We must load and close the original file handle immediately with Image.open(load_filepath) as im: # Keep original PIL for EXIF/Format preservation loaded_original = im.copy() - + # --- Convert to Float32 --- # Use OpenCV for reliable 16-bit loading as Pillow often downsamples to 8-bit RGB - import cv2 - - # Use IMREAD_UNCHANGED to preserve bit depth - # Note: OpenCV loads as BGR by default - cv_img = cv2.imread(str(load_filepath), cv2.IMREAD_UNCHANGED) - + if cv2 is None: + log.warning( + "OpenCV not installed, falling back to Pillow (may lose 16-bit depth)" + ) + cv_img = None + else: + # Use IMREAD_UNCHANGED to preserve bit depth + # Note: OpenCV loads as BGR by default + cv_img = cv2.imread(str(load_filepath), cv2.IMREAD_UNCHANGED) + # Robust validation: cv2.imread can return None or an empty/invalid array cv_img_valid = ( cv_img is not None and isinstance(cv_img, np.ndarray) and cv_img.size > 0 ) - + loaded_bit_depth = 8 loaded_float_image = None - + if cv_img_valid and cv_img.dtype == np.uint16: loaded_bit_depth = 16 # Normalize 0-65535 -> 0.0-1.0 arr = cv_img.astype(np.float32) / 65535.0 - + # Handle channels if len(arr.shape) == 2: # Grayscale -> RGB - arr = np.stack((arr,)*3, axis=-1) + arr = np.stack((arr,) * 3, axis=-1) elif len(arr.shape) == 3 and arr.shape[2] == 3: - # BGR -> RGB (OpenCV default) - # Note: If IMREAD_UNCHANGED loads a TIFF, 
it *might* be RGB depending on backend (libtiff). - # But consistently OpenCV uses BGR layout for 3-channel images. - # Let's verify by assuming BGR and swapping. - arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB) + # BGR -> RGB (OpenCV default) + # Note: If IMREAD_UNCHANGED loads a TIFF, it *might* be RGB depending on backend (libtiff). + # But consistently OpenCV uses BGR layout for 3-channel images. + # Let's verify by assuming BGR and swapping. + arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB) else: # Invalid channel count, fall back to Pillow cv_img_valid = False loaded_bit_depth = 8 rgb = loaded_original.convert("RGB") arr = np.array(rgb).astype(np.float32) / 255.0 - log.warning(f"OpenCV loaded unexpected channel count, falling back to Pillow: {load_filepath}") - + log.warning( + f"OpenCV loaded unexpected channel count, falling back to Pillow: {load_filepath}" + ) + loaded_float_image = arr log.info(f"Loaded 16-bit image via OpenCV: {load_filepath}") else: @@ -401,23 +491,52 @@ def load_image(self, filepath: str, cached_preview: Optional[DecodedImage] = Non loaded_float_image = np.array(rgb).astype(np.float32) / 255.0 log.info(f"Loaded 8-bit image via Pillow: {load_filepath}") + # --- Apply EXIF Orientation --- + # Read orientation from the loaded original image's EXIF + orientation = get_exif_orientation( + load_filepath, exif=loaded_original.getexif() + ) + if orientation > 1: + log.info(f"Applying EXIF orientation {orientation} to {load_filepath}") + # 1. Correct the Pillow original (used for metadata/export fallback) + loaded_original = ImageOps.exif_transpose(loaded_original) + + # 2. 
Correct the float master buffer + if loaded_float_image is not None: + loaded_float_image = apply_orientation_to_np( + loaded_float_image, orientation + ) + # --- Create Float Preview --- # Use the cached, display-sized preview if available to speed up if cached_preview: # cached_preview.buffer is uint8 - preview_arr = np.frombuffer(cached_preview.buffer, dtype=np.uint8).reshape( - (cached_preview.height, cached_preview.width, 3) - ) + preview_arr = np.frombuffer( + cached_preview.buffer, dtype=np.uint8 + ).reshape((cached_preview.height, cached_preview.width, 3)) + + # IMPORTANT: The cached_preview coming from the viewer is already "cooked" + # (it has Color Management / Saturation applied). + # Our editor expects a "raw" float buffer (non-managed) as its starting point for _apply_edits. + # To prevent a color "pop" when edits start, we have two choices: + # 1. "Un-cook" the preview (expensive/inaccurate). + # 2. Use the cooked preview for the VERY FIRST frame, but immediately + # re-render from the master float_image in the background. + # Since we already apply EXIF orientation to the master float_image above, + # we should also ensure the preview_arr matches orientation if it doesn't already. + # Generally, the Prefetcher already applies orientation to the cached preview. loaded_float_preview = preview_arr.astype(np.float32) / 255.0 else: - # Downscale from float_image - # Simple striding for speed or creating a PIL thumbnail from original? - # PIL thumbnail is faster and better quality usually. + # Downscale from float_image (which now has orientation applied) thumb = loaded_original.copy() thumb.thumbnail((1920, 1080)) thumb_rgb = thumb.convert("RGB") loaded_float_preview = np.array(thumb_rgb).astype(np.float32) / 255.0 + # If we applied orientation to the original, the thumbnail will already be correct + # because we derived it from loaded_original AFTER exif_transpose. + # If we derived from cached_preview, we might still need to apply orientation. 
def _rotate_float_image(
    self, img_arr: np.ndarray, angle_deg: float, expand: bool = False
) -> np.ndarray:
    """Rotate a float32 RGB image by ``angle_deg`` degrees (CCW, PIL convention).

    Each channel is wrapped in a PIL 'F' (float32) image so that precision and
    headroom (values > 1.0) survive the rotation; a uint8 round trip would
    quantize them away.

    Args:
        img_arr: Float32 array (H, W, C).
        angle_deg: Rotation angle in degrees; |angle| < 0.01 is a no-op.
        expand: If True, the output canvas grows to contain the whole
            rotated image (corners included).

    Returns:
        Rotated float32 array (H', W', C); pixels outside the source are
        filled with 0.0.
    """
    # Near-zero angles: skip entirely (also avoids needless resampling blur).
    if abs(angle_deg) < 0.01:
        return img_arr

    num_channels = img_arr.shape[2]
    channels = []
    for i in range(num_channels):
        # Convert channel to a PIL float image so rotate() keeps float precision.
        im_c = Image.fromarray(img_arr[:, :, i], mode="F")
        rot_c = im_c.rotate(
            angle_deg,
            resample=Image.Resampling.BICUBIC,
            expand=expand,
            fillcolor=0.0,
        )
        channels.append(rot_c)

    # All channels are rotated identically, so their sizes match and we can
    # stack straight back into (H, W, C). (A previous version computed an
    # unused `nw, nh = channels[0].size` here; it has been removed.)
    return np.stack([np.array(ch) for ch in channels], axis=-1)
Optional[Dict[str, Any]] = None, + *, + for_export: bool = False, + ) -> np.ndarray: """Applies all current edits to the provided float32 numpy array. - Returns float32 array (H, W, 3). + Returns float32 array (H, W, 3). """ if edits is None: edits = self.current_edits - arr = img_arr # Alias + is_export = for_export + # Alias + arr = img_arr + + # NOTE: For UI analysis, we want to capture the state AFTER White Balance and Exposure + # but BEFORE Highlights/Shadows/ToneMapping, so the indicators reflect the + # "available headroom" and "current clipping" accurately for the recovery tools. # 1. Rotation (90 degree steps) # np.rot90 rotates 90 degrees CCW k times. - rotation = edits.get('rotation', 0) + rotation = edits.get("rotation", 0) k = (rotation // 90) % 4 if k > 0: # np.rot90 rotates first two axes by default (rows, cols) arr = np.rot90(arr, k=k) # 2. Straighten (Free Rotation) - straighten_angle = float(edits.get('straighten_angle', 0.0)) - has_crop_box = 'crop_box' in edits and edits.get('crop_box', 0.0) + straighten_angle = float(edits.get("straighten_angle", 0.0)) + has_crop_box = "crop_box" in edits and edits.get("crop_box", 0.0) # Apply rotation if significant # During preview (for_export=False), we might skip this if QML handles visuals, @@ -501,41 +636,41 @@ def _apply_edits(self, img_arr: np.ndarray, edits: Optional[Dict[str, Any]] = No # The previous code skipped it if NOT for_export? # "Only apply rotation if... and we are exporting" was the comment. implies preview logic handles it. # However, for accurate cropping, we need to rotate. - + apply_rotation = abs(straighten_angle) > 0.001 and (for_export or has_crop_box) - + if apply_rotation: - # Use the float rotation helper - # Note: rotate_autocrop_rgb logic was complex. - # If we have crop box, we manually crop later. - # If no crop box, we might auto-crop (remove wedges). - # For floating point, standard 'expand' rotation + manual crop is best. 
- - # Calculate auto-crop parameters BEFORE rotation if needed - crop_rect = None - if not has_crop_box: - h, w = arr.shape[:2] - # Normalize angle for helper (helper expects radians, handles quadrants but ensuring positive can help) - angle_rad = math.radians(straighten_angle) - # Helper logic for crop size - cw, ch = _rotated_rect_with_max_area(w, h, angle_rad) - crop_rect = (cw, ch) - - # Perform rotation (Expanded) - arr = self._rotate_float_image(arr, -straighten_angle, expand=True) - - # Apply Auto-Crop if calculated - if crop_rect: + # Use the float rotation helper + # Note: rotate_autocrop_rgb logic was complex. + # If we have crop box, we manually crop later. + # If no crop box, we might auto-crop (remove wedges). + # For floating point, standard 'expand' rotation + manual crop is best. + + # Calculate auto-crop parameters BEFORE rotation if needed + crop_rect = None + if not has_crop_box: + h, w = arr.shape[:2] + # Normalize angle for helper (helper expects radians, handles quadrants but ensuring positive can help) + angle_rad = math.radians(straighten_angle) + # Helper logic for crop size + cw, ch = _rotated_rect_with_max_area(w, h, angle_rad) + crop_rect = (cw, ch) + + # Perform rotation (Expanded) + arr = self._rotate_float_image(arr, -straighten_angle, expand=True) + + # Apply Auto-Crop if calculated + if crop_rect: cw, ch = crop_rect # Center crop on the new expanded image rh, rw = arr.shape[:2] cx, cy = rw / 2.0, rh / 2.0 - + left = round(cx - cw / 2.0) top = round(cy - ch / 2.0) right = left + cw bottom = top + ch - + # Apply inset (2px) to match legacy behavior and avoid edge artifacts inset = 2 if (right - left) > 2 * inset and (bottom - top) > 2 * inset: @@ -543,18 +678,18 @@ def _apply_edits(self, img_arr: np.ndarray, edits: Optional[Dict[str, Any]] = No top += inset right -= inset bottom -= inset - + # Clamp left = max(0, min(rw - 1, left)) top = max(0, min(rh - 1, top)) right = max(left + 1, min(rw, right)) bottom = max(top + 1, min(rh, 
bottom)) - + arr = arr[top:bottom, left:right, :] # 3. Crop if has_crop_box: - crop_box = edits.get('crop_box', 0.0) + crop_box = edits.get("crop_box", 0.0) if len(crop_box) == 4: # 0-1000 relative to current size h, w = arr.shape[:2] @@ -571,112 +706,196 @@ def _apply_edits(self, img_arr: np.ndarray, edits: Optional[Dict[str, Any]] = No if r > left and b > t: arr = arr[t:b, left:r, :] - # --- Color Pipeline (Linear / Float) --- - # 4. Conversion to Linear Light - # We convert once and do most color work in linear space. + # Cache sRGB u8 BEFORE linearization for accurate JPEG clipping detection. + # JPEG clipping happens in sRGB after gamma/quantization, so we need the + # original sRGB values to detect flat-top clipping correctly. + # MOVED to after WB/Exposure so indicators reflect current pipeline state. + + # Capture strided view for analysis ONLY if needed + # We need analysis if: + # 1. We are in preview (not for_export) -> To show UI indicators. + # 2. OR if we have highlights/shadows active -> To drive adaptive params. + + highlights = float(edits.get("highlights", 0.0)) + shadows = float(edits.get("shadows", 0.0)) + should_analyze = (not for_export) or ( + abs(highlights) > 0.001 or abs(shadows) > 0.001 + ) + + arr_stride = None + srgb_u8_stride = None + analysis_state = None + + if should_analyze: + # Capture strided view for analysis + arr_stride = arr[::4, ::4, :] + if cv2 is not None: + # cv2.convertScaleAbs is very fast for saturation casting [0,1]*255 to uint8 + srgb_u8_stride = cv2.convertScaleAbs(arr_stride, alpha=255.0) + else: + srgb_u8_stride = (np.clip(arr_stride, 0.0, 1.0) * 255).astype(np.uint8) + arr = _srgb_to_linear(arr) # 5. 
White Balance (Multipliers in Linear Space) - by = edits.get('white_balance_by', 0.0) * 0.5 - mg = edits.get('white_balance_mg', 0.0) * 0.5 + by = edits.get("white_balance_by", 0.0) * 0.5 + mg = edits.get("white_balance_mg", 0.0) * 0.5 if abs(by) > 0.001 or abs(mg) > 0.001: - r_gain = 1.0 + by - b_gain = 1.0 - by - g_gain = 1.0 - mg - arr[:,:,0] *= r_gain - arr[:,:,1] *= g_gain - arr[:,:,2] *= b_gain + r_gain = 1.0 + by + b_gain = 1.0 - by + g_gain = 1.0 - mg + arr[:, :, 0] *= r_gain + arr[:, :, 1] *= g_gain + arr[:, :, 2] *= b_gain + + # --- Analyzed Highlight State (Post-WB, Pre-Exposure) --- + # Capture pre-exposure linear state for "True Headroom" calculation + pre_exposure_linear_stride = None + if should_analyze: + pre_exposure_linear_stride = arr[::4, ::4, :] # 6. Exposure (Linear Gain for True Headroom) - exposure = edits.get('exposure', 0.0) + exposure = edits.get("exposure", 0.0) if abs(exposure) > 0.001: - # EV units: 2^exposure - gain = 2.0 ** exposure - arr = arr * gain + # EV units: 2^exposure + gain = 2.0**exposure + arr = arr * gain + + # --- Analyzed Highlight State (Post-Exposure, Pre-Recovery) --- + # We do this UNCONDITIONALLY for display so UI indicators are live. + # We use the current linear array 'arr' which now includes WB and Exposure. + # We pass srgb_u8=None to force using linear thresholds on the current data (or pre-exposure data if passed). 
+ + if should_analyze: + # Check cache for analysis state to avoid expensive re-computation on downstream edits + upstream_hash = self._get_upstream_edits_hash(edits) + + cached_analysis = None + with self._lock: + if self._cached_highlight_analysis and self._cached_highlight_analysis['hash'] == upstream_hash: + cached_analysis = self._cached_highlight_analysis['state'] + + if cached_analysis: + analysis_state = cached_analysis + else: + # Use strided views for speed (re-stride linear if it changed, but usually we just want current) + arr_linear_stride = arr[::4, ::4, :] + # Pass the srgb_u8_stride captured BEFORE linearization for true JPEG clipping detection + # Pass pre_exposure_linear_stride to measure "True Headroom" before exposure boost + # arr_linear_stride is "Current State" (Post-WB, Post-Exposure) + analysis_state = _analyze_highlight_state( + arr_linear_stride, + srgb_u8=srgb_u8_stride, # Source (Pre-Edit) State + pre_exposure_linear=pre_exposure_linear_stride + ) + + with self._lock: + self._cached_highlight_analysis = { + 'hash': upstream_hash, + 'state': analysis_state + } - # 7. Highlights/Shadows - Using linear light and luminance-based processing - highlights = float(edits.get('highlights', 0.0)) - shadows = float(edits.get('shadows', 0.0)) + if not for_export: + with self._lock: + self._last_highlight_state = analysis_state + + # 7. Highlights/Shadows - Using linear light and brightness-based processing if abs(highlights) > 0.001 or abs(shadows) > 0.001: - arr = self._apply_highlights_shadows(arr, highlights, shadows) + arr = self._apply_highlights_shadows( + arr, + highlights, + shadows, + srgb_u8_stride=srgb_u8_stride, # Pass if we need to recompute analysis + analysis_state=analysis_state, + edits=edits, + ) # 8. 
Clarity (Local Contrast) - clarity = edits.get('clarity', 0.0) + clarity = edits.get("clarity", 0.0) if abs(clarity) > 0.001: - # Apply in linear space, preservation of highlights - blurred_arr = _gaussian_blur_float(arr, radius=20.0) - - # Apply: (original - blurred) is high pass. - # mask = midtones - # mean = axis 2 - gray = arr.mean(axis=2, keepdims=True) - midtone_mask = 1.0 - np.abs(np.clip(gray, 0, 1) - 0.5) * 2.0 - - local_contrast = (arr - blurred_arr) * clarity * midtone_mask - arr += local_contrast + # Apply in linear space, preservation of highlights + blurred_arr = _gaussian_blur_float(arr, radius=20.0) + + # Apply: (original - blurred) is high pass. + # mask = midtones + # mean = axis 2 + gray = arr.mean(axis=2, keepdims=True) + midtone_mask = 1.0 - np.abs(np.clip(gray, 0, 1) - 0.5) * 2.0 + + local_contrast = (arr - blurred_arr) * clarity * midtone_mask + arr += local_contrast # 9. Texture (Fine Detail) - texture = edits.get('texture', 0.0) + texture = edits.get("texture", 0.0) if abs(texture) > 0.001: - # Small radius for texture/fine detail - blurred_arr = _gaussian_blur_float(arr, radius=3.0) - high_pass = arr - blurred_arr - arr += high_pass * texture + # Small radius for texture/fine detail + blurred_arr = _gaussian_blur_float(arr, radius=3.0) + high_pass = arr - blurred_arr + arr += high_pass * texture # 10. Sharpness - sharpness = edits.get('sharpness', 0.0) + sharpness = edits.get("sharpness", 0.0) if abs(sharpness) > 0.001: - # Unsharp mask with radius ~1.0 - blurred_arr = _gaussian_blur_float(arr, radius=1.0) - high_pass = arr - blurred_arr - arr += high_pass * sharpness + # Unsharp mask with radius ~1.0 + blurred_arr = _gaussian_blur_float(arr, radius=1.0) + high_pass = arr - blurred_arr + arr += high_pass * sharpness + + # 11. Global Headroom Shoulder (safety net for values > 1.0) + # This ONLY affects values above 1.0, compressing headroom smoothly. + # It does NOT interfere with normal highlight slider work below 1.0. 
+ # Applied here in linear space before gamma conversion. + # Use small max_overshoot (0.05) to keep values very close to 1.0 + arr = _apply_headroom_shoulder(arr, max_overshoot=0.05) # --- Conversion back to sRGB --- arr = _linear_to_srgb(arr) # 11. Brightness / Contrast (sRGB Space) # 7. Brightness - b_val = edits.get('brightness', 0.0) + b_val = edits.get("brightness", 0.0) if abs(b_val) > 0.001: factor = 1.0 + b_val arr = arr * factor # 8. Contrast - c_val = edits.get('contrast', 0.0) + c_val = edits.get("contrast", 0.0) if abs(c_val) > 0.001: - factor = 1.0 + c_val + # Scale effect to reduce sensitivity (0.4x) + factor = 1.0 + c_val * 0.4 arr = (arr - 0.5) * factor + 0.5 # 12. Saturation / Vibrance (sRGB Space) # 10. Saturation - sat_val = edits.get('saturation', 0.0) + sat_val = edits.get("saturation", 0.0) if abs(sat_val) > 0.001: - factor = 1.0 + sat_val - gray = arr.dot([0.299, 0.587, 0.114]) - gray = np.expand_dims(gray, axis=2) - arr = gray + (arr - gray) * factor + # Scale effect to reduce sensitivity (0.5x) + factor = 1.0 + sat_val * 0.5 + gray = arr.dot([0.299, 0.587, 0.114]) + gray = np.expand_dims(gray, axis=2) + arr = gray + (arr - gray) * factor # 12. 
Vibrance (Smart Saturation) - vibrance = edits.get('vibrance', 0.0) + vibrance = edits.get("vibrance", 0.0) if abs(vibrance) > 0.001: - cmax = arr.max(axis=2) - cmin = arr.min(axis=2) - delta = cmax - cmin - sat = np.zeros_like(cmax) - mask = cmax > 0.0001 - sat[mask] = delta[mask] / cmax[mask] - - sat_mask = np.clip(1.0 - sat, 0.0, 1.0) - factor = 1.0 + vibrance * sat_mask - - gray = arr.dot([0.299, 0.587, 0.114]) - gray = np.expand_dims(gray, axis=2) - arr = gray + (arr - gray) * np.expand_dims(factor, axis=2) + cmax = arr.max(axis=2) + cmin = arr.min(axis=2) + delta = cmax - cmin + sat = np.zeros_like(cmax) + mask = cmax > 0.0001 + sat[mask] = delta[mask] / cmax[mask] + + sat_mask = np.clip(1.0 - sat, 0.0, 1.0) + factor = 1.0 + vibrance * sat_mask + + gray = arr.dot([0.299, 0.587, 0.114]) + gray = np.expand_dims(gray, axis=2) + arr = gray + (arr - gray) * np.expand_dims(factor, axis=2) # 13. Levels (Blacks/Whites) - blacks = edits.get('blacks', 0.0) - whites = edits.get('whites', 0.0) + blacks = edits.get("blacks", 0.0) + whites = edits.get("whites", 0.0) if abs(blacks) > 0.001 or abs(whites) > 0.001: bp = -blacks * 0.15 wp = 1.0 - (whites * 0.15) @@ -685,37 +904,48 @@ def _apply_edits(self, img_arr: np.ndarray, edits: Optional[Dict[str, Any]] = No arr = (arr - bp) / (wp - bp) # 14. 
Vignette - vignette = edits.get('vignette', 0.0) + vignette = edits.get("vignette", 0.0) if abs(vignette) > 0.001: - h, w = arr.shape[:2] - y, x = np.ogrid[:h, :w] - cx = (x - w/2) / (w/2) - cy = (y - h/2) / (h/2) - dist_sq = cx**2 + cy**2 - - if vignette > 0: - gain = 1.0 - np.clip(dist_sq * vignette, 0.0, 1.0) - arr *= np.expand_dims(gain, axis=2) - else: - gain = 1.0 + dist_sq * (-vignette) - arr *= np.expand_dims(gain, axis=2) - + h, w = arr.shape[:2] + y, x = np.ogrid[:h, :w] + cx = (x - w / 2) / (w / 2) + cy = (y - h / 2) / (h / 2) + dist_sq = cx**2 + cy**2 + + if vignette > 0: + gain = 1.0 - np.clip(dist_sq * vignette, 0.0, 1.0) + arr *= np.expand_dims(gain, axis=2) + else: + gain = 1.0 + dist_sq * (-vignette) + arr *= np.expand_dims(gain, axis=2) + return arr # Potentially > 1.0 if not clipped elsewhere - def auto_levels(self, threshold_percent: float = 0.1) -> Tuple[float, float, float, float]: + + def auto_levels( + self, threshold_percent: float = 0.1 + ) -> Tuple[float, float, float, float]: """ Returns (blacks, whites, p_low, p_high). p_low/p_high are computed conservatively from RGB to avoid introducing new channel clipping. 
""" threshold_percent = max(0.0, min(10.0, threshold_percent)) # Use preview for speed - img_arr = self.float_preview if self.float_preview is not None else self.float_image + img_arr = ( + self.float_preview if self.float_preview is not None else self.float_image + ) if img_arr is None: # Fallback for tests or cases where float data isn't initialized yet - if hasattr(self, '_preview_image') and self._preview_image is not None: - img_arr = np.array(self._preview_image.convert("RGB")).astype(np.float32) / 255.0 + if hasattr(self, "_preview_image") and self._preview_image is not None: + img_arr = ( + np.array(self._preview_image.convert("RGB")).astype(np.float32) + / 255.0 + ) elif self.original_image is not None: - img_arr = np.array(self.original_image.convert("RGB")).astype(np.float32) / 255.0 + img_arr = ( + np.array(self.original_image.convert("RGB")).astype(np.float32) + / 255.0 + ) else: return 0.0, 0.0, 0.0, 255.0 @@ -729,7 +959,7 @@ def auto_levels(self, threshold_percent: float = 0.1) -> Tuple[float, float, flo # --- Detect pre-clipping (per-channel) --- # If *any* channel already has clipped pixels, do not push that end further. - # eps_pct strategy: "Practical" - ignore tiny hot pixels (0.01%) but pin + # eps_pct strategy: "Practical" - ignore tiny hot pixels (0.01%) but pin # if there is any meaningful pre-clipping, even if below the full threshold. 
eps_pct = min(threshold_percent, 0.01) @@ -742,9 +972,13 @@ def auto_levels(self, threshold_percent: float = 0.1) -> Tuple[float, float, flo for c in range(3): chan = rgb[:, :, c] # Treat near-white/near-black as clipped (JPEG artifacts often land on 254/1) - clipped_low_pct.append(100.0 * float(np.count_nonzero(chan <= 1)) / float(total)) - clipped_high_pct.append(100.0 * float(np.count_nonzero(chan >= 254)) / float(total)) - + clipped_low_pct.append( + 100.0 * float(np.count_nonzero(chan <= 1)) / float(total) + ) + clipped_high_pct.append( + 100.0 * float(np.count_nonzero(chan >= 254)) / float(total) + ) + # Use discrete selection methods to avoid interpolation surprises on uint8. # Fallback for older numpy (<1.22) that doesn't support method=. try: @@ -752,14 +986,16 @@ def auto_levels(self, threshold_percent: float = 0.1) -> Tuple[float, float, flo p_highs.append(float(np.percentile(chan, high_p, method="higher"))) except TypeError: p_lows.append(float(np.percentile(chan, low_p, interpolation="lower"))) - p_highs.append(float(np.percentile(chan, high_p, interpolation="higher"))) + p_highs.append( + float(np.percentile(chan, high_p, interpolation="higher")) + ) # Conservative anchors to avoid new channel clipping p_low = min(p_lows) p_high = max(p_highs) - # NOTE: applying this stretch uniformly to RGB can clip individual channels - # more than luminance predicts. That's usually acceptable, but if we + # NOTE: applying this stretch uniformly to RGB can clip individual channels + # more than luminance predicts. That's usually acceptable, but if we # ever see weird color clipping, that might be why. 
# Pin ends if pre-clipping exists (prevents making it worse) @@ -787,9 +1023,42 @@ def auto_levels(self, threshold_percent: float = 0.1) -> Tuple[float, float, flo return blacks, whites, float(p_low), float(p_high) - def get_preview_data_cached(self, allow_compute: bool = True) -> Optional[DecodedImage]: + def _get_upstream_edits_hash(self, edits: Dict[str, Any]) -> int: + """Returns a hash of edit parameters that affect the input to highlight recovery.""" + # Parameters that affect the image BEFORE highlight recovery: + # bit_depth (implicit), crop_box, rotation, straighten_angle, + # white_balance_by, white_balance_mg, exposure. + # Note: 'highlights' and 'shadows' are applied IN this step, so they don't affect input. + keys = [ + "crop_box", + "rotation", + "straighten_angle", + "white_balance_by", + "white_balance_mg", + "exposure", + ] + + def _freeze(v): + if isinstance(v, list): + return tuple(v) + if isinstance(v, dict): + return tuple(sorted(v.items())) + if isinstance(v, np.ndarray): + return v.tobytes() + return v + + values = [_freeze(edits.get(k)) for k in keys] + # Also include file path to distinguish different images + values.append(str(self.current_filepath)) + # Include float_image ID to catch reload-in-place or content changes (e.g. forced reload) + values.append(self.current_mtime) + return hash(tuple(values)) + + def get_preview_data_cached( + self, allow_compute: bool = True + ) -> Optional[DecodedImage]: """Return cached preview if available, otherwise compute and cache. - + Args: allow_compute: If False, returns None immediately if cache is stale (avoids blocking). 
""" @@ -797,10 +1066,10 @@ def get_preview_data_cached(self, allow_compute: bool = True) -> Optional[Decode # Check cache validity if self._cached_preview is not None and self._cached_rev == self._edits_rev: return self._cached_preview - + if not allow_compute: return None - + # Prepare for computation - snapshot data under lock base = self.float_preview.copy() if self.float_preview is not None else None edits = dict(self.current_edits) @@ -812,18 +1081,18 @@ def get_preview_data_cached(self, allow_compute: bool = True) -> Optional[Decode # Heavy computation outside lock using snapshot # base is float32 (H, W, 3) 0-1 arr = self._apply_edits(base, edits=edits, for_export=False) - + # Convert to 8-bit for display - # TONE MAP / CLIP - # Apply highlight roll-off shoulder - arr = _apply_soft_shoulder(arr) - # Clip to 0-1 + # Global shoulder is now applied in linear space within _apply_edits() + # Just clip to 0-1 as safety clamp arr = np.clip(arr, 0.0, 1.0) # Map to 0-255 arr_u8 = (arr * 255).astype(np.uint8) if QImage is None: - raise ImportError("PySide6.QtGui.QImage is required for get_preview_data_cached") + raise ImportError( + "PySide6.QtGui.QImage is required for get_preview_data_cached" + ) # Create QImage from buffer img_buffer = arr_u8.tobytes() @@ -832,7 +1101,7 @@ def get_preview_data_cached(self, allow_compute: bool = True) -> Optional[Decode width=arr_u8.shape[1], height=arr_u8.shape[0], bytes_per_line=arr_u8.shape[1] * 3, - format=QImage.Format.Format_RGB888 + format=QImage.Format.Format_RGB888, ) with self._lock: @@ -840,7 +1109,7 @@ def get_preview_data_cached(self, allow_compute: bool = True) -> Optional[Decode if self._edits_rev == rev: self._cached_preview = decoded self._cached_rev = rev - + return decoded def get_preview_data(self) -> Optional[DecodedImage]: @@ -855,7 +1124,7 @@ def get_edit_value(self, key: str, default: Any = None) -> Any: def set_edit_param(self, key: str, value: Any) -> bool: """Update a single edit parameter.""" with 
self._lock: - if key == 'rotation': + if key == "rotation": # Guard against arbitrary angles in 'rotation'. It expects 90-degree steps. # For arbitrary rotation (drag to rotate), use 'straighten_angle'. try: @@ -863,10 +1132,12 @@ def set_edit_param(self, key: str, value: Any) -> bool: val_deg = float(value) rounded_deg = round(val_deg / 90.0) * 90 final_val = int(rounded_deg) % 360 - + if abs(val_deg - rounded_deg) > 1.0: - log.warning(f"'rotation' received {value}. Rounding to {final_val}. Use 'straighten_angle' for free rotation.") - + log.warning( + f"'rotation' received {value}. Rounding to {final_val}. Use 'straighten_angle' for free rotation." + ) + self.current_edits[key] = final_val self._edits_rev += 1 return True @@ -874,13 +1145,11 @@ def set_edit_param(self, key: str, value: Any) -> bool: log.warning(f"Invalid value for rotation {value!r}: {e}") return False - - - if key in self.current_edits and key != 'crop_box': + if key in self.current_edits and key != "crop_box": # Check for floating point equality to prevent cache thrashing new_val = value current_val = self.current_edits.get(key) - + # Try to compare as floats if possible try: vf = float(new_val) @@ -897,64 +1166,199 @@ def set_edit_param(self, key: str, value: Any) -> bool: return True return False - def _apply_highlights_shadows(self, linear: np.ndarray, highlights: float, shadows: float) -> np.ndarray: - """Apply highlights and shadows adjustments using luminance-based processing in linear light. - + def _apply_highlights_shadows( + self, + linear: np.ndarray, + highlights: float, + shadows: float, + *, + srgb_u8_stride: Optional[np.ndarray] = None, + analysis_state: Optional[Dict[str, float]] = None, + edits: Optional[Dict[str, Any]] = None, + ) -> np.ndarray: + """Apply highlights and shadows adjustments using brightness-based processing in linear light. + + Highlights slider semantics: + - Negative (e.g., -100): Compress bright regions, recover detail if headroom exists. 
+ Uses brightness-based rescaling to preserve hue/chroma. + - Positive (e.g., +100): Lift highlights (brighten bright areas). + + For JPEG (no headroom): Applies perceptual rolloff + optional desaturation + (artistic fallback) to simulate recovery of micro-contrast. + Args: - linear: Float32 RGB array (H, W, 3) in linear light + linear: Float32 RGB array (H, W, 3) in linear light, may have values > 1.0 highlights: -1.0 to 1.0, negative recovers highlights, positive boosts shadows: -1.0 to 1.0, positive lifts shadows, negative crushes - + srgb_u8_stride: Optional uint8 sRGB array (strided) for accurate JPEG clipping detection + (should be the image BEFORE linearization) + analysis_state: Optional pre-computed analysis state to avoid re-work. + Returns: Adjusted float32 RGB array (linear) """ - # Compute luminance (Rec. 709 coefficients) - lum = linear[:, :, 0] * 0.2126 + linear[:, :, 1] * 0.7152 + linear[:, :, 2] * 0.0722 - lum = np.clip(lum, 1e-10, None) # Avoid division by zero - - # Create smooth masks for highlights/shadows regions - # Pivot point is 0.18 (mid-gray in linear) - pivot = 0.18 - - # Shadow mask: high for dark pixels, fades to 0 above pivot - shadow_mask = _smoothstep01(1.0 - lum / pivot) - - # Highlight mask: high for bright pixels, fades to 0 below pivot - highlight_mask = _smoothstep01((lum - pivot) / (1.0 - pivot)) - - # Calculate adjustments - # Shadows: positive lifts, negative crushes - # Apply shoulder compression to avoid clipping - shadow_adj = shadows * 0.5 # Scale factor for effect strength - shadow_factor = 1.0 + shadow_adj * shadow_mask - - # Highlights: positive boosts, negative recovers (matches user expectation) - highlight_adj = highlights * 0.5 - highlight_factor = 1.0 + highlight_adj * highlight_mask - - # Combine factors and apply - combined_factor = shadow_factor * highlight_factor - combined_factor = np.expand_dims(combined_factor, axis=2) - - # Apply adjustment while preserving color ratios - adjusted = linear * 
combined_factor - - # Soft highlight recovery for negative highlights value (recover clipped highlights) - if highlights < -0.01: - # Apply shoulder compression to bright areas - recovery_strength = -highlights - bright_mask = np.expand_dims(_smoothstep01((lum - 0.5) / 0.5), axis=2) - # Compress values above 1.0 with a soft shoulder - # Use a simple shoulder (1 - exp(-x)) to bring values > 1 towards 1 - compressed = 1.0 - np.exp(-np.clip(adjusted, 0, 10.0)) # Soft clip - adjusted = adjusted * (1.0 - bright_mask * recovery_strength) + compressed * bright_mask * recovery_strength - - return adjusted - + arr = linear + + # Analyze highlight state if needed + # If caller passed analysis_state, usage that. + state = analysis_state + if state is None: + # Re-compute locally if not provided + # We assume srgb_u8_stride is ALREADY STRIDED if passed (based on the name change) + arr_stride = arr[::4, ::4, :] + # If srgb_u8_stride was passed, use it directly (it's already small). + # If it wasn't passed, we can't easily recreate the source state here without the original source buffer. + # But the caller (_apply_edits) usually provides it. + state = _analyze_highlight_state(arr_stride, srgb_u8=srgb_u8_stride) + + # Ensure we have edits dict to check upstream hash + if edits is None: + # Fallback to current_edits if not provided (cached preview path passes it) + # But access under lock to be safe from modifications + with self._lock: + edits = self.current_edits.copy() + + # We DO NOT update self._last_highlight_state here to avoid race/staleness during export. + # The preview path in _apply_edits handles the UI state update. 
+ + # --- Shadows Adjustment (unchanged approach) --- + if abs(shadows) > 0.001: + # Compute luminance for shadow mask + lum = arr[:, :, 0] * 0.2126 + arr[:, :, 1] * 0.7152 + arr[:, :, 2] * 0.0722 + lum = np.clip(lum, 1e-10, None) + + pivot = 0.18 # Mid-gray in linear + shadow_mask = _smoothstep01(1.0 - lum / pivot) + + shadow_adj = shadows * 0.5 + shadow_factor = 1.0 + shadow_adj * shadow_mask + shadow_factor = np.expand_dims(shadow_factor, axis=2) + arr = arr * shadow_factor + + # --- Highlights Adjustment (new brightness-based approach) --- + if abs(highlights) > 0.001: + headroom_pct = state["headroom_pct"] + + # Use specific keys from new analysis logic + # source_clipped_pct: True JPEG flat-top clipping + # current_nearwhite_pct: Current brightness distribution near 1.0 + clipped_pct = state.get("source_clipped_pct", state.get("clipped_pct", 0.0)) + near_white_pct = state.get( + "current_nearwhite_pct", state.get("near_white_pct", 0.0) + ) + + if highlights < 0: + # Negative: compress/recover highlights + amount = -highlights # 0 to 1 + + # Adaptive parameters based on headroom and clipping + # More clipping (source) → later pivot (only affect very top end) + pivot = _lerp(ADAPTIVE_PIVOT_MIN, ADAPTIVE_PIVOT_MAX, clipped_pct) + + # k increases with near_white_pct (recoverable micro-contrast) + # BUT also increase k when headroom exists for stronger compression + k = ADAPTIVE_K_BASE + ADAPTIVE_K_SCALING * near_white_pct + if headroom_pct > 0.01: + k = max( + k, + ADAPTIVE_K_HEADROOM_BASE + + ADAPTIVE_K_HEADROOM_SCALING * headroom_pct, + ) # Stronger k for headroom + + # More clipping → more chroma rolloff + chroma_rolloff = _lerp( + ADAPTIVE_ROLLOFF_MIN, ADAPTIVE_ROLLOFF_MAX, clipped_pct + ) + + # Headroom ceiling: preserve some tonal separation above 1.0 + # Robustify: use percentile to ignore hot pixels, clamped to valid range + if headroom_pct > 0.01: + # Check cache for max_brightness + max_brightness = 1.0 + + # Compute hash of upstream params + 
current_hash = self._get_upstream_edits_hash(edits) + + max_brightness = 1.0 + hit = False + with self._lock: + cached = self._cached_max_brightness_state + if cached and cached.get("hash") == current_hash: + max_brightness = cached["value"] + hit = True + + if not hit: + # Use 99.5th percentile of max-channel brightness to avoid hot pixels + max_rgb = arr.max(axis=2) + if max_rgb.size > 0: + # Optimize: Use much coarser stride and np.partition for speed + # We only need an estimate for headroom, so we don't need high precision + # Stride ::10 reduces data by 100x vs full, 6x faster than ::4 + view = max_rgb[::10, ::10] + if view.size > 0: + # np.partition is O(N) vs np.percentile O(N log N) + # We want 99.5th percentile roughly. + # Index for 99.5% = size * 0.995 => size - (size * 0.005) + k_index = int(view.size * 0.995) + # Clamp to valid range + k_index = min(max(0, k_index), view.size - 1) + + partitioned = np.partition(view.flatten(), k_index) + max_brightness = float(partitioned[k_index]) + else: + max_brightness = 1.0 + else: + max_brightness = 1.0 + + with self._lock: + self._cached_max_brightness_state = { + "hash": current_hash, + "value": max_brightness, + } + + # Clamp to avoid crazy values from single hot pixels or artifacts + max_brightness = min(max_brightness, 100.0) + + if max_brightness > 1.0: + # Preserve some headroom detail, reduced by amount + headroom_ceiling = 1.0 + (max_brightness - 1.0) * 0.3 * ( + 1.0 - amount * 0.7 + ) + pivot = min(pivot, 0.5 + 0.25 * (1.0 - headroom_pct)) + else: + headroom_ceiling = 1.0 + else: + headroom_ceiling = 1.0 + + # JPEG fallback: when near_white is high but clipping is low, + # nudge pivot earlier to expose micro-contrast (Photoshop-like feel) + if headroom_pct < 0.01: + if near_white_pct > 0.05 and clipped_pct < 0.05: + # Lots of recoverable near-white, not much flat clipping + pivot = max(0.60, pivot - 0.12 * near_white_pct) + if clipped_pct > 0.02: + # Increase chroma rolloff for flat-clipped JPEGs + 
chroma_rolloff = max(chroma_rolloff, 0.25) + + arr = _highlight_recover_linear( + arr, + amount, + pivot=pivot, + k=k, + chroma_rolloff=chroma_rolloff, + headroom_ceiling=headroom_ceiling, + ) + else: + # Positive: boost highlights (hue-preserving) + amount = highlights # 0 to 1 + arr = _highlight_boost_linear(arr, amount, pivot=0.5) + + return arr + def set_crop_box(self, crop_box: Tuple[int, int, int, int]): """Set the normalized crop box (left, top, right, bottom) from 0-1000.""" with self._lock: - self.current_edits['crop_box'] = crop_box + self.current_edits["crop_box"] = crop_box self._edits_rev += 1 def _write_tiff_16bit(self, path: Path, arr_float: np.ndarray): @@ -962,29 +1366,33 @@ def _write_tiff_16bit(self, path: Path, arr_float: np.ndarray): Writes a float32 (0-1) numpy array as an uncompressed 16-bit RGB TIFF using OpenCV. arr_float shape: (H, W, 3) """ + if cv2 is None: + raise RuntimeError("Saving 16-bit TIFF requires OpenCV") + # Convert to 16-bit # Clip to safe range before scaling arr = (np.clip(arr_float, 0.0, 1.0) * 65535).astype(np.uint16) - + # OpenCv expects BGR for imwrite if len(arr.shape) == 3 and arr.shape[2] == 3: - import cv2 - arr_bgr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) - success = cv2.imwrite(str(path), arr_bgr) - if not success: - raise IOError(f"Failed to write TIFF -> {path}") + arr_bgr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR) + success = cv2.imwrite(str(path), arr_bgr) + if not success: + raise IOError(f"Failed to write TIFF -> {path}") else: - raise ValueError("Only RGB supported for TIFF writer") - + raise ValueError("Only RGB supported for TIFF writer") def _get_sanitized_exif_bytes(self) -> Optional[bytes]: """ Returns EXIF bytes with Orientation reset to 1 (Normal). Used when we've baked rotation/straightening into the pixels. - + Prefers cached source EXIF (from paired JPEG) if available, otherwise falls back to the current original_image's EXIF. 
- + + If sanitization or serialization fails, returns None (drops EXIF) + to prevent incorrect "double rotation" in viewers. + Returns: bytes object of EXIF data, or None if sanitization/serialization failed. """ @@ -1026,83 +1434,87 @@ def _get_sanitized_exif_bytes(self) -> Optional[bytes]: # 5. Guard for tobytes() if not hasattr(exif, "tobytes"): - # Fallback to source bytes even if unsanitized - return self._source_exif_bytes or (self.original_image.info.get('exif') if self.original_image else None) + log.warning("EXIF object has no tobytes() method, dropping EXIF to prevent rotation issues.") + return None try: return exif.tobytes() - except Exception: - # Fallback to source bytes on failure - return self._source_exif_bytes or (self.original_image.info.get('exif') if self.original_image else None) + except Exception as e: + log.warning(f"Failed to serialize sanitized EXIF: {e}. Dropping EXIF to prevent rotation issues.") + return None except Exception as e: - log.warning(f"Failed to sanitize EXIF orientation: {e}") - return self._source_exif_bytes or (self.original_image.info.get('exif') if self.original_image else None) + log.warning(f"Failed to sanitize EXIF orientation: {e}. Dropping EXIF.") + return None - def save_image(self, write_developed_jpg: bool = False, developed_path: Optional[Path] = None) -> Optional[Tuple[Path, Path]]: + def save_image( + self, write_developed_jpg: bool = False, developed_path: Optional[Path] = None + ) -> Optional[Tuple[Path, Path]]: """Saves the edited image, backing up the original. - + Args: write_developed_jpg: If True, also create a `-developed.jpg` sidecar file. This should be True only when editing RAW files. - developed_path: Optional explicit path for the developed JPG. + developed_path: Optional explicit path for the developed JPG. If not provided, it's derived from current_filepath. - + Returns: A tuple of (saved_path, backup_path) on success, otherwise None. 
""" if self.float_image is None or self.current_filepath is None: return None - + # 1. Apply Edits to Full Resolution - final_float = self._apply_edits(self.float_image.copy(), for_export=True) # (H,W,3) float32 - + final_float = self._apply_edits( + self.float_image.copy(), for_export=True + ) # (H,W,3) float32 + original_path = self.current_filepath try: original_stat = original_path.stat() except OSError as e: log.warning(f"Unable to read timestamps for {original_path}: {e}") original_stat = None - + # 2. Backup backup_path = create_backup_file(original_path) if backup_path is None: return None - + try: # 3. Save Main File - is_tiff = original_path.suffix.lower() in ['.tif', '.tiff'] - + is_tiff = original_path.suffix.lower() in [".tif", ".tiff"] + if is_tiff: # Save as 16-bit TIFF using custom writer self._write_tiff_16bit(original_path, final_float) else: # Check for geometric transforms - rotation = self.current_edits.get('rotation', 0) - straighten_angle = float(self.current_edits.get('straighten_angle', 0.0)) + rotation = self.current_edits.get("rotation", 0) + straighten_angle = float( + self.current_edits.get("straighten_angle", 0.0) + ) transforms_applied = (rotation != 0) or (abs(straighten_angle) > 0.001) # Determine EXIF bytes to write exif_bytes = None if self.original_image: - if transforms_applied: - # If we rotated pixels, we MUST sanitize orientation (set to 1). - # If sanitization fails, we drop EXIF to avoid "double rotation" bugs. - exif_bytes = self._get_sanitized_exif_bytes() - else: - # No rotation applied: Safe to preserve original EXIF as-is (including orientation). - exif_bytes = self.original_image.info.get('exif') + # We NO LONGER check transforms_applied here because we ALWAYS + # bake orientation into the pixel buffer on load for consistency. + # Thus, we ALWAYS sanitize the Orientation tag to 1 to prevent "double rotation". 
+ exif_bytes = self._get_sanitized_exif_bytes() # Save as standard format (Likely JPG) using Pillow # Convert to uint8 - # Apply highlight roll-off shoulder before clipping - final_float = _apply_soft_shoulder(final_float) + # Legacy soft shoulder moved to linear space (_apply_headroom_shoulder) + # converted via _linear_to_srgb in _apply_edits, so final_float is already sRGB. + # Just clip to valid range. arr_u8 = (np.clip(final_float, 0.0, 1.0) * 255).astype(np.uint8) - img_u8 = Image.fromarray(arr_u8, mode='RGB') - - save_kwargs = {'quality': 95} + img_u8 = Image.fromarray(arr_u8, mode="RGB") + + save_kwargs = {"quality": 95} if exif_bytes: - save_kwargs['exif'] = exif_bytes - + save_kwargs["exif"] = exif_bytes + try: img_u8.save(original_path, **save_kwargs) except Exception: @@ -1119,12 +1531,14 @@ def save_image(self, write_developed_jpg: bool = False, developed_path: Optional if stem.lower().endswith("-working"): stem = stem[:-8] developed_path = original_path.with_name(f"{stem}-developed.jpg") - + # Check for geometric transforms (re-check not strictly needed but for clarity) - rotation = self.current_edits.get('rotation', 0) - straighten_angle = float(self.current_edits.get('straighten_angle', 0.0)) + rotation = self.current_edits.get("rotation", 0) + straighten_angle = float( + self.current_edits.get("straighten_angle", 0.0) + ) transforms_applied = (rotation != 0) or (abs(straighten_angle) > 0.001) - + # Determine EXIF for sidecar - prefer source EXIF (from paired JPEG) exif_bytes = None if transforms_applied: @@ -1132,32 +1546,31 @@ def save_image(self, write_developed_jpg: bool = False, developed_path: Optional exif_bytes = self._get_sanitized_exif_bytes() elif self._source_exif_bytes: # Use cached source EXIF from paired JPEG - exif_bytes = self._source_exif_bytes + # Must sanitize orientation because we baked it on load! 
+ exif_bytes = sanitize_exif_orientation(self._source_exif_bytes) elif self.original_image: # Fallback to current image's EXIF (may be empty for TIFFs) - exif_bytes = self.original_image.info.get('exif') - + exif_bytes = self.original_image.info.get("exif") + # Use the same uint8 data - # Apply highlight roll-off shoulder before clipping - final_float_sidecar = _apply_soft_shoulder(final_float) - arr_u8 = (np.clip(final_float_sidecar, 0.0, 1.0) * 255).astype(np.uint8) + # Legacy soft shoulder moved to linear space + arr_u8 = (np.clip(final_float, 0.0, 1.0) * 255).astype(np.uint8) img_u8 = Image.fromarray(arr_u8) - - dev_kwargs = {'quality': 90} + + dev_kwargs = {"quality": 90} if exif_bytes: - dev_kwargs['exif'] = exif_bytes - + dev_kwargs["exif"] = exif_bytes + try: img_u8.save(developed_path, **dev_kwargs) except Exception: img_u8.save(developed_path) - + return original_path, backup_path except Exception as e: - log.exception(f"Failed to save edited image or backup: {e}") - return None - + log.exception(f"Failed to save {self.current_filepath}: {e}") + raise RuntimeError(f"Save failed: {str(e)}") from e def _restore_file_times(self, path: Path, original_stat: os.stat_result) -> None: """Best-effort restoration of access/modify timestamps after saving.""" @@ -1169,16 +1582,19 @@ def _restore_file_times(self, path: Path, original_stat: os.stat_result) -> None def rotate_image_cw(self): """Decreases the rotation edit parameter by 90° modulo 360 (clockwise).""" with self._lock: - current = self.current_edits.get('rotation', 0) - self.current_edits['rotation'] = (current - 90) % 360 + current = self.current_edits.get("rotation", 0) + self.current_edits["rotation"] = (current - 90) % 360 self._edits_rev += 1 def rotate_image_ccw(self): """Increases the rotation edit parameter by 90° modulo 360 (counter-clockwise).""" with self._lock: - current = self.current_edits.get('rotation', 0) - self.current_edits['rotation'] = (current + 90) % 360 + current = 
self.current_edits.get("rotation", 0) + self.current_edits["rotation"] = (current + 90) % 360 self._edits_rev += 1 + # Dictionary of ratios for QML dropdown -ASPECT_RATIOS = [{"name": name, "ratio": ratio} for name, ratio in INSTAGRAM_RATIOS.items()] +ASPECT_RATIOS = [ + {"name": name, "ratio": ratio} for name, ratio in INSTAGRAM_RATIOS.items() +] diff --git a/faststack/imaging/math_utils.py b/faststack/imaging/math_utils.py new file mode 100644 index 0000000..8c78a82 --- /dev/null +++ b/faststack/imaging/math_utils.py @@ -0,0 +1,266 @@ +import numpy as np +from typing import Optional, Dict + +# ---------------------------- +# sRGB ↔ Linear Conversion Helpers +# ---------------------------- + +def _srgb_to_linear(x: np.ndarray) -> np.ndarray: + """Convert sRGB values to linear light. + + Preserves headroom (values > 1.0) for highlight recovery. + Clamps negatives to 0 since the power function requires non-negative input. + """ + # Clamp negatives to 0, but preserve values > 1.0 for headroom + x = np.clip(x, 0.0, None) + a = 0.055 + + # Apply the standard sRGB transfer function - works for values > 1.0 too + return np.where(x <= 0.04045, x / 12.92, ((x + a) / (1.0 + a)) ** 2.4) + + +def _linear_to_srgb(x: np.ndarray) -> np.ndarray: + """Convert linear light values to sRGB (0-1).""" + x = np.clip(x, 0.0, None) + a = 0.055 + return np.where(x <= 0.0031308, 12.92 * x, (1.0 + a) * (x ** (1.0 / 2.4)) - a) + + + + + +def _smoothstep01(x: np.ndarray) -> np.ndarray: + """Hermite smoothstep: 0 at x<=0, 1 at x>=1, smooth S-curve between.""" + x = np.clip(x, 0.0, 1.0) + return x * x * (3.0 - 2.0 * x) + + +def _apply_headroom_shoulder(x: np.ndarray, max_overshoot: float = 0.05) -> np.ndarray: + """Compress values above 1.0 smoothly into a very small headroom. + + Maps headroom (x > 1.0) into [1.0, 1.0 + max_overshoot). + Asymptotes to 1.0 + max_overshoot as x -> inf. + Maintains continuity and monotonicity at 1.0. 
+ + Args: + x: Float32 array in linear light, may have values > 1.0 + max_overshoot: Maximum amount to overshoot 1.0 (e.g. 0.05 means max 1.05) + """ + mask = x > 1.0 + if not np.any(mask): + return x + + out = x.copy() + excess = x[mask] - 1.0 + # Rational compression targeting asymptote of 'max_overshoot' + # y = saturation * x / (saturation + x) -> asymptotes to saturation + # Here x=excess, saturation=max_overshoot + compressed_excess = max_overshoot * excess / (max_overshoot + excess) + out[mask] = 1.0 + compressed_excess + return out + + +# Constants for chroma rolloff +_CHROMA_ROLLOFF_START = 0.85 +_CHROMA_ROLLOFF_WIDTH = 0.15 + + +# Precomputed thresholds for JPEG clipping detection in linear space +# These correspond to sRGB u8 values 250, 253, 254 converted to linear +_LINEAR_THRESHOLD_250 = ((250.0 / 255.0 + 0.055) / 1.055) ** 2.4 # ~0.871 +_LINEAR_THRESHOLD_253 = ((253.0 / 255.0 + 0.055) / 1.055) ** 2.4 # ~0.948 +_LINEAR_THRESHOLD_254 = ((254.0 / 255.0 + 0.055) / 1.055) ** 2.4 # ~0.972 + + +def _analyze_highlight_state(rgb_linear: np.ndarray, srgb_u8: Optional[np.ndarray] = None, pre_exposure_linear: Optional[np.ndarray] = None) -> dict: + """Analyze image for headroom and clipping to tune recovery parameters. + + Args: + rgb_linear: Float32 RGB array in linear light (post-exposure/WB) + srgb_u8: Optional uint8 sRGB array (source image) for accurate JPEG clipping detection. + MUST have same H×W dimensions as rgb_linear (or be stride-compatible). + + Returns: + Dict with: + - headroom_pct: Fraction of pixels with max(rgb) > 1.0 (current state recoverable data) + - clipped_pct: Fraction of pixels with true source clipping (flat-top JPEG clip at 254+ if srgb_u8 provided) + - source_clipped_pct: Alias for clipped_pct (true source clipping) + - near_white_pct: Alias for current_nearwhite_pct (for UI display) + - current_nearwhite_pct: Fraction of pixels currently near white [250, 253] equivalent in processed linear space. 
+ """ + total_pixels = rgb_linear.shape[0] * rgb_linear.shape[1] + if total_pixels == 0: + return { + 'headroom_pct': 0.0, + 'clipped_pct': 0.0, + 'source_clipped_pct': 0.0, + 'near_white_pct': 0.0, + 'current_nearwhite_pct': 0.0 + } + + # Headroom detection: Use pre-exposure buffer if available for "True Headroom" + if pre_exposure_linear is not None: + max_source = pre_exposure_linear.max(axis=2) + headroom_pct = float(np.count_nonzero(max_source > 1.0)) / total_pixels + else: + max_rgb = rgb_linear.max(axis=2) + headroom_pct = float(np.count_nonzero(max_rgb > 1.0)) / total_pixels + + # 1. Source Clipping Statistics (True JPEG Clipping) + # If srgb_u8 is provided, use it. Otherwise approximate from linear (less accurate if exposure shifted). + if srgb_u8 is not None and srgb_u8.shape[:2] == rgb_linear.shape[:2]: + max_u8 = srgb_u8.max(axis=2) + source_clipped_pct = float(np.count_nonzero(max_u8 >= 254)) / total_pixels + # Note: We don't necessarily use srgb_u8 for 'near_white' pivoting logic if user wants "current" state logic, + # but checking source near-white is useful for "is this image naturally bright". + else: + # Fallback: estimate "source clipping" from pre-exposure linear if available, else current + if pre_exposure_linear is not None: + max_to_check = pre_exposure_linear.max(axis=2) + else: + max_to_check = rgb_linear.max(axis=2) + + source_clipped_pct = float(np.count_nonzero(max_to_check >= _LINEAR_THRESHOLD_254)) / total_pixels + + # 2. Current Near-White Statistics (for Pivot Nudging) + # This drives the "micro-contrast feel" based on how bright the image IS NOW. 
+ # Re-calculate max_rgb if we didn't do it earlier (optimization) + if pre_exposure_linear is not None: + max_rgb = rgb_linear.max(axis=2) + elif 'max_rgb' not in locals(): + max_rgb = rgb_linear.max(axis=2) + + current_nearwhite_pct = float(np.count_nonzero( + (max_rgb >= _LINEAR_THRESHOLD_250) & (max_rgb < _LINEAR_THRESHOLD_254) + )) / total_pixels + + # Legacy compat: near_white_pct usually referred to current state in previous logic? + # Actually previous logic tried to use srgb_u8 if available for 'near_white_pct', which implies source. + # But for pivot nudging, we might want current? + # The user said: "drive 'pivot nudging' off current_nearwhite_pct" and "drive 'JPEG fallback' off source_clipped_pct". + # So we provide both. + + return { + 'headroom_pct': headroom_pct, + 'source_clipped_pct': source_clipped_pct, + 'current_nearwhite_pct': current_nearwhite_pct, + } + + +def _lerp(a: float, b: float, t: float) -> float: + """Linear interpolation between a and b by t (clamped 0-1).""" + t_clamped = 0.0 if t < 0.0 else (1.0 if t > 1.0 else t) + return a + (b - a) * t_clamped + + +def _highlight_recover_linear( + rgb_linear: np.ndarray, + amount: float, + *, + pivot: float = 0.7, + k: float = 6.0, + chroma_rolloff: float = 0.15, + headroom_ceiling: float = 1.0, +) -> np.ndarray: + """Apply highlight recovery using brightness-based rescaling to preserve hue. + + Why brightness-based rescale? + - Per-channel compression causes hue/chroma shifts (e.g., bright red becomes pink). + - By computing a single brightness metric and rescaling all channels equally, + we preserve the original RGB color ratios (hue and relative saturation). + + For 16-bit sources with headroom (values > 1.0), the curve compresses into + [pivot, headroom_ceiling] rather than [pivot, 1.0], preserving subtle tonal + separation above 1.0 that represents real recovered detail. 
+ + Args: + rgb_linear: Float32 RGB array (H, W, 3) in linear light, may have values > 1.0 + amount: Recovery strength 0.0-1.0 (mapped from slider -100 to 0) + pivot: Brightness threshold below which no recovery occurs + k: Shoulder curve steepness (higher = more aggressive compression) + chroma_rolloff: Desaturation amount in extreme highlights (0-1) + headroom_ceiling: Maximum output brightness (> 1.0 preserves headroom detail) + + Returns: + Recovered float32 RGB array (linear) + """ + if amount < 0.001: + return rgb_linear + + eps = 1e-7 + + # Use max-channel as brightness metric - handles saturated highlights better than luminance + brightness = rgb_linear.max(axis=2) + + # Build smooth highlight mask: 0 below pivot, 1 in highlights + # Use headroom_ceiling instead of 1.0 for the normalization range + mask = _smoothstep01((brightness - pivot) / (headroom_ceiling - pivot + eps)) + + # Highlights recovery should DIM bright areas to reveal detail/contrast. + # We use a gain-based approach that preserves the pivot and pull down highlights. + # strength of 0.3 means max 30% darkening at pure white. 
+ recovery_strength = 0.3 + target_brightness = brightness * (1.0 - amount * recovery_strength * mask) + + # Rescale RGB to preserve hue/chroma + # Protect against div-by-zero or huge scale factors for near-black pixels + scale = np.clip(target_brightness / (brightness + eps), 0.0, 2.0) + scale = np.expand_dims(scale, axis=2) + recovered = rgb_linear * scale + + # Optional chroma rolloff in extreme highlights to reduce "neon" colors + if chroma_rolloff > 0.001: + # Use target_brightness (post-compression) for the mask to maintain monotonicity + # Normalize against headroom_ceiling for consistent behavior + extreme_mask = _smoothstep01((target_brightness - _CHROMA_ROLLOFF_START * headroom_ceiling) / (_CHROMA_ROLLOFF_WIDTH * headroom_ceiling)) + extreme_mask = np.expand_dims(extreme_mask, axis=2) + + # Compute grayscale (luminance) of recovered image + gray = recovered[:, :, 0:1] * 0.2126 + recovered[:, :, 1:2] * 0.7152 + recovered[:, :, 2:3] * 0.0722 + + # Desaturate in extreme highlights + # Note: This preserves monotonicity because both recovered and gray are + # monotonic with respect to input brightness, and we blend between them. + desat_amount = chroma_rolloff * amount * extreme_mask + recovered = recovered * (1.0 - desat_amount) + gray * desat_amount + + return recovered + + +def _highlight_boost_linear( + rgb_linear: np.ndarray, + amount: float, + *, + pivot: float = 0.5, +) -> np.ndarray: + """Apply highlight boost using brightness-based rescaling to preserve hue. + + Uses same hue-preserving approach as recovery for symmetry. 
+ + Args: + rgb_linear: Float32 RGB array (H, W, 3) in linear light + amount: Boost strength 0.0-1.0 (mapped from slider 0 to 100) + pivot: Brightness threshold below which minimal boost occurs + + Returns: + Boosted float32 RGB array (linear) + """ + if amount < 0.001: + return rgb_linear + + eps = 1e-7 + + brightness = rgb_linear.max(axis=2) + + # Build mask for highlights + mask = _smoothstep01((brightness - pivot) / (1.0 - pivot + eps)) + + # Target brightness: lift with curve + target_brightness = brightness * (1.0 + amount * 1.5 * mask) + + # Rescale RGB to preserve hue, cap scale at 1.5x to prevent blowout + scale = np.clip(target_brightness / (brightness + eps), 0.0, 2.0) + scale = np.minimum(scale, 1.5) # Direct cap on scale + scale = np.expand_dims(scale, axis=2) + + return rgb_linear * scale diff --git a/faststack/imaging/optional_deps.py b/faststack/imaging/optional_deps.py new file mode 100644 index 0000000..6b1f9d1 --- /dev/null +++ b/faststack/imaging/optional_deps.py @@ -0,0 +1,8 @@ +"""Centralized optional dependencies to avoid circular imports and heavy initialization.""" + +try: + import cv2 + HAS_OPENCV = True +except ImportError: + cv2 = None + HAS_OPENCV = False diff --git a/faststack/imaging/orientation.py b/faststack/imaging/orientation.py new file mode 100644 index 0000000..98b053d --- /dev/null +++ b/faststack/imaging/orientation.py @@ -0,0 +1,81 @@ +"""Centralized utilities for EXIF orientation handling.""" + +import logging +from pathlib import Path +from typing import Optional +import numpy as np +from PIL import Image + +log = logging.getLogger(__name__) + +def get_exif_orientation(image_path: Path, exif: Optional[Image.Exif] = None) -> int: + """Read the EXIF Orientation tag from an image file or provided EXIF object. + + Args: + image_path: Path to the image file + exif: Optional pre-read PIL Exif object + + Returns: + Orientation value (1-8), defaults to 1 if missing or error. 
+ """ + try: + if exif is None: + with Image.open(image_path) as img: + exif = img.getexif() + + if not exif: + return 1 + + # EXIF Orientation tag ID is 274 + return exif.get(274, 1) + except (OSError, IOError, AttributeError) as e: + log.debug("Could not read EXIF orientation for %s: %s", image_path, e) + return 1 + +def apply_orientation_to_np(buffer: np.ndarray, orientation: int) -> np.ndarray: + """Apply EXIF orientation transformation to a numpy image buffer. + + Args: + buffer: Image as numpy array (H, W, 3) RGB uint8 or float32 + orientation: Orientation value (1-8) + + Returns: + Transformed numpy array. Guaranteed to be C-contiguous. + """ + if orientation <= 1: + return buffer + + # Apply transformation based on orientation + if orientation == 2: + # Mirrored horizontally + result = np.fliplr(buffer) + elif orientation == 3: + # Rotated 180 degrees + result = np.rot90(buffer, k=2) + elif orientation == 4: + # Mirrored vertically + result = np.flipud(buffer) + elif orientation == 5: + # Mirrored horizontally then rotated 90 CCW + result = np.rot90(np.fliplr(buffer), k=1) + elif orientation == 6: + # Rotated 90 CW (270 CCW) + result = np.rot90(buffer, k=3) + elif orientation == 7: + # Mirrored horizontally then rotated 90 CW + result = np.rot90(np.fliplr(buffer), k=3) + elif orientation == 8: + # Rotated 90 CCW + result = np.rot90(buffer, k=1) + else: + return buffer + + # Ensure result is C-contiguous after flip/rotate + if not result.flags['C_CONTIGUOUS']: + result = np.ascontiguousarray(result) + return result + +def apply_exif_orientation(buffer: np.ndarray, image_path: Path, exif: Optional[Image.Exif] = None) -> np.ndarray: + """Helper that reads orientation and applies it to a numpy buffer.""" + orientation = get_exif_orientation(image_path, exif) + return apply_orientation_to_np(buffer, orientation) diff --git a/faststack/imaging/prefetch.py b/faststack/imaging/prefetch.py index 365e2a6..ba54dbf 100644 --- a/faststack/imaging/prefetch.py +++ 
b/faststack/imaging/prefetch.py @@ -21,6 +21,7 @@ from faststack.models import ImageFile, DecodedImage from faststack.imaging.jpeg import decode_jpeg_rgb, decode_jpeg_resized, TURBO_AVAILABLE from faststack.imaging.cache import build_cache_key +from faststack.imaging.orientation import apply_exif_orientation from faststack.config import config log = logging.getLogger(__name__) @@ -103,6 +104,9 @@ def get_monitor_profile() -> Optional[ImageCms.ImageCmsProfile]: return _monitor_profile_cache[monitor_icc_path] + +# apply_exif_orientation imported from orientation.py + def apply_saturation_compensation( arr: np.ndarray, width: int, @@ -313,6 +317,7 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, import time t_start = time.perf_counter() + exif_obj = None # Ensure variable is always initialized # Early check: if generation has already advanced since this task was submitted, skip it if generation != self.generation: @@ -388,10 +393,12 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, img = PILImage.fromarray(buffer) t_after_array_to_pil = time.perf_counter() - # Extract ICC profile from original file (need to read header only) + # Extract ICC profile AND EXIF from original file (need to read header only) t_before_profile_read = time.perf_counter() + exif_obj = None with PILImage.open(image_file.path) as orig: icc_bytes = orig.info.get("icc_profile") + exif_obj = orig.getexif() # Capture EXIF while open t_after_profile_read = time.perf_counter() src_profile = None @@ -421,7 +428,10 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, t_after_icc = time.perf_counter() rgb = np.array(img, dtype=np.uint8) - h, w, _ = rgb.shape + + # Note: We do NOT apply EXIF orientation here anymore. + # It is handled in the Unified EXIF Orientation Application block below. + # This avoids "double rotation" or potential "apply and discard" bugs. 
# Memory Optimization: Avoid explicit copy buffer = np.ascontiguousarray(rgb) @@ -435,7 +445,7 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, index, decoder, t_after_read - t_before_read, t_after_decode - t_after_read, t_after_array_to_pil - t_after_decode, t_after_profile_read - t_before_profile_read, t_after_icc - t_before_icc, t_after_copy - t_after_icc, - t_after_copy - t_start, w, h) + t_after_copy - t_start, buffer.shape[1], buffer.shape[0]) except (OSError, ImageCms.PyCMSError, ValueError) as e: # ICC conversion failed, fall back to standard decode log.warning("ICC profile conversion failed for %s: %s, falling back to standard decode", image_file.path, e) @@ -457,7 +467,21 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, return None t_after_fallback_decode = time.perf_counter() - h, w, _ = buffer.shape + # EXIF orientation correction + # In fallback path, we opened 'f' (file handle) and mmap. + # We didn't keep a PIL image open suitable for EXIF extraction except inside the try block (line 456). + # But line 456 `with PILImage.fromarray(buffer)` creates a new image from buffer, NO EXIF. + # Wait, line 377 `with PILImage.open(image_file.path) as img:` IS handling non-JPEG logic if TurboJPEG fails? + # No, this is inside `if is_jpeg`. + + # So inside ICC mode, failure fallback: + # We use decode_jpeg_rgb/resized (TurboJPEG) OR generic Pillow. + # If generic pillow (lines 373-383), it was `with PILImage.open... as img`. + # But that loop closed. + + # Optimization: we can't easily reuse the object unless we restructure. + # BUT, for the "Unified" block at the end, we can pass `exif_obj` if we have it. 
+ pass # Memory Optimization: Avoid explicit copy buffer = np.ascontiguousarray(buffer) @@ -473,7 +497,7 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, index, decoder, t_after_fallback_read - t_before_fallback_read, t_after_fallback_decode - t_after_fallback_read, t_after_copy - t_after_fallback_decode, - t_after_copy - t_start, w, h) + t_after_copy - t_start, buffer.shape[1], buffer.shape[0]) else: # Fall back to standard decode if ICC profile not available log.warning("ICC mode selected but no monitor profile available, using standard decode") @@ -495,7 +519,34 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, return None t_after_decode = time.perf_counter() - h, w, _ = buffer.shape + # EXIF orientation application + # We have 'img' (PIL Image) open if we used the fallback path, but wait - + # in this block (lines 482-501) we MIGHT use decode_jpeg_resized which doesn't give us a PIL Image. + # Or we used Generic Pillow open (lines 494-496) where 'img' was capable. + # Actually, if we used decode_jpeg_XXX, we don't have a PIL object handy unless we open one. + # But optimization: if we used Pillow fallback, we might have it? + # This block is "Standard decode path (Option A or no color management) FOR JPEG FALLBACK" + + # BUT WAIT: logic above: if TurboJPEG worked on JPEG, it returned buffer. + # If TurboJPEG failed, it fell back to Pillow. + + # Actually, looking at the logic: + # 1. ICC Mode -> TurboJPEG (if success, we have buffer) -> applyTransform -> buffer + # 2. ICC Mode -> Fallback Pillow (if success, we have buffer) + + # The `apply_saturation_compensation` and `apply_exif_orientation` are effectively "Post Processing". + # In previous code "Unified EXIF Orientation Application" was doing a fresh open. + + # We should move the EXIF application UP into the scopes where we might have the PIL object. 
+ + # For this specific block (ICC Fallback or TurboJPEG): + # In TurboJPEG case: we have 'img' (PIL Image from buffer). Does it have EXIF? No, it was created from raw bytes. + # We did open 'orig' to get ICC profile. We could have gotten EXIF combined? + # The TurboJPEG path opens 'orig' on line 397 (with PILImage.open(image_file.path) as orig). + # We should get EXIF there! + + # Let's fix the TurboJPEG path first (Lines 345+): + # We open 'orig' to read ICC (line 397). We should ALSO read EXIF there. # Memory Optimization: Avoid explicit copy buffer = np.ascontiguousarray(buffer) @@ -510,7 +561,7 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, log.info("Standard decode timing (no ICC profile) for index %d (%s): read=%.3fs, decode=%.3fs, copy=%.3fs, total=%.3fs, size=%dx%d", index, decoder, t_after_read - t_before_read, t_after_decode - t_after_read, t_after_copy - t_after_decode, - t_after_copy - t_start, w, h) + t_after_copy - t_start, buffer.shape[1], buffer.shape[0]) else: # Standard decode path (Option A or no color management) @@ -547,7 +598,7 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, return None t_after_decode = time.perf_counter() - h, w, _ = buffer.shape + # EXIF orientation correction moved to post-decode block # Memory Optimization: Avoid explicit copy buffer = np.ascontiguousarray(buffer) @@ -556,13 +607,38 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, t_after_copy = time.perf_counter() + # Unified EXIF Orientation Application + if buffer is not None: + pre_h, pre_w = buffer.shape[:2] + try: + # Optimization: Use pre-read EXIF object if available (ICC path) + # For non-ICC path, we might still need to open it. 
+ if exif_obj is not None: + buffer = apply_exif_orientation(buffer, image_file.path, exif=exif_obj) + else: + # Fallback to opening (Non-ICC path or where we didn't capture it) + with PILImage.open(image_file.path) as img: + buffer = apply_exif_orientation(buffer, image_file.path, exif=img.getexif()) + except Exception as e: + log.warning("Failed to apply EXIF orientation for %s: %s", image_file.path, e) + + # Always re-establish these no matter what happened + h, w = buffer.shape[:2] + buffer = np.ascontiguousarray(buffer) + bytes_per_line = buffer.strides[0] + mv = memoryview(buffer).cast("B") + t_after_orient = time.perf_counter() + + if self.debug and (w != pre_w or h != pre_h): + log.info("Applied EXIF orientation for index %d: %dx%d -> %dx%d", index, pre_w, pre_h, w, h) + # Apply saturation compensation if enabled if color_mode == "saturation": try: factor = float(config.get('color', 'saturation_factor', fallback="1.0")) # Ensure buffer is contiguous and create a 1D view for saturation compensation - # Note: buffer is already made contiguous (np.ascontiguousarray) in the decode blocks above + # Note: buffer is already made contiguous (np.ascontiguousarray) in the decode blocks above or orientation block arr = buffer.ravel() # Verify shape expectations diff --git a/faststack/qml/ImageEditorDialog.qml b/faststack/qml/ImageEditorDialog.qml index 03fb8fb..4f43c22 100644 --- a/faststack/qml/ImageEditorDialog.qml +++ b/faststack/qml/ImageEditorDialog.qml @@ -152,7 +152,7 @@ Window { // --- Histogram Group --- RowLayout { Layout.fillWidth: true - Layout.preferredHeight: 120 + Layout.preferredHeight: 140 Layout.topMargin: 5 spacing: 5 @@ -160,12 +160,12 @@ Window { Layout.fillWidth: true Layout.fillHeight: true - channelName: "R" + channelName: "Red" channelColor: "#e15050" gridLineColor: imageEditorDialog.controlBorder dangerColor: "#40ff0000" textColor: imageEditorDialog.textColor - minimal: true + minimal: false histogramData: uiState && uiState.histogramData ? 
(uiState.histogramData["r"] || []) : [] clipCount: uiState && uiState.histogramData ? (uiState.histogramData["r_clip"] || 0) : 0 @@ -176,12 +176,12 @@ Window { Layout.fillWidth: true Layout.fillHeight: true - channelName: "G" + channelName: "Green" channelColor: "#50e150" gridLineColor: imageEditorDialog.controlBorder dangerColor: "#40ff0000" textColor: imageEditorDialog.textColor - minimal: true + minimal: false histogramData: uiState && uiState.histogramData ? (uiState.histogramData["g"] || []) : [] clipCount: uiState && uiState.histogramData ? (uiState.histogramData["g_clip"] || 0) : 0 @@ -192,18 +192,41 @@ Window { Layout.fillWidth: true Layout.fillHeight: true - channelName: "B" + channelName: "Blue" channelColor: "#5050e1" gridLineColor: imageEditorDialog.controlBorder dangerColor: "#40ff0000" textColor: imageEditorDialog.textColor - minimal: true + minimal: false histogramData: uiState && uiState.histogramData ? (uiState.histogramData["b"] || []) : [] clipCount: uiState && uiState.histogramData ? (uiState.histogramData["b_clip"] || 0) : 0 preClipCount: uiState && uiState.histogramData ? (uiState.histogramData["b_preclip"] || 0) : 0 } } + + // Highlight State Indicators + RowLayout { + Layout.fillWidth: true + Layout.topMargin: 2 + spacing: 15 + + Label { + visible: (uiState && uiState.highlightState && uiState.highlightState.headroom_pct > 0.001) + text: "📈 Headroom: " + (uiState && uiState.highlightState ? (uiState.highlightState.headroom_pct * 100).toFixed(1) : "0.0") + "%" + font.pixelSize: 10 + color: "#50e150" // Green - good, recoverable + opacity: 0.8 + } + Label { + visible: (uiState && uiState.highlightState && uiState.highlightState.source_clipped_pct > 0.01) + text: "⚠ Clipped: " + (uiState && uiState.highlightState ? (uiState.highlightState.source_clipped_pct * 100).toFixed(1) : "0.0") + "%" + font.pixelSize: 10 + color: uiState && uiState.highlightState && uiState.highlightState.source_clipped_pct > 0.05 ? 
"#e15050" : "#e1a050" // Red if severe, orange if mild + opacity: 0.8 + } + Item { Layout.fillWidth: true } // Spacer + } } // --- RIGHT COLUMN --- @@ -460,71 +483,85 @@ Window { } } - onMoved: { - _pendingValue = value - if (!sendTimer.running) sendTimer.start() - } - - property double lastPressTime: 0 - property double lastPressValue: 0 property bool isResetting: false + property double _lastPressTime: 0 + function triggerReset() { + isResetting = true + controller.set_edit_parameter(model.key, 0.0) + slider.value = 0.0 + _pendingValue = 0.0 + slider._lastSentValue = 0.0 + imageEditorDialog.updatePulse++ + } + + // Failsafe timer to prevent sticky isResetting state + Timer { + id: resetFailsafe + interval: 200 + repeat: false + onTriggered: { + if (slider.isResetting) { + console.warn("Slider reset stuck, forcing release") + slider.isResetting = false + } + } + } + onPressedChanged: { if (pressed) { var now = Date.now() - var range = slider.to - slider.from - var diff = Math.abs(value - lastPressValue) + // Double click logic with value delta guard + // Only reset if clicked recently AND value hasn't drifted far (meaning it wasn't a drag) + var timeDiff = now - _lastPressTime + var valDiff = Math.abs(value - _pendingValue) + var range = to - from - // Double click detection: <500ms time diff AND <5% value diff - // This prevents false positives when dragging quickly - if (now - lastPressTime < 500 && diff < (range * 0.05)) { - isResetting = true - controller.set_edit_parameter(model.key, 0.0) - - // CRITICAL FIX: Force the slider to release 'pressed' state - // to stop tracking the mouse position. - // We must wait a tick to ensure the internal state clears. 
- slider.enabled = false - resetTimer.start() - - _pendingValue = 0.0 - slider._lastSentValue = 0.0 - imageEditorDialog.updatePulse++ - isResetting = false - return + if (timeDiff < 600 && valDiff < (range * 0.05)) { + triggerReset() + _lastPressTime = 0 + resetFailsafe.start() // Start failsafe + } else { + _lastPressTime = now } - lastPressTime = now - lastPressValue = value - imageEditorDialog.slidersPressedCount++ - _pendingValue = value - if (!sendTimer.running) sendTimer.start() + + // Initialize drag logic only if not resetting + if (!isResetting) { + _pendingValue = value + if (!sendTimer.running) sendTimer.start() + } } else { - // Guard: If we are resetting, don't process the release logic - if (isResetting) return - imageEditorDialog.slidersPressedCount-- - // Stop repeating sends, then send final value immediately - sendTimer.stop() - var sendValue = isReversed ? -value : value - controller.set_edit_parameter(model.key, sendValue / maxVal) + if (isResetting) { + isResetting = false + // Force backend to 0 on release + controller.set_edit_parameter(model.key, 0.0) + } else { + // Stop repeating sends, then send final value immediately + sendTimer.stop() + var sendValue = isReversed ? -value : value + controller.set_edit_parameter(model.key, sendValue / maxVal) + } - // Don't update histogram here if we are just starting to drag? - // Actually release is end of drag. 
if (controller) controller.update_histogram() } } - Timer { - id: resetTimer - interval: 50 - onTriggered: { - slider.enabled = true - // Ensure value is synced - slider.value = slider.backendValue - } + // Force 0 while resetting - this overrides the slider's internal mouse handling + Binding { + target: slider + property: "value" + value: 0 + when: slider.isResetting + } + + onMoved: { + if (isResetting) return + _pendingValue = value + if (!sendTimer.running) sendTimer.start() } onBackendValueChanged: { @@ -590,7 +627,7 @@ Window { Behavior on color { ColorAnimation { duration: 150 } } HoverHandler { - id: hoverHandler + id: hoverHandler } } } diff --git a/faststack/qml/SingleChannelHistogram.qml b/faststack/qml/SingleChannelHistogram.qml index 6d4683a..ccf3deb 100644 --- a/faststack/qml/SingleChannelHistogram.qml +++ b/faststack/qml/SingleChannelHistogram.qml @@ -81,13 +81,43 @@ Item { ctx.moveTo(0, canvas.height) var len = root.histogramData.length - for (i = 0; i < len; i++) { - var x = len > 1 ? (i / (len - 1)) * canvas.width : canvas.width / 2 - var y = canvas.height - (root.histogramData[i] / maxVal) * canvas.height - ctx.lineTo(x, y) + var width = canvas.width + var height = canvas.height + + if (width >= len) { + // Standard drawing for sufficient width (upscaling or 1:1) + for (i = 0; i < len; i++) { + var x = len > 1 ? 
(i / (len - 1)) * width : width / 2 + var y = height - (root.histogramData[i] / maxVal) * height + ctx.lineTo(x, y) + } + } else { + // Downsampling with Max Pooling for small widths + // This creates a "skyline" envelope, preserving peaks and preventing aliasing spikes + for (var x = 0; x < width; x++) { + // Determine which bins fall into this pixel column + var binStart = Math.floor((x / width) * len) + var binEnd = Math.ceil(((x + 1) / width) * len) + + // Clamp + binStart = Math.max(0, Math.min(len - 1, binStart)) + binEnd = Math.max(binStart + 1, Math.min(len, binEnd)) + + // Find max value in this range + var localMax = 0 + for (var b = binStart; b < binEnd; b++) { + // Boundary check just in case + if (b < len) { + localMax = Math.max(localMax, root.histogramData[b]) + } + } + + var y = height - (localMax / maxVal) * height + ctx.lineTo(x, y) + } } - ctx.lineTo(canvas.width, canvas.height) + ctx.lineTo(width, height) ctx.closePath() // Create gradient fill diff --git a/faststack/result.txt b/faststack/result.txt new file mode 100644 index 0000000000000000000000000000000000000000..2d2335ccbdf3ac3228debc4bb590d1f91587dc1d GIT binary patch literal 1548 zcmd6nPjAye5XIjaiSOVON(2pULd>57QWb&dr4%A@L8wX{hg8H#WIKgi_~^jz?b;-f zK!sbi@~&s+&Fq`m+4=cnt*vU!wbD|FUhuuuS~=24ACQ)4YS#O@r)#7NX`%%q*Hn+t z%o^F+&kgd5y-Z(_YU~qTU~8L|z9My0px-uF?^bwAbfQ#8x&rGHr|hlyo2wg6KMlK! 
zz#@m~378D+L}if7@F9DL5!HAx=Sq*6Ba>!07o!kM&b_SQzSS8q&G^keR z|52Yp1-{2)aey@QT(7vFjMY^X?{hz9=jXM2mkToUj=y;SF}D5uOz~7QoWnDsj_>5W zsT1>S{oZie2wV{=dI=K8yxx%MikI#V;fhH0L_-bqob@Sds%N~xKH6jU zhRnyz&h*Z*|DTo@I;HlS6ot=P@Ex*Ll!`995TvxwO+j6Xl+a9TplM4w72Wvg>UX9cY8FBnlHA-m zKX=ajd~furs5h;(QlUOiPv<(*1KmPP)${}XTmEL?O10_Fm@9f+egwOPxz!EteLYQj zK_6(%HAQ8u23`rGm-vP5bdQ!bkF0=G3nIddY0rLNCoJonf-~kntoy}b*>{lU4O%dL zOJ-wonNu6{7n?cx>qNaj0UPzqc>@VJiJGcPiD#^F7o4~cF5=?s7rI2@MX$O(C_TYz zl3L~k4asOme*>>(Z`GN}^ILnQ9XGmU`bVlV-EN*^n9}Z^r{$Ct+~XO1XSLpO8sO5f SO1vuk^DOr9y*VYUWVUC7`vmDxO}QEQo>?Wk@#cCX<=H-%`$0 zf+TIeU(5*s3xPs!F}&G^$J;-ZhNq(__k*8FSX8B#gw^b{l6dWyB(~+XV{*e!Z+V}Y zJ>7NcAR|R67u71>^YQMSIdk6mIC=MWYPUAG3!B*3GCR=UOPgA*-jTglZ>*WQ-k;hh zHd3!p@6@jJH-D>OA0*rvX0`U+_J z_XZEU+5`vYVX#Qa$_FiveBZgw7F*ofvGzYu8%!OWzDaMm#QY0k=k~L+>Rk3q^8>eQ z-#G_v`yS>cJeb75u;YiEsblN>Iner?OK@~YI=r#pOHE=AFu9|1zIJVMYl#8p>chY| z^T5Rl*oA)E7S8Ki`$;YQ0@lEnz9iJOnQ3+GcdVK4vlnjHk=+P=RN6CH^x8dS>{8N~ zu|B_#)k7bvUd@jdEBU%VeV|!k4M7^=? z(Rm~}tbs*(Is-EPg=3@AspbYkOng476V`kTwxWi5ddF7lEl}Tm-E!D->edK$z%Ax~ z^m8ZQ(_hRpz#Qm1nlu!}psZpFN0N)@!Lk=qQ(T+&5G8 z*mI9tiZ9d-cm~*>-l>M13PGir>wV*#iHZWx1e+ukVry#GzsEnQDZt*XLr+}t-0KyX z{qkS1m26@}r7?2zjWiCfzxJK58c`At!A~NR5S5S~UfQzXRPnUc8)$SQ3uQi6ICEwm zg#!6pcZ8OGulbQIMHOGSS;S2ATxvBUS0?sgYvL=OICZ}rYBc0^?9hHydt1_AG`vO2 zkhIhodI$Ezb!_XN5w&?kw~$R9^w|Ul<{_}ic&za~aSI&_<%ZN>$K%9du=V>?ME}*u z|IPc15V60KyUO8;zewfgw#uOD?#6to%qt!oizAUmn$9LT7T&qaGWb(v25u7Mz~E6J zQv^(Fr1u}T?m?S%iFal1 zdJCEL{(Wt5>3qn1bM@Bc#^A}DGGC5`$+T~7qI^Zwvd}4|xRmyhX^sk!YAI?r{;R@W zoEkCezNpQ*R_8=w#aP&`9yo!fbzZD8qpSGB9eC<^uygMfIv;NRPo?3d^g!Cub-461 z7`oh^X4!Sb^~Rm}jOR4%&r$~>HS+oUKV#^+weHh_@s8L{w%ONTiaq#yL?aMiYFE4Z d-PP{t^VgQG;eSgWNDjhF=u3Zk((lYT*)7R~ z5Qdo@@0@$@nRD;>{Itq3V99i$4dW79&CGOz z_avY)-GV4JV;u+lDnz(#Wih77`B|UTCx&@Ae&W(2`;KWwj=ffwvktL0Sk2YwLJPR% z{OW=-?~HG$DK_s5Jh%9H8b9Xr;nhiFt?`TaEZ{Tlk(1GPn(7bUxqdNs(U zzkIJVZ2wm>s?liCC=pdocW&&iWmKoFLKSp4R)biTFM*+}F=5vWBeDFeue;TiuJt|i>4d7;p78G1oH`r#@y(akjvrs0 
zUtH8Mc6z&4Qf|j@_sxL(HT~h9ndy{l_n`}$=;zqJ;j;>Cb2@fMwOr>KFaH}P&i1h; zSM=_ZvDY0t|0nnj#QTgmZ}9GYv%Kb}NfYb2&?6a!oH=H~ui)VKJw3b{d&V=b{{Wl7 B#lZjo literal 0 HcmV?d00001 diff --git a/faststack/tests/test_cache_invalidation.py b/faststack/tests/test_cache_invalidation.py new file mode 100644 index 0000000..8378349 --- /dev/null +++ b/faststack/tests/test_cache_invalidation.py @@ -0,0 +1,75 @@ + +import sys +import os +import time +import shutil +from pathlib import Path +import numpy as np +from PIL import Image + +# Add parent directory to path +sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) + +from faststack.imaging.editor import ImageEditor + +def test_cache_stability(): + """Verify that cache hash remains stable when reloading the same unmodified file.""" + + # Setup dummy image + test_dir = Path("tests/dummy_images_cache") + test_dir.mkdir(parents=True, exist_ok=True) + + img_path = test_dir / "test_cache.jpg" + + # Create a dummy image + arr = np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8) + Image.fromarray(arr).save(img_path) + + editor = ImageEditor() + + # 1. Load image and get hash + editor.load_image(str(img_path)) + hash1 = editor._get_upstream_edits_hash(editor.current_edits) + + # 2. Reload same image (simulate switching back and forth) + # Even if we create a new editor or reload, if the file hasn't changed, + # the ideal cache key for *content-dependent* heavy ops should be stable. + # However, the current implementation uses id(self.float_image), so we expect this to change + # if we reload, because float_image will be a new object. 
+ + editor.load_image(str(img_path)) + hash2 = editor._get_upstream_edits_hash(editor.current_edits) + + print(f"Hash 1: {hash1}") + print(f"Hash 2: {hash2}") + + # Current behavior: Hashes DIFFERENT because id() changed + # Desired behavior: Hashes SAME because content/mtime is same + + if hash1 == hash2: + print("PASS: Hash is stable across reloads.") + else: + print("FAIL: Hash changed across reloads (unnecessary invalidation).") + + # 3. Touch file to update mtime + time.sleep(1.1) # Ensure mtime changes (some systems have 1s resolution) + img_path.touch() + + editor.load_image(str(img_path)) + hash3 = editor._get_upstream_edits_hash(editor.current_edits) + + print(f"Hash 3 (after touch): {hash3}") + + if hash3 != hash2: + print("PASS: Hash changed after mtime update.") + else: + print("FAIL: Hash did NOT change after mtime update.") + + # Cleanup + try: + shutil.rmtree(test_dir) + except: + pass + +if __name__ == "__main__": + test_cache_stability() diff --git a/faststack/tests/test_editor.py b/faststack/tests/test_editor.py index bf4e8b5..944c68e 100644 --- a/faststack/tests/test_editor.py +++ b/faststack/tests/test_editor.py @@ -57,6 +57,14 @@ def test_texture_edit(self): import tempfile from pathlib import Path import shutil + try: + import cv2 + except ImportError: + cv2 = None + + if cv2 is None: + self.skipTest("OpenCV not installed, skipping texture test") + import numpy as np tmp_dir = tempfile.mkdtemp() diff --git a/faststack/tests/test_editor_error_handling.py b/faststack/tests/test_editor_error_handling.py new file mode 100644 index 0000000..25a08c8 --- /dev/null +++ b/faststack/tests/test_editor_error_handling.py @@ -0,0 +1,60 @@ + +import sys +import unittest +from unittest.mock import MagicMock, patch, mock_open +import numpy as np +from pathlib import Path +import tempfile +import os + +# We need to mock cv2 before importing editor if it's not already imported, +# but since tests run in the same process, we just rely on patching. 
+ +class TestEditorErrorHandling(unittest.TestCase): + """Test that ImageEditor raises exceptions when loading fails completely.""" + + def test_load_image_raises_exception_on_failure(self): + """Ensure load_image raises an exception when file opening fails completely.""" + from faststack.imaging.editor import ImageEditor + + editor = ImageEditor() + + # Patch Image.open to raise an exception + with patch('PIL.Image.open', side_effect=OSError("Mocked file error")): + # We also need to ensure cv2 doesn't rescue it. + # If cv2 exists, it might try to load. + # Let's mock cv2.imread to return None so it falls back to PIL, which fails. + + with patch.dict(sys.modules, {'cv2': MagicMock()}): + sys.modules['cv2'].imread.return_value = None + + with self.assertRaises(OSError) as cm: + editor.load_image("non_existent_file.jpg") + + self.assertIn("Mocked file error", str(cm.exception)) + + def test_save_image_raises_exception_on_failure(self): + """Ensure save_image raises an exception when saving fails.""" + from faststack.imaging.editor import ImageEditor + + editor = ImageEditor() + + # Setup a fake state so save_image attempts to run + editor.float_image = np.zeros((10, 10, 3), dtype=np.float32) + editor.current_filepath = Path("fake_path.jpg") + + # Patch create_backup_file to succeed + with patch('faststack.imaging.editor.create_backup_file', return_value=Path("backup.jpg")): + # Patch Image.fromarray to return a mock that fails to save + mock_img = MagicMock() + mock_img.save.side_effect = PermissionError("Mocked save error") + + with patch('PIL.Image.fromarray', return_value=mock_img): + with self.assertRaises(PermissionError) as cm: + # Attempt to save + editor.save_image() + + self.assertIn("Mocked save error", str(cm.exception)) + +if __name__ == '__main__': + unittest.main() diff --git a/faststack/tests/test_exif_compat.py b/faststack/tests/test_exif_compat.py index edc26ba..252a9ca 100644 --- a/faststack/tests/test_exif_compat.py +++ 
b/faststack/tests/test_exif_compat.py @@ -1,74 +1,140 @@ - -import unittest -from unittest.mock import MagicMock, patch -from pathlib import Path -from PIL import Image -import numpy as np -import io - -# Adjust path to import faststack -import sys -import os -from pathlib import Path -sys.path.append(str(Path(__file__).parents[2])) - -from faststack.imaging.editor import ImageEditor - -class TestExifCompat(unittest.TestCase): - def setUp(self): - self.editor = ImageEditor() - # Create a dummy image for testing - self.editor.original_image = Image.new('RGB', (10, 10)) - self.editor._source_exif_bytes = b"dummy exif bytes" - - def test_missing_image_exif_attribute(self): - """Test fallback when PIL.Image.Exif is missing.""" - # Patching PIL.Image.Exif to raise AttributeError on access simulates it being missing - with patch('PIL.Image.Exif', side_effect=AttributeError): - # Also mock getexif to verify it's the fallback - self.editor.original_image.getexif = MagicMock(return_value=None) - self.editor._get_sanitized_exif_bytes() - self.editor.original_image.getexif.assert_called_once() - - def test_missing_load_method(self): - """Test fallback when Exif object has no load() method.""" - mock_exif_instance = MagicMock() - del mock_exif_instance.load - - with patch('PIL.Image.Exif', return_value=mock_exif_instance): - # Should fall back to original_image.getexif() - self.editor.original_image.getexif = MagicMock(return_value=None) - self.editor._get_sanitized_exif_bytes() - self.editor.original_image.getexif.assert_called_once() - - def test_missing_tobytes_method(self): - """Test graceful failure when Exif object has no tobytes() method.""" - mock_exif_instance = MagicMock() - if hasattr(mock_exif_instance, 'tobytes'): - del mock_exif_instance.tobytes - - # Mocking getexif to return this broken instance - self.editor.original_image.getexif = MagicMock(return_value=mock_exif_instance) - self.editor._source_exif_bytes = None - - res = 
self.editor._get_sanitized_exif_bytes() - self.assertIsNone(res, "Should return None if tobytes() is missing") - - def test_missing_exiftags_base(self): - """Test fallback when ExifTags.Base is missing (older Pillow).""" - # Patch PIL.ExifTags to be a mock that does NOT have 'Base' - # This will cause ExifTags.Base to raise AttributeError - with patch('PIL.ExifTags', spec=[]): - # Use a mock that doesn't restrict attributes, but has tobytes - mock_exif = MagicMock() - mock_exif.tobytes = MagicMock(return_value=b"serialized exif") - self.editor.original_image.getexif = MagicMock(return_value=mock_exif) - self.editor._source_exif_bytes = None - - res = self.editor._get_sanitized_exif_bytes() - # Check if it tried to set 0x0112 (the fallback) - mock_exif.__setitem__.assert_called_with(0x0112, 1) - self.assertEqual(res, b"serialized exif") - -if __name__ == '__main__': - unittest.main() +import unittest +from unittest.mock import MagicMock, patch +import sys +import os +from pathlib import Path +from PIL import Image, ExifTags +import numpy as np + +# Ensure project root is in sys.path +project_root = str(Path(__file__).parents[1]) +if project_root not in sys.path: + sys.path.insert(0, project_root) + +# Pre-mock modules that might cause issues or aren't needed for this test +sys.modules['cv2'] = MagicMock() +# Mock faststack.models since it's used by editor.py +mock_models = MagicMock() +sys.modules['faststack.models'] = mock_models + +from faststack.imaging.editor import ImageEditor, sanitize_exif_orientation + +class TestExifCompat(unittest.TestCase): + def setUp(self): + self.editor = ImageEditor() + # Create a dummy image for testing + self.editor.original_image = Image.new('RGB', (10, 10)) + self.editor._source_exif_bytes = b"dummy exif bytes" + + def test_missing_image_exif_attribute(self): + """Test fallback when PIL.Image.Exif is missing.""" + # Patching PIL.Image.Exif to raise AttributeError on access simulates it being missing + with patch('PIL.Image.Exif', 
side_effect=AttributeError): + # Also mock getexif to verify it's the fallback + self.editor.original_image.getexif = MagicMock(return_value=None) + self.editor._get_sanitized_exif_bytes() + self.editor.original_image.getexif.assert_called_once() + + def test_missing_load_method(self): + """Test fallback when Exif object has no load() method.""" + mock_exif_instance = MagicMock() + del mock_exif_instance.load + + with patch('PIL.Image.Exif', return_value=mock_exif_instance): + # Should fall back to original_image.getexif() + self.editor.original_image.getexif = MagicMock(return_value=None) + self.editor._get_sanitized_exif_bytes() + self.editor.original_image.getexif.assert_called_once() + + def test_missing_tobytes_method(self): + """Test graceful failure when Exif object has no tobytes() method.""" + mock_exif_instance = MagicMock() + if hasattr(mock_exif_instance, 'tobytes'): + del mock_exif_instance.tobytes + + # Mocking getexif to return this broken instance + self.editor.original_image.getexif = MagicMock(return_value=mock_exif_instance) + # Set source bytes to verify they are NOT used as fallback (safer policy) + self.editor._source_exif_bytes = b"fallback bytes" + + res = self.editor._get_sanitized_exif_bytes() + self.assertIsNone(res, "Should return None if tobytes() is missing to prevent rotation issues") + + def test_tobytes_failure_drops_exif(self): + """Verify that failure in tobytes() now returns None (drops EXIF).""" + mock_exif = MagicMock() + mock_exif.tobytes.side_effect = Exception("failed to serialize") + + # Patch Image.Exif to return our mock + with patch('PIL.Image.Exif', return_value=mock_exif): + # Set source bytes + self.editor._source_exif_bytes = b"fallback bytes" + + res = self.editor._get_sanitized_exif_bytes() + self.assertIsNone(res, "Should return None if tobytes() fails to prevent rotation issues") + + def test_missing_exiftags_base(self): + """Test fallback when ExifTags.Base is missing (older Pillow).""" + # Patch PIL.ExifTags to 
be a mock that does NOT have 'Base' + # This will cause ExifTags.Base to raise AttributeError + with patch('PIL.ExifTags', spec=[]): + # Use a mock that doesn't restrict attributes, but has tobytes + mock_exif = MagicMock() + mock_exif.tobytes = MagicMock(return_value=b"serialized exif") + self.editor.original_image.getexif = MagicMock(return_value=mock_exif) + self.editor._source_exif_bytes = None + + res = self.editor._get_sanitized_exif_bytes() + # Check if it tried to set 0x0112 (the fallback) + mock_exif.__setitem__.assert_called_with(0x0112, 1) + self.assertEqual(res, b"serialized exif") + + def test_sanitize_exif_orientation_helper(self): + """Test the standalone sanitize_exif_orientation helper.""" + # 1. Valid EXIF with Orientation=6 + img = Image.new('RGB', (10, 10)) + exif = img.getexif() + # Use fallback if Base is not available in test env (just in case) + orientation_tag = getattr(ExifTags.Base, 'Orientation', 0x0112) + exif[orientation_tag] = 6 + exif_bytes = exif.tobytes() + + sanitized = sanitize_exif_orientation(exif_bytes) + self.assertIsNotNone(sanitized) + + # Verify it's now 1 + loaded_exif = Image.Exif() + loaded_exif.load(sanitized) + self.assertEqual(loaded_exif[orientation_tag], 1) + + # 2. None input + self.assertIsNone(sanitize_exif_orientation(None)) + + # 3. 
class TestExifDisplayOrientation(unittest.TestCase):
    """Exercise apply_exif_orientation() against JPEGs tagged with each EXIF orientation."""

    def setUp(self):
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def _create_test_image(self, filename: str, orientation: int) -> Path:
        """Write a 100x50 JPEG tagged with the given EXIF orientation.

        The left half is red and the right half is blue, so every rotation
        or mirror can be verified afterwards by sampling a corner pixel.
        """
        path = Path(self.test_dir) / filename

        # Red canvas with a blue rectangle pasted over the right half (x = 50..99).
        img = Image.new('RGB', (100, 50), color='red')
        img.paste(Image.new('RGB', (50, 50), color=(0, 0, 255)), (50, 0))

        exif = img.getexif()
        exif[ExifTags.Base.Orientation] = orientation

        img.save(path, format='JPEG', exif=exif.tobytes())
        return path

    def _decode(self, path: Path) -> np.ndarray:
        """Decode a file to an RGB array without applying any orientation."""
        with Image.open(path) as img:
            return np.array(img.convert("RGB"))

    def test_orientation_1_no_change(self):
        """Orientation 1 (normal) should return unchanged buffer."""
        path = self._create_test_image("test_ori1.jpg", 1)
        raw = self._decode(path)

        result = apply_exif_orientation(raw.copy(), path)

        self.assertEqual(result.shape, raw.shape)
        np.testing.assert_array_equal(result, raw)

    def test_orientation_3_rotate_180(self):
        """Orientation 3 should rotate 180 degrees."""
        path = self._create_test_image("test_ori3.jpg", 3)
        raw = self._decode(path)

        result = apply_exif_orientation(raw.copy(), path)

        # A 180-degree rotation keeps the (50, 100, 3) shape.
        self.assertEqual(result.shape, raw.shape)

        # The bottom-right (blue) corner lands at the top-left.
        self.assertGreater(result[0, 0, 2], 200)  # strong blue
        self.assertLess(result[0, 0, 0], 50)      # little red

    def test_orientation_6_rotate_90_cw(self):
        """Orientation 6 should rotate 90 degrees clockwise (270 CCW)."""
        path = self._create_test_image("test_ori6.jpg", 6)
        raw = self._decode(path)

        result = apply_exif_orientation(raw.copy(), path)

        # Width and height swap: 100x50 becomes 50x100 -> array shape (100, 50, 3).
        self.assertEqual(result.shape, (100, 50, 3))

        # After rotating [red | blue] 90 CW, red occupies the top edge.
        self.assertGreater(result[0, 0, 0], 200)  # strong red
        self.assertLess(result[0, 0, 2], 50)      # little blue

    def test_orientation_8_rotate_90_ccw(self):
        """Orientation 8 should rotate 90 degrees counter-clockwise."""
        path = self._create_test_image("test_ori8.jpg", 8)
        raw = self._decode(path)

        result = apply_exif_orientation(raw.copy(), path)

        # Width and height swap: 100x50 becomes 50x100 -> array shape (100, 50, 3).
        self.assertEqual(result.shape, (100, 50, 3))

        # After rotating [red | blue] 90 CCW, blue occupies the top edge.
        self.assertGreater(result[0, 0, 2], 200)  # strong blue
        self.assertLess(result[0, 0, 0], 50)      # little red

    def test_orientation_2_mirror_horizontal(self):
        """Orientation 2 should mirror horizontally."""
        path = self._create_test_image("test_ori2.jpg", 2)
        raw = self._decode(path)

        result = apply_exif_orientation(raw.copy(), path)

        # Mirroring preserves the shape.
        self.assertEqual(result.shape, raw.shape)

        # Left and right halves trade places, so the top-left turns blue.
        self.assertGreater(result[0, 0, 2], 200)  # strong blue
        self.assertLess(result[0, 0, 0], 50)      # little red

    def test_no_exif_returns_unchanged(self):
        """Image without EXIF should return unchanged buffer."""
        path = Path(self.test_dir) / "no_exif.jpg"
        Image.new('RGB', (100, 50), color='green').save(path, format='JPEG')

        raw = self._decode(path)
        result = apply_exif_orientation(raw.copy(), path)

        np.testing.assert_array_equal(result, raw)

    def test_invalid_path_returns_unchanged(self):
        """Non-existent file should return unchanged buffer."""
        path = Path(self.test_dir) / "nonexistent.jpg"
        dummy = np.zeros((50, 100, 3), dtype=np.uint8)

        result = apply_exif_orientation(dummy.copy(), path)

        np.testing.assert_array_equal(result, dummy)

    def test_orientation_contiguity(self):
        """Verify that the result is always C-contiguous after transformations."""
        # Orientation 6 involves a rotation, which typically yields a
        # non-contiguous view unless the implementation re-packs the buffer.
        path = self._create_test_image("test_contiguity.jpg", 6)
        raw = self._decode(path)

        self.assertTrue(raw.flags['C_CONTIGUOUS'])

        result = apply_exif_orientation(raw, path)

        self.assertTrue(result.flags['C_CONTIGUOUS'], "Result of apply_exif_orientation should be C-contiguous")
self.editor = ImageEditor() + self.editor = self.ImageEditorClass() def tearDown(self): + self.modules_patcher.stop() shutil.rmtree(self.test_dir) + # Ensure we don't pollute other tests with our mocked-import version + sys.modules.pop('faststack.imaging.editor', None) def _create_test_image(self, filename, orientation=1): """Creates a dummy JPEG with specific EXIF orientation.""" @@ -67,8 +84,11 @@ def test_orientation_sanitization_on_rotation(self): # Double rotation check: if we reload this image, it should look correct # without any further rotation needed. - # We can check dimensions: 100x50 rotated 90 -> 50x100 - self.assertEqual(res.size, (50, 100), f"Dimensions should be swapped for start {start_ori}") + # Start 6 (Vertical 50x100) -> Baked (50x100) -> Rotate 90 -> 100x50 + # Start 8 (Vertical 50x100) -> Baked (50x100) -> Rotate 90 -> 100x50 + # Start 3 (Horizontal 100x50) -> Baked (100x50) -> Rotate 90 -> 50x100 + expected_size = (50, 100) if start_ori == 3 else (100, 50) + self.assertEqual(res.size, expected_size, f"Dimensions check failed for start {start_ori} with rotation") def test_orientation_preserved_no_rotation(self): """Verify Orientation is PRESERVED if we do NOT rotate.""" @@ -90,8 +110,17 @@ def test_orientation_preserved_no_rotation(self): exif = res.getexif() orientation = exif.get(ExifTags.Base.Orientation) - # Should be preserved - self.assertEqual(orientation, start_ori, f"Orientation {start_ori} should be preserved if no geometric transform") + # Should be sanitized to 1 because editor now ALWAYS bakes orientation + self.assertEqual(orientation, 1, f"Orientation should be sanitized to 1, got {orientation}") + + # Verify pixels are rotated if necessary (Start 5-8 involve 90 deg rotation or swap) + # Start 3 (180) -> Same dims + # Start 6 (90 CW) -> Swapped dims + # Start 8 (90 CCW) -> Swapped dims + if start_ori in [5, 6, 7, 8]: + self.assertEqual(res.size, (50, 100), f"Dimensions should be swapped for start {start_ori} due to baking") + 
class TestFallbackBlur(unittest.TestCase):
    """Exercise the pure-Python blur fallback used when OpenCV is unavailable."""

    def test_fallback_blur_logic(self):
        """Test that _gaussian_blur_float works even when cv2 is None"""
        # A 0/1 checkerboard has maximal local contrast, so any real blur
        # must pull its standard deviation down.
        checkerboard = np.zeros((20, 20, 3), dtype=np.float32)
        checkerboard[::2, ::2] = 1.0
        orig_std = np.std(checkerboard)

        # Force the fallback code path by replacing cv2 with None.
        with patch('faststack.imaging.editor.cv2', None):
            self.assertIsNone(editor.cv2)

            blurred = editor._gaussian_blur_float(checkerboard, radius=2.0)

            # Shape and dtype must survive the round trip.
            self.assertEqual(blurred.shape, checkerboard.shape)
            self.assertEqual(blurred.dtype, np.float32)

            new_std = np.std(blurred)
            print(f"Original Std: {orig_std:.4f}, Blurred Std: {new_std:.4f}")

            # Blurring must reduce variance...
            self.assertLess(new_std, orig_std, "Image should be blurred (lower variance)")

            # ...and squeeze the extremes of a 0/1 checkerboard inwards.
            self.assertLess(blurred.max(), 1.0)
            self.assertGreater(blurred.min(), 0.0)
class TestGenerationAwarePreview(unittest.TestCase):
    """Verify ImageProvider serves the cached editor preview only when BOTH the
    image index and the edit generation encoded in the request ID match the
    last rendered preview; on any mismatch it must fall back to the decoded
    image.

    NOTE(review): we cannot inspect the returned QImage's pixels without a GUI
    session, so every test asserts on which buffer was consulted instead:
    serving the preview means ``get_decoded_image`` is never called.
    """

    def setUp(self):
        # Minimal controller double with the attributes the provider reads.
        self.mock_controller = MagicMock()
        self.mock_controller.ui_state = MagicMock()
        self.mock_controller.ui_state.isEditorOpen = True
        self.mock_controller.ui_state.isZoomed = False
        self.mock_controller.current_index = 0

        # Fake cached editor preview (dark buffer)...
        self.mock_preview = MagicMock()
        self.mock_preview.buffer = b'\x00' * 100
        self.mock_preview.width = 10
        self.mock_preview.height = 10
        self.mock_preview.bytes_per_line = 30
        self.mock_preview.format = QImage.Format.Format_RGB888

        # ...and a distinguishable decoded image (light buffer).
        self.mock_decoded = MagicMock()
        self.mock_decoded.buffer = b'\xFF' * 100
        self.mock_decoded.width = 10
        self.mock_decoded.height = 10
        self.mock_decoded.bytes_per_line = 30
        self.mock_decoded.format = QImage.Format.Format_RGB888

        self.mock_controller._last_rendered_preview = self.mock_preview
        self.mock_controller.get_decoded_image.return_value = self.mock_decoded

        self.provider = ImageProvider(self.mock_controller)

    def test_matching_generation(self):
        """Should serve preview when generation matches."""
        self.mock_controller._last_rendered_preview_index = 0
        self.mock_controller._last_rendered_preview_gen = 5

        # BUG FIX: the original issued a throwaway requestImage call (unused
        # `img`) before resetting the mock; a single asserted call suffices.
        self.mock_controller.get_decoded_image.reset_mock()
        self.provider.requestImage("0/5", None, None)

        self.mock_controller.get_decoded_image.assert_not_called()

    def test_mismatched_generation(self):
        """Should fallback to decoded image when generation does not match."""
        # Preview is stale (gen 4) relative to the requested gen 5.
        self.mock_controller._last_rendered_preview_index = 0
        self.mock_controller._last_rendered_preview_gen = 4

        self.mock_controller.get_decoded_image.reset_mock()
        self.provider.requestImage("0/5", None, None)

        self.mock_controller.get_decoded_image.assert_called_with(0)

    def test_mismatched_index(self):
        """Should fallback when index does not match."""
        self.mock_controller._last_rendered_preview_index = 1
        self.mock_controller._last_rendered_preview_gen = 5

        self.mock_controller.get_decoded_image.reset_mock()
        self.provider.requestImage("0/5", None, None)

        self.mock_controller.get_decoded_image.assert_called_with(0)

    def test_no_generation_checking_if_not_provided(self):
        """Legacy IDs without a generation suffix still serve the preview.

        The provider treats a missing generation as a wildcard
        (``gen is None or stored_gen == gen``), so an ID of just "0" matches
        whenever the index matches, regardless of the stored generation.
        """
        self.mock_controller._last_rendered_preview_index = 0
        self.mock_controller._last_rendered_preview_gen = 99

        self.mock_controller.get_decoded_image.reset_mock()
        self.provider.requestImage("0", None, None)

        self.mock_controller.get_decoded_image.assert_not_called()
Reference: If we didn't pass pre-exposure, it WOULD show headroom + state_naive = _analyze_highlight_state(exposed_img, pre_exposure_linear=None) + self.assertGreater(state_naive['headroom_pct'], 0.99, "Naive analysis should show headroom (sanity check)") + + def test_true_headroom_detection(self): + """Verify actual headroom is detected regardless of exposure.""" + # 1. Image with real headroom (max=1.5) + img = np.ones((100, 100, 3), dtype=np.float32) * 1.5 + + # 2. Even if we darken it (-1 EV -> 0.75), we should know it HAD headroom? + # Typically "headroom" implies "recoverable data". + # If I underexpose a RAW, I still want to know it has headroom. + # Actually, if I underexpose, the values become < 1.0. + # But the SOURCE has values > 1.0. + # So yes, headroom_pct should ideally reflect source capability. + + darkened_img = img * 0.5 + + state = _analyze_highlight_state(darkened_img, pre_exposure_linear=img) + self.assertGreater(state['headroom_pct'], 0.99) + +if __name__ == '__main__': + unittest.main() diff --git a/faststack/tests/test_highlight_recovery.py b/faststack/tests/test_highlight_recovery.py new file mode 100644 index 0000000..1ae1a2c --- /dev/null +++ b/faststack/tests/test_highlight_recovery.py @@ -0,0 +1,212 @@ +"""Tests for highlight recovery system. 
def test_monotonicity():
    """Gradient 0→2.0 should be non-decreasing after recovery."""
    # 10x10 ramp reaching into headroom (up to 2.0); channels scaled so the
    # max channel is always R.
    ramp = np.linspace(0, 2.0, 100).reshape(10, 10)
    rgb = np.stack([ramp, ramp * 0.5, ramp * 0.3], axis=2).astype(np.float32)

    recovered = _highlight_recover_linear(rgb, amount=1.0, pivot=0.5, k=6.0)

    # Max-channel brightness must never decrease along the ramp.
    brightness = recovered.max(axis=2).flatten()
    diffs = np.diff(brightness)
    eps = 1e-7

    assert (diffs >= -eps).all(), f"Monotonicity violated: min diff = {diffs.min()}"
    print("test_monotonicity passed")


def test_no_nan_inf():
    """Random input including edge cases should produce finite output."""
    np.random.seed(42)

    # Normal data plus degenerate inputs: zeros, ones, headroom, extremes.
    test_cases = [
        np.random.rand(50, 50, 3).astype(np.float32),
        np.zeros((10, 10, 3), dtype=np.float32),
        np.ones((10, 10, 3), dtype=np.float32),
        np.ones((10, 10, 3), dtype=np.float32) * 2.0,
        np.array([[[0, 0, 0], [1e-10, 1e-10, 1e-10], [10.0, 5.0, 2.0]]], dtype=np.float32),
    ]

    for i, sample in enumerate(test_cases):
        recovered = _highlight_recover_linear(sample, amount=1.0, pivot=0.5, k=6.0)
        assert np.isfinite(recovered).all(), f"NaN/inf in test case {i}"

        boosted = _highlight_boost_linear(sample, amount=1.0, pivot=0.5)
        assert np.isfinite(boosted).all(), f"NaN/inf in boost test case {i}"

    print("test_no_nan_inf passed")


def test_hue_preservation():
    """Saturated highlight ramp should preserve RGB ratios (hue)."""
    # Saturated red ramp reaching into headroom territory.
    levels = np.linspace(0.1, 2.0, 50).reshape(5, 10)
    rgb = np.stack([levels, levels * 0.2, levels * 0.2], axis=2).astype(np.float32)

    recovered = _highlight_recover_linear(rgb, amount=0.8, pivot=0.5, k=6.0, chroma_rolloff=0.0)

    # Compare per-pixel chromaticity (RGB normalized by the max channel)
    # wherever the source has meaningful brightness.
    orig_brightness = rgb.max(axis=2)
    mask = orig_brightness > 0.01

    if mask.any():
        orig_norm = rgb[mask] / (orig_brightness[mask, None] + 1e-7)
        rec_brightness = recovered.max(axis=2)
        rec_norm = recovered[mask] / (rec_brightness[mask, None] + 1e-7)

        # The chromaticity shift must stay within 5%.
        ratio_diff = np.abs(orig_norm - rec_norm).max()
        assert ratio_diff < 0.05, f"Hue shift too large: {ratio_diff}"

    print("test_hue_preservation passed")


def test_mask_isolation():
    """Pixels with max-channel below pivot should barely change."""
    # Uniform 0.3 sits well below the 0.5 pivot, so recovery should not touch it.
    dim = np.full((10, 10, 3), 0.3, dtype=np.float32)

    recovered = _highlight_recover_linear(dim, amount=1.0, pivot=0.5, k=6.0)

    diff = np.abs(recovered - dim).max()
    assert diff < 1e-4, f"Below-pivot pixels changed by {diff}"
    print("test_mask_isolation passed")


def test_plateau_stability():
    """Clipped [1,1,1] region should stay uniform after recovery (no ringing)."""
    plateau = np.ones((20, 20, 3), dtype=np.float32)

    recovered = _highlight_recover_linear(plateau, amount=1.0, pivot=0.5, k=6.0)

    # A flat input must stay flat: any spatial variation would be ringing.
    std = recovered.std()
    assert std < 1e-6, f"Plateau became non-uniform: std = {std}"
    print("test_plateau_stability passed")


def test_headroom_shoulder():
    """Global shoulder should compress values > 1.0 correctly."""
    x = np.array([0.5, 1.0, 1.5, 2.0, 5.0], dtype=np.float32)
    out = _apply_headroom_shoulder(x, max_overshoot=0.05)

    # Identity below/at 1.0.
    assert out[0] == 0.5
    assert out[1] == 1.0

    # Compression above 1.0: output exceeds 1.0 but stays below the input.
    for i in range(2, len(x)):
        assert out[i] > 1.0, f"Value at {x[i]} should be > 1.0, got {out[i]}"
        assert out[i] < x[i], f"Value at {x[i]} should be compressed, got {out[i]}"

    # The shoulder must be non-decreasing over the whole range.
    assert (np.diff(out) >= 0).all(), "Shoulder is not monotonic"
    print("test_headroom_shoulder passed")


def test_analyze_highlight_state():
    """Highlight state analysis should detect headroom and clipping."""
    # Values above 1.0 everywhere -> nearly all pixels carry headroom.
    state = _analyze_highlight_state(np.full((10, 10, 3), 1.5, dtype=np.float32))
    assert state['headroom_pct'] > 0.9, f"Should detect headroom: {state['headroom_pct']}"

    # Mid-level image -> essentially no headroom.
    state = _analyze_highlight_state(np.full((10, 10, 3), 0.5, dtype=np.float32))
    assert state['headroom_pct'] < 0.01, f"Should not detect headroom: {state['headroom_pct']}"

    print("test_analyze_highlight_state passed")


def test_source_clipping_detection():
    """Verify that srgb_u8 correctly influences clipping results even if linear is dimmed."""
    # A fully clipped 8-bit source...
    srgb_u8 = np.full((10, 10, 3), 255, dtype=np.uint8)
    # ...whose linear counterpart was pulled well below 1.0 by exposure.
    rgb_linear = np.full((10, 10, 3), 0.2, dtype=np.float32)

    # Without the 8-bit source, 0.2 looks unclipped.
    state_no_u8 = _analyze_highlight_state(rgb_linear, srgb_u8=None)
    assert state_no_u8['source_clipped_pct'] == 0.0

    # With it, every pixel is recognized as clipped at the source.
    state_with_u8 = _analyze_highlight_state(rgb_linear, srgb_u8=srgb_u8)
    assert state_with_u8['source_clipped_pct'] == 1.0

    print("test_source_clipping_detection passed")
class TestUIStateNormalization(unittest.TestCase):
    """Unit tests for UIState.highlightState key normalization.

    NOTE(review): the provider returns only the canonical keys
    ('headroom_pct', 'source_clipped_pct', 'current_nearwhite_pct') via
    ``state.get(key, 0.0)``; legacy keys from an older backend are silently
    dropped, so the backend MUST populate the canonical keys.

    BUG FIX: the original file defined ``test_highlight_state_normalization_standard``
    twice — the first, assertion-free definition was silently shadowed by the
    second and never ran. The dead duplicate has been removed and its
    commentary folded into the note above.
    """

    def setUp(self):
        # Mock the app controller and its image_editor so UIState can be
        # constructed without a running application.
        self.mock_controller = MagicMock()
        self.mock_editor = MagicMock()
        self.mock_controller.image_editor = self.mock_editor
        self.ui_state = UIState(self.mock_controller)

    def test_highlight_state_normalization_standard(self):
        """Test with canonical keys present."""
        self.mock_editor._last_highlight_state = {
            'headroom_pct': 0.1,
            'source_clipped_pct': 0.4,
            'current_nearwhite_pct': 0.5
        }
        state = self.ui_state.highlightState
        self.assertEqual(state['headroom_pct'], 0.1)
        self.assertEqual(state['source_clipped_pct'], 0.4)
        self.assertEqual(state['current_nearwhite_pct'], 0.5)

    def test_highlight_state_normalization_empty(self):
        """Test with empty state: all keys default to 0.0."""
        self.mock_editor._last_highlight_state = None
        state = self.ui_state.highlightState
        self.assertEqual(state['headroom_pct'], 0.0)
        self.assertEqual(state['source_clipped_pct'], 0.0)
        self.assertEqual(state['current_nearwhite_pct'], 0.0)

    def test_highlight_state_normalization_missing_keys(self):
        """Test with missing keys: absent keys default to 0.0."""
        self.mock_editor._last_highlight_state = {
            'headroom_pct': 0.1
        }
        state = self.ui_state.highlightState
        self.assertEqual(state['headroom_pct'], 0.1)
        self.assertEqual(state['source_clipped_pct'], 0.0)
        self.assertEqual(state['current_nearwhite_pct'], 0.0)
class TestHighlightsResponsiveness(unittest.TestCase):
    """Probe how strongly highlight recovery affects each brightness level."""

    def test_highlights_at_various_levels(self):
        """Test how much highlights recovery affects various brightness levels."""
        editor = ImageEditor()

        # Linear ramp 0.0 -> 1.0 in 11 steps, replicated across R/G/B.
        # (For reference: 0.5 linear is about 186/255 in sRGB,
        # 0.25 linear is about 137/255 in sRGB.)
        steps = 11
        vals = np.linspace(0.0, 1.0, steps, dtype=np.float32)
        linear = np.stack([vals] * 3, axis=-1).reshape(1, steps, 3)

        # Maximum highlight recovery.
        edits = editor._initial_edits()
        edits['highlights'] = -1.0

        out = editor._apply_edits(linear.copy(), edits=edits, for_export=True)

        print("\nBrightness Levels (Linear 0.0 -> 1.0):")
        print("Input -> Output (Diff)")
        for i, inp in enumerate(vals):
            outp = out[0, i, 0]
            diff = inp - outp
            print(f"{inp:0.2f} -> {outp:0.4f} ({diff:0.4f})")

        # Informational only: the goal is to observe meaningful change
        # (diff > 0.01) starting from lower levels. With a pivot of 0.75,
        # values below 0.75 are expected to be unchanged (diff = 0).
class TestHighlightsV2(unittest.TestCase):
    """End-to-end checks for the v2 highlight pipeline: shoulder shape,
    analysis caching, and hot-pixel robustness."""

    def test_shoulder_asymptote(self):
        """Verify the new shoulder asymptotes to 1.0 + max_overshoot."""
        x = np.array([1.0, 2.0, 10.0, 100.0], dtype=np.float32)
        max_overshoot = 0.05
        out = _apply_headroom_shoulder(x, max_overshoot=max_overshoot)

        # Exactly 1.0 maps to itself.
        self.assertAlmostEqual(out[0], 1.0, places=5)

        # Everything above 1.0 stays strictly below the asymptote...
        self.assertTrue(np.all(out[1:] < 1.0 + max_overshoot))

        # ...while remaining monotonic...
        self.assertTrue(out[1] > out[0])
        self.assertTrue(out[2] > out[1])

        # ...and approaching 1.0 + max_overshoot for very large inputs.
        self.assertAlmostEqual(out[-1], 1.0 + max_overshoot, delta=0.001)

    def test_analysis_decoupling(self):
        """Verify analysis runs inside _apply_edits and is cached on the editor.

        _apply_edits takes the image array explicitly, so no editor state
        needs to be pre-loaded; afterwards the analysis result must be cached
        in ``_last_highlight_state`` for the UI to read.
        """
        editor = ImageEditor()
        # Linear image entirely above 1.0 -> every pixel carries headroom.
        # (The original also built an unused sRGB mock array; removed.)
        linear = np.ones((100, 100, 3), dtype=np.float32) * 1.2

        edits = editor._initial_edits()
        edits['highlights'] = -0.5

        editor._apply_edits(linear, edits=edits, for_export=False)

        self.assertIsNotNone(editor._last_highlight_state)
        # Analysis may subsample (striding), so only check the magnitude.
        self.assertGreater(editor._last_highlight_state['headroom_pct'], 0.9)

    def test_robust_ceiling(self):
        """Verify headroom ceiling handles hot pixels."""
        editor = ImageEditor()
        linear = np.ones((100, 100, 3), dtype=np.float32) * 1.1  # moderate headroom
        linear[50, 50, :] = 1000.0  # single hot pixel

        # BUG FIX: the original invoked _apply_highlights_shadows twice and
        # discarded the first result; the method returns a new array, so a
        # single call is sufficient. The redundant try/except wrapper that
        # only printed the traceback and re-raised was also removed —
        # unittest already reports failure tracebacks.
        out = editor._apply_highlights_shadows(linear, highlights=-1.0, shadows=0.0)

        # The result must stay finite and the hot pixel must be compressed,
        # not blown up into NaN/inf.
        self.assertTrue(np.isfinite(out).all())
        self.assertLess(out[50, 50, 0], 1000.0)
assert np.all(out[x > threshold] > threshold) - print("test_apply_soft_shoulder_rolloff passed") - -def test_apply_soft_shoulder_monotonic(): - # Test monotonicity - threshold = 0.8 - x = np.linspace(0, 5, 100) - out = _apply_soft_shoulder(x, threshold=threshold) - - # Check if strictly increasing (mostly, due to float precision) - assert np.all(np.diff(out) > 0) - print("test_apply_soft_shoulder_monotonic passed") - -def test_apply_soft_shoulder_no_threshold(): - # Test with threshold >= 1.0 - x = np.array([0.0, 0.5, 1.2]) - out = _apply_soft_shoulder(x, threshold=1.0) - np.testing.assert_allclose(out, x) - print("test_apply_soft_shoulder_no_threshold passed") - -def test_apply_soft_shoulder_none_above(): - # Test when no values are above threshold - threshold = 0.9 - x = np.array([0.1, 0.5, 0.8]) - out = _apply_soft_shoulder(x, threshold=threshold) - np.testing.assert_allclose(out, x) - print("test_apply_soft_shoulder_none_above passed") +import sys + +# We check for modules that might be missing and mock them if needed +# inside the test setup to avoid import errors at module level. + +class TestRolloff(unittest.TestCase): + def setUp(self): + # Now we use the pure math utils, so no need to mock cv2/gui/models + # unless math_utils unexpectedly depends on them. 
+ + from faststack.imaging.math_utils import _apply_headroom_shoulder + self._apply_headroom_shoulder = _apply_headroom_shoulder + + def tearDown(self): + pass + + def test_apply_headroom_shoulder_threshold(self): + # Test that values <= 1.0 are unchanged + max_overshoot = 0.05 + x = np.array([0.0, 0.5, 0.9, 1.0]) + out = self._apply_headroom_shoulder(x, max_overshoot=max_overshoot) + np.testing.assert_allclose(out, x) + + def test_apply_headroom_shoulder_rolloff(self): + # Test that values > 1.0 are compressed + max_overshoot = 0.05 + # 1.0 + max_overshoot is the asymptote + x = np.array([1.01, 1.1, 2.0, 10.0]) + out = self._apply_headroom_shoulder(x, max_overshoot=max_overshoot) + + # Check that they are compressed (out < x) + self.assertTrue(np.all(out < x)) + + # Check that they stay above 1.0 + self.assertTrue(np.all(out > 1.0)) + + # Check asymptote (should never exceed 1.0 + max_overshoot) + self.assertTrue(np.all(out < 1.0 + max_overshoot)) + + def test_apply_headroom_shoulder_monotonic(self): + # Test monotonicity + max_overshoot = 0.05 + x = np.linspace(0.9, 5.0, 100) + out = self._apply_headroom_shoulder(x, max_overshoot=max_overshoot) + + # Check if strictly increasing + diffs = np.diff(out) + self.assertTrue(np.all(diffs > 0), "Output should be monotonic increasing") + + def test_apply_headroom_shoulder_continuity(self): + # Test continuity at 1.0 + max_overshoot = 0.05 + # Check very close to 1.0 from both sides + x = np.array([1.0 - 1e-7, 1.0, 1.0 + 1e-7]) + out = self._apply_headroom_shoulder(x, max_overshoot=max_overshoot) + + # Difference should be negligible + diffs = np.diff(out) + # Should be very small but positive + self.assertTrue(np.all(np.abs(diffs) < 1e-6)) + + def test_apply_headroom_shoulder_asymptote_check(self): + # Verification Plan Step: Feed synthetic array with very high values + max_overshoot = 0.05 + x = np.array([1.0, 1.0 + max_overshoot/2, 1.0 + 1000.0]) + out = self._apply_headroom_shoulder(x, max_overshoot=max_overshoot) + + 
# f(1.0) == 1.0 + self.assertAlmostEqual(out[0], 1.0) + + # f(very_large) should be close to 1.0 + max_overshoot + self.assertAlmostEqual(out[2], 1.0 + max_overshoot, places=4) + + # values <= 1.0 + max_overshoot + self.assertTrue(np.all(out <= 1.0 + max_overshoot + 1e-9)) if __name__ == "__main__": - try: - test_apply_soft_shoulder_threshold() - test_apply_soft_shoulder_rolloff() - test_apply_soft_shoulder_monotonic() - test_apply_soft_shoulder_no_threshold() - test_apply_soft_shoulder_none_above() - print("\nALL TESTS PASSED") - except Exception as e: - print(f"\nTEST FAILED: {e}") - import traceback - traceback.print_exc() - exit(1) + unittest.main() diff --git a/faststack/tests/test_sensitivity.py b/faststack/tests/test_sensitivity.py new file mode 100644 index 0000000..5007095 --- /dev/null +++ b/faststack/tests/test_sensitivity.py @@ -0,0 +1,52 @@ +import numpy as np +import sys +import os + +# Add parent directory to sys.path to allow importing faststack +sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))) + +from faststack.imaging.editor import ImageEditor + +def test_contrast_saturation_sensitivity(): + print("Testing contrast and saturation sensitivity...") + editor = ImageEditor() + # Create a 100x100 dummy image (gray with some color) + arr = np.zeros((100, 100, 3), dtype=np.float32) + arr[:, :50, 0] = 0.8 # Red left half + arr[:, 50:, 1] = 0.8 # Green right half + editor.float_preview = arr + + # Test Contrast at 100 (backend value 1.0) + print("Testing Contrast at 1.0...") + edits = editor._initial_edits() + edits['contrast'] = 1.0 + out = editor._apply_edits(arr.copy(), edits=edits) + + # Original contrast factor was 1.0 + 1.0 = 2.0 + # New contrast factor should be 1.0 + 1.0 * 0.4 = 1.4 + # Check a pixel that was 0.8: (0.8 - 0.5) * 1.4 + 0.5 = 0.3 * 1.4 + 0.5 = 0.42 + 0.5 = 0.92 + val = out[0, 0, 0] + print(f"Contrast 1.0 result: {val}") + assert np.allclose(val, 0.92, atol=0.01), f"Expected 0.92, got {val}" + + # 
Test Saturation at 100 (backend value 1.0) + print("Testing Saturation at 1.0...") + edits = editor._initial_edits() + edits['saturation'] = 1.0 + out = editor._apply_edits(arr.copy(), edits=edits) + + # Original saturation factor was 1.0 + 1.0 = 2.0 + # New saturation factor should be 1.0 + 1.0 * 0.5 = 1.5 + # Pixel (0.8, 0, 0): gray = 0.8 * 0.299 = 0.2392 + # New: 0.2392 + (0.8 - 0.2392) * 1.5 = 0.2392 + 0.5608 * 1.5 = 0.2392 + 0.8412 = 1.0804 + val_sat = out[0, 0, 0] + print(f"Saturation 1.0 result: {val_sat}") + assert np.allclose(val_sat, 1.0804, atol=0.01), f"Expected 1.0804, got {val_sat}" + print("All tests passed!") + +if __name__ == "__main__": + try: + test_contrast_saturation_sensitivity() + except Exception as e: + print(f"Test failed: {e}") + sys.exit(1) diff --git a/faststack/tests/test_version_sort.py b/faststack/tests/test_version_sort.py new file mode 100644 index 0000000..6465681 --- /dev/null +++ b/faststack/tests/test_version_sort.py @@ -0,0 +1,58 @@ +import unittest +import re +from pathlib import PureWindowsPath + +# Re-implementing the function locally to match the fix in config.py +# (The function in config.py is nested inside detect_rawtherapee_path so not easily importable) +def version_sort_key(path): + for part in reversed(PureWindowsPath(path).parts): + if re.fullmatch(r'\d+(?:\.\d+)*', part): + return [int(n) for n in part.split(".")] + return [0] + + +class TestVersionSort(unittest.TestCase): + def test_version_sort_preference(self): + """ + Test that higher version numbers are preferred regardless of parent directory names. + """ + # Scenario: 5.10 (x86) vs 5.9 (x64) + # The x86 path has "86" in "Program Files (x86)", which should NOT confuse the sort. 
+ + path_5_10_x86 = r"C:\Program Files (x86)\RawTherapee\5.10\rawtherapee-cli.exe" + path_5_9_x64 = r"C:\Program Files\RawTherapee\5.9\rawtherapee-cli.exe" + + paths = [path_5_9_x64, path_5_10_x86] + + # BROKEN behavior check (optional logic, just to demonstrate the issue) + # In natural sort, "Program Files (x86)" might come after "Program Files" depending on how it handles " (" vs "" + # But specifically, the "86" is a number. + # "Program Files" split -> ['Program Files'] + # "Program Files (x86)" split -> ['Program Files (x', 86, ')'] + # Comparison logic is complex but often fails here. + + # CORRECT behavior check + paths.sort(key=version_sort_key, reverse=True) + self.assertEqual(paths[0], path_5_10_x86, "Should select 5.10 over 5.9") + + def test_same_version_different_arch(self): + """ + If versions are identical, stability or secondary sort doesn't strictly matter for validity, + but we want to ensure it doesn't crash or return garbage. + """ + p1 = r"C:\Program Files\RawTherapee\5.9\rawtherapee-cli.exe" + p2 = r"C:\Program Files (x86)\RawTherapee\5.9\rawtherapee-cli.exe" + + key1 = version_sort_key(p1) + key2 = version_sort_key(p2) + + self.assertEqual(key1, [5, 9]) + self.assertEqual(key2, [5, 9]) + self.assertEqual(key1, key2) + + def test_no_version_in_path(self): + p = r"C:\Program Files\RawTherapee\bin\rawtherapee-cli.exe" + self.assertEqual(version_sort_key(p), [0]) + +if __name__ == '__main__': + unittest.main() diff --git a/faststack/ui/provider.py b/faststack/ui/provider.py index f732d79..af33565 100644 --- a/faststack/ui/provider.py +++ b/faststack/ui/provider.py @@ -8,6 +8,8 @@ from faststack.models import DecodedImage from faststack.config import config +from faststack.imaging.optional_deps import HAS_OPENCV +from pathlib import Path # Try to import QColorSpace if available (Qt 6+) try: @@ -26,7 +28,9 @@ def __init__(self, app_controller): self.placeholder = QImage(256, 256, QImage.Format.Format_RGB888) 
self.placeholder.fill(Qt.GlobalColor.darkGray) # Keepalive queue to prevent GC of buffers currently in use by QImage - self._keepalive = collections.deque(maxlen=32) + # Increased to 128 to prevent crashes during rapid scrolling/thrashing where + # QML might hold onto textures slightly longer than the Python GC expects. + self._keepalive = collections.deque(maxlen=128) def requestImage(self, id: str, size: object, requestedSize: object) -> QImage: """Handles image requests from QML.""" @@ -34,17 +38,30 @@ def requestImage(self, id: str, size: object, requestedSize: object) -> QImage: return self.placeholder try: - image_index_str = id.split('/')[0] - index = int(image_index_str) + # Parse index and optional generation + parts = id.split('/') + index = int(parts[0]) + gen = int(parts[1]) if len(parts) > 1 else None # If editor is open, use the background-rendered preview buffer # BUT only if the requested index matches the currently edited index! - # Otherwise we serve the editor preview for thumbnails/prefetch. 
+ # AND the generation matches (to avoid stale frames during rotation/param changes) # FIX: If zoomed in, force full-res image instead of low-res preview - if self.app_controller.ui_state.isEditorOpen and index == self.app_controller.current_index and not self.app_controller.ui_state.isZoomed: - image_data = self.app_controller._last_rendered_preview or self.app_controller.get_decoded_image(index) - else: - image_data = self.app_controller.get_decoded_image(index) + + use_editor_preview = ( + self.app_controller.ui_state.isEditorOpen + and index == self.app_controller.current_index + and not self.app_controller.ui_state.isZoomed + and self.app_controller._last_rendered_preview is not None + and getattr(self.app_controller, "_last_rendered_preview_index", None) == index + and (gen is None or getattr(self.app_controller, "_last_rendered_preview_gen", None) == gen) + ) + + image_data = ( + self.app_controller._last_rendered_preview + if use_editor_preview + else self.app_controller.get_decoded_image(index) + ) if image_data: # Handle format being None (from prefetcher) or missing @@ -135,6 +152,7 @@ class UIState(QObject): is_histogram_visible_changed = Signal(bool) histogram_data_changed = Signal() + highlightStateChanged = Signal() # New signal for highlight analysis updates brightness_changed = Signal(float) contrast_changed = Signal(float) saturation_changed = Signal(float) @@ -521,6 +539,11 @@ def isStackedJpg(self): """Returns True if the current image is a stacked JPG.""" return self.currentFilename.lower().endswith(" stacked.jpg") + @Property(bool, constant=True) + def hasOpenCV(self): + """Returns True if OpenCV is available.""" + return HAS_OPENCV + # --- Slots for QML to call --- @Slot() def nextImage(self): @@ -695,9 +718,13 @@ def isEditorOpen(self, new_value: bool): def editorFilename(self) -> str: """Returns the filename of the image currently being edited (may be .tif for developed RAW).""" editor = self.app_controller.image_editor - if editor and 
editor.current_filepath: - return editor.current_filepath.name - return "" + fp = getattr(editor, "current_filepath", None) if editor else None + if not fp: + return "" + try: + return Path(fp).name + except Exception: + return "" @Property(int, notify=editorImageChanged) def editorBitDepth(self) -> int: @@ -791,6 +818,31 @@ def histogramData(self, new_value): self._histogram_data = new_value self.histogram_data_changed.emit() + @Property('QVariant', notify=highlightStateChanged) + def highlightState(self): + """Returns highlight analysis state for UI display. + + Returns dict with: + - headroom_pct: Fraction of pixels with recoverable data above 1.0 (16-bit sources) + - clipped_pct: Fraction of pixels clipped in the SOURCE image (JPEG flat-top @ 254+) + - near_white_pct: Fraction of pixels currently near white in the processed state. + """ + editor = self.app_controller.image_editor + state = {} + if editor and hasattr(editor, '_last_highlight_state') and editor._last_highlight_state: + # Quick copy under lock to minimize contention + # Using the editor's lock ensures we don't read while it's being written + with editor._lock: + state = dict(editor._last_highlight_state) + + # Normalize for QML robustness: ensure stable keys exist regardless of internal naming + # Normalize for QML robustness: ensure stable keys exist + return { + 'headroom_pct': state.get('headroom_pct', 0.0), + 'source_clipped_pct': state.get('source_clipped_pct', 0.0), + 'current_nearwhite_pct': state.get('current_nearwhite_pct', 0.0) + } + @Property(float, notify=brightness_changed) def brightness(self) -> float: return self._brightness diff --git a/pyproject.toml b/pyproject.toml index e5ae06c..b2c395f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,7 +5,7 @@ build-backend = "setuptools.build_meta" [project] name = "faststack" -version = "1.4" +version = "1.5.2" authors = [ { name="Alan Rockefeller"}, ] diff --git a/tests/test_highlights_v2.py b/tests/test_highlights_v2.py new file 
mode 100644 index 0000000..8524c4f --- /dev/null +++ b/tests/test_highlights_v2.py @@ -0,0 +1,67 @@ +import unittest +import numpy as np +from faststack.imaging.editor import ImageEditor, _apply_headroom_shoulder +from faststack.ui.provider import UIState +from faststack.app import AppController +from PySide6.QtCore import QObject, Signal + +class MockAppController(QObject): + def __init__(self): + super().__init__() + self.image_editor = ImageEditor() + self.ui_state = None # Circular ref handle manually + +class TestHighlightsV2(unittest.TestCase): + def test_shoulder_asymptote(self): + """Verify the new shoulder asymptotes to 1.0 + steepness.""" + x = np.array([1.0, 2.0, 10.0, 100.0], dtype=np.float32) + steepness = 0.05 + out = _apply_headroom_shoulder(x, steepness=steepness) + + # At 1.0, should be 1.0 + self.assertAlmostEqual(out[0], 1.0, places=5) + + # Above 1.0, should be < 1.0 + steepness + self.assertTrue(np.all(out[1:] < 1.0 + steepness)) + + # Monotonicity + self.assertTrue(out[1] > out[0]) + self.assertTrue(out[2] > out[1]) + + # Asymptote check: at very large x, should be close to 1.05 + self.assertAlmostEqual(out[-1], 1.0 + steepness, delta=0.001) + + def test_analysis_decoupling(self): + """Verify analysis runs before adjustments and is cached.""" + editor = ImageEditor() + # Create a linear image with some headroom + linear = np.ones((100, 100, 3), dtype=np.float32) * 1.2 + # sRGB mock indicating some clipping (e.g. 
255) + srgb = np.ones((100, 100, 3), dtype=np.uint8) * 255 + + # Run with highlights=-0.5 + editor._apply_highlights_shadows(linear, highlights=-0.5, shadows=0.0, srgb_u8=srgb) + + # Check cache + self.assertIsNotNone(editor._last_highlight_state) + self.assertGreater(editor._last_highlight_state['headroom_pct'], 0.9) + self.assertGreater(editor._last_highlight_state['clipped_pct'], 0.9) + + def test_robust_ceiling(self): + """Verify headroom ceiling handles hot pixels.""" + editor = ImageEditor() + linear = np.ones((100, 100, 3), dtype=np.float32) * 1.1 # Moderate headroom + # Add a single hot pixel + linear[50, 50, :] = 1000.0 + + # Use highlights recovery + # This triggers the robust percentile logic + out = editor._apply_highlights_shadows(linear, highlights=-1.0, shadows=0.0) + + # Check that we didn't explode or crash + self.assertTrue(np.isfinite(out).all()) + # The hot pixel should be compressed but not NaN + self.assertLess(out[50, 50, 0], 1000.0) + +if __name__ == '__main__': + unittest.main() From bef964177945ed558ce80450fd59af8687c38305 Mon Sep 17 00:00:00 2001 From: AlanRockefeller Date: Sun, 25 Jan 2026 23:14:24 -0800 Subject: [PATCH 2/6] bug cleanup --- faststack/qml/ImageEditorDialog.qml | 44 ++++++------ faststack/tests/test_editor_integration.py | 4 +- .../tests/test_editor_lifecycle_and_safety.py | 2 +- faststack/tests/test_raw_pipeline.py | 69 ++----------------- 4 files changed, 30 insertions(+), 89 deletions(-) diff --git a/faststack/qml/ImageEditorDialog.qml b/faststack/qml/ImageEditorDialog.qml index 4f43c22..518ccbb 100644 --- a/faststack/qml/ImageEditorDialog.qml +++ b/faststack/qml/ImageEditorDialog.qml @@ -147,7 +147,7 @@ Window { ListElement { name: "Texture"; key: "texture" } ListElement { name: "Sharpness"; key: "sharpness" } } - Repeater { model: detailModel; delegate: editSlider } + Repeater { model: effectsModel; delegate: editSlider } // --- Histogram Group --- RowLayout { @@ -316,7 +316,7 @@ Window { id: effectsModel ListElement 
{ name: "Vignette"; key: "vignette"; min: 0; max: 100 } } - Repeater { model: effectsModel; delegate: editSlider } + Repeater { model: detailModel; delegate: editSlider } Loader { sourceComponent: sectionSeparator } @@ -486,26 +486,24 @@ Window { property bool isResetting: false property double _lastPressTime: 0 + // Timer to handle reset state duration + Timer { + id: resetTimer + interval: 100 // Lock for 100ms to prevent accidental drags during reset + repeat: false + onTriggered: { + slider.isResetting = false + } + } + function triggerReset() { - isResetting = true + slider.isResetting = true controller.set_edit_parameter(model.key, 0.0) slider.value = 0.0 _pendingValue = 0.0 slider._lastSentValue = 0.0 imageEditorDialog.updatePulse++ - } - - // Failsafe timer to prevent sticky isResetting state - Timer { - id: resetFailsafe - interval: 200 - repeat: false - onTriggered: { - if (slider.isResetting) { - console.warn("Slider reset stuck, forcing release") - slider.isResetting = false - } - } + resetTimer.restart() } onPressedChanged: { @@ -520,7 +518,6 @@ Window { if (timeDiff < 600 && valDiff < (range * 0.05)) { triggerReset() _lastPressTime = 0 - resetFailsafe.start() // Start failsafe } else { _lastPressTime = now } @@ -528,16 +525,15 @@ Window { imageEditorDialog.slidersPressedCount++ // Initialize drag logic only if not resetting - if (!isResetting) { + if (!slider.isResetting) { _pendingValue = value if (!sendTimer.running) sendTimer.start() } } else { imageEditorDialog.slidersPressedCount-- - if (isResetting) { - isResetting = false - // Force backend to 0 on release + if (slider.isResetting) { + // Force backend to 0 on release (redundant but safe) controller.set_edit_parameter(model.key, 0.0) } else { // Stop repeating sends, then send final value immediately @@ -559,13 +555,13 @@ Window { } onMoved: { - if (isResetting) return + if (slider.isResetting) return _pendingValue = value if (!sendTimer.running) sendTimer.start() } onBackendValueChanged: { - 
if (!pressed && !isResetting) { + if (!pressed && !slider.isResetting) { value = backendValue } } @@ -725,4 +721,4 @@ Window { } } } -} \ No newline at end of file +} diff --git a/faststack/tests/test_editor_integration.py b/faststack/tests/test_editor_integration.py index 0e6f60b..0a4ec42 100644 --- a/faststack/tests/test_editor_integration.py +++ b/faststack/tests/test_editor_integration.py @@ -5,7 +5,7 @@ import sys # Ensure we can import faststack -sys.path.append(str(Path(__file__).parents[1])) +sys.path.append(str(Path(__file__).parents[2])) from faststack.app import AppController @@ -85,7 +85,7 @@ def test_missing_methods(self): # 7. update_histogram # This one might be complex to mock fully due to threading, but we check existence if not hasattr(self.controller, 'update_histogram'): - self.fail("AppController is missing method 'update_histogram'") + self.fail("AppController is missing method 'update_histogram'") def test_set_edit_parameter_gating(self): diff --git a/faststack/tests/test_editor_lifecycle_and_safety.py b/faststack/tests/test_editor_lifecycle_and_safety.py index 25b63ec..e612713 100644 --- a/faststack/tests/test_editor_lifecycle_and_safety.py +++ b/faststack/tests/test_editor_lifecycle_and_safety.py @@ -72,7 +72,7 @@ def test_memory_cleanup_on_editor_close(self): # 4. 
Verify preview cache was cleared with self.controller._preview_lock: - self.assertIsNone(self.controller._last_rendered_preview) + self.assertIsNone(self.controller._last_rendered_preview) def test_histogram_worker_submission_safety(self): """Verify that histogram inflight flag is reset if submission fails.""" diff --git a/faststack/tests/test_raw_pipeline.py b/faststack/tests/test_raw_pipeline.py index fd53223..302b16c 100644 --- a/faststack/tests/test_raw_pipeline.py +++ b/faststack/tests/test_raw_pipeline.py @@ -60,62 +60,12 @@ def side_effect_start(*args, **kwargs): # Expect file to be DELETED because it was 0 bytes self.assertFalse(tif_path.exists(), "Zombie 0-byte file should be cleaned up") - @patch('faststack.app.os.path.exists') - @patch('faststack.app.subprocess.run') - @patch('faststack.config.config.get') - @patch('faststack.app.threading.Thread') - def test_develop_raw_timeout(self, mock_thread, mock_config_get, mock_run, mock_exists): - """Test handling of subprocess timeout.""" - mock_config_get.return_value = "c:\\path\\to\\rawtherapee-cli.exe" - mock_exists.return_value = True - - def side_effect_start(*args, **kwargs): - _, thread_kwargs = mock_thread.call_args - target = thread_kwargs.get('target') - if target: - target() - mock_thread.return_value.start.side_effect = side_effect_start - - # Mock timeout - mock_run.side_effect = subprocess.TimeoutExpired(cmd="rawtherapee-cli", timeout=60) - - app = MagicMock() - app._on_develop_finished = MagicMock() - app.image_files = [self.image_file] - app.current_index = 0 - app.update_status_message = MagicMock() - - # We need a real _develop_raw_backend attached to our mock app to test logic inside it, - # OR we can just use AppController.develop_raw_for_current_image(app) which calls it. - # But wait, develop_raw_for_current_image calls self._develop_raw_backend(). - # Since we are essentially testing AppController logic, we should probably mock the class methods partials? 
- # Actually simplest is to just use the class method as a function bound to our mock self. - - # But _develop_raw_backend is methods on AppController. Let's bind checking: - # We want to test logic inside _develop_raw_backend. - - # Let's bind the real method to our mock object - app._develop_raw_backend = AppController._develop_raw_backend.__get__(app, AppController) - - # Run - app._develop_raw_backend() - - # Verify - mock_run.assert_called() - self.assertIn("timeout", mock_run.call_args[1]) - self.assertEqual(mock_run.call_args[1]["timeout"], 60) - - # Verify _on_develop_finished called with False (failure) - # Note: We use QTimer.singleShot(0, partial(...)) - # We need to mock QTimer to execute the partial immediately or check if it was called. - pass # See QTimer mock below handled implicitly? No, I need to patch QTimer. - @patch('faststack.app.QTimer.singleShot') @patch('faststack.app.os.path.exists') @patch('faststack.app.subprocess.run') @patch('faststack.config.config.get') @patch('faststack.app.threading.Thread') - def test_develop_raw_timeout_with_qtimer(self, mock_thread, mock_config_get, mock_run, mock_exists, mock_single_shot): + def test_develop_raw_timeout(self, mock_thread, mock_config_get, mock_run, mock_exists, mock_single_shot): mock_config_get.return_value = "c:\\path\\to\\rawtherapee-cli.exe" mock_exists.return_value = True @@ -249,19 +199,14 @@ def test_develop_raw_slot(self, mock_config_get, mock_run, mock_exists): app.current_index = 0 app.update_status_message = MagicMock() app.load_image_for_editing = MagicMock() + app.enable_raw_editing = MagicMock() - # Mock run - mock_run.return_value.returncode = 0 - - # Call Slot - we mock the backend to avoid threading issues in this specific test? - # No, the original test mocked Popen. We changed to run. - # Let's adjust this test to match the new code structure if needed. - # But wait, we are patching subprocess.run now. 
+ # Bind real method + func = AppController.develop_raw_for_current_image.__get__(app, AppController) + func() - # We call the unbound method with our mock self - # Actually, AppController.develop_raw_for_current_image just checks raw and calls _develop_raw_backend - # So we probably want to test _develop_raw_backend logic mainly. - pass + # Verify delegation + app.enable_raw_editing.assert_called_once() def test_editor_float_pipeline_io(self): """Test that editor saves 16-bit TIFF and Developed JPG.""" From 641fcf47abddb17c116de814488e364c26a8b266 Mon Sep 17 00:00:00 2001 From: AlanRockefeller Date: Sun, 25 Jan 2026 23:18:24 -0800 Subject: [PATCH 3/6] bug cleanup --- .gitignore | 1 + faststack/error_log.txt | 10 -------- faststack/imaging/prefetch.py | 44 +---------------------------------- 3 files changed, 2 insertions(+), 53 deletions(-) delete mode 100644 faststack/error_log.txt diff --git a/.gitignore b/.gitignore index 74d2e95..80d3b39 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ faststack.egg-info/ # Logs # ---------------------------- *.log +error_log.txt # ---------------------------- # OS cruft diff --git a/faststack/error_log.txt b/faststack/error_log.txt deleted file mode 100644 index 941b39e..0000000 --- a/faststack/error_log.txt +++ /dev/null @@ -1,10 +0,0 @@ -ERROR in test_raw_mode_exif_preservation (tests.test_exif_orientation.TestExifOrientation.test_raw_mode_exif_preservation): -Traceback (most recent call last): - File "C:\code\faststack\faststack\tests\test_exif_orientation.py", line 129, in test_raw_mode_exif_preservation - with Image.open(developed_path) as dev: - ^^^^^^^^^^^^^^^^^^^^^^^^^^ - File "C:\Users\alanr\AppData\Local\Programs\Python\Python312\Lib\site-packages\PIL\Image.py", line 3512, in open - fp = builtins.open(filename, "rb") - ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\alanr\\AppData\\Local\\Temp\\tmph5g2del1\\working_source-developed.jpg' - diff --git 
a/faststack/imaging/prefetch.py b/faststack/imaging/prefetch.py index ba54dbf..54fe3f2 100644 --- a/faststack/imaging/prefetch.py +++ b/faststack/imaging/prefetch.py @@ -196,9 +196,6 @@ def update_prefetch(self, current_index: int, is_navigation: bool = False, direc # Navigation just shifts which indices to prefetch. # OLD GENERATION CLEANUP MOVED TO INSIDE LOCK BELOW - # old_generations = [g for g in self._scheduled if g < self.generation] - # for g in old_generations: - # del self._scheduled[g] # Track navigation direction if direction is not None: @@ -468,19 +465,7 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, t_after_fallback_decode = time.perf_counter() # EXIF orientation correction - # In fallback path, we opened 'f' (file handle) and mmap. - # We didn't keep a PIL image open suitable for EXIF extraction except inside the try block (line 456). - # But line 456 `with PILImage.fromarray(buffer)` creates a new image from buffer, NO EXIF. - # Wait, line 377 `with PILImage.open(image_file.path) as img:` IS handling non-JPEG logic if TurboJPEG fails? - # No, this is inside `if is_jpeg`. - - # So inside ICC mode, failure fallback: - # We use decode_jpeg_rgb/resized (TurboJPEG) OR generic Pillow. - # If generic pillow (lines 373-383), it was `with PILImage.open... as img`. - # But that loop closed. - - # Optimization: we can't easily reuse the object unless we restructure. - # BUT, for the "Unified" block at the end, we can pass `exif_obj` if we have it. + pass # Memory Optimization: Avoid explicit copy @@ -520,33 +505,6 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int, t_after_decode = time.perf_counter() # EXIF orientation application - # We have 'img' (PIL Image) open if we used the fallback path, but wait - - # in this block (lines 482-501) we MIGHT use decode_jpeg_resized which doesn't give us a PIL Image. - # Or we used Generic Pillow open (lines 494-496) where 'img' was capable. 
- # Actually, if we used decode_jpeg_XXX, we don't have a PIL object handy unless we open one. - # But optimization: if we used Pillow fallback, we might have it? - # This block is "Standard decode path (Option A or no color management) FOR JPEG FALLBACK" - - # BUT WAIT: logic above: if TurboJPEG worked on JPEG, it returned buffer. - # If TurboJPEG failed, it fell back to Pillow. - - # Actually, looking at the logic: - # 1. ICC Mode -> TurboJPEG (if success, we have buffer) -> applyTransform -> buffer - # 2. ICC Mode -> Fallback Pillow (if success, we have buffer) - - # The `apply_saturation_compensation` and `apply_exif_orientation` are effectively "Post Processing". - # In previous code "Unified EXIF Orientation Application" was doing a fresh open. - - # We should move the EXIF application UP into the scopes where we might have the PIL object. - - # For this specific block (ICC Fallback or TurboJPEG): - # In TurboJPEG case: we have 'img' (PIL Image from buffer). Does it have EXIF? No, it was created from raw bytes. - # We did open 'orig' to get ICC profile. We could have gotten EXIF combined? - # The TurboJPEG path opens 'orig' on line 397 (with PILImage.open(image_file.path) as orig). - # We should get EXIF there! - - # Let's fix the TurboJPEG path first (Lines 345+): - # We open 'orig' to read ICC (line 397). We should ALSO read EXIF there. # Memory Optimization: Avoid explicit copy buffer = np.ascontiguousarray(buffer) From b0f41980975a254934ab3297908131336fb676de Mon Sep 17 00:00:00 2001 From: AlanRockefeller Date: Mon, 26 Jan 2026 18:44:03 -0800 Subject: [PATCH 4/6] Fix double clicking on sliders in image editor --- ChangeLog.md | 8 ++--- faststack/qml/ImageEditorDialog.qml | 54 ++++++++--------------------- 2 files changed, 18 insertions(+), 44 deletions(-) diff --git a/ChangeLog.md b/ChangeLog.md index b730572..6a90e17 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -5,7 +5,7 @@ Todo: Make it work on Linux / Mac. Create Windows .exe. 
Write better docum ## 1.5.2 (2026-01-25) -#### Added +### Added - **Highlight recovery telemetry + UI indicators** - New highlight state analysis in the editor pipeline (headroom/clipping/near-white metrics) and a UI signal (`highlightStateChanged`) to keep it live. - Image editor dialog now shows **Headroom** and **Clipped** indicators under the histogram when relevant. @@ -16,7 +16,7 @@ Todo: Make it work on Linux / Mac. Create Windows .exe. Write better docum - Centralized optional OpenCV usage (`optional_deps`) with fallbacks (e.g., Pillow-based Gaussian blur fallback when OpenCV is unavailable). - Tests updated to skip/patch appropriately when OpenCV isn’t installed. -#### Changed +### Changed - **Save flow restored to “old behavior”** - Saving now: **closes editor → clears editor state → refreshes image list → reselects saved image → clears cache/prefetches → syncs UI**. - Save errors now surface as user-visible status messages with safer exception handling. @@ -24,13 +24,13 @@ Todo: Make it work on Linux / Mac. Create Windows .exe. Write better docum - Histogram panel height increased; channel labels expanded (Red/Green/Blue); non-minimal histogram display enabled. - Single-channel histogram drawing now downsamples using **max-pooling** when canvas width is smaller than bin count to reduce aliasing/spikes. - **Slider double-click reset robustness** - - Reworked double-click-to-reset behavior with better drag-vs-click detection, an explicit reset path, a failsafe timer, and a binding to force value to 0 while resetting. + - Replaced heuristic double-click logic with `TapHandler` double-tap reset and removed competing `slider.value` writers for more deterministic behavior. - **Color/edit pipeline tuning** - Contrast and saturation slider sensitivity reduced (scaled effect). - Headroom “safety” shoulder moved to linear space (`_apply_headroom_shoulder`) replacing the old sRGB-side shoulder. 
- Auto-levels now kicks the preview worker for immediate visual feedback; histogram updates are guarded by visibility in more places. -#### Fixed +### Fixed - **EXIF orientation “double rotation” bugs** - Saving now consistently drops/sanitizes EXIF when orientation can’t be safely serialized, preventing incorrect viewer rotations. - Developed JPG sidecar EXIF from a paired JPEG is sanitized for Orientation as well. diff --git a/faststack/qml/ImageEditorDialog.qml b/faststack/qml/ImageEditorDialog.qml index 518ccbb..c33e56a 100644 --- a/faststack/qml/ImageEditorDialog.qml +++ b/faststack/qml/ImageEditorDialog.qml @@ -456,7 +456,7 @@ Window { target: slider property: "value" value: slider.backendValue - when: !slider.pressed + when: !slider.pressed && !slider.isResetting } property real _pendingValue: 0 @@ -467,24 +467,24 @@ Window { repeat: true onTriggered: { if (Math.abs(slider._pendingValue - slider._lastSentValue) > 0.001) { - var sendValue = slider.isReversed ? -slider._pendingValue : slider._pendingValue + var sendValue = isReversed ? 
-slider._pendingValue : slider._pendingValue controller.set_edit_parameter(model.key, sendValue / maxVal) slider._lastSentValue = slider._pendingValue } } } - Connections { - target: imageEditorDialog - function onUpdatePulseChanged() { - if (!slider.pressed) { - slider.value = slider.backendValue - } + // Double-click reset using TapHandler (coexists with Slider drag) + TapHandler { + acceptedButtons: Qt.LeftButton + gesturePolicy: TapHandler.DragThreshold + onDoubleTapped: { + if (!slider.isResetting) + slider.triggerReset() } } - + property bool isResetting: false - property double _lastPressTime: 0 // Timer to handle reset state duration Timer { @@ -498,6 +498,7 @@ Window { function triggerReset() { slider.isResetting = true + sendTimer.stop() controller.set_edit_parameter(model.key, 0.0) slider.value = 0.0 _pendingValue = 0.0 @@ -508,36 +509,23 @@ Window { onPressedChanged: { if (pressed) { - var now = Date.now() - // Double click logic with value delta guard - // Only reset if clicked recently AND value hasn't drifted far (meaning it wasn't a drag) - var timeDiff = now - _lastPressTime - var valDiff = Math.abs(value - _pendingValue) - var range = to - from - - if (timeDiff < 600 && valDiff < (range * 0.05)) { - triggerReset() - _lastPressTime = 0 - } else { - _lastPressTime = now - } - imageEditorDialog.slidersPressedCount++ // Initialize drag logic only if not resetting if (!slider.isResetting) { _pendingValue = value + slider._lastSentValue = value if (!sendTimer.running) sendTimer.start() } } else { imageEditorDialog.slidersPressedCount-- + sendTimer.stop() if (slider.isResetting) { // Force backend to 0 on release (redundant but safe) controller.set_edit_parameter(model.key, 0.0) } else { - // Stop repeating sends, then send final value immediately - sendTimer.stop() + // Send final value immediately var sendValue = isReversed ? 
-value : value controller.set_edit_parameter(model.key, sendValue / maxVal) } @@ -546,25 +534,11 @@ Window { } } - // Force 0 while resetting - this overrides the slider's internal mouse handling - Binding { - target: slider - property: "value" - value: 0 - when: slider.isResetting - } - onMoved: { if (slider.isResetting) return _pendingValue = value if (!sendTimer.running) sendTimer.start() } - - onBackendValueChanged: { - if (!pressed && !slider.isResetting) { - value = backendValue - } - } // Smooth transition for value changes from backend Behavior on value { From 01d0c22153d510578bf0735854d02e7eec2f9ebe Mon Sep 17 00:00:00 2001 From: AlanRockefeller Date: Mon, 26 Jan 2026 23:40:09 -0800 Subject: [PATCH 5/6] Image editor performance improvements --- .claude/settings.local.json | 7 + faststack/app.py | 2 +- faststack/imaging/editor.py | 218 ++++++++++++++++++++++++---- faststack/qml/ImageEditorDialog.qml | 2 +- 4 files changed, 202 insertions(+), 27 deletions(-) create mode 100644 .claude/settings.local.json diff --git a/.claude/settings.local.json b/.claude/settings.local.json new file mode 100644 index 0000000..3c31ae0 --- /dev/null +++ b/.claude/settings.local.json @@ -0,0 +1,7 @@ +{ + "permissions": { + "allow": [ + "Bash(python:*)" + ] + } +} diff --git a/faststack/app.py b/faststack/app.py index 8110b40..e72531a 100644 --- a/faststack/app.py +++ b/faststack/app.py @@ -227,7 +227,7 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine, debug_cache: self._preview_refresh_pending = False self.preview_timer = QTimer(self) self.preview_timer.setSingleShot(True) - self.preview_timer.setInterval(33) # ~30fps cap for smoother dragging + self.preview_timer.setInterval(16) # ~60fps cap for smoother dragging self.preview_timer.timeout.connect(self._do_preview_refresh) # Track if any dialog is open to disable keybindings diff --git a/faststack/imaging/editor.py b/faststack/imaging/editor.py index 9f9a72e..3ebfd45 100644 --- a/faststack/imaging/editor.py 
+++ b/faststack/imaging/editor.py @@ -339,6 +339,10 @@ def __init__(self): self._cached_max_brightness_state: Optional[Dict[str, Any]] = None self._cached_highlight_analysis: Optional[Dict[str, Any]] = None + # Cache for luma detail bands (pyramid blur decomposition) + # Stores: {'hash': int, 'Y20': ndarray, 'Y3': ndarray, 'Y1': ndarray} + self._cached_detail_bands: Optional[Dict[str, Any]] = None + def clear(self): """Clear all editor state so the next edit starts from a clean slate.""" with self._lock: @@ -353,6 +357,7 @@ def clear(self): self._source_exif_bytes = None self._last_highlight_state = None # Explicit reset self._cached_highlight_analysis = None + self._cached_detail_bands = None # Optionally also reset edits if that matches your mental model: # self.current_edits = self._initial_edits() @@ -811,36 +816,159 @@ def _apply_edits( edits=edits, ) - # 8. Clarity (Local Contrast) + # 8-10. Clarity / Texture / Sharpness (Unified Pyramid Detail Bands) + # + # Uses a hierarchical luma-only pyramid decomposition to avoid: + # - Triple-amplifying the same edges (halo stacking) + # - Chroma artifacts from RGB high-pass + # - Incorrect midtone mask on HDR/linear values >1.0 + # + # Bands: + # D_clarity = Y - Y20 (coarse local contrast) + # D_texture = Y3 - Y20 (mid-frequency detail) + # D_sharp = Y1 - Y3 (fine detail) + # clarity = edits.get("clarity", 0.0) - if abs(clarity) > 0.001: - # Apply in linear space, preservation of highlights - blurred_arr = _gaussian_blur_float(arr, radius=20.0) + texture = edits.get("texture", 0.0) + sharpness = edits.get("sharpness", 0.0) - # Apply: (original - blurred) is high pass. 
- # mask = midtones - # mean = axis 2 - gray = arr.mean(axis=2, keepdims=True) - midtone_mask = 1.0 - np.abs(np.clip(gray, 0, 1) - 0.5) * 2.0 + if abs(clarity) > 0.001 or abs(texture) > 0.001 or abs(sharpness) > 0.001: + # Ensure float32 to avoid memory bloat from float64 upcast + arr = arr.astype(np.float32, copy=False) - local_contrast = (arr - blurred_arr) * clarity * midtone_mask - arr += local_contrast + # Current exposure gain (for scaling cached blurs) + current_exp_gain = 2.0 ** edits.get("exposure", 0.0) - # 9. Texture (Fine Detail) - texture = edits.get("texture", 0.0) - if abs(texture) > 0.001: - # Small radius for texture/fine detail - blurred_arr = _gaussian_blur_float(arr, radius=3.0) - high_pass = arr - blurred_arr - arr += high_pass * texture + # Compute linear luminance (Rec.709 coefficients) + Y = arr @ np.array([0.2126, 0.7152, 0.0722], dtype=np.float32) - # 10. Sharpness - sharpness = edits.get("sharpness", 0.0) - if abs(sharpness) > 0.001: - # Unsharp mask with radius ~1.0 - blurred_arr = _gaussian_blur_float(arr, radius=1.0) - high_pass = arr - blurred_arr - arr += high_pass * sharpness + # Determine which blurs we need based on active sliders + need_Y20 = abs(clarity) > 0.001 or abs(texture) > 0.001 + need_Y3 = abs(texture) > 0.001 or abs(sharpness) > 0.001 + need_Y1 = abs(sharpness) > 0.001 + + # Check cache for detail bands (hash + frozen tuple verification) + detail_hash, detail_frozen = self._get_detail_upstream_hash(edits) + Y20_cached = Y3_cached = Y1_cached = None + cache_hit = False + cached_exp_gain = 1.0 + + with self._lock: + cached = self._cached_detail_bands + # Verify both hash AND frozen values to avoid collisions + if cached and cached.get("hash") == detail_hash and cached.get("frozen") == detail_frozen: + Y20_cached = cached.get("Y20") + Y3_cached = cached.get("Y3") + Y1_cached = cached.get("Y1") + cached_exp_gain = cached.get("exp_gain", 1.0) + cache_hit = True + + # Compute exposure scale factor for reusing cached blurs + # 
blur(k*Y) = k*blur(Y) is exact only if Y scales linearly with exposure. + # Since highlights/shadows recovery (step 7) is non-linear and sits between + # exposure and detail bands, this scaling is APPROXIMATE when h/s is active. + # The approximation is good enough for smooth 60fps dragging; exact render + # happens when upstream params (WB/crop/rotate) change and cache invalidates. + exp_scale = current_exp_gain / cached_exp_gain if cache_hit and abs(cached_exp_gain) > 1e-9 else 1.0 + + # Safe extraction: use [..., 0] if 3D, else keep as-is (avoids squeeze() collapsing H/W) + def _extract_2d(blur_result): + return blur_result[..., 0] if blur_result.ndim == 3 else blur_result + + # Get or compute each blur, tracking what we freshly computed + Y_3d = Y[..., None] # (H, W, 1) for blur function + Y20 = Y3 = Y1 = None + newly_computed = {"Y20": None, "Y3": None, "Y1": None} + + if need_Y20: + if Y20_cached is not None: + Y20 = Y20_cached * exp_scale + else: + Y20 = _extract_2d(_gaussian_blur_float(Y_3d, radius=20.0)) + newly_computed["Y20"] = Y20 + + if need_Y3: + if Y3_cached is not None: + Y3 = Y3_cached * exp_scale + else: + Y3 = _extract_2d(_gaussian_blur_float(Y_3d, radius=3.0)) + newly_computed["Y3"] = Y3 + + if need_Y1: + if Y1_cached is not None: + Y1 = Y1_cached * exp_scale + else: + Y1 = _extract_2d(_gaussian_blur_float(Y_3d, radius=1.0)) + newly_computed["Y1"] = Y1 + + # Update cache if we computed any new blurs + # Merge newly computed blurs with existing cached blurs (unscaled) + if any(v is not None for v in newly_computed.values()): + with self._lock: + # Start with existing cached values (unscaled) or empty + if cache_hit: + new_cache = { + "hash": detail_hash, + "frozen": detail_frozen, + "exp_gain": cached_exp_gain, # Keep original exp_gain for existing blurs + "Y20": Y20_cached, + "Y3": Y3_cached, + "Y1": Y1_cached, + } + # Add newly computed blurs (they're at current_exp_gain, need to rescale to cached_exp_gain) + rescale_to_cached = cached_exp_gain 
/ current_exp_gain if abs(current_exp_gain) > 1e-9 else 1.0 + for key, val in newly_computed.items(): + if val is not None: + new_cache[key] = val * rescale_to_cached + else: + # Fresh cache at current exposure + new_cache = { + "hash": detail_hash, + "frozen": detail_frozen, + "exp_gain": current_exp_gain, + "Y20": newly_computed["Y20"], + "Y3": newly_computed["Y3"], + "Y1": newly_computed["Y1"], + } + self._cached_detail_bands = new_cache + + # Build hierarchical pyramid bands (non-overlapping frequency ranges) + detail = np.zeros_like(Y) + + if abs(clarity) > 0.001: + # D_clarity = Y - Y20 (coarse local contrast) + D_clarity = Y - Y20 + detail += clarity * D_clarity + + if abs(texture) > 0.001: + # D_texture = Y3 - Y20 (mid-frequency detail) + # Y3 has more high-frequency than Y20, so this isolates mid-band + D_texture = Y3 - Y20 + detail += texture * D_texture + + if abs(sharpness) > 0.001: + # D_sharp = Y1 - Y3 (fine detail) + # Scale factor to match perceived strength of old Y - Y1 unsharp mask + k_sharp = 2.0 + D_sharp = Y1 - Y3 + detail += sharpness * k_sharp * D_sharp + + # Compute bounded midtone mask from perceptual luminance + # Use sqrt for perceptual curve (approximates gamma) + Y_mask = np.clip(Y, 0.0, 1.0) + Y_mask = np.sqrt(Y_mask) + midtone_mask = np.clip(1.0 - np.abs(Y_mask - 0.5) * 2.0, 0.0, 1.0) + + # Apply detail via luma-ratio gain (preserves hue/saturation) + # Only apply ratio where Y > eps; leave gain at 1.0 for dark/negative regions + eps = 1e-7 + valid_mask = Y > eps + den = np.where(valid_mask, Y, 1.0) + gain = 1.0 + midtone_mask * detail / den + gain = np.where(valid_mask, gain, 1.0) + # Soft clamp to prevent extreme values (hard clamp for v1, can soften later) + gain = np.clip(gain, 0.5, 2.0) + arr *= gain[..., None] # 11. Global Headroom Shoulder (safety net for values > 1.0) # This ONLY affects values above 1.0, compressing headroom smoothly. 
@@ -1054,6 +1182,46 @@ def _freeze(v): values.append(self.current_mtime) return hash(tuple(values)) + def _get_detail_upstream_hash(self, edits: Dict[str, Any]) -> tuple: + """Returns a frozen tuple of edit parameters that affect the input to detail bands. + + NOTE: We intentionally EXCLUDE exposure, highlights, and shadows from this hash. + + Rationale for exclusions (performance vs accuracy tradeoff): + - Exposure: We scale cached blurs by exp_gain ratio. This is exact only when + highlights/shadows recovery is inactive (step 7 is non-linear). + - Highlights/Shadows: Non-linear, so cached blurs are approximate after changes. + + The approximation is acceptable for smooth 60fps dragging. Exact blurs are + recomputed when geometry (crop/rotate) or WB changes, which invalidates cache. + + Returns a tuple (hash, frozen_values) for collision-safe verification. + """ + keys = [ + "crop_box", + "rotation", + "straighten_angle", + "white_balance_by", + "white_balance_mg", + ] + + def _freeze(v): + # Recursively freeze and quantize floats + if isinstance(v, (list, tuple)): + return tuple(_freeze(x) for x in v) + if isinstance(v, dict): + return tuple(sorted((_freeze(k), _freeze(val)) for k, val in v.items())) + if isinstance(v, np.ndarray): + return v.tobytes() + # Quantize floats to avoid hash churn from tiny slider noise + if isinstance(v, float): + return round(v, 4) + return v + + frozen = tuple(_freeze(edits.get(k)) for k in keys) + frozen += (str(self.current_filepath), self.current_mtime) + return (hash(frozen), frozen) + def get_preview_data_cached( self, allow_compute: bool = True ) -> Optional[DecodedImage]: diff --git a/faststack/qml/ImageEditorDialog.qml b/faststack/qml/ImageEditorDialog.qml index c33e56a..92ed2ee 100644 --- a/faststack/qml/ImageEditorDialog.qml +++ b/faststack/qml/ImageEditorDialog.qml @@ -463,7 +463,7 @@ Window { property real _lastSentValue: 0 Timer { id: sendTimer - interval: 32 // ~30fps throttle + interval: 16 // ~60fps throttle repeat: 
true onTriggered: { if (Math.abs(slider._pendingValue - slider._lastSentValue) > 0.001) { From 8e12daa817dd44374d7eca1fd62f8f717a186bc0 Mon Sep 17 00:00:00 2001 From: AlanRockefeller Date: Tue, 27 Jan 2026 00:01:04 -0800 Subject: [PATCH 6/6] Image editor performance improvements --- faststack/app.py | 92 ++++++++++++++++++++++++++++-------------------- 1 file changed, 53 insertions(+), 39 deletions(-) diff --git a/faststack/app.py b/faststack/app.py index e72531a..703be33 100644 --- a/faststack/app.py +++ b/faststack/app.py @@ -121,6 +121,7 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine, debug_cache: self._preview_lock = threading.Lock() self._last_rendered_preview = None # Store latest valid render self._shutting_down = False # Flag to gate async callbacks during shutdown + self._opencv_warning_shown = False # Only show OpenCV warning once per session self.image_dir = image_dir self.image_files: List[ImageFile] = [] # Filtered list for display @@ -223,12 +224,12 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine, debug_cache: self.histogram_timer.setInterval(50) # 50ms throttle (max 20fps) self.histogram_timer.timeout.connect(self._kick_histogram_worker) - # Preview Refresh Timer (Coalescing) - self._preview_refresh_pending = False - self.preview_timer = QTimer(self) - self.preview_timer.setSingleShot(True) - self.preview_timer.setInterval(16) # ~60fps cap for smoother dragging - self.preview_timer.timeout.connect(self._do_preview_refresh) + # Preview refresh uses a gate pattern instead of a timer: + # - _kick_preview_worker() runs immediately if not inflight + # - If inflight, it sets _preview_pending and returns + # - When render completes, _apply_preview_result chains immediately if pending + # This removes the extra 16ms delay in the fast-render case by chaining + # immediately on completion. QML's 16ms slider timer remains the fps cap. 
# Track if any dialog is open to disable keybindings self._dialog_open = False @@ -243,12 +244,23 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine, debug_cache: @Slot(bool) def _on_editor_open_changed(self, is_open: bool): """Handle necessary setup/cleanup when editor opens or closes.""" - if not is_open: + if is_open: + # Warn once if OpenCV is not available (detail sliders will be slower) + if not self._opencv_warning_shown: + from faststack.imaging.optional_deps import HAS_OPENCV + if not HAS_OPENCV: + self._opencv_warning_shown = True + log.warning("OpenCV not available - detail sliders (clarity/texture/sharpness) will be slower") + self.update_status_message( + "OpenCV not installed - editor performance reduced. Install opencv-python for faster editing.", + timeout=8000 + ) + else: # Cleanup large memory buffers when editor closes if self.image_editor: log.debug("Editor closed, clearing editor memory buffers") self.image_editor.clear() - + # Also clear the cached preview rendering with self._preview_lock: self._last_rendered_preview = None @@ -3055,12 +3067,6 @@ def get_preview_data(self) -> Optional[DecodedImage]: """Gets the preview data of the currently edited image as a DecodedImage.""" return self.image_editor.get_preview_data() - def _do_preview_refresh(self): - self._preview_refresh_pending = False - self._kick_preview_worker() - - - @Slot(str, "QVariant") def set_edit_parameter(self, key: str, value: Any): """Sets an edit parameter and updates the UIState for the slider visual.""" @@ -3089,11 +3095,10 @@ def set_edit_parameter(self, key: str, value: Any): if hasattr(self.ui_state, key): setattr(self.ui_state, key, final_value) - # Trigger a refresh of the image to show the edit (throttled), ONLY if something changed + # Trigger a refresh of the image to show the edit, ONLY if something changed + # Uses gate pattern: runs immediately if not inflight, else queues for next if changed: - if not self._preview_refresh_pending: - 
self._preview_refresh_pending = True - self.preview_timer.start() + self._kick_preview_worker() except Exception as e: log.error("Error setting edit parameter %s=%s: %s", key, value, e) @@ -3388,30 +3393,39 @@ def _apply_preview_result(self, payload): return token, decoded = payload - + should_kick = False + should_accept = False + with self._preview_lock: self._preview_inflight = False - - if decoded is not None: - # Only accept if it matches the latest requested token - if token == self._preview_token: - self._last_rendered_preview = decoded - # Ensure QML/provider URL changes so we don't get a cached frame - self.ui_refresh_generation += 1 - - # Track exactly what generation/index this preview corresponds to - self._last_rendered_preview_index = self.current_index - self._last_rendered_preview_gen = self.ui_refresh_generation - - self.ui_state.currentImageSourceChanged.emit() - self.ui_state.highlightStateChanged.emit() - - # Trigger histogram update (always call, let update_histogram handle visibility guards) - self.update_histogram() - - # If new requests arrived while we were rendering, start the next one immediately + + # Accept result only if: + # 1. We got valid decoded data + # 2. Token matches (not stale from an old request) + # 3. 
No pending request waiting (avoid "snap back" stale frame flash) + if decoded is not None and token == self._preview_token and not self._preview_pending: + self._last_rendered_preview = decoded + self.ui_refresh_generation += 1 + self._last_rendered_preview_index = self.current_index + self._last_rendered_preview_gen = self.ui_refresh_generation + should_accept = True + + # Consume pending flag atomically before scheduling if self._preview_pending: - QTimer.singleShot(0, self._kick_preview_worker) + self._preview_pending = False + should_kick = True + + # Emit outside lock to avoid holding lock during UI work + if should_accept: + self.ui_state.currentImageSourceChanged.emit() + self.ui_state.highlightStateChanged.emit() + self.update_histogram() + + # Call directly (not via singleShot) since we're on the UI thread. + # This prevents race where a new slider event could interleave between + # scheduling and execution, causing a spurious extra render. + if should_kick: + self._kick_preview_worker()