diff --git a/.gitignore b/.gitignore
index 2b5dea3..0b985ab 100644
--- a/.gitignore
+++ b/.gitignore
@@ -93,3 +93,4 @@ test_image*
**/*fail*.txt
**/*final*.txt
+green.txt
diff --git a/ChangeLog.md b/ChangeLog.md
index aed56f1..052ef80 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -2,6 +2,21 @@
Todo: More testing Linux / Mac. Create Windows .exe. Write better documentation / help. Add splash screen / icon. Fix raw image support.
+## 1.6.2 (2026-03-28)
+
+- Added a reusable soft-mask subsystem for local adjustments (mask model, mask engine, masked operations).
+- Added a Background Darkening tool (K key) as the first consumer of the mask system.
+ - Paint rough background hints (left-click) and subject protection (right-click).
+ - Strokes act as smart hints combined with image analysis, not hard mattes.
+ - Modes: Assisted, Paint Only, Strong Subject Protection, Border-Connected Auto.
+ - Controls: darken amount, edge protection, subject protection, feather, dark range, neutrality, expand/contract, auto from edges.
+ - Configurable mask overlay (blue default, selectable colours, toggle visibility).
+ - Darkening uses pedestal subtraction + multiplicative darkening for natural results.
+ - Mask data stored in normalised coordinates, survives rotate/straighten/crop changes.
+- Added "Darken Background (K)" button in the Image Editor effects section.
+- J and K keys no longer navigate to next/previous image. Use arrow keys instead.
+- K key now opens the Background Darkening tool (works from loupe view or inside the editor).
+
## 1.6.1 (2026-03-13)
- Added a slim custom title bar with hover-revealed menus.
diff --git a/README.md b/README.md
index fc19534..e4ebc12 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# FastStack
-# Version 1.6.1 - March 13, 2026
+# Version 1.6.2 - March 28, 2026
# By Alan Rockefeller
Ultra-fast, caching JPG viewer designed for culling and selecting RAW or JPG files for focus stacking and website upload.
@@ -16,7 +16,9 @@ This tool is optimized for speed, using `libjpeg-turbo` for decoding, aggressive
- **Helicon Focus Integration:** Launch Helicon Focus with your selected RAW files with a single keypress (`Enter`).
- **Instant Navigation:** Sub-10ms next/previous image switching, high performance decoding via `PyTurboJPEG`.
- **Image Editor:** Built-in editor with exposure, contrast, white balance, sharpness, and more (E key)
-- **Quick Auto White Balance:** Press A to apply auto white balance and save automatically with undo support (Ctrl+Z). For better white balance, load the raw into Photoshop with the P key.- **Photoshop Integration:** Edit current image in Photoshop (P key) - always uses RAW files when available.
+- **Background Darkening:** Mask-based background darkening tool (K key) with smart edge detection, subject protection, and multiple modes. Paint rough background hints and the tool refines them into natural-looking dark backgrounds.
+- **Quick Auto White Balance:** Press A to apply auto white balance and save automatically with undo support (Ctrl+Z). For better white balance, load the raw into Photoshop with the P key.
+- **Photoshop / GIMP Integration:** Edit current image in Photoshop or GIMP (P key) - always uses RAW files when available.
- **Clipboard Support:** Copy image path to clipboard (Ctrl+C)
- **Image Filtering:** Filter images by filename
- **Drag & Drop:** Drag images to external applications. Press { and } to batch files to drag & drop multiple images.
@@ -85,7 +87,7 @@ Example:
```cmd
set FASTSTACK_TURBOJPEG_LIB=C:\path\to\turbojpeg.dll
-venv\Scripts\python.exe -m faststack.app "C:\path\to\photos"
+faststack "C:\path\to\photos"
```
### Troubleshooting on Windows
@@ -105,15 +107,16 @@ Fastest fixes:
```cmd
set FASTSTACK_TURBOJPEG_LIB=C:\path\to\turbojpeg.dll
-venv\Scripts\python.exe -m faststack.app "C:\path\to\photos"
+faststack "C:\path\to\photos"
```
If you do nothing, FastStack will still run, but JPEG decoding and thumbnail generation will use Pillow instead of `libjpeg-turbo`, which is slower.
## Keyboard Shortcuts
-- `J` / `Right Arrow`: Next Image
-- `K` / `Left Arrow`: Previous Image
+- `Right Arrow`: Next Image
+- `Left Arrow`: Previous Image
+- `K`: Mask-based background darkening (smart edge detection, subject protection, multiple modes)
- `G`: Jump to Image Number
- `I`: Show EXIF Data
- `F11`: Toggle Fullscreen (Loupe View)
@@ -131,7 +134,7 @@ If you do nothing, FastStack will still run, but JPEG decoding and thumbnail gen
- `Ctrl+E`: Toggle edited flag
- `Ctrl+S`: Toggle stacked flag
- `Enter`: Launch Helicon Focus with selected RAWs
-- `P`: Edit in Photoshop (uses RAW file if available)
+- `P`: Edit in Photoshop or GIMP (uses RAW file when available)
- `O` (or Right-Click): Toggle crop mode (Enter to execute, Esc to cancel)
- `Delete` / `Backspace`: Move image to recycle bin
- `Ctrl+Z`: Undo last action (delete, auto white balance, or crop)
diff --git a/faststack/__main__.py b/faststack/__main__.py
new file mode 100644
index 0000000..5f12586
--- /dev/null
+++ b/faststack/__main__.py
@@ -0,0 +1,4 @@
+from .app import cli
+
+if __name__ == "__main__":
+ cli()
diff --git a/faststack/app.py b/faststack/app.py
index e3b8333..83abf14 100644
--- a/faststack/app.py
+++ b/faststack/app.py
@@ -71,6 +71,8 @@
from faststack.imaging.prefetch import Prefetcher, clear_icc_caches
from faststack.ui.keystrokes import Keybinder
from faststack.imaging.editor import ImageEditor, ASPECT_RATIOS
+from faststack.imaging.mask import DarkenSettings, MaskData, MaskStroke
+from faststack.imaging.mask_engine import inverse_transform
from faststack.imaging.metadata import get_exif_data
from faststack.thumbnail_view import (
ThumbnailModel,
@@ -245,7 +247,10 @@ def __init__(
)
# Deferred-init state: set to safe defaults, populated later by their methods
- self._save_initiated_path: Optional[str] = None
+ self._saves_in_flight: set = (
+ set()
+ ) # canonical target paths currently being saved
+ self._saving_keys: set = set() # keys of images with active saves
self._batch_indices_cache: set = set()
self._batch_indices_cache_key: Optional[tuple] = None
self.recycle_bin_dir: Optional[Path] = None
@@ -518,8 +523,21 @@ def _on_editor_open_changed(self, is_open: bool):
else:
# Cleanup large memory buffers when editor closes
if self.image_editor:
- log.debug("Editor closed, clearing editor memory buffers")
- self.image_editor.clear()
+ # If a save is active for this session, preserve the memory
+ # so the user can re-open/retry if the background task fails.
+ current_key = (
+ self._key(self.image_editor.current_filepath)
+ if self.image_editor.current_filepath
+ else None
+ )
+ if current_key and current_key in self._saving_keys:
+ log.debug(
+ "Editor closed but save in progress for %s; keeping session memory",
+ current_key,
+ )
+ else:
+ log.debug("Editor closed, clearing editor memory buffers")
+ self.image_editor.clear()
# Also clear the cached preview rendering
with self._preview_lock:
@@ -1561,14 +1579,23 @@ def _get_save_target_path_for_current_view(self) -> Optional[Path]:
def save_edited_image(self):
"""Saves the edited image in a background thread to keep UI responsive.
- Sets isSaving=True, spawns background worker, returns immediately.
- On completion, _on_save_finished is called via signal to perform cleanup.
+ All export-critical state is captured as an immutable snapshot on the
+ main thread BEFORE the editor is closed or the background worker starts.
+ The background worker operates only on the snapshot — it never reads
+ live editor state for export data.
"""
if not self.image_editor.original_image:
return
- # Prevent double-saves
- if self.ui_state.isSaving:
+ # Determine the actual target path for duplicate-save protection.
+ # Normalize to a canonical string so Path vs str never causes a miss.
+ save_target_path = self._get_save_target_path_for_current_view()
+ raw_target = save_target_path or self.image_editor.current_filepath
+ effective_target = str(Path(raw_target).resolve()) if raw_target else None
+ if effective_target and effective_target in self._saves_in_flight:
+ self.update_status_message(
+ "This image is still saving. Please wait a moment.", timeout=3000
+ )
return
# Capture state needed for save before we start
@@ -1577,46 +1604,133 @@ def save_edited_image(self):
if write_sidecar and 0 <= self.current_index < len(self.image_files):
dev_path = self.image_files[self.current_index].developed_jpg_path
- # Determine save_target_path for variant saves
- save_target_path = self._get_save_target_path_for_current_view()
+ # --- CRITICAL: Snapshot export state BEFORE closing editor or submitting ---
+ # This runs on the main thread and captures an immutable copy of everything
+ # needed for the export: source image, edits, darken settings, mask data, EXIF.
+ try:
+ export_snapshot = self.image_editor.snapshot_for_export(
+ write_developed_jpg=write_sidecar,
+ developed_path=dev_path,
+ save_target_path=save_target_path,
+ )
+ except RuntimeError as e:
+ self.update_status_message(str(e))
+ return
+
+ # Capture save context NOW — these are frozen into the result dict so
+ # _on_save_finished can make cleanup decisions without reading mutable
+ # controller/editor fields that may change during the background save.
+ editor_was_open = self.ui_state.isEditorOpen
+ save_image_key = (
+ self._key(self.image_files[self.current_index].path)
+ if 0 <= self.current_index < len(self.image_files)
+ else None
+ )
+ session_token = (
+ save_image_key,
+ getattr(self, "view_override_kind", None),
+ self.image_editor.session_id if self.image_editor else None,
+ )
+
+ if save_image_key and save_image_key in self._saving_keys:
+ self.update_status_message(
+ "This image is still saving. Please wait a moment.", timeout=3000
+ )
+ return
- # Store save token to prevent "surprise close" if user navigates away during save
- self._save_initiated_path = self.image_editor.current_filepath
+ # Track in-flight save by target path
+ if effective_target:
+ self._saves_in_flight.add(effective_target)
+ if save_image_key:
+ self._saving_keys.add(save_image_key)
- # Show saving indicator
+ # Show saving indicator (stays until save finishes — no auto-clear timeout)
self.ui_state.isSaving = True
- self.update_status_message("Saving...")
+ self.ui_state.statusMessage = "Saving..."
- # Submit save work to background thread
+ # Compute restore-override flag
+ # We are restoring if we have an override path AND kind is NOT developed (i.e. it's a backup)
+ started_from_restore_override = (
+ bool(self.view_override_path)
+ and getattr(self, "view_override_kind", None) != "developed"
+ )
+
+ # Build the base context that every result dict carries
+ _ctx = {
+ "target": effective_target,
+ "editor_was_open": editor_was_open,
+ "save_image_key": save_image_key,
+ "session_token": session_token,
+ "started_from_restore_override": started_from_restore_override,
+ }
+
+ # Submit save work to background thread — operates only on the snapshot
def do_save():
"""Worker function that runs in background thread."""
try:
- result = self.image_editor.save_image(
- write_developed_jpg=write_sidecar,
- developed_path=dev_path,
- save_target_path=save_target_path,
- )
- return {"success": True, "result": result}
+ result = self.image_editor.save_from_snapshot(export_snapshot)
+ return {"success": True, "result": result, **_ctx}
except RuntimeError as e:
- return {"success": False, "error": str(e)}
+ return {"success": False, "error": str(e), **_ctx}
except Exception as e:
log.exception("Unexpected error during save: %s", e)
- return {"success": False, "error": "Failed to save image"}
+ return {
+ "success": False,
+ "error": "Failed to save image",
+ **_ctx,
+ }
def on_done(future):
"""Callback when background save completes - emits signal to hop to main thread."""
- # Guard emit during shutdown to prevent signal to deleted QObject
if self._shutting_down:
return
try:
result = future.result()
except Exception as e:
- result = {"success": False, "error": str(e)}
- # Emit signal to process result on main thread
+ result = {"success": False, "error": str(e), **_ctx}
self._saveFinished.emit(result)
- future = self._save_executor.submit(do_save)
- future.add_done_callback(on_done)
+ try:
+ future = self._save_executor.submit(do_save)
+ except Exception as e:
+ log.error("Failed to submit save to background executor: %s", e)
+ # Rollback save bookkeeping: submission failed, save never started.
+ if effective_target:
+ self._saves_in_flight.discard(effective_target)
+ if save_image_key:
+ self._saving_keys.discard(save_image_key)
+ self.ui_state.isSaving = False
+ self.update_status_message(f"Failed to start background save: {e}")
+ # Do NOT close editor if submission failed, as the save never started.
+ # Return early to avoid the isEditorOpen = False block below.
+ return
+ try:
+ future.add_done_callback(on_done)
+ except Exception as e:
+ # Submission succeeded, so the save IS running — do not roll back
+ # _saves_in_flight / _saving_keys. Instead, spin up a minimal
+ # daemon thread to await the future and deliver the result via
+ # on_done(), so _saveFinished is still emitted and cleanup runs.
+ log.error(
+ "Failed to register save callback; using fallback watcher thread: %s", e
+ )
+
+ def _fallback_watcher(fut=future):
+ concurrent.futures.wait([fut])
+ on_done(fut)
+
+ t = threading.Thread(
+ target=_fallback_watcher, daemon=True, name="SaveCallbackFallback"
+ )
+ t.start()
+
+ # Close editor UI immediately to allow the user to continue working.
+ # The background worker uses the frozen export_snapshot, so it doesn't
+ # need the live editor UI to remain open.
+ # If the save fails later, memory is preserved due to the guard in
+ # _on_editor_open_changed, allowing the same session to be re-opened.
+ if self.ui_state.isEditorOpen:
+ self.ui_state.isEditorOpen = False
@Slot(object)
def _on_save_finished(self, save_result: dict):
@@ -1625,52 +1739,73 @@ def _on_save_finished(self, save_result: dict):
if self._shutting_down:
return
- # Always clear saving indicator
- self.ui_state.isSaving = False
+ # Remove completed target from in-flight set, then clear the saving
+ # indicator only when no exports remain in progress.
+ target = save_result.get("target")
+ if target:
+ self._saves_in_flight.discard(target)
+
+ save_key = save_result.get("save_image_key")
+ if save_key:
+ self._saving_keys.discard(save_key)
+ if not self._saves_in_flight:
+ self.ui_state.isSaving = False
if not save_result.get("success"):
- self.update_status_message(save_result.get("error", "Save failed"))
+ error_msg = save_result.get("error", "Save failed")
+ self.update_status_message(f"Save failed: {error_msg}", timeout=5000)
return
result = save_result.get("result")
- if result:
+        if isinstance(result, tuple) and len(result) == 2:
saved_path, _ = result # backup_path unused
# --- Post-Save Cleanup ---
- # Only auto-close editor if still on the same image that initiated the save
- # Prevents "surprise close" if user navigated away during save
- initiated_path = getattr(self, "_save_initiated_path", None)
- editor_still_on_same_image = (
- self.ui_state.isEditorOpen
- and self.image_editor.current_filepath
- and initiated_path
- and self.image_editor.current_filepath == initiated_path
+ # Read frozen save context — these were captured at save-initiation
+ # time and are immune to editor/navigation changes during the save.
+ editor_was_open = save_result.get("editor_was_open", False)
+ save_session_token = save_result.get("session_token")
+
+ # Check whether the user is still viewing the identical session
+ # they saved (same image, same variant, same editor underlying data)
+ current_image_key = (
+ self._key(self.image_files[self.current_index].path)
+ if 0 <= self.current_index < len(self.image_files)
+ else None
+ )
+ current_session_token = (
+ current_image_key,
+ getattr(self, "view_override_kind", None),
+ self.image_editor.session_id if self.image_editor else None,
)
- # 1. Close Editor UI (only if still on same image)
- if editor_still_on_same_image:
- self.ui_state.isEditorOpen = False
-
- # 2. Clear Editor State (release memory) - only if still on same image
- if editor_still_on_same_image:
- self.image_editor.clear()
-
- # 2b. Clear variant override (save always targets Main)
- if editor_still_on_same_image:
- self._clear_variant_override()
+ still_on_same_image = (
+ save_session_token is not None
+ and current_session_token is not None
+ and current_session_token == save_session_token
+ )
- # 3. Refresh List and Handle Selection
- if editor_still_on_same_image:
- # Full refresh to see new file or updated timestamp
+ if still_on_same_image:
+ # Clear Editor State (release memory) — only when the
+ # editor dialog was actually open for this save.
+ if editor_was_open:
+ if self.ui_state.isEditorOpen:
+ self.ui_state.isEditorOpen = False
+                # Clear explicitly: if the user closed the editor manually during
+                # the save, _on_editor_open_changed skipped clearing at that time.
+ self.image_editor.clear()
+
+ # Call this regardless of editor_was_open IF it was a restore-override
+ if save_result.get("started_from_restore_override"):
+ self._clear_variant_override()
+
+ # Refresh list to pick up new backup files and update variant map
self.refresh_image_list()
- # 4. Find and re-select the saved image
- new_index = (
- self.current_index
- ) # Default to keeping selection if not found
+ # Find and re-select the saved image
+ new_index = self.current_index
- # Try to find by exact path match
if saved_path:
target_key = self._key(saved_path)
for i, img in enumerate(self.image_files):
@@ -1680,20 +1815,20 @@ def _on_save_finished(self, save_result: dict):
self.current_index = new_index
- # 5. Force UI Sync / Prefetch
- self.image_cache.clear() # Clear cache to ensure we reload valid image
+ # Force UI Sync / Prefetch
+ self.image_cache.clear()
self.prefetcher.cancel_all()
self.prefetcher.update_prefetch(self.current_index)
self.sync_ui_state()
- # Refresh variant badges (backup was created)
- if self.ui_state:
- self.ui_state.variantBadgesChanged.emit()
else:
- # User navigated away - skip full refresh to preserve their selection
- # Just clear stale cache entry for the saved image
+ # User navigated away — clear stale cache entry
if saved_path:
self.image_cache.pop_path(saved_path)
+ # Always emit badge update — backup file was created
+ if self.ui_state:
+ self.ui_state.variantBadgesChanged.emit()
+
self.update_status_message("Image saved")
else:
self.update_status_message("Failed to save image")
@@ -1733,6 +1868,7 @@ def _set_current_index(
self._clear_variant_override()
self._reset_crop_settings()
+ self._reset_darken_on_navigation()
if self.debug_cache:
_t_prefetch = time.perf_counter()
@@ -4459,6 +4595,9 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict:
log.warning("[_delete_indices] No valid indices found in %s", indices)
return summary
+ if self._block_if_saving(*[img.path for img in images_to_delete]):
+ return summary
+
summary["requested_count"] = len(images_to_delete)
# --- PHASE 1: OPTIMISTIC UI UPDATE (instant, no I/O) ---
@@ -5358,6 +5497,10 @@ def edit_in_photoshop(self):
self.update_status_message("No image to edit.")
return
+ current_image_path = self.image_files[self.current_index].path
+ if self._block_if_saving(current_image_path):
+ return
+
# Prefer RAW file if it exists, otherwise use JPG
image_file = self.image_files[self.current_index]
jpg_path = image_file.path
@@ -5495,11 +5638,32 @@ def clear_message():
self.ui_state.statusMessage = message
QTimer.singleShot(timeout, clear_message)
+ def _is_image_saving(self, file_path_str: str) -> bool:
+ if not file_path_str or not hasattr(self, "_saving_keys"):
+ return False
+ return self._key(Path(file_path_str)) in self._saving_keys
+
+ def _block_if_saving(self, *paths) -> bool:
+ """Helper to block actions if any of the given paths are currently saving."""
+ for path in paths:
+ if path and self._is_image_saving(str(path)):
+ self.update_status_message(
+ "This image is still saving. Please wait a moment.", timeout=3000
+ )
+ return True
+ return False
+
@Slot()
def start_drag_current_image(self):
if not self.image_files or self.current_index >= len(self.image_files):
return
+        # Fast-fail guard on the current image before the full batch is resolved.
+ path_to_check = self.image_files[self.current_index].path
+        # The resolved batch files are re-checked again after batch resolution below.
+ if self._block_if_saving(path_to_check):
+ return
+
# Collect files to drag: batch files if any batches exist, otherwise current image
files_to_drag = set()
@@ -5519,6 +5683,11 @@ def start_drag_current_image(self):
idx for idx in file_indices if self.image_files[idx].path.exists()
]
+ # Check if ANY of the resolved files are currently saving
+ for idx in existing_indices:
+ if self._block_if_saving(self.image_files[idx].path):
+ return
+
# Prefer dragging the developed JPG if it exists (for external export),
# but only when RAW mode is active or we are dragging a developed file itself.
file_paths = []
@@ -5605,6 +5774,10 @@ def enable_raw_editing(self):
if not self.image_files:
return
+ current_image_path = self.image_files[self.current_index].path
+ if self._block_if_saving(current_image_path):
+ return
+
# 1. Update State
# 1. Update State
if self.current_edit_source_mode != "raw":
@@ -5750,20 +5923,48 @@ def worker():
def develop_raw_for_current_image(self):
self.enable_raw_editing()
- @Slot()
+ # Return sentinels for load_image_for_editing():
+ # True — a real reload was performed (new file or changed mtime)
+ # _REUSED — the existing editor session was kept (truthy, but ``is True`` is False)
+ # False — load failed or was aborted
+ _REUSED = 2 # truthy int so QML @Slot(result=bool) coerces to true
+
+ @Slot(result=bool)
def load_image_for_editing(self):
- """
- Loads the currently viewed image into the editor using active path logic.
- This provides a centralized entry point for loading the editor correctly.
+ """Load the currently viewed image into the editor.
+
+ Returns True on real reload, _REUSED when the existing session
+ was kept, or False on failure. The @Slot annotation coerces
+ _REUSED to true for QML callers (none of which inspect the value).
"""
try:
- # Use variant override path if active
if self.view_override_path:
active_path = Path(self.view_override_path)
else:
active_path = self.get_active_edit_path(self.current_index)
filepath = str(active_path)
+ editor_path = getattr(self.image_editor, "current_filepath", None)
+ match = False
+ if editor_path:
+ try:
+ match = Path(editor_path).resolve() == Path(filepath).resolve()
+ if match:
+ mtime = Path(filepath).stat().st_mtime
+ if mtime != getattr(self.image_editor, "current_mtime", 0.0):
+ match = False
+ except (OSError, ValueError):
+ pass
+
+ if match:
+ log.debug(
+ "load_image_for_editing: Reusing existing session for %s", filepath
+ )
+ # Ensure the background renderer is current and notify UI to refresh
+ # Also synchronize sliders/crop state to the backend session.
+ self._sync_editor_state_from_session()
+ return self._REUSED
+
# Fetch cached preview if available for faster initial display
cached_preview = self.get_decoded_image(self.current_index)
@@ -5798,27 +5999,39 @@ def load_image_for_editing(self):
# For now, simpler to emit a signal that UIState listens to,
# OR just manually update UIState here if we have reference.
if self.ui_state:
- self._sync_editor_state_to_ui()
+ self._sync_editor_state_from_session()
+
+ return True # Real reload performed
- return True
except Exception as e:
log.exception("Failed to load image for editing: %s", e)
self.update_status_message(f"Error loading editor: {e}")
+ if self.ui_state:
+ self.ui_state.isEditorOpen = False
+ return False
+ # load_image returned False
+ if self.ui_state:
+ self.ui_state.isEditorOpen = False
return False
- def _sync_editor_state_to_ui(self):
- """Helper to push editor state (initial edits) to UIState."""
- initial_edits = self.image_editor._initial_edits()
- for key, value in initial_edits.items():
+ def _sync_editor_state_from_session(self):
+ """Helper to push current editor session state (edits, crop) to UIState."""
+ edits = self.image_editor.current_edits
+ for key, value in edits.items():
if hasattr(self.ui_state, key):
setattr(self.ui_state, key, value)
# Reset visual components
if hasattr(self.ui_state, "aspectRatioNames"):
self.ui_state.aspectRatioNames = [r["name"] for r in ASPECT_RATIOS]
- self.ui_state.currentAspectRatioIndex = 0
- self.ui_state.currentCropBox = (0, 0, 1000, 1000)
+ # Pull crop box specifically; use default if None
+ crop_box = edits.get("crop_box")
+ if crop_box:
+ self.ui_state.currentCropBox = crop_box
+ else:
+ self.ui_state.currentAspectRatioIndex = 0
+ self.ui_state.currentCropBox = (0, 0, 1000, 1000)
# Kick off background render
self._kick_preview_worker()
@@ -5902,6 +6115,310 @@ def reset_edit_parameters(self):
if self.ui_state.isHistogramVisible:
self.update_histogram()
+ # ---- Background Darkening Tool ----
+
+ def _reset_darken_on_navigation(self):
+ """Reset all darken-specific state on image switch.
+
+ Called from _set_current_index() to ensure a clean slate.
+ Clears both editor-level mask data AND UI-side panel values,
+ because navigation while the editor is open does NOT call
+ editor.load_image().
+ """
+ # Editor-level: clear mask assets, raster cache, and darken settings
+ self.image_editor._mask_assets.clear()
+ self.image_editor._mask_raster_cache.clear()
+ if self.image_editor.current_edits.get("darken_settings") is not None:
+ self.image_editor.current_edits["darken_settings"] = None
+ self.image_editor._edits_rev += 1
+ # In-progress stroke
+ self._current_darken_stroke = None
+ # Tool mode
+ self.ui_state.isDarkening = False
+ # Overlay image
+ self.ui_state._darken_overlay_image = None
+ self.ui_state._darken_overlay_generation += 1
+ self.ui_state.darken_overlay_generation_changed.emit()
+ # Reset slider / panel values via property setters so QML bindings update
+ self.ui_state.darkenOverlayVisible = True
+ self.ui_state.darkenAmount = 0.5
+ self.ui_state.darkenEdgeProtection = 0.5
+ self.ui_state.darkenSubjectProtection = 0.5
+ self.ui_state.darkenFeather = 0.5
+ self.ui_state.darkenDarkRange = 0.5
+ self.ui_state.darkenNeutrality = 0.5
+ self.ui_state.darkenExpandContract = 0.0
+ self.ui_state.darkenAutoEdges = 0.0
+ self.ui_state.darkenMode = "assisted"
+ self.ui_state.darkenBrushRadius = 0.03
+
+ def _prepare_darken_image_state(self) -> bool:
+ """Helper to ensure the correct image is loaded for darkening."""
+ needs_load = (
+ self.image_editor.float_image is None
+ or self.image_editor.current_filepath is None
+ )
+ if not needs_load:
+ try:
+ active = (
+ self.view_override_path
+ if self.view_override_path
+ else self.get_active_edit_path(self.current_index)
+ )
+ current_p = self.image_editor.current_filepath
+ if current_p and active:
+ match = Path(current_p).resolve() == Path(active).resolve()
+ if not match:
+ needs_load = True
+ else:
+ needs_load = True
+ except (IndexError, TypeError, OSError, ValueError):
+ needs_load = True
+
+ if needs_load:
+ load_result = self.load_image_for_editing()
+ if load_result is False:
+ return False # load failed — abort rather than darken stale data
+ # Only reset darken on a real reload, not when reusing the session
+ if load_result is True:
+ self._reset_darken_on_navigation()
+ return True
+
+ @Slot()
+ def open_darken_tool(self):
+ """Activate the darkening tool, loading the image if needed.
+
+ The darken panel is independent of the editor sidebar — pressing K
+ in loupe view opens the darken panel without forcing the editor
+ panel open. The image is silently loaded for editing if it hasn't
+ been already.
+ """
+ if not self._prepare_darken_image_state():
+ return
+ self._ensure_darken_state()
+ self.ui_state.isDarkening = True
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ @Slot()
+ def toggle_darken_mode(self):
+ """Toggle the background darkening tool on/off.
+
+ Turning off also disables the darkening effect so the preview
+ reverts to the un-darkened image.
+ """
+ if self.ui_state.isDarkening:
+ self.ui_state.isDarkening = False
+ ds = self.image_editor.current_edits.get("darken_settings")
+ if ds is not None:
+ ds.enabled = False
+ self.image_editor._edits_rev += 1
+ self._kick_preview_worker()
+ else:
+ if not self._prepare_darken_image_state():
+ return
+ self._ensure_darken_state()
+ self.ui_state.isDarkening = True
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ def _ensure_darken_state(self):
+ """Ensure MaskData and DarkenSettings exist for the darken tool."""
+ if "darken" not in self.image_editor._mask_assets:
+ self.image_editor._mask_assets["darken"] = MaskData()
+ if self.image_editor.current_edits.get("darken_settings") is None:
+ ds = DarkenSettings(enabled=True)
+ self.image_editor.current_edits["darken_settings"] = ds
+ self.image_editor._edits_rev += 1
+ else:
+ ds = self.image_editor.current_edits["darken_settings"]
+ if not ds.enabled:
+ ds.enabled = True
+ self.image_editor._edits_rev += 1
+
+ @Slot(float, float, str)
+ def start_darken_stroke(self, x_norm: float, y_norm: float, stroke_type: str):
+ """Begin a new brush stroke. Coords are normalised [0,1] relative to the
+ displayed (post-crop, post-straighten) image."""
+ edits = self.image_editor.current_edits
+ x_base, y_base = inverse_transform(
+ x_norm,
+ y_norm,
+ edits,
+ (1, 1), # display_shape unused for normalised
+ )
+ brush_r = self.ui_state._darken_brush_radius
+ self._current_darken_stroke = {
+ "points": [(x_base, y_base)],
+ "radius": brush_r,
+ "stroke_type": stroke_type,
+ }
+
+ @Slot(float, float)
+ def continue_darken_stroke(self, x_norm: float, y_norm: float):
+ """Add a point to the current brush stroke."""
+ stroke = getattr(self, "_current_darken_stroke", None)
+ if stroke is None:
+ return
+ edits = self.image_editor.current_edits
+ x_base, y_base = inverse_transform(x_norm, y_norm, edits, (1, 1))
+ stroke["points"].append((x_base, y_base))
+
+ @Slot()
+ def finish_darken_stroke(self):
+ """Commit the current stroke to MaskData."""
+ stroke = getattr(self, "_current_darken_stroke", None)
+ if stroke is None or not stroke["points"]:
+ return
+ self._current_darken_stroke = None
+
+ mask_data = self.image_editor._mask_assets.get("darken")
+ if mask_data is None:
+ return
+
+ ms = MaskStroke(
+ points=stroke["points"],
+ radius=stroke["radius"],
+ stroke_type=stroke["stroke_type"],
+ )
+ mask_data.add_stroke(ms)
+
+ # Bump editor revision and refresh preview
+ self.image_editor._edits_rev += 1
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ @Slot()
+ def undo_darken_stroke(self):
+ """Remove the last brush stroke."""
+ mask_data = self.image_editor._mask_assets.get("darken")
+ if mask_data is None or not mask_data.has_strokes():
+ return
+ mask_data.undo_last_stroke()
+ self.image_editor._edits_rev += 1
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ @Slot()
+ def clear_darken_strokes(self):
+ """Clear all brush strokes."""
+ mask_data = self.image_editor._mask_assets.get("darken")
+ if mask_data is None or not mask_data.has_strokes():
+ return
+ mask_data.clear_strokes()
+ self.image_editor._edits_rev += 1
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ @Slot(str, float)
+ def set_darken_param(self, key: str, value: float):
+ """Update a DarkenSettings scalar and refresh."""
+ ds = self.image_editor.current_edits.get("darken_settings")
+ if ds is None:
+ return
+ if not hasattr(ds, key):
+ log.warning("Unknown darken param: %s", key)
+ return
+ setattr(ds, key, value)
+ # Sync UIState — map DarkenSettings field name → UIState property name
+ prop_map = {
+ "darken_amount": "darkenAmount",
+ "edge_protection": "darkenEdgeProtection",
+ "subject_protection": "darkenSubjectProtection",
+ "feather": "darkenFeather",
+ "dark_range": "darkenDarkRange",
+ "neutrality_sensitivity": "darkenNeutrality",
+ "expand_contract": "darkenExpandContract",
+ "auto_from_edges": "darkenAutoEdges",
+ "brush_radius": "darkenBrushRadius",
+ }
+ ui_prop = prop_map.get(key)
+ if ui_prop and hasattr(self.ui_state, ui_prop):
+ setattr(self.ui_state, ui_prop, value)
+
+ self.image_editor._edits_rev += 1
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ @Slot(str)
+ def set_darken_mode(self, mode: str):
+ """Set the darkening mode."""
+ ds = self.image_editor.current_edits.get("darken_settings")
+ if ds is None:
+ return
+ ds.mode = mode
+ self.ui_state.darkenMode = mode
+ self.image_editor._edits_rev += 1
+ self._kick_preview_worker()
+ self._update_darken_overlay()
+
+ @Slot(bool)
+ def set_darken_overlay_visible(self, visible: bool):
+ """Toggle mask overlay visibility."""
+ self.ui_state.darkenOverlayVisible = visible
+
+ @Slot(int, int, int)
+ def set_darken_overlay_color(self, r: int, g: int, b: int):
+ """Set the overlay colour."""
+ mask_data = self.image_editor._mask_assets.get("darken")
+ if mask_data is not None:
+ mask_data.overlay_color = (r, g, b)
+ self._update_darken_overlay()
+
+ def _update_darken_overlay(self):
+ """Generate the mask overlay QImage for display in QML."""
+ try:
+ from PySide6.QtGui import QImage
+
+ mask_data = self.image_editor._mask_assets.get("darken")
+ ds = self.image_editor.current_edits.get("darken_settings")
+ if mask_data is None or ds is None or not mask_data.has_strokes():
+ self.ui_state._darken_overlay_image = None
+ self.ui_state._darken_overlay_generation += 1
+ self.ui_state.darken_overlay_generation_changed.emit()
+ return
+
+ # Resolve mask at preview resolution
+ preview = self.image_editor.float_preview
+ if preview is None:
+ return
+
+ from faststack.imaging.mask_engine import resolve_mask
+
+ edits = dict(self.image_editor.current_edits)
+ resolved = resolve_mask(
+ mask_data,
+ ds,
+ preview,
+ preview.shape[:2],
+ edits,
+ cache=self.image_editor._mask_raster_cache,
+ )
+
+ # Build ARGB32 overlay
+ h, w = resolved.shape
+ r, g, b = mask_data.overlay_color
+ alpha = int(mask_data.overlay_opacity * 255)
+
+ # Create ARGB buffer: (H, W, 4) uint8
+ overlay = np.zeros((h, w, 4), dtype=np.uint8)
+ mask_u8 = (np.clip(resolved, 0.0, 1.0) * alpha).astype(np.uint8)
+ overlay[:, :, 0] = b # QImage ARGB32 is BGRA in memory on little-endian
+ overlay[:, :, 1] = g
+ overlay[:, :, 2] = r
+ overlay[:, :, 3] = mask_u8
+
+ buf = overlay.tobytes()
+ self._darken_overlay_buffer = buf
+ qimg = QImage(buf, w, h, w * 4, QImage.Format.Format_ARGB32)
+
+ self.ui_state._darken_overlay_image = qimg
+ self.ui_state._darken_overlay_generation += 1
+ self.ui_state.darken_overlay_generation_changed.emit()
+
+ except Exception:
+ log.exception("Failed to update darken overlay")
+
@Slot()
def rotate_image_cw(self):
"""Rotate the edited image 90 degrees clockwise."""
@@ -6291,6 +6808,9 @@ def _apply_preview_result(self, payload):
self.ui_state.currentImageSourceChanged.emit()
self.ui_state.highlightStateChanged.emit()
self.update_histogram()
+ # Keep mask overlay in sync with the preview whenever it changes
+ if self.ui_state._is_darkening:
+ self._update_darken_overlay()
# Call directly (not via singleShot) since we're on the UI thread.
# This prevents race where a new slider event could interleave between
@@ -6310,52 +6830,27 @@ def cancel_crop_mode(self):
self.ui_refresh_generation += 1
self.ui_state.currentImageSourceChanged.emit()
self.update_status_message("Crop cancelled")
- log.info("Crop mode cancelled")
@Slot()
def toggle_crop_mode(self):
"""Toggle crop mode on/off."""
self.ui_state.isCropping = not self.ui_state.isCropping
+
if self.ui_state.isCropping:
- # Reset crop box when entering crop mode
+ # Entering crop mode: reset to full image defaults
self.ui_state.currentCropBox = (0, 0, 1000, 1000)
- # Set aspect ratios for QML dropdown
self.ui_state.aspectRatioNames = [r["name"] for r in ASPECT_RATIOS]
self.ui_state.currentAspectRatioIndex = 0
- # Pre-load image into editor to ensure smooth rotation
- if self.image_files and self.current_index < len(self.image_files):
- image_file = self.image_files[self.current_index]
- filepath = image_file.path
- editor_path = self.image_editor.current_filepath
-
- # Robust comparison
- match = False
- if editor_path:
- try:
- match = Path(editor_path).resolve() == Path(filepath).resolve()
- except (OSError, ValueError):
- match = str(editor_path) == str(filepath)
-
- if not match:
- log.debug("toggle_crop_mode: Loading %s into editor", filepath)
- # Use cached preview if available to speed up using get_decoded_image(self.current_index)
- # note: get_decoded_image verifies index bounds
- cached_preview = self.get_decoded_image(self.current_index)
- self.image_editor.load_image(
- str(filepath), cached_preview=cached_preview
- )
-
# Reset rotation to 0 when starting fresh crop mode
self.image_editor.set_edit_param("straighten_angle", 0.0)
-
self.update_status_message("Crop mode: Drag to select area, Enter to crop")
- log.info("Crop mode enabled")
- else: # Exiting crop mode
- self.ui_state.isCropping = False
+ else:
+ # Exiting crop mode: cleanup
self.ui_state.currentCropBox = (0, 0, 1000, 1000)
+ # Ensure preview rotation is cleared when exiting
+ self.image_editor.set_edit_param("straighten_angle", 0.0)
self.update_status_message("Crop cancelled")
- log.info("Crop mode disabled")
@Slot()
def stack_source_raws(self):
@@ -6572,9 +7067,17 @@ def execute_crop(self):
self.update_status_message("No crop area selected")
return
- # Ensure image is loaded in editor
- image_file = self.image_files[self.current_index]
- filepath = image_file.path
+ # Restoration means viewing a backup; crop should target the main image.
+ # We must resolve this BEFORE potentially reloading or saving.
+ save_target_path = self._get_save_target_path_for_current_view()
+ is_restoring = save_target_path is not None
+
+ # Ensure image is loaded in editor.
+ # For crop, we use the CURRENTLY VIEWED file (which might be a variant).
+ if self.view_override_path:
+ filepath = Path(self.view_override_path)
+ else:
+ filepath = self.get_active_edit_path(self.current_index)
# Robust path comparison
editor_path = self.image_editor.current_filepath
@@ -6589,6 +7092,7 @@ def execute_crop(self):
log.debug(
f"execute_crop reloading image due to path mismatch. Editor: {editor_path}, File: {filepath}"
)
+ # get_decoded_image() honors variants/overrides.
cached_preview = self.get_decoded_image(self.current_index)
if not self.image_editor.load_image(
str(filepath), cached_preview=cached_preview
@@ -6602,9 +7106,11 @@ def execute_crop(self):
# This handles cases where we reloaded the image (resetting edits) or where UI state sync was flaky.
self.image_editor.set_edit_param("straighten_angle", current_rotation)
- # Save via ImageEditor (handles rotation + crop correctly)
+ # Save via ImageEditor (passing the resolved target for variant-save policy)
try:
- save_result = self.image_editor.save_image()
+ save_result = self.image_editor.save_image(
+ save_target_path=save_target_path
+ )
except RuntimeError as e:
log.warning("execute_crop: Save failed: %s", e)
self.update_status_message(f"Failed to save cropped image: {e}")
@@ -6617,6 +7123,10 @@ def execute_crop(self):
if save_result:
saved_path, backup_path = save_result
+ # IF we were restoring from a variant, clear the override now that it's "the truth"
+ if is_restoring:
+ self._clear_variant_override()
+
timestamp = time.time()
self.undo_history.append(
("crop", (str(saved_path), str(backup_path)), timestamp)
diff --git a/faststack/imaging/editor.py b/faststack/imaging/editor.py
index 8871648..2d94d0f 100644
--- a/faststack/imaging/editor.py
+++ b/faststack/imaging/editor.py
@@ -27,6 +27,10 @@
from faststack.imaging.orientation import apply_orientation_to_np, get_exif_orientation
from faststack.models import DecodedImage
+# Mask subsystem (lazy imports avoided — lightweight dataclasses)
+from faststack.imaging.mask import DarkenSettings, MaskData
+from faststack.imaging.mask_engine import MaskRasterCache
+
try:
from PySide6.QtGui import QImage
except ImportError:
@@ -316,6 +320,7 @@ def __init__(self):
# Stores the currently applied edits (used for preview)
self.current_edits: Dict[str, Any] = self._initial_edits()
self.current_filepath: Optional[Path] = None
+ self.session_id: Optional[str] = None
# Caching support for smooth updates
self._lock = threading.RLock()
@@ -358,11 +363,16 @@ def __init__(self):
# keyed on (round(blacks, 3), round(whites, 3)).
self._cached_u8_lut: Optional[Tuple[Tuple[float, float], List[int]]] = None
+ # Mask subsystem — generic mask assets keyed by tool id
+ self._mask_assets: Dict[str, MaskData] = {}
+ self._mask_raster_cache = MaskRasterCache()
+
def clear(self):
"""Clear all editor state so the next edit starts from a clean slate."""
with self._lock:
self.original_image = None
self.current_filepath = None
+ self.session_id = None
self.float_image = None
self.float_preview = None
self._edits_rev += 1
@@ -374,6 +384,8 @@ def clear(self):
self._cached_highlight_analysis = None
self._cached_detail_bands = None
self._cached_u8_lut = None
+ self._mask_assets.clear()
+ self._mask_raster_cache.clear()
# Optionally also reset edits if that matches your mental model:
# self.current_edits = self._initial_edits()
@@ -411,6 +423,7 @@ def _initial_edits(self) -> Dict[str, Any]:
"clarity": 0.0,
"texture": 0.0,
"straighten_angle": 0.0,
+ "darken_settings": None, # DarkenSettings or None
}
@staticmethod
@@ -458,12 +471,16 @@ def _get_f(key: str) -> float:
except (ValueError, TypeError):
return 1.0 # Safe default: treat as "active" to skip optimization
+ darken = edits.get("darken_settings")
+ darken_active = darken is not None and getattr(darken, "enabled", False)
+
return (
ImageEditor._edits_skip_linear(edits)
and abs(_get_f("vignette")) <= 0.001
and edits.get("rotation", 0) == 0
and abs(_get_f("straighten_angle")) <= 0.001
and not edits.get("crop_box")
+ and not darken_active
)
def load_image(
@@ -489,10 +506,14 @@ def load_image(
self.float_image = None
self.float_preview = None
self.current_filepath = None
+ self.session_id = None
self._source_exif_bytes = None
self._edits_rev += 1
self._cached_preview = None
self._cached_rev = -1
+ # Clear mask state from previous image
+ self._mask_assets.clear()
+ self._mask_raster_cache.clear()
log.error("Image file not found: %s", filepath)
return False
@@ -509,6 +530,9 @@ def load_image(
# Clear previous cached EXIF and set new one if provided
self.current_mtime = new_mtime
self._source_exif_bytes = source_exif
+ # Clear mask state from previous image
+ self._mask_assets.clear()
+ self._mask_raster_cache.clear()
try:
# We must load and close the original file handle immediately
@@ -664,6 +688,7 @@ def load_image(
# Assign all state atomically under lock to prevent race with preview worker
with self._lock:
self.current_filepath = load_filepath
+ self.session_id = uuid.uuid4().hex
self.original_image = loaded_original
self.float_image = loaded_float_image
self.float_preview = loaded_float_preview
@@ -696,9 +721,12 @@ def load_image(
self.float_image = None
self.float_preview = None
self.current_filepath = None
+ self.session_id = None
self._edits_rev += 1
self._cached_preview = None
self._cached_rev = -1
+ self._mask_assets.clear()
+ self._mask_raster_cache.clear()
return False
def _rotate_float_image(
@@ -731,6 +759,9 @@ def _apply_edits(
edits: Optional[Dict[str, Any]] = None,
*,
for_export: bool = False,
+ mask_assets_override: Optional[Dict[str, "MaskData"]] = None,
+ cache_override: Optional["MaskRasterCache"] = None,
+ cache_context: Optional[dict] = None,
) -> np.ndarray:
"""Applies all current edits to the provided float32 numpy array.
Returns float32 array (H, W, 3).
@@ -998,11 +1029,13 @@ def _apply_edits(
cached_analysis = None
with self._lock:
- if (
- self._cached_highlight_analysis
- and self._cached_highlight_analysis["hash"] == upstream_hash
- ):
- cached_analysis = self._cached_highlight_analysis["state"]
+ cached_dict = (
+ cache_context.get("highlight_analysis")
+ if cache_context is not None
+ else self._cached_highlight_analysis
+ )
+ if cached_dict and cached_dict["hash"] == upstream_hash:
+ cached_analysis = cached_dict["state"]
if cached_analysis:
analysis_state = cached_analysis
@@ -1019,10 +1052,14 @@ def _apply_edits(
)
with self._lock:
- self._cached_highlight_analysis = {
+ entry = {
"hash": upstream_hash,
"state": analysis_state,
}
+ if cache_context is not None:
+ cache_context["highlight_analysis"] = entry
+ else:
+ self._cached_highlight_analysis = entry
if not for_export:
with self._lock:
@@ -1037,6 +1074,7 @@ def _apply_edits(
srgb_u8_stride=srgb_u8_stride, # Pass if we need to recompute analysis
analysis_state=analysis_state,
edits=edits,
+ cache_context=cache_context,
)
# 8-10. Clarity / Texture / Sharpness (Unified Pyramid Detail Bands)
@@ -1077,7 +1115,11 @@ def _apply_edits(
cached_exp_gain = 1.0
with self._lock:
- cached = self._cached_detail_bands
+ cached = (
+ cache_context.get("detail_bands")
+ if cache_context is not None
+ else self._cached_detail_bands
+ )
# Verify both hash AND frozen values to avoid collisions
if (
cached
@@ -1175,7 +1217,10 @@ def _extract_2d(blur_result):
"Y3": newly_computed["Y3"],
"Y1": newly_computed["Y1"],
}
- self._cached_detail_bands = new_cache
+ if cache_context is not None:
+ cache_context["detail_bands"] = new_cache
+ else:
+ self._cached_detail_bands = new_cache
# Build hierarchical pyramid bands (non-overlapping frequency ranges)
detail = np.zeros_like(Y)
@@ -1281,6 +1326,40 @@ def _extract_2d(blur_result):
wp = bp + 0.0001
arr = (arr - bp) / (wp - bp)
+ # 13.5. Background Darkening (masked, after levels, before vignette)
+ darken = edits.get("darken_settings")
+ if darken is not None and getattr(darken, "enabled", False):
+ # Use override assets/cache if provided (export snapshot), else live state
+ _assets = (
+ mask_assets_override
+ if mask_assets_override is not None
+ else self._mask_assets
+ )
+ _cache = (
+ cache_override
+ if cache_override is not None
+ else self._mask_raster_cache
+ )
+ mask_data = _assets.get(darken.mask_id)
+ if mask_data is not None and mask_data.has_strokes():
+ from faststack.imaging.mask_engine import resolve_mask
+ from faststack.imaging.masked_ops import apply_masked_darken
+
+ resolved = resolve_mask(
+ mask_data,
+ darken,
+ arr,
+ arr.shape[:2],
+ edits,
+ cache=_cache,
+ )
+ arr = apply_masked_darken(
+ arr,
+ resolved,
+ darken_amount=darken.darken_amount,
+ edge_protection=darken.edge_protection,
+ )
+
# 14. Vignette
vignette = edits.get("vignette", 0.0)
if abs(vignette) > 0.001:
@@ -1619,6 +1698,7 @@ def _apply_highlights_shadows(
srgb_u8: Optional[np.ndarray] = None, # Planned future alias for srgb_u8_stride
analysis_state: Optional[Dict[str, float]] = None,
edits: Optional[Dict[str, Any]] = None,
+ cache_context: Optional[dict] = None,
) -> np.ndarray:
"""Apply highlights and shadows adjustments using brightness-based processing in linear light.
@@ -1728,7 +1808,11 @@ def _apply_highlights_shadows(
max_brightness = 1.0
hit = False
with self._lock:
- cached = self._cached_max_brightness_state
+ cached = (
+ cache_context.get("max_brightness_state")
+ if cache_context is not None
+ else self._cached_max_brightness_state
+ )
if cached and cached.get("hash") == current_hash:
max_brightness = cached["value"]
hit = True
@@ -1757,10 +1841,14 @@ def _apply_highlights_shadows(
max_brightness = 1.0
with self._lock:
- self._cached_max_brightness_state = {
+ entry = {
"hash": current_hash,
"value": max_brightness,
}
+ if cache_context is not None:
+ cache_context["max_brightness_state"] = entry
+ else:
+ self._cached_max_brightness_state = entry
# Clamp to avoid crazy values from single hot pixels or artifacts
max_brightness = min(max_brightness, 100.0)
@@ -1917,29 +2005,23 @@ def _ensure_float_image(self) -> None:
if self.float_image is None:
self.float_image = float_arr
- def save_image(
+ def snapshot_for_export(
self,
write_developed_jpg: bool = False,
developed_path: Optional[Path] = None,
save_target_path: Optional[Path] = None,
- ) -> Optional[Tuple[Path, Path]]:
- """Saves the edited image, backing up the original.
+ ) -> Dict[str, Any]:
+ """Capture an immutable export snapshot on the calling thread.
- Args:
- write_developed_jpg: If True, also create a `-developed.jpg` sidecar file.
- This should be True only when editing RAW files.
- developed_path: Optional explicit path for the developed JPG.
- If not provided, it's derived from current_filepath.
- save_target_path: Optional override for the output path. When saving
- from a variant (backup/developed), this should be
- the Main file's path. Backup is created for Main,
- the variant source file is left untouched.
+ Must be called on the main thread BEFORE submitting to a background
+ executor. The returned dict contains everything needed to produce the
+ final output — no live ``ImageEditor`` state is required afterwards.
Returns:
- A tuple of (saved_path, backup_path) on success, otherwise None.
+ A dict with all export-critical data.
Raises:
- RuntimeError: If preconditions are not met (no path, no image) or if saving fails.
+ RuntimeError: If preconditions are not met (no path, no image).
"""
if self.current_filepath is None:
raise RuntimeError("No file path set")
@@ -1949,51 +2031,136 @@ def save_image(
# Ensure float master exists (preview_only loads may not have it)
self._ensure_float_image()
- _debug = log.isEnabledFor(logging.DEBUG)
- if _debug:
- t0 = time.perf_counter()
-
- # 1. Apply Edits to Full Resolution
- # Snapshot state under lock to avoid races
with self._lock:
- # Re-check float image existence under lock (though _ensure calls it too)
- # Previously returned None, now raising to be explicit about failure
if self.float_image is None:
- raise RuntimeError(
- "save_image called with no float_image (race condition?)"
- )
+ raise RuntimeError("snapshot_for_export called with no float_image")
- # Determine if we can skip copy
+ # --- Source image ---
_safe_no_copy = self._edits_can_share_input(self.current_edits)
-
- # Snapshot the source data
- # If safe to share (read-only), we just grab the reference
- # If not safe, we MUST copy it here while holding the lock
if _safe_no_copy:
source_arr = self.float_image
- log.debug("save_image: skipping float_image.copy() (safe no-copy path)")
+ log.debug(
+ "snapshot_for_export: skipping float_image.copy() (safe no-copy path)"
+ )
else:
source_arr = self.float_image.copy()
- # Snapshot edits
+ source_shape = self.float_image.shape[:2] # for debug logging
+
+ # --- Edits (shallow dict copy) ---
edits_snapshot = self.current_edits.copy()
- # Expensive computation runs WITHOUT the lock
+ # --- Deep-snapshot mutable darken state ---
+ # Always deepcopy DarkenSettings when present so the background
+ # thread never reads the live object (which the main thread can
+ # mutate, e.g. enabling/disabling or changing params).
+ import copy
+
+ ds = edits_snapshot.get("darken_settings")
+ if ds is not None:
+ edits_snapshot["darken_settings"] = copy.deepcopy(ds)
+ if getattr(ds, "enabled", False):
+ live_mask = self._mask_assets.get(ds.mask_id)
+ mask_snapshot = (
+ copy.deepcopy(live_mask) if live_mask is not None else None
+ )
+ export_cache = MaskRasterCache()
+ else:
+ # Darken disabled — record the absence explicitly so
+ # save_from_snapshot does not fall back to live assets.
+ mask_snapshot = None
+ export_cache = None
+ else:
+ mask_snapshot = None
+ export_cache = None
+
+ # --- Paths ---
+ filepath_snapshot = self.current_filepath
+
+ # --- EXIF (may read original_image and _source_exif_bytes) ---
+ main_exif = self._get_sanitized_exif_bytes()
+ source_exif = self._source_exif_bytes
+
+ # Build mask override dict. When darken is present but disabled (or
+ # has no mask), provide an empty dict so _apply_edits uses it instead
+ # of falling back to the live self._mask_assets.
+ ds_snap = edits_snapshot.get("darken_settings")
+ if ds_snap is not None:
+ mask_override = (
+ {ds_snap.mask_id: mask_snapshot} if mask_snapshot is not None else {}
+ )
+ else:
+ mask_override = None
+
+ original_path = save_target_path if save_target_path else filepath_snapshot
+
+ return {
+ "source_arr": source_arr,
+ "source_shape": source_shape,
+ "edits": edits_snapshot,
+ "mask_override": mask_override,
+ "export_cache": export_cache,
+ "original_path": original_path,
+ "filepath_snapshot": filepath_snapshot,
+ "main_exif": main_exif,
+ "source_exif": source_exif,
+ "write_developed_jpg": write_developed_jpg,
+ "developed_path": developed_path,
+ }
+
+ def save_from_snapshot(
+ self, snapshot: Dict[str, Any]
+ ) -> Optional[Tuple[Path, Path]]:
+ """Run the full-resolution export from a pre-captured snapshot.
+
+ This method is safe to call from a background thread — it does NOT
+ read any live ``ImageEditor`` state for export-critical data.
+ All mutable state comes from the *snapshot* dict produced by
+ ``snapshot_for_export()``.
+
+ Returns:
+ A tuple of (saved_path, backup_path) on success, otherwise None.
+
+ Raises:
+ RuntimeError: If saving fails.
+ """
+ source_arr = snapshot["source_arr"]
+ edits_snapshot = snapshot["edits"]
+ mask_override = snapshot["mask_override"]
+ export_cache = snapshot["export_cache"]
+ original_path = snapshot["original_path"]
+ main_exif = snapshot["main_exif"]
+ source_exif = snapshot["source_exif"]
+ write_developed_jpg = snapshot["write_developed_jpg"]
+ developed_path = snapshot["developed_path"]
+ source_shape = snapshot["source_shape"]
+
+ _debug = log.isEnabledFor(logging.DEBUG)
+ if _debug:
+ t0 = time.perf_counter()
+
+ # 1. Apply edits to full resolution — uses only snapshot data
+ # Use isolated snapshot context so background export doesn't pollute self._cached_*
+ export_cache_context = {}
final_float = self._apply_edits(
- source_arr, edits=edits_snapshot, for_export=True
+ source_arr,
+ edits=edits_snapshot,
+ for_export=True,
+ mask_assets_override=mask_override,
+ cache_override=export_cache,
+ cache_context=export_cache_context,
) # (H,W,3) float32
if _debug:
t_edits = time.perf_counter()
- original_path = save_target_path if save_target_path else self.current_filepath
try:
original_stat = original_path.stat()
except OSError as e:
log.warning("Unable to read timestamps for %s: %s", original_path, e)
original_stat = None
- # 2. Backup (always backs up original_path, which is Main when save_target_path is set)
+ # 2. Backup
backup_path = create_backup_file(original_path)
if backup_path is None:
return None
@@ -2005,39 +2172,24 @@ def save_image(
is_tiff = original_path.suffix.lower() in [".tif", ".tiff"]
if is_tiff:
- # Save as 16-bit TIFF using custom writer
self._write_tiff_16bit(original_path, final_float)
else:
- # Determine EXIF bytes to write
- exif_bytes = None
- if self.original_image:
- # We NO LONGER check transforms_applied here because we ALWAYS
- # bake orientation into the pixel buffer on load for consistency.
- # Thus, we ALWAYS sanitize the Orientation tag to 1 to prevent "double rotation".
- exif_bytes = self._get_sanitized_exif_bytes()
-
- # Save as standard format (Likely JPG) using Pillow
- # Convert to uint8
- # Legacy soft shoulder moved to linear space (_apply_headroom_shoulder)
- # converted via _linear_to_srgb in _apply_edits, so final_float is already sRGB.
- # Just clip to valid range.
arr_u8 = (np.clip(final_float, 0.0, 1.0) * 255).astype(np.uint8)
img_u8 = Image.fromarray(arr_u8, mode="RGB")
save_kwargs = {"quality": 95}
- if exif_bytes:
- save_kwargs["exif"] = exif_bytes
+ if main_exif:
+ save_kwargs["exif"] = main_exif
try:
img_u8.save(original_path, **save_kwargs)
except Exception:
- # Fallback without EXIF
img_u8.save(original_path)
if original_stat is not None:
self._restore_file_times(original_path, original_stat)
- # 4. Save Sidecar JPG (-developed.jpg) - only when explicitly requested
+ # 4. Save Sidecar JPG (-developed.jpg) — only when explicitly requested
if write_developed_jpg:
if developed_path is None:
stem = original_path.stem
@@ -2045,29 +2197,16 @@ def save_image(
stem = stem[:-8]
developed_path = original_path.with_name(f"{stem}-developed.jpg")
- # Check for geometric transforms (re-check not strictly needed but for clarity)
rotation = edits_snapshot.get("rotation", 0)
straighten_angle = float(edits_snapshot.get("straighten_angle", 0.0))
transforms_applied = (rotation != 0) or (abs(straighten_angle) > 0.001)
- # Determine EXIF for sidecar - prefer source EXIF (from paired JPEG)
exif_bytes = None
if transforms_applied:
- # Use sanitized EXIF (orientation reset to 1)
- exif_bytes = self._get_sanitized_exif_bytes()
- elif self._source_exif_bytes:
- # Use cached source EXIF from paired JPEG
- # Must sanitize orientation because we baked it on load!
- exif_bytes = sanitize_exif_orientation(self._source_exif_bytes)
- elif self.original_image:
- # Fallback to current image's EXIF (may be empty for TIFFs)
- # Must sanitize orientation because we baked it on load!
- exif_bytes = sanitize_exif_orientation(
- self.original_image.info.get("exif")
- )
+ exif_bytes = main_exif
+ elif source_exif:
+ exif_bytes = sanitize_exif_orientation(source_exif)
- # Use the same uint8 data
- # Legacy soft shoulder moved to linear space
arr_u8 = (np.clip(final_float, 0.0, 1.0) * 255).astype(np.uint8)
img_u8 = Image.fromarray(arr_u8)
@@ -2082,7 +2221,7 @@ def save_image(
if _debug:
t_write = time.perf_counter()
- h, w = self.float_image.shape[:2]
+ h, w = source_shape
log.debug(
"[SAVE_IMAGE] apply_edits=%dms backup=%dms write=%dms total=%dms (%dx%d, %s)",
int((t_edits - t0) * 1000),
@@ -2096,9 +2235,39 @@ def save_image(
return original_path, backup_path
except Exception as e:
- log.exception("Failed to save %s: %s", self.current_filepath, e)
+ log.exception("Failed to save %s: %s", original_path, e)
raise RuntimeError(f"Save failed: {e}") from e
+ def save_image(
+ self,
+ write_developed_jpg: bool = False,
+ developed_path: Optional[Path] = None,
+ save_target_path: Optional[Path] = None,
+ ) -> Optional[Tuple[Path, Path]]:
+ """Saves the edited image, backing up the original.
+
+ Convenience wrapper that calls ``snapshot_for_export()`` then
+ ``save_from_snapshot()`` in sequence. Kept for backward compatibility
+ and direct (non-background) save paths.
+
+ Args:
+ write_developed_jpg: If True, also create a `-developed.jpg` sidecar file.
+ developed_path: Optional explicit path for the developed JPG.
+ save_target_path: Optional override for the output path.
+
+ Returns:
+ A tuple of (saved_path, backup_path) on success, otherwise None.
+
+ Raises:
+ RuntimeError: If preconditions are not met or saving fails.
+ """
+ # Capture all export state on the calling thread, then run the
+ # full-resolution export synchronously on the same thread.
+ snapshot = self.snapshot_for_export(
+ write_developed_jpg=write_developed_jpg,
+ developed_path=developed_path,
+ save_target_path=save_target_path,
+ )
+ return self.save_from_snapshot(snapshot)
+
def save_image_uint8_levels(
self,
save_target_path: Optional[Path] = None,
diff --git a/faststack/imaging/mask.py b/faststack/imaging/mask.py
new file mode 100644
index 0000000..bd3ed8d
--- /dev/null
+++ b/faststack/imaging/mask.py
@@ -0,0 +1,191 @@
+# faststack/imaging/mask.py
+"""Reusable soft-mask model for local adjustments.
+
+Layer 1 of the mask subsystem. Provides:
+- MaskStroke – a single brush stroke in image-normalised coordinates
+- MaskData – a generic, tool-agnostic mask asset (strokes + overlay metadata)
+- DarkenSettings – tool-specific parameters for the background darkening tool
+
+MaskData is intentionally free of tool-specific logic so that future local
+adjustment tools (selective exposure, colour, sharpening, dust cleanup …)
+can share the same mask representation.
+"""
+
+import dataclasses
+import logging
+from typing import Any, Dict, List, Optional, Tuple
+
+log = logging.getLogger(__name__)
+
+
+# ---------------------------------------------------------------------------
+# Stroke
+# ---------------------------------------------------------------------------
+
+
+@dataclasses.dataclass
+class MaskStroke:
+ """A single brush stroke stored in image-normalised coordinates [0, 1].
+
+ Coordinates are relative to the *oriented base image* — i.e. the image
+ after 90-degree rotation but **before** straighten and crop. This keeps
+ strokes stable when the user adjusts straighten/crop later.
+ """
+
+ points: List[Tuple[float, float]] # (x_norm, y_norm) sequence
+ radius: float # brush radius in normalised coords
+ stroke_type: str # "add" (background hint) or "protect" (subject hint)
+ pressure: Optional[List[float]] = None # optional per-point pressure
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Serialise to a JSON-friendly dict; ``pressure`` is omitted when unset."""
+ d: Dict[str, Any] = {
+ "points": self.points,
+ "radius": self.radius,
+ "stroke_type": self.stroke_type,
+ }
+ if self.pressure is not None:
+ d["pressure"] = self.pressure
+ return d
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> "MaskStroke":
+ """Rebuild a stroke from ``to_dict`` output (point pairs re-tupled)."""
+ return cls(
+ points=[tuple(p) for p in d["points"]],
+ radius=d["radius"],
+ stroke_type=d["stroke_type"],
+ pressure=d.get("pressure"),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Generic mask asset (tool-agnostic)
+# ---------------------------------------------------------------------------
+
+
+@dataclasses.dataclass
+class MaskData:
+ """Reusable mask asset — strokes, revision tracking, overlay metadata.
+
+ This class owns *no* tool-specific parameters and *no* raster caches.
+ Raster products are disposable and live on the mask engine / editor.
+ The ``revision`` counter is bumped by every mutation so caches keyed
+ on it can detect staleness.
+ """
+
+ strokes: List[MaskStroke] = dataclasses.field(default_factory=list)
+ revision: int = 0
+
+ # Overlay display metadata (generic — any tool can use these)
+ overlay_color: Tuple[int, int, int] = (80, 120, 255) # default blue
+ overlay_opacity: float = 0.4
+
+ # ---- mutation helpers (all bump revision) ----
+
+ def add_stroke(self, stroke: MaskStroke) -> None:
+ """Append *stroke* and bump the revision."""
+ self.strokes.append(stroke)
+ self.revision += 1
+
+ def undo_last_stroke(self) -> Optional[MaskStroke]:
+ """Remove and return the most recent stroke, or None when empty."""
+ if self.strokes:
+ removed = self.strokes.pop()
+ self.revision += 1
+ return removed
+ return None
+
+ def clear_strokes(self) -> None:
+ """Drop all strokes and bump the revision."""
+ self.strokes.clear()
+ self.revision += 1
+
+ def has_strokes(self) -> bool:
+ """Return True when at least one stroke is present."""
+ return len(self.strokes) > 0
+
+ # ---- serialisation ----
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Serialise the asset (strokes, revision, overlay metadata)."""
+ return {
+ "strokes": [s.to_dict() for s in self.strokes],
+ "revision": self.revision,
+ "overlay_color": list(self.overlay_color),
+ "overlay_opacity": self.overlay_opacity,
+ }
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> "MaskData":
+ """Rebuild from ``to_dict`` output; missing fields fall back to defaults.
+
+ When ``revision`` is absent, the stroke count is used so caches
+ still invalidate relative to an empty mask.
+ """
+ strokes = [MaskStroke.from_dict(s) for s in d.get("strokes", [])]
+ return cls(
+ strokes=strokes,
+ revision=d.get("revision", len(strokes)),
+ overlay_color=tuple(d.get("overlay_color", (80, 120, 255))),
+ overlay_opacity=d.get("overlay_opacity", 0.4),
+ )
+
+
+# ---------------------------------------------------------------------------
+# Background darkening tool settings (tool-specific)
+# ---------------------------------------------------------------------------
+
+
+@dataclasses.dataclass
+class DarkenSettings:
+ """Parameters for the background darkening tool.
+
+ References a MaskData asset by *mask_id* (a key into
+ ``ImageEditor._mask_assets``). This keeps the generic mask separate
+ from the tool-specific knobs.
+ """
+
+ mask_id: str = "darken"
+ enabled: bool = False
+
+ # Darkening intensity
+ darken_amount: float = 0.5 # 0–1
+
+ # Mask refinement
+ edge_protection: float = 0.5 # 0–1
+ subject_protection: float = 0.5 # 0–1
+ feather: float = 0.5 # 0–1
+ dark_range: float = 0.5 # 0–1
+ neutrality_sensitivity: float = 0.5 # 0–1
+ expand_contract: float = 0.0 # -1 to +1
+ auto_from_edges: float = 0.0 # 0–1
+
+ # Mode
+ mode: str = "assisted"
+ # Valid: "paint_only", "assisted", "strong_subject", "border_auto"
+
+ # Brush (stored on settings so each tool can have its own default)
+ brush_radius: float = 0.03 # normalised
+
+ def params_tuple(self) -> tuple:
+ """Frozen tuple of all scalar params — used as a cache key.
+
+ Note: ``brush_radius`` is deliberately excluded — it affects future
+ strokes, not how an existing mask resolves.
+ """
+ return (
+ self.darken_amount,
+ self.edge_protection,
+ self.subject_protection,
+ self.feather,
+ self.dark_range,
+ self.neutrality_sensitivity,
+ self.expand_contract,
+ self.auto_from_edges,
+ self.mode,
+ )
+
+ # ---- serialisation ----
+
+ def to_dict(self) -> Dict[str, Any]:
+ """Serialise every field to a JSON-friendly dict."""
+ return {
+ "mask_id": self.mask_id,
+ "enabled": self.enabled,
+ "darken_amount": self.darken_amount,
+ "edge_protection": self.edge_protection,
+ "subject_protection": self.subject_protection,
+ "feather": self.feather,
+ "dark_range": self.dark_range,
+ "neutrality_sensitivity": self.neutrality_sensitivity,
+ "expand_contract": self.expand_contract,
+ "auto_from_edges": self.auto_from_edges,
+ "mode": self.mode,
+ "brush_radius": self.brush_radius,
+ }
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> "DarkenSettings":
+ """Build from a dict, silently ignoring unknown keys (forward compat)."""
+ return cls(**{k: v for k, v in d.items() if k in cls.__dataclass_fields__})
diff --git a/faststack/imaging/mask_engine.py b/faststack/imaging/mask_engine.py
new file mode 100644
index 0000000..070df1e
--- /dev/null
+++ b/faststack/imaging/mask_engine.py
@@ -0,0 +1,589 @@
+# faststack/imaging/mask_engine.py
+"""Mask rasterisation, refinement, and coordinate transforms.
+
+Layer 2 of the mask subsystem. Provides:
+- forward_transform / inverse_transform – pure coordinate helpers
+- rasterize_strokes – draw normalised strokes onto a pixel grid
+- resolve_mask – full pipeline: strokes → confidence → feather → clamp
+- MaskRasterCache – disposable, resolution-keyed cache for raster products
+"""
+
+import logging
+import math
+from typing import Any, Dict, Optional, Tuple
+
+import numpy as np
+
+from faststack.imaging.mask import DarkenSettings, MaskData
+
+log = logging.getLogger(__name__)
+
+# Optional dependency -------------------------------------------------------
+try:
+ import cv2
+except ImportError:
+ cv2 = None # type: ignore[assignment]
+
+
+# ---------------------------------------------------------------------------
+# Coordinate transforms
+# ---------------------------------------------------------------------------
+
+
+def _geometry_hash(edits: Dict[str, Any]) -> int:
+ """Hash of the geometry edits that affect mask alignment."""
+ return hash(
+ (
+ edits.get("rotation", 0),
+ round(float(edits.get("straighten_angle", 0.0)), 3),
+ tuple(edits.get("crop_box") or ()),
+ )
+ )
+
+
+def forward_transform(
+ x_norm: float,
+ y_norm: float,
+ edits: Dict[str, Any],
+ target_shape: Tuple[int, int],
+) -> Tuple[float, float]:
+ """Map oriented-base-image [0,1] coords → pixel coords in the
+ post-geometry (post-straighten, post-crop) rasterisation array.
+
+ *target_shape* is ``(H, W)`` of the array being rasterised into.
+ """
+ straighten = float(edits.get("straighten_angle", 0.0))
+ crop_box = edits.get("crop_box")
+ has_crop = (
+ crop_box is not None
+ and len(crop_box) == 4
+ and any(v != d for v, d in zip(crop_box, (0, 0, 1000, 1000), strict=True))
+ )
+
+ # Start in oriented-base-image space [0, 1]
+ x, y = x_norm, y_norm
+
+ # 0. Apply 90-degree rotation steps (matches np.rot90 in _apply_edits)
+ rotation = edits.get("rotation", 0)
+ k = (rotation // 90) % 4
+ if k == 1: # 90 CCW: (x, y) → (y, 1-x)
+ x, y = y, 1.0 - x
+ elif k == 2: # 180: (x, y) → (1-x, 1-y)
+ x, y = 1.0 - x, 1.0 - y
+ elif k == 3: # 270 CCW: (x, y) → (1-y, x)
+ x, y = 1.0 - y, x
+
+ # 1. Apply straighten rotation around (0.5, 0.5)
+ if abs(straighten) > 0.001:
+ rad = math.radians(-straighten)
+ cos_a, sin_a = math.cos(rad), math.sin(rad)
+ dx, dy = x - 0.5, y - 0.5
+ x = dx * cos_a - dy * sin_a + 0.5
+ y = dx * sin_a + dy * cos_a + 0.5
+
+ # 2. Apply crop (map full-image normalised → crop-window normalised)
+ if has_crop:
+ cl, ct, cr, cb = (
+ crop_box[0] / 1000,
+ crop_box[1] / 1000,
+ crop_box[2] / 1000,
+ crop_box[3] / 1000,
+ )
+ cw = cr - cl
+ ch = cb - ct
+ if cw > 0 and ch > 0:
+ x = (x - cl) / cw
+ y = (y - ct) / ch
+
+ # 3. Scale to pixel coords
+ th, tw = target_shape
+ return x * tw, y * th
+
+
+def inverse_transform(
+ x_disp: float,
+ y_disp: float,
+ edits: Dict[str, Any],
+ display_shape: Tuple[int, int],
+) -> Tuple[float, float]:
+ """Map display / QML normalised [0,1] coords → oriented-base-image [0,1].
+
+ *display_shape* is ``(H, W)`` of the displayed image (not used for
+ normalised inputs but kept for API symmetry / future use).
+
+ ``x_disp`` and ``y_disp`` are assumed already normalised to [0,1]
+ relative to the displayed (post-crop, post-straighten) image.
+ """
+ straighten = float(edits.get("straighten_angle", 0.0))
+ crop_box = edits.get("crop_box")
+ has_crop = (
+ crop_box is not None
+ and len(crop_box) == 4
+ and any(v != d for v, d in zip(crop_box, (0, 0, 1000, 1000), strict=True))
+ )
+
+ x, y = x_disp, y_disp
+
+ # Inverse crop: map crop-window normalised → full-image normalised
+ if has_crop:
+ cl, ct, cr, cb = (
+ crop_box[0] / 1000,
+ crop_box[1] / 1000,
+ crop_box[2] / 1000,
+ crop_box[3] / 1000,
+ )
+ cw = cr - cl
+ ch = cb - ct
+ if cw > 0 and ch > 0:
+ x = x * cw + cl
+ y = y * ch + ct
+
+ # Inverse straighten: rotate by +angle (undo the -angle forward)
+ if abs(straighten) > 0.001:
+ rad = math.radians(straighten)
+ cos_a, sin_a = math.cos(rad), math.sin(rad)
+ dx, dy = x - 0.5, y - 0.5
+ x = dx * cos_a - dy * sin_a + 0.5
+ y = dx * sin_a + dy * cos_a + 0.5
+
+ # Inverse 90-degree rotation (undo step 0 of forward_transform)
+ rotation = edits.get("rotation", 0)
+ k = (rotation // 90) % 4
+ if k == 1: # undo 90 CCW: (x, y) → (1-y, x)
+ x, y = 1.0 - y, x
+ elif k == 2: # undo 180: (x, y) → (1-x, 1-y)
+ x, y = 1.0 - x, 1.0 - y
+ elif k == 3: # undo 270 CCW: (x, y) → (y, 1-x)
+ x, y = y, 1.0 - x
+
+ return x, y
+
+
+# ---------------------------------------------------------------------------
+# Stroke rasterisation
+# ---------------------------------------------------------------------------
+
+
+def _interpolate_points(points: list, max_gap: float) -> list:
+ """Insert intermediate points so no two consecutive points are more than
+ *max_gap* pixels apart. Prevents dotted strokes from fast mouse movement."""
+ if len(points) <= 1:
+ return points
+ result = [points[0]]
+ for i in range(1, len(points)):
+ x0, y0 = result[-1]
+ x1, y1 = points[i]
+ dx, dy = x1 - x0, y1 - y0
+ dist = math.sqrt(dx * dx + dy * dy)
+ if dist > max_gap:
+ n = int(math.ceil(dist / max_gap))
+ for j in range(1, n):
+ t = j / n
+ result.append((x0 + dx * t, y0 + dy * t))
+ result.append((x1, y1))
+ return result
+
+
+def _draw_stroke_numpy(
+ canvas: np.ndarray,
+ points: list,
+ radius_px: float,
+) -> None:
+ """Draw a stroke onto *canvas* (H, W) using numpy distance computation."""
+ h, w = canvas.shape
+ if radius_px < 0.5:
+ radius_px = 0.5
+
+ # Interpolate to prevent gaps from fast mouse movement
+ filled = _interpolate_points(points, max_gap=max(1.0, radius_px * 0.5))
+
+ for px, py in filled:
+ # Bounding box for this circle
+ x0 = max(0, int(px - radius_px - 1))
+ x1 = min(w, int(px + radius_px + 2))
+ y0 = max(0, int(py - radius_px - 1))
+ y1 = min(h, int(py + radius_px + 2))
+ if x1 <= x0 or y1 <= y0:
+ continue
+
+ yy, xx = np.ogrid[y0:y1, x0:x1]
+ dist_sq = (xx - px) ** 2 + (yy - py) ** 2
+ inside = dist_sq <= radius_px**2
+ canvas[y0:y1, x0:x1] = np.maximum(
+ canvas[y0:y1, x0:x1], inside.astype(np.float32)
+ )
+
+
+def _draw_stroke_cv2(
+ canvas: np.ndarray,
+ points: list,
+ radius_px: float,
+) -> None:
+ """Draw a stroke onto *canvas* using cv2.circle (faster)."""
+ r = max(1, int(round(radius_px)))
+
+ # Interpolate to prevent gaps from fast mouse movement
+ filled = _interpolate_points(points, max_gap=max(1.0, r * 0.5))
+
+ for px, py in filled:
+ cv2.circle(canvas, (int(round(px)), int(round(py))), r, 1.0, -1)
+
+
+def rasterize_strokes(
+ mask_data: MaskData,
+ shape: Tuple[int, int],
+ edits: Dict[str, Any],
+) -> Tuple[np.ndarray, np.ndarray]:
+ """Rasterise all strokes to float32 (H, W) maps.
+
+ Returns ``(add_map, protect_map)`` each in [0, 1]. ``add_map`` marks
+ background hints; ``protect_map`` marks subject protection.
+
+ Strokes are in oriented-base-image normalised coords and are
+ forward-transformed to *shape* accounting for current geometry edits.
+ """
+ h, w = shape
+ add_map = np.zeros((h, w), dtype=np.float32)
+ protect_map = np.zeros((h, w), dtype=np.float32)
+
+ draw_fn = _draw_stroke_cv2 if cv2 is not None else _draw_stroke_numpy
+
+ for stroke in mask_data.strokes:
+ # Transform stroke points to pixel coords in target array
+ pixel_points = []
+ for xn, yn in stroke.points:
+ px, py = forward_transform(xn, yn, edits, shape)
+ pixel_points.append((px, py))
+
+ # Radius in pixels (normalised radius × image diagonal for consistency)
+ diag = math.sqrt(w * w + h * h)
+ radius_px = stroke.radius * diag
+
+ target = add_map if stroke.stroke_type == "add" else protect_map
+ draw_fn(target, pixel_points, radius_px)
+
+ # Clamp
+ np.clip(add_map, 0.0, 1.0, out=add_map)
+ np.clip(protect_map, 0.0, 1.0, out=protect_map)
+ return add_map, protect_map
+
+
+# ---------------------------------------------------------------------------
+# Gaussian blur helper
+# ---------------------------------------------------------------------------
+
+
+def _gaussian_blur(arr: np.ndarray, sigma: float) -> np.ndarray:
+ """Gaussian blur a 2-D float32 array."""
+ if sigma < 0.5:
+ return arr
+ if cv2 is not None:
+ ksize = int(math.ceil(sigma * 6)) | 1 # odd kernel
+ return cv2.GaussianBlur(arr, (ksize, ksize), sigma)
+
+ # Numpy-only fallback: separable 1-D convolution
+ radius = int(math.ceil(sigma * 3))
+ x = np.arange(-radius, radius + 1, dtype=np.float32)
+ kernel = np.exp(-0.5 * (x / sigma) ** 2)
+ kernel /= kernel.sum()
+ # Pad + convolve rows then columns
+
+ out = arr.copy()
+ for row in range(out.shape[0]):
+ out[row, :] = np.convolve(out[row, :], kernel, mode="same")
+ for col in range(out.shape[1]):
+ out[:, col] = np.convolve(out[:, col], kernel, mode="same")
+ return out
+
+
+# ---------------------------------------------------------------------------
+# Confidence map builders
+# ---------------------------------------------------------------------------
+
+
+def _dark_prior(image_arr: np.ndarray, dark_range: float) -> np.ndarray:
+ """Higher confidence for darker pixels."""
+ luma = image_arr @ np.array([0.299, 0.587, 0.114], dtype=np.float32)
+ # linear ramp (clip, not a true smoothstep): result is 1 below lo, 0 above hi
+ lo = dark_range * 0.3
+ hi = max(lo + 0.01, dark_range * 0.7)
+ t = np.clip((luma - lo) / (hi - lo), 0.0, 1.0)
+ return 1.0 - t # dark pixels → 1.0
+
+
+def _neutral_prior(image_arr: np.ndarray, sensitivity: float) -> np.ndarray:
+ """Higher confidence for low-chroma (neutral / grey) pixels."""
+ cmax = image_arr.max(axis=2)
+ cmin = image_arr.min(axis=2)
+ chroma = cmax - cmin
+ lo = 0.05
+ hi = max(lo + 0.01, 0.15 * max(0.1, sensitivity))
+ t = np.clip((chroma - lo) / (hi - lo), 0.0, 1.0)
+ return 1.0 - t # neutral → 1.0
+
+
+def _border_prior(
+ shape: Tuple[int, int], border_width_frac: float = 0.05
+) -> np.ndarray:
+ """Border-band prior — pixels in a thin band along the image border get 1.0."""
+ h, w = shape
+ bw = max(1, int(min(h, w) * border_width_frac))
+ prior = np.zeros((h, w), dtype=np.float32)
+ prior[:bw, :] = 1.0
+ prior[-bw:, :] = 1.0
+ prior[:, :bw] = 1.0
+ prior[:, -bw:] = 1.0
+ return prior
+
+
+def _edge_magnitude(image_arr: np.ndarray) -> np.ndarray:
+ """Gradient magnitude for edge stopping."""
+ luma = image_arr @ np.array([0.299, 0.587, 0.114], dtype=np.float32)
+ if cv2 is not None:
+ gx = cv2.Sobel(luma, cv2.CV_32F, 1, 0, ksize=3)
+ gy = cv2.Sobel(luma, cv2.CV_32F, 0, 1, ksize=3)
+ mag = np.sqrt(gx**2 + gy**2)
+ else:
+ # Simple numpy gradient
+ gy, gx = np.gradient(luma)
+ mag = np.sqrt(gx**2 + gy**2)
+ # Normalise to [0, 1]
+ m = mag.max()
+ if m > 1e-6:
+ mag /= m
+ return mag
+
+
+# ---------------------------------------------------------------------------
+# In-process image-content key for resolved-mask cache invalidation
+# ---------------------------------------------------------------------------
+
+
+def _image_content_key(image_arr: np.ndarray) -> int:
+ """Fast in-process cache key for resolved-mask cache invalidation.
+
+ Priors (dark, neutral, edge) depend on image content, so the cache must
+ be invalidated when edits change the image (exposure, WB, levels, etc.).
+
+ Samples a 4×4 spatial grid across all channels and hashes the raw bytes.
+ This catches both global adjustments and localised edits far more
+ reliably than a handful of single-channel pixel reads.
+ """
+ h, w = image_arr.shape[:2]
+ # 4 evenly-spaced row/col indices (always includes first and last)
+ rows = [0, h // 3, 2 * h // 3, h - 1]
+ cols = [0, w // 3, 2 * w // 3, w - 1]
+ samples = b"".join(image_arr[r, c].tobytes() for r in rows for c in cols)
+ return hash(samples)
+
+
+# ---------------------------------------------------------------------------
+# Mask resolution pipeline
+# ---------------------------------------------------------------------------
+
+
+def resolve_mask(
+ mask_data: MaskData,
+ settings: DarkenSettings,
+ image_arr: np.ndarray,
+ shape: Tuple[int, int],
+ edits: Dict[str, Any],
+ cache: Optional["MaskRasterCache"] = None,
+) -> np.ndarray:
+ """Full mask resolution: strokes → confidence → feather → soft mask.
+
+ Returns float32 (H, W) in [0, 1] where 1.0 = full background effect.
+
+ *image_arr* is the current sRGB float32 (H, W, 3) image used for
+ edge-aware analysis. *shape* must match ``image_arr.shape[:2]``.
+ """
+ geo_hash = _geometry_hash(edits)
+ params_key = settings.params_tuple()
+ img_key = _image_content_key(image_arr)
+
+ if cache is not None:
+ cached = cache.get_resolved(
+ mask_data.revision, shape, geo_hash, params_key, img_key
+ )
+ if cached is not None:
+ return cached
+
+ # --- Rasterise strokes (may hit stroke cache) ---
+ if cache is not None:
+ stroke_maps = cache.get_strokes(mask_data.revision, shape, geo_hash)
+ else:
+ stroke_maps = None
+
+ if stroke_maps is None:
+ add_map, protect_map = rasterize_strokes(mask_data, shape, edits)
+ if cache is not None:
+ cache.put_strokes(
+ mask_data.revision, shape, geo_hash, (add_map, protect_map)
+ )
+ else:
+ add_map, protect_map = stroke_maps
+
+ # --- Build auto priors based on mode ---
+ mode = settings.mode
+ auto_prior = np.zeros(shape, dtype=np.float32)
+
+ # Pre-compute edges if needed by either auto priors or final edge protection.
+ edges = None
+ if (
+ mode != "paint_only" and settings.auto_from_edges > 0.01
+ ) or settings.edge_protection > 0.01:
+ edges = _edge_magnitude(image_arr)
+
+ if mode != "paint_only":
+ # Dark prior
+ dp = _dark_prior(image_arr, settings.dark_range)
+
+ if mode == "border_auto":
+ # Border connectivity: combine border seed with dark prior
+ bp = _border_prior(shape)
+ bp_blurred = _gaussian_blur(bp, sigma=min(shape) * 0.05)
+ auto_prior = dp * 0.5 + bp_blurred * 0.5
+ elif mode == "strong_subject":
+ # Strong subject protection — use dark prior but weight protect more
+ auto_prior = dp * 0.3
+ else:
+ # "assisted" — balanced
+ auto_prior = dp * 0.4
+
+ # Neutrality prior blended in
+ if settings.neutrality_sensitivity > 0.01:
+ np_arr = _neutral_prior(image_arr, settings.neutrality_sensitivity)
+ auto_prior = (
+ auto_prior * 0.6 + np_arr * 0.4 * settings.neutrality_sensitivity
+ )
+
+ # Edge-aware prior: areas between strong edges are likely uniform
+ # background, so use inverted edge magnitude as a positive signal.
+ if settings.auto_from_edges > 0.01:
+ # edges is already computed above
+ # Blur the edge map so the "between edges" regions fill in
+ edge_blurred = _gaussian_blur(edges, sigma=min(shape) * 0.02)
+ # Invert: low-edge (smooth) regions get high confidence
+ edge_prior = 1.0 - edge_blurred
+ w = settings.auto_from_edges
+ auto_prior = auto_prior * (1.0 - w) + edge_prior * w
+
+ # --- Combine signals ---
+ # Background confidence = user strokes + auto prior where user hasn't painted
+ raw_bg = np.clip(add_map + auto_prior * (1.0 - add_map), 0.0, 1.0)
+
+ # Subject protection
+ sp_weight = settings.subject_protection
+ if mode == "strong_subject":
+ sp_weight = min(1.0, sp_weight + 0.3)
+
+ raw_bg = raw_bg * (1.0 - protect_map * sp_weight)
+
+ # --- Edge stopping ---
+ if settings.edge_protection > 0.01:
+ # edges is already computed above
+ # Reduce mask confidence at strong edges
+ edge_brake = 1.0 - edges * settings.edge_protection
+ raw_bg = raw_bg * np.clip(edge_brake, 0.0, 1.0)
+
+ # --- Feather / blur ---
+ feather_sigma = settings.feather * min(shape) * 0.03
+ if feather_sigma > 0.5:
+ raw_bg = _gaussian_blur(raw_bg, feather_sigma)
+
+ # --- Expand / contract ---
+ ec = settings.expand_contract
+ if abs(ec) > 0.01 and cv2 is not None:
+ ksize = max(3, int(abs(ec) * min(shape) * 0.02)) | 1
+ kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (ksize, ksize))
+ if ec > 0:
+ raw_bg = cv2.dilate(raw_bg, kernel)
+ else:
+ raw_bg = cv2.erode(raw_bg, kernel)
+
+ # --- Final clamp ---
+ result = np.clip(raw_bg, 0.0, 1.0)
+
+ if cache is not None:
+ cache.put_resolved(
+ mask_data.revision, shape, geo_hash, params_key, img_key, result
+ )
+
+ return result
+
+
+# ---------------------------------------------------------------------------
+# Disposable raster cache
+# ---------------------------------------------------------------------------
+
+
+class MaskRasterCache:
+ """Resolution-keyed cache for disposable raster products.
+
+ Keeps one stroke-map entry and one resolved-mask entry at a time.
+ Preview and export resolutions have different shapes, so they
+ naturally miss and recompute — no stale cross-contamination.
+ """
+
+ def __init__(self):
+ self._stroke_key: Optional[tuple] = None
+ self._stroke_maps: Optional[Tuple[np.ndarray, np.ndarray]] = None
+
+ self._resolved_key: Optional[tuple] = None
+ self._resolved_mask: Optional[np.ndarray] = None
+
+ def clear(self) -> None:
+ self._stroke_key = None
+ self._stroke_maps = None
+ self._resolved_key = None
+ self._resolved_mask = None
+
+ # stroke maps
+
+ def get_strokes(
+ self,
+ revision: int,
+ shape: Tuple[int, int],
+ geo_hash: int,
+ ) -> Optional[Tuple[np.ndarray, np.ndarray]]:
+ key = (revision, shape, geo_hash)
+ if self._stroke_key == key:
+ return self._stroke_maps
+ return None
+
+ def put_strokes(
+ self,
+ revision: int,
+ shape: Tuple[int, int],
+ geo_hash: int,
+ maps: Tuple[np.ndarray, np.ndarray],
+ ) -> None:
+ self._stroke_key = (revision, shape, geo_hash)
+ self._stroke_maps = maps
+
+ # resolved mask
+
+ def get_resolved(
+ self,
+ revision: int,
+ shape: Tuple[int, int],
+ geo_hash: int,
+ params_key: tuple,
+ img_key: int,
+ ) -> Optional[np.ndarray]:
+ key = (revision, shape, geo_hash, params_key, img_key)
+ if self._resolved_key == key:
+ return self._resolved_mask
+ return None
+
+ def put_resolved(
+ self,
+ revision: int,
+ shape: Tuple[int, int],
+ geo_hash: int,
+ params_key: tuple,
+ img_key: int,
+ mask: np.ndarray,
+ ) -> None:
+ self._resolved_key = (revision, shape, geo_hash, params_key, img_key)
+ self._resolved_mask = mask
diff --git a/faststack/imaging/masked_ops.py b/faststack/imaging/masked_ops.py
new file mode 100644
index 0000000..c416b6f
--- /dev/null
+++ b/faststack/imaging/masked_ops.py
@@ -0,0 +1,88 @@
+# faststack/imaging/masked_ops.py
+"""Masked image-processing operations.
+
+Layer 3 of the mask subsystem. Each function takes an image array and a
+soft mask and applies a local adjustment. Background darkening is the
+first consumer; future tools can add ``apply_masked_exposure``,
+``apply_masked_saturation``, etc.
+"""
+
+import logging
+
+import numpy as np
+
+log = logging.getLogger(__name__)
+
+# Optional dependency -------------------------------------------------------
+try:
+ import cv2
+except ImportError:
+ cv2 = None # type: ignore[assignment]
+
+
+def apply_masked_darken(
+ arr: np.ndarray,
+ mask: np.ndarray,
+ darken_amount: float,
+ edge_protection: float,
+) -> np.ndarray:
+ """Apply background darkening inside *mask*.
+
+ The algorithm combines two complementary techniques for a natural look:
+
+ 1. **Pedestal subtraction** — removes the ambient-light "haze floor"
+ that makes backgrounds look grey instead of black.
+ 2. **Multiplicative darkening** — compresses the remaining tonal range
+ in the masked area, preserving relative brightness relationships.
+ 3. **Edge detail preservation** — optionally adds back local detail
+ in darkened areas so texture is not lost.
+
+ Parameters
+ ----------
+ arr : float32 (H, W, 3) sRGB image in [0, 1]. Modified **in-place**.
+ mask : float32 (H, W) soft mask in [0, 1], where 1.0 = full background.
+ darken_amount : float overall strength, 0-1.
+ edge_protection : float detail preservation strength, 0-1.
+
+ Returns
+ -------
+ The same *arr* array (for convenience chaining).
+ """
+ if darken_amount < 0.001:
+ return arr
+
+ mask3 = mask[..., np.newaxis] # (H, W, 1) for broadcasting
+
+ # 1. Pedestal subtraction — remove the grey floor
+ pedestal = darken_amount * 0.15
+ arr -= mask3 * pedestal
+
+ # 2. Multiplicative darkening — compress remaining range
+ mult_factor = darken_amount * 0.4
+ arr *= 1.0 - mask3 * mult_factor
+
+ # 3. Edge detail preservation
+ if edge_protection > 0.01:
+ # Compute local detail at a fine scale
+ try:
+ if cv2 is not None and hasattr(cv2, "GaussianBlur"):
+ luma = np.ascontiguousarray(
+ arr @ np.array([0.299, 0.587, 0.114], dtype=np.float32)
+ )
+ blurred = cv2.GaussianBlur(luma, (5, 5), 1.5)
+ if isinstance(blurred, np.ndarray) and blurred.shape == luma.shape:
+ detail = luma - blurred # high-frequency detail
+ restore = detail[..., np.newaxis] * mask3 * edge_protection * 0.5
+ arr += restore
+ except Exception:
+ log.debug(
+ "Edge detail preservation skipped: arr=%s mask3=%s",
+ getattr(arr, "dtype", "?"),
+ getattr(mask3, "dtype", "?"),
+ exc_info=True,
+ )
+
+ # Safety clamp — keep in valid range
+ np.clip(arr, 0.0, 1.0, out=arr)
+
+ return arr
diff --git a/faststack/imaging/turbo.py b/faststack/imaging/turbo.py
index 006d381..86d42b9 100644
--- a/faststack/imaging/turbo.py
+++ b/faststack/imaging/turbo.py
@@ -75,7 +75,9 @@ def _candidate_library_paths() -> list[Optional[str]]:
def create_turbojpeg() -> Tuple[Optional["TurboJPEG"], bool]:
"""Create a TurboJPEG decoder if possible."""
if TurboJPEG is None:
- log.warning("PyTurboJPEG not found. Falling back to Pillow for JPEG decoding.")
+ log.warning(
+ "PyTurboJPEG not found. Falling back to Pillow for JPEG decoding. Installing PyTurboJPEG will improve image navigation speed."
+ )
return None, False
failures: list[str] = []
diff --git a/faststack/qml/Components.qml b/faststack/qml/Components.qml
index 67d3050..0b413a6 100644
--- a/faststack/qml/Components.qml
+++ b/faststack/qml/Components.qml
@@ -108,10 +108,6 @@ Item {
panTransform.y = 0
}
function onAbsoluteZoomRequested(scale) {
- if (uiState && uiState.debugMode) {
- console.log("QML: Absolute zoom requested: " + scale)
- }
-
imageRotator.zoomScale = scale
// If we need to switch to high-res, flag this scale as the target
@@ -142,8 +138,8 @@ Item {
// Size matches the AABB of the rotated image
// W' = W*|cos| + H*|sin|
// Geometry is now updated atomically via updateRotatorGeometry()
- property real implicitWidth: 0
- property real implicitHeight: 0
+ implicitWidth: 0
+ implicitHeight: 0
property bool isUpdatingGeometry: false
// Fix A: Atomic Zoom Scale
@@ -281,13 +277,30 @@ Item {
rotation: mainMouseArea.cropRotation
+ // Darken mask overlay - anchored to mainImage, rotates/scales with it
+ Image {
+ id: darkenOverlay
+ anchors.fill: parent
+ z: 90
+ visible: uiState && uiState.isDarkening && uiState.darkenOverlayVisible
+ source: (uiState && uiState.isDarkening && uiState.darkenOverlayVisible)
+ ? "image://provider/mask_overlay/" + uiState.darkenOverlayGeneration
+ : ""
+ fillMode: Image.Stretch
+ cache: false
+ opacity: 1.0 // Opacity is baked into the ARGB32 image
+ }
+
// Crop overlay - anchored to mainImage to rotate with it
Item {
id: cropOverlay
property var cropBox: uiState ? uiState.currentCropBox : [0, 0, 1000, 1000]
property bool hasActiveCrop: cropBox && cropBox.length === 4 && !(cropBox[0]===0 && cropBox[1]===0 && cropBox[2]===1000 && cropBox[3]===1000)
+ // Show visual content only when there is an actual user-drawn crop or rotate mode.
+ // The overlay Item itself stays alive (visible: isCropping) so updateCropRect() always fires.
+ property bool showCropContent: hasActiveCrop || mainMouseArea.isRotating
- visible: uiState && uiState.isCropping && (hasActiveCrop || mainMouseArea.isRotating)
+ visible: uiState && uiState.isCropping
anchors.fill: parent // Fills mainImage
z: 100
@@ -296,7 +309,7 @@ Item {
Connections {
target: uiState
- function onCurrentCropBoxChanged() { if (cropOverlay.visible && mainImage.source) cropOverlay.updateCropRect() }
+ function onCurrentCropBoxChanged() { if (mainImage.source) cropOverlay.updateCropRect() }
}
Connections {
@@ -321,14 +334,15 @@ Item {
cropRect.height = localBottom - localTop
}
- // Dimmer Rectangles
- Rectangle { x: 0; y: 0; width: parent.width; height: cropRect.y; color: "black"; opacity: 0.3 }
- Rectangle { x: 0; y: cropRect.y + cropRect.height; width: parent.width; height: parent.height - (cropRect.y + cropRect.height); color: "black"; opacity: 0.3 }
- Rectangle { x: 0; y: cropRect.y; width: cropRect.x; height: cropRect.height; color: "black"; opacity: 0.3 }
- Rectangle { x: cropRect.x + cropRect.width; y: cropRect.y; width: parent.width - (cropRect.x + cropRect.width); height: cropRect.height; color: "black"; opacity: 0.3 }
+ // Dimmer Rectangles — only render when a real crop is active/being drawn
+ Rectangle { visible: cropOverlay.showCropContent; x: 0; y: 0; width: parent.width; height: cropRect.y; color: "black"; opacity: 0.3 }
+ Rectangle { visible: cropOverlay.showCropContent; x: 0; y: cropRect.y + cropRect.height; width: parent.width; height: parent.height - (cropRect.y + cropRect.height); color: "black"; opacity: 0.3 }
+ Rectangle { visible: cropOverlay.showCropContent; x: 0; y: cropRect.y; width: cropRect.x; height: cropRect.height; color: "black"; opacity: 0.3 }
+ Rectangle { visible: cropOverlay.showCropContent; x: cropRect.x + cropRect.width; y: cropRect.y; width: parent.width - (cropRect.x + cropRect.width); height: cropRect.height; color: "black"; opacity: 0.3 }
Rectangle {
id: cropRect
+ visible: cropOverlay.showCropContent
color: "transparent"
border.color: "white"
border.width: 3 / ((scaleTransform && scaleTransform.xScale) ? scaleTransform.xScale : 1.0)
@@ -383,13 +397,6 @@ Item {
// Force fit recompute so fitScale / zoom logic stabilizes immediately
imageRotator.recomputeFitScale(true)
-
- if (uiState && uiState.debugMode) {
- console.log("sourceSize changed:", mainImage.sourceSize.width, mainImage.sourceSize.height,
- "dpr:", dpr,
- "base:", imageRotator.baseW, imageRotator.baseH,
- "zoomScale:", imageRotator.zoomScale)
- }
}
onSourceSizeChanged: { handleSourceSizeChange() }
@@ -518,11 +525,13 @@ Item {
acceptedButtons: Qt.LeftButton | Qt.RightButton
hoverEnabled: true
cursorShape: {
+ if (uiState && uiState.isDarkening) return Qt.CrossCursor
if (!uiState || !uiState.isCropping) return Qt.ArrowCursor
- // Use a simple cross cursor for crop mode - edge detection would require tracking mouse position
- // which is complex in QML. The edge dragging will still work based on click position.
return Qt.CrossCursor
}
+
+ // Darken painting state
+ property bool isDarkenPainting: false
// Drag-to-pan with drag-and-drop when dragging outside window
property real lastX: 0
@@ -587,35 +596,45 @@ Item {
startX = mouse.x
startY = mouse.y
isDraggingOutside = false
-
+
+ // Darken painting mode
+ if (uiState && uiState.isDarkening && !uiState.isCropping && controller) {
+ var imgCoords = mapToImageCoordinates(Qt.point(mouse.x, mouse.y))
+ var sx = Math.max(0, Math.min(1, imgCoords.x))
+ var sy = Math.max(0, Math.min(1, imgCoords.y))
+ if (imgCoords.x < 0 || imgCoords.x > 1 || imgCoords.y < 0 || imgCoords.y > 1) {
+ return // click outside image bounds
+ }
+ var strokeType = (mouse.button === Qt.RightButton) ? "protect" : "add"
+ controller.start_darken_stroke(sx, sy, strokeType)
+ isDarkenPainting = true
+ return
+ }
+
if (mouse.button === Qt.RightButton) {
- if (uiState && uiState.isCropping) {
- // Cancel crop mode if already active
- if (controller) controller.cancel_crop_mode()
- } else if (uiState) {
- // Enter crop mode and start new crop
- uiState.isCropping = true
-
- // Set up new crop state
- cropDragMode = "new"
- cropStartX = mouse.x
- cropStartY = mouse.y
-
- // Initialize anchors
- var startCoords = mapToImageCoordinates(Qt.point(mouse.x, mouse.y))
- // Clamp to [0, 1] and convert to [0, 1000]
- var startNormX = Math.max(0, Math.min(1, startCoords.x)) * 1000
- var startNormY = Math.max(0, Math.min(1, startCoords.y)) * 1000
-
- cropBoxStartLeft = startNormX
- cropBoxStartRight = startNormX
- cropBoxStartTop = startNormY
- cropBoxStartBottom = startNormY
-
- isCropDragging = true
+ if (!uiState.isCropping && controller) {
+ controller.toggle_crop_mode() // Ensure mode is ON
}
+
// Ensure loupeView has active focus so Escape key works
loupeView.forceActiveFocus()
+
+ // Start a NEW crop rectangle immediately from the clicked point
+ // This fulfills the "right-click drag crops immediately" requirement
+ var coords = mapToImageCoordinates(Qt.point(mouse.x, mouse.y))
+ var mx = coords.x * 1000
+ var my = coords.y * 1000
+
+ cropDragMode = "new"
+ cropStartX = mouse.x
+ cropStartY = mouse.y
+ cropBoxStartLeft = mx
+ cropBoxStartTop = my
+ cropBoxStartRight = mx
+ cropBoxStartBottom = my
+
+ uiState.currentCropBox = [Math.round(mx), Math.round(my), Math.round(mx), Math.round(my)]
+ isCropDragging = true
return
}
@@ -640,7 +659,6 @@ Item {
var inside = mx >= box[0] && mx <= box[2] && my >= box[1] && my <= box[3]
- // --- Hit test for rotation handle (robust: uses actual knob transform) ---
if (mainMouseArea.isRotating && cropOverlay.visible && rotateKnob.visible) {
// knob center in mainMouseArea coords (includes cropRect rotation)
// Note: rotateKnob is now inside mainImage -> cropOverlay -> cropRect
@@ -689,9 +707,17 @@ Item {
// If crop box is full image, always start a new crop
else if (isFullImage) {
+ // Start a new crop rectangle from the clicked point
cropDragMode = "new"
cropStartX = mouse.x
cropStartY = mouse.y
+
+ cropBoxStartLeft = mx
+ cropBoxStartTop = my
+ cropBoxStartRight = mx
+ cropBoxStartBottom = my
+
+ uiState.currentCropBox = [Math.round(mx), Math.round(my), Math.round(mx), Math.round(my)]
} else if (inside) {
// Determine which edge/corner is being dragged (Image Space)
var nearLeft = Math.abs(mx - box[0]) < edgeThreshold
@@ -725,6 +751,8 @@ Item {
cropBoxStartRight = mx
cropBoxStartTop = my
cropBoxStartBottom = my
+
+ uiState.currentCropBox = [Math.round(mx), Math.round(my), Math.round(mx), Math.round(my)]
}
isCropDragging = true
}
@@ -732,10 +760,24 @@ Item {
// Legacy getCropRect removed - using Image Space hit testing instead.
// mapToImageCoordinates maps directly to mainImage
function mapToImageCoordinates(screenPoint) {
- var p = mainMouseArea.mapToItem(mainImage, screenPoint.x, screenPoint.y)
- return {x: p.x / mainImage.width, y: p.y / mainImage.height}
+ if (!mainImage || mainImage.width <= 0) return {x:0, y:0}
+
+ // Simplified: Use Qt-native mapping to handle scale, pan, and rotation
+ var p = mainImage.mapFromItem(mainMouseArea, screenPoint.x, screenPoint.y)
+
+ // Normalize (0-1)
+ return { x: p.x / mainImage.width, y: p.y / mainImage.height }
}
onPositionChanged: function(mouse) {
+ // Darken painting drag — clamp to image bounds
+ if (isDarkenPainting && controller) {
+ var imgCoords = mapToImageCoordinates(Qt.point(mouse.x, mouse.y))
+ var cx = Math.max(0, Math.min(1, imgCoords.x))
+ var cy = Math.max(0, Math.min(1, imgCoords.y))
+ controller.continue_darken_stroke(cx, cy)
+ return
+ }
+
if (uiState && uiState.isCropping && isCropDragging) {
if (cropDragMode === "new") {
// Update crop rectangle while dragging
@@ -840,23 +882,15 @@ Item {
}
onReleased: function(mouse) {
+ // Darken painting release
+ if (isDarkenPainting) {
+ isDarkenPainting = false
+ if (controller) controller.finish_darken_stroke()
+ return
+ }
+
isDraggingOutside = false
if (uiState && uiState.isCropping && isCropDragging) {
- // Fix: Prevent accidental tiny crops with Right Click
- if (mouse.button === Qt.RightButton && cropDragMode === "new") {
- var dx = Math.abs(mouse.x - cropStartX)
- var dy = Math.abs(mouse.y - cropStartY)
- var maxDim = Math.max(dx, dy)
- var minDim = Math.min(dx, dy)
-
- // "at least 50 pixels in both dimensions"
- if (maxDim < 50 || minDim < 50) {
- if (controller) controller.cancel_crop_mode()
- isCropDragging = false
- cropDragMode = "none"
- return
- }
- }
isCropDragging = false
cropDragMode = "none"
@@ -930,7 +964,7 @@ Item {
}
function updateCropBox(x1, y1, x2, y2, applyAspectRatio = false) {
- if (!uiState || !mainImage.source) return
+ if (!uiState || !mainImage.source || mainImage.width <= 0) return
var imgCoord1 = mapToImageCoordinates(Qt.point(x1, y1))
var imgCoord2 = mapToImageCoordinates(Qt.point(x2, y2))
diff --git a/faststack/qml/DarkenToolPanel.qml b/faststack/qml/DarkenToolPanel.qml
new file mode 100644
index 0000000..c305c7e
--- /dev/null
+++ b/faststack/qml/DarkenToolPanel.qml
@@ -0,0 +1,487 @@
+import QtQuick 2.15
+import QtQuick.Controls 2.15
+import QtQuick.Controls.Material 2.15
+import QtQuick.Layouts 1.15
+import QtQuick.Window 2.15
+
+Window {
+ id: darkenPanel
+ width: 380
+ height: 700
+ title: "Background Darkening"
+ visible: uiState ? uiState.isDarkening : false
+ flags: Qt.Window | Qt.WindowTitleHint | Qt.WindowCloseButtonHint
+
+ property color backgroundColor: "#1e1e1e"
+ property color textColor: "white"
+
+ readonly property color accentColor: "#6366f1"
+ readonly property color accentColorHover: "#818cf8"
+ readonly property color controlBg: "#10ffffff"
+ readonly property color controlBorder: "#30ffffff"
+ readonly property color separatorColor: "#20ffffff"
+
+ Material.theme: Material.Dark
+ Material.accent: accentColor
+
+ color: backgroundColor
+
+ onClosing: (close) => {
+ if (controller) controller.toggle_darken_mode()
+ }
+
+ Shortcut {
+ sequence: "Escape"
+ context: Qt.WindowShortcut
+ onActivated: {
+ if (controller) controller.toggle_darken_mode()
+ }
+ }
+
+ ScrollView {
+ anchors.fill: parent
+ anchors.margins: 12
+ clip: true
+ contentWidth: availableWidth
+
+ ColumnLayout {
+ width: parent.width
+ spacing: 10
+
+ // --- Mode Selector ---
+ Label {
+ text: "Mode"
+ color: darkenPanel.accentColorHover
+ font.bold: true
+ font.pixelSize: 14
+ font.letterSpacing: 1.0
+ Layout.bottomMargin: 4
+ }
+
+ ComboBox {
+ id: modeCombo
+ Layout.fillWidth: true
+ model: ["Assisted", "Paint Only", "Strong Subject", "Border Auto"]
+
+ Binding {
+ target: modeCombo
+ property: "currentIndex"
+ value: {
+ var m = uiState ? uiState.darkenMode : "assisted"
+ if (m === "paint_only") return 1
+ if (m === "strong_subject") return 2
+ if (m === "border_auto") return 3
+ return 0
+ }
+ }
+ onActivated: (index) => {
+ var modes = ["assisted", "paint_only", "strong_subject", "border_auto"]
+ if (controller) controller.set_darken_mode(modes[index])
+ }
+
+ ToolTip.visible: hovered
+ ToolTip.delay: 500
+ ToolTip.text: {
+ var m = uiState ? uiState.darkenMode : "assisted"
+ if (m === "paint_only")
+ return "Paint Only: Only your brush strokes define the mask.\nNo automatic detection — full manual control.\nBest for precise, targeted darkening."
+ if (m === "strong_subject")
+ return "Strong Subject: Uses edge detection to strongly protect\nthe subject. Your brush strokes guide which areas to\ndarken, but edges are aggressively preserved.\nBest for images with clear foreground subjects."
+ if (m === "border_auto")
+ return "Border Auto: Automatically darkens edges/borders of\nthe image. Minimal brushwork needed — just adjust\nthe sliders. Best for quick vignette-like darkening."
+ return "Assisted: Your brush strokes are combined with\nautomatic edge detection to create a natural mask.\nThe algorithm helps blend your strokes smoothly.\nBest general-purpose mode for most images."
+ }
+ }
+
+ // --- Separator ---
+ Rectangle {
+ Layout.fillWidth: true
+ Layout.topMargin: 8
+ Layout.bottomMargin: 4
+ height: 1
+ color: darkenPanel.separatorColor
+ }
+
+ // --- Darkening Controls ---
+ Label {
+ text: "Darkening"
+ color: darkenPanel.accentColorHover
+ font.bold: true
+ font.pixelSize: 14
+ font.letterSpacing: 1.0
+ Layout.bottomMargin: 4
+ }
+
+ DarkenSlider {
+ label: "Amount"
+ paramKey: "darken_amount"
+ value: uiState ? uiState.darkenAmount * 100 : 50
+ tooltip: "How much to darken the masked background areas.\n0 = no darkening, 100 = maximum darkening.\nStart around 30–50 and adjust to taste."
+ }
+ DarkenSlider {
+ label: "Edge Protection"
+ paramKey: "edge_protection"
+ value: uiState ? uiState.darkenEdgeProtection * 100 : 50
+ tooltip: "Prevents darkening near strong edges (subject outlines).\nHigher values keep a brighter halo around sharp\nedges, avoiding unnatural dark fringing.\nUseful when the mask bleeds into the subject."
+ }
+ DarkenSlider {
+ label: "Subject Protection"
+ paramKey: "subject_protection"
+ value: uiState ? uiState.darkenSubjectProtection * 100 : 50
+ tooltip: "Protects bright, saturated areas from darkening.\nHigher values preserve subject colors and highlights.\nHelps when the mask accidentally covers the subject."
+ }
+
+ // --- Separator ---
+ Rectangle {
+ Layout.fillWidth: true
+ Layout.topMargin: 8
+ Layout.bottomMargin: 4
+ height: 1
+ color: darkenPanel.separatorColor
+ }
+
+ // --- Mask Refinement ---
+ Label {
+ text: "Mask Refinement"
+ color: darkenPanel.accentColorHover
+ font.bold: true
+ font.pixelSize: 14
+ font.letterSpacing: 1.0
+ Layout.bottomMargin: 4
+ }
+
+ DarkenSlider {
+ label: "Feather"
+ paramKey: "feather"
+ value: uiState ? uiState.darkenFeather * 100 : 50
+ tooltip: "Softens the mask edges for a gradual transition.\n0 = hard edge (sharp boundary between dark and light),\n100 = very soft edge (wide gradient).\nHigher values give a more natural, blended look."
+ }
+ DarkenSlider {
+ label: "Dark Range"
+ paramKey: "dark_range"
+ value: uiState ? uiState.darkenDarkRange * 100 : 50
+ tooltip: "Controls how the mask interacts with already-dark areas.\nHigher values extend the mask into darker tones,\nlower values focus darkening on midtones and highlights.\nUseful for controlling shadow depth."
+ }
+ DarkenSlider {
+ label: "Neutrality"
+ paramKey: "neutrality_sensitivity"
+ value: uiState ? uiState.darkenNeutrality * 100 : 50
+ tooltip: "Sensitivity to neutral (grey/unsaturated) colors.\nHigher values cause the mask to prefer darkening\nneutral areas while leaving colorful areas alone.\nHelps isolate plain backgrounds from colorful subjects."
+ }
+ DarkenSlider {
+ label: "Expand / Contract"
+ paramKey: "expand_contract"
+ minVal: -100
+ value: uiState ? uiState.darkenExpandContract * 100 : 0
+ tooltip: "Grows or shrinks the mask boundary.\nPositive values expand the darkened area outward,\nnegative values contract it inward.\nUse to fine-tune where darkening starts and stops."
+ }
+ DarkenSlider {
+ label: "Auto From Edges"
+ paramKey: "auto_from_edges"
+ value: uiState ? uiState.darkenAutoEdges * 100 : 0
+ minVal: 0
+ tooltip: "Uses edge detection to guide automatic masking.\nSmooth areas between strong edges get higher\nbackground confidence, helping the mask follow\nsubject outlines. Complements Edge Protection:\nthat slider stops the mask at edges, this one\nactively uses edges to shape the mask."
+ }
+
+ // --- Separator ---
+ Rectangle {
+ Layout.fillWidth: true
+ Layout.topMargin: 8
+ Layout.bottomMargin: 4
+ height: 1
+ color: darkenPanel.separatorColor
+ }
+
+ // --- Brush ---
+ Label {
+ text: "Brush"
+ color: darkenPanel.accentColorHover
+ font.bold: true
+ font.pixelSize: 14
+ font.letterSpacing: 1.0
+ Layout.bottomMargin: 4
+ }
+
+ RowLayout {
+ Layout.fillWidth: true
+ spacing: 10
+ Label {
+ text: "Size"
+ color: darkenPanel.textColor
+ font.pixelSize: 13
+ Layout.preferredWidth: 90
+
+ ToolTip.visible: brushSizeMA.containsMouse
+ ToolTip.delay: 500
+ ToolTip.text: "Brush radius for painting mask strokes.\nLarger brush = faster coverage of big areas.\nSmaller brush = more precise control."
+ MouseArea { id: brushSizeMA; anchors.fill: parent; hoverEnabled: true }
+ }
+ Slider {
+ id: brushSlider
+ Layout.fillWidth: true
+ from: 1; to: 100; stepSize: 1
+
+ Binding on value {
+ value: uiState ? uiState.darkenBrushRadius * 1000 : 30
+ when: !brushSlider.pressed
+ }
+
+ onMoved: {
+ if (controller) controller.set_darken_param("brush_radius", value / 1000.0)
+ }
+ }
+ Label {
+ text: Math.round(brushSlider.value).toString()
+ color: darkenPanel.textColor
+ font.pixelSize: 12
+ Layout.preferredWidth: 30
+ }
+ }
+
+ Label {
+ text: "Left-click: paint background | Right-click: protect subject"
+ color: darkenPanel.textColor
+ opacity: 0.6
+ font.pixelSize: 11
+ font.italic: true
+ wrapMode: Text.WordWrap
+ Layout.fillWidth: true
+ }
+
+ // --- Separator ---
+ Rectangle {
+ Layout.fillWidth: true
+ Layout.topMargin: 8
+ Layout.bottomMargin: 4
+ height: 1
+ color: darkenPanel.separatorColor
+ }
+
+ // --- Overlay Controls ---
+ Label {
+ text: "Overlay"
+ color: darkenPanel.accentColorHover
+ font.bold: true
+ font.pixelSize: 14
+ font.letterSpacing: 1.0
+ Layout.bottomMargin: 4
+ }
+
+ RowLayout {
+ Layout.fillWidth: true
+ spacing: 10
+ CheckBox {
+ id: overlayCheck
+ text: "Show Overlay"
+
+ Binding {
+ target: overlayCheck
+ property: "checked"
+ value: uiState ? uiState.darkenOverlayVisible : true
+ when: !overlayCheck.pressed
+ }
+
+ onToggled: {
+ if (controller) controller.set_darken_overlay_visible(checked)
+ }
+ Material.accent: darkenPanel.accentColor
+
+ ToolTip.visible: hovered
+ ToolTip.delay: 500
+ ToolTip.text: "Show or hide the colored mask overlay on the image.\nThe overlay helps you see exactly which areas will\nbe darkened. Toggle off to see the actual result."
+ }
+ }
+
+ // Colour swatches
+ RowLayout {
+ Layout.fillWidth: true
+ spacing: 6
+ Label {
+ text: "Color:"
+ color: darkenPanel.textColor
+ font.pixelSize: 13
+
+ ToolTip.visible: colorLabelMA.containsMouse
+ ToolTip.delay: 500
+ ToolTip.text: "Choose the overlay color.\nThis only affects the preview overlay — it does\nnot change the actual darkening result."
+ MouseArea { id: colorLabelMA; anchors.fill: parent; hoverEnabled: true }
+ }
+ Repeater {
+ model: [
+ {"name": "Blue", "r": 80, "g": 120, "b": 255},
+ {"name": "Red", "r": 255, "g": 80, "b": 80},
+ {"name": "Green", "r": 80, "g": 255, "b": 120},
+ {"name": "Yellow", "r": 255, "g": 255, "b": 80},
+ {"name": "Magenta", "r": 255, "g": 80, "b": 255},
+ {"name": "Cyan", "r": 80, "g": 255, "b": 255}
+ ]
+ Rectangle {
+ width: 24; height: 24; radius: 4
+ color: Qt.rgba(modelData.r / 255, modelData.g / 255, modelData.b / 255, 1.0)
+ border.color: activeFocus ? "white" : "transparent"
+ border.width: 2
+ activeFocusOnTab: true
+
+ Accessible.name: modelData.name
+ Accessible.role: Accessible.Button
+
+ ToolTip.visible: swatchMA.containsMouse
+ ToolTip.delay: 500
+ ToolTip.text: modelData.name
+
+ MouseArea {
+ id: swatchMA
+ anchors.fill: parent
+ cursorShape: Qt.PointingHandCursor
+ hoverEnabled: true
+ onClicked: {
+ if (controller) controller.set_darken_overlay_color(modelData.r, modelData.g, modelData.b)
+ }
+ }
+
+ Keys.onPressed: (event) => {
+ if (event.key === Qt.Key_Enter || event.key === Qt.Key_Return || event.key === Qt.Key_Space) {
+ if (controller) controller.set_darken_overlay_color(modelData.r, modelData.g, modelData.b)
+ event.accepted = true
+ }
+ }
+ }
+ }
+ }
+
+ // --- Separator ---
+ Rectangle {
+ Layout.fillWidth: true
+ Layout.topMargin: 8
+ Layout.bottomMargin: 4
+ height: 1
+ color: darkenPanel.separatorColor
+ }
+
+ // --- Action Buttons ---
+ RowLayout {
+ Layout.fillWidth: true
+ spacing: 10
+
+ Button {
+ text: "Undo Stroke"
+ Layout.fillWidth: true
+ onClicked: { if (controller) controller.undo_darken_stroke() }
+ contentItem: Text { text: parent.text; font: parent.font; color: darkenPanel.textColor; horizontalAlignment: Text.AlignHCenter; verticalAlignment: Text.AlignVCenter }
+ background: Rectangle { color: parent.pressed ? "#40ffffff" : "#20ffffff"; radius: 4; border.color: parent.hovered ? "#60ffffff" : "transparent" }
+
+ ToolTip.visible: hovered
+ ToolTip.delay: 500
+ ToolTip.text: "Remove the last brush stroke you painted."
+ }
+
+ Button {
+ text: "Clear All"
+ Layout.fillWidth: true
+ onClicked: { if (controller) controller.clear_darken_strokes() }
+ contentItem: Text { text: parent.text; font: parent.font; color: darkenPanel.textColor; horizontalAlignment: Text.AlignHCenter; verticalAlignment: Text.AlignVCenter }
+ background: Rectangle { color: parent.pressed ? "#40ffffff" : "#20ffffff"; radius: 4; border.color: parent.hovered ? "#60ffffff" : "transparent" }
+
+ ToolTip.visible: hovered
+ ToolTip.delay: 500
+ ToolTip.text: "Remove all brush strokes and start fresh."
+ }
+ }
+
+ // --- Close Button ---
+ Button {
+ Layout.fillWidth: true
+ Layout.topMargin: 6
+ text: "Close (K)"
+ onClicked: { if (controller) controller.toggle_darken_mode() }
+ contentItem: Text { text: parent.text; font: parent.font; color: darkenPanel.textColor; horizontalAlignment: Text.AlignHCenter; verticalAlignment: Text.AlignVCenter }
+ background: Rectangle { color: parent.pressed ? "#40ffffff" : "#20ffffff"; radius: 4; border.color: parent.hovered ? darkenPanel.accentColor : "#60ffffff" }
+
+ ToolTip.visible: hovered
+ ToolTip.delay: 500
+ ToolTip.text: "Close the darkening panel.\nThe darkening effect stays applied to the image.\nPress K again to reopen."
+ }
+
+ // Spacer
+ Item { Layout.fillHeight: true; Layout.minimumHeight: 10 }
+ }
+ }
+
+ // --- Darken Slider Component ---
+ component DarkenSlider: RowLayout {
+ id: sliderRoot
+ property string label: ""
+ property string paramKey: ""
+ property real value: 0
+ property real minVal: 0
+ property real maxVal: 100
+ property string tooltip: ""
+
+ Layout.fillWidth: true
+ spacing: 10
+
+ Label {
+ text: sliderRoot.label
+ color: darkenPanel.textColor
+ font.pixelSize: 13
+ Layout.preferredWidth: 110
+ elide: Text.ElideRight
+
+ ToolTip.visible: sliderLabelMA.containsMouse && sliderRoot.tooltip !== ""
+ ToolTip.delay: 500
+ ToolTip.text: sliderRoot.tooltip
+ MouseArea { id: sliderLabelMA; anchors.fill: parent; hoverEnabled: true }
+ }
+
+ Slider {
+ id: dSlider
+ Layout.fillWidth: true
+ from: sliderRoot.minVal; to: sliderRoot.maxVal; stepSize: 1
+
+ // Bind to sliderRoot.value (the component's own property) so
+ // `parent` ambiguity inside an inline component is avoided.
+ // Previously `parent.value` resolved to RowLayout.value → 0,
+ // causing the slider to snap back to the minimum on every frame.
+ Binding on value {
+ value: sliderRoot.value
+ when: !dSlider.pressed
+ }
+
+ property real _pendingValue: 0
+ property real _lastSentValue: 0
+ Timer {
+ id: dsendTimer
+ interval: 16
+ repeat: true
+ onTriggered: {
+ if (Math.abs(dSlider._pendingValue - dSlider._lastSentValue) > 0.001) {
+ if (controller) controller.set_darken_param(sliderRoot.paramKey, dSlider._pendingValue / sliderRoot.maxVal)
+ dSlider._lastSentValue = dSlider._pendingValue
+ }
+ }
+ }
+
+ onPressedChanged: {
+ if (pressed) {
+ _pendingValue = value
+ _lastSentValue = value
+ if (!dsendTimer.running) dsendTimer.start()
+ } else {
+ dsendTimer.stop()
+ if (controller) controller.set_darken_param(sliderRoot.paramKey, value / sliderRoot.maxVal)
+ }
+ }
+ onMoved: {
+ _pendingValue = value
+ if (!dsendTimer.running) dsendTimer.start()
+ }
+ }
+
+ Label {
+ text: Math.round(dSlider.value).toString()
+ color: darkenPanel.textColor
+ font.pixelSize: 12
+ Layout.preferredWidth: 30
+ }
+ }
+}
diff --git a/faststack/qml/ImageEditorDialog.qml b/faststack/qml/ImageEditorDialog.qml
index 5b940da..586018e 100644
--- a/faststack/qml/ImageEditorDialog.qml
+++ b/faststack/qml/ImageEditorDialog.qml
@@ -73,6 +73,13 @@ Window {
// Note: Editor closes automatically via _on_save_finished callback
}
}
+ Shortcut {
+ sequence: "K"
+ context: Qt.WindowShortcut
+ onActivated: {
+ if (controller) controller.toggle_darken_mode()
+ }
+ }
// Component for Section Separator
Component {
@@ -319,6 +326,27 @@ Window {
}
Repeater { model: effectsModel; delegate: editSlider }
+ Button {
+ text: "Darken Background (K)"
+ Layout.fillWidth: true
+ font.pixelSize: 12
+ onClicked: {
+ if (controller) controller.toggle_darken_mode()
+ }
+ contentItem: Text {
+ text: parent.text
+ font: parent.font
+ color: (uiState && uiState.isDarkening) ? "white" : imageEditorDialog.textColor
+ horizontalAlignment: Text.AlignHCenter
+ verticalAlignment: Text.AlignVCenter
+ }
+ background: Rectangle {
+ color: (uiState && uiState.isDarkening) ? imageEditorDialog.accentColor : (parent.pressed ? "#40ffffff" : "#20ffffff")
+ radius: 4
+ border.color: parent.hovered ? "#60ffffff" : "transparent"
+ }
+ }
+
Loader { sourceComponent: sectionSeparator }
// --- Transform Group ---
diff --git a/faststack/qml/Main.qml b/faststack/qml/Main.qml
index e84b987..10df6d4 100644
--- a/faststack/qml/Main.qml
+++ b/faststack/qml/Main.qml
@@ -1110,6 +1110,21 @@ ApplicationWindow {
}
}
+ // Background Darkening Tool (K) — independent of the editor sidebar
+ Shortcut {
+ sequence: "K"
+ context: Qt.ApplicationShortcut
+ enabled: uiState ? !uiState.isDialogOpen && !uiState.isCropping : false
+ onActivated: {
+ if (!uiState || !controller) return
+ if (uiState.isDarkening) {
+ controller.toggle_darken_mode()
+ } else {
+ controller.open_darken_tool()
+ }
+ }
+ }
+
// Grid View Toggle (T for Thumbnails)
Shortcut {
sequence: "T"
@@ -1471,7 +1486,9 @@ ApplicationWindow {
Label {
id: statusMessageLabel
text: uiState ? uiState.statusMessage : ""
- color: root.currentTextColor
+ color: (uiState && uiState.isSaving) ? "#4CAF50" : root.currentTextColor
+ font.bold: (uiState && uiState.isSaving) ? true : false
+ font.pixelSize: (uiState && uiState.isSaving) ? 14 : 12
visible: uiState ? (uiState.statusMessage !== "") : false
Layout.rightMargin: 10
}
@@ -1625,8 +1642,8 @@ ApplicationWindow {
width: 450
text: "FastStack Keyboard and Mouse Commands
" +
"Navigation:
" +
- " J / Right Arrow: Next Image
" +
- " K / Left Arrow: Previous Image
" +
+ " Right Arrow: Next Image
" +
+ " Left Arrow: Previous Image
" +
" G: Jump to Image Number
" +
" Alt+U: Jump to Last Uploaded
" +
" I: Show EXIF Data
" +
@@ -1681,6 +1698,7 @@ ApplicationWindow {
" Ctrl+S (in editor): Save edited image
" +
" A: Quick auto white balance
" +
" L: Quick auto levels
" +
+ " K: Background Darkening Tool
" +
" O (or right-click): Toggle crop mode
" +
" 1/2/3/4: Set aspect ratio (1:1, 4:3, 3:2, 16:9)
" +
" Enter: Execute crop
" +
@@ -1765,6 +1783,10 @@ ApplicationWindow {
}
}
+ DarkenToolPanel {
+ id: darkenToolPanel
+ }
+
function show_jump_to_image_dialog() {
jumpToImageDialog.open()
}
diff --git a/faststack/repro.py b/faststack/repro.py
new file mode 100644
index 0000000..940296a
--- /dev/null
+++ b/faststack/repro.py
@@ -0,0 +1,51 @@
+import sys
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+
+# Add the repo root to sys.path (parent of the cwd — assumes this script is run from inside faststack/)
+sys.path.append(str(Path(".").resolve().parent))
+
+print(f"DEBUG: sys.path[-1] is {sys.path[-1]}")
+
+from faststack.app import AppController
+
+# Mock dependencies
+mock_engine = MagicMock()
+mock_config = MagicMock()
+
+with (
+ patch("config.config"),
+ patch("faststack.io.watcher.Watcher"),
+ patch("faststack.io.sidecar.SidecarManager"),
+ patch("faststack.imaging.prefetch.Prefetcher"),
+ patch("faststack.imaging.cache.ByteLRUCache"),
+ patch("faststack.thumbnail_view.ThumbnailProvider"),
+):
+ controller = AppController(Path("."), mock_engine)
+
+# Setup state
+# Use a real list (not a MagicMock) so indexing and len() behave normally
+from faststack.models import ImageFile
+
+mock_image = ImageFile(Path("test.jpg"))
+controller.image_files = [mock_image]
+controller.current_index = 0
+controller.auto_level_threshold = 0.001
+
+# Mock image_editor
+controller.image_editor = MagicMock()
+controller.image_editor.auto_levels.return_value = (10, 240, 10, 240) # Not full range
+controller.image_editor.current_filepath = Path("test.jpg")
+controller.image_editor.load_image.return_value = True
+
+print("Calling controller.auto_levels()...")
+try:
+ result = controller.auto_levels()
+ print(f"Result: {result}")
+ controller.image_editor.auto_levels.assert_called_once()
+ print("Assertion passed!")
+except Exception as e:
+ print(f"\nAssertion FAILED: {type(e).__name__}: {e}")
+ import traceback
+
+ traceback.print_exc()
diff --git a/faststack/result.txt b/faststack/result.txt
deleted file mode 100644
index d61bda5..0000000
--- a/faststack/result.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-============================= test session starts =============================
-platform win32 -- Python 3.12.10, pytest-8.4.2, pluggy-1.6.0 -- C:\code\faststack\.venv\Scripts\python.exe
-rootdir: C:\code\faststack
-configfile: pyproject.toml
-collecting ... collected 3 items
-
-tests\test_exif_orientation.py::TestExifOrientation::test_orientation_preserved_no_rotation PASSED [ 33%]
-tests\test_exif_orientation.py::TestExifOrientation::test_orientation_sanitization_on_rotation Windows fatal exception: stack overflow
-
-Current thread 0x000005a0 (most recent call first):
- File "", line 488 in _call_with_frames_removed
- File "", line 1301 in exec_module
- File "", line 935 in _load_unlocked
- File "", line 1331 in _find_and_load_unlocked
- File "", line 1360 in _find_and_load
- File "", line 488 in _call_with_frames_removed
- File "", line 1293 in create_module
- File "", line 813 in module_from_spec
- File "", line 921 in _load_unlocked
- File "", line 1331 in _find_and_load_unlocked
- File "", line 1360 in _find_and_load
- File "C:\code\faststack\faststack\imaging\editor.py", line 28 in
- File "", line 488 in _call_with_frames_removed
- File "", line 999 in exec_module
- File "", line 935 in _load_unlocked
- File "", line 1331 in _find_and_load_unlocked
- File "", line 1360 in _find_and_load
- File "C:\code\faststack\faststack\tests\test_exif_orientation.py", line 25 in setUp
- File "C:\Users\alanr\AppData\Local\Programs\Python\Python312\Lib\unittest\case.py", line 586 in _callSetUp
- File "C:\Users\alanr\AppData\Local\Programs\Python\Python312\Lib\unittest\case.py", line 630 in run
- File "C:\Users\alanr\AppData\Local\Programs\Python\Python312\Lib\unittest\case.py", line 690 in __call__
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\unittest.py", line 351 in runtest
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\runner.py", line 178 in pytest_runtest_call
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_callers.py", line 121 in _multicall
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_manager.py", line 120 in _hookexec
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_hooks.py", line 512 in __call__
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\runner.py", line 246 in
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\runner.py", line 344 in from_call
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\runner.py", line 245 in call_and_report
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\runner.py", line 136 in runtestprotocol
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\runner.py", line 117 in pytest_runtest_protocol
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_callers.py", line 121 in _multicall
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_manager.py", line 120 in _hookexec
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_hooks.py", line 512 in __call__
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\main.py", line 367 in pytest_runtestloop
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_callers.py", line 121 in _multicall
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_manager.py", line 120 in _hookexec
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_hooks.py", line 512 in __call__
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\main.py", line 343 in _main
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\main.py", line 289 in wrap_session
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\main.py", line 336 in pytest_cmdline_main
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_callers.py", line 121 in _multicall
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_manager.py", line 120 in _hookexec
- File "C:\code\faststack\.venv\Lib\site-packages\pluggy\_hooks.py", line 512 in __call__
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\config\__init__.py", line 175 in main
- File "C:\code\faststack\.venv\Lib\site-packages\_pytest\config\__init__.py", line 201 in console_main
- File "c:\code\faststack\.venv\Scripts\pytest.exe\__main__.py", line 6 in
- File "", line 88 in _run_code
- File "", line 198 in _run_module_as_main
diff --git a/faststack/tests/test_editor_integration.py b/faststack/tests/test_editor_integration.py
index 058d301..4e1374e 100644
--- a/faststack/tests/test_editor_integration.py
+++ b/faststack/tests/test_editor_integration.py
@@ -29,6 +29,9 @@ def setUp(self):
patch("faststack.app.ThumbnailProvider"),
):
self.controller = AppController(Path("."), self.mock_engine)
+ # Stub out refresh_image_list so later calls during the test
+ # do not trigger disk I/O or overwrite mock state.
+ self.controller.refresh_image_list = MagicMock()
# Mock the internal image_editor to verify delegation
self.controller.image_editor = MagicMock()
@@ -105,8 +108,20 @@ def test_missing_methods(self):
# 5. save_edited_image
try:
+ snapshot_sentinel = {"mock": "snapshot"}
+ self.controller.image_editor.snapshot_for_export.return_value = (
+ snapshot_sentinel
+ )
+ self.controller.image_editor.save_from_snapshot.return_value = (
+ Path("test.jpg"),
+ None,
+ )
+ self.controller.ui_state.isSaving = False
self.controller.save_edited_image()
- self.controller.image_editor.save_image.assert_called_once()
+ self.controller.image_editor.snapshot_for_export.assert_called_once()
+ self.controller.image_editor.save_from_snapshot.assert_called_once_with(
+ snapshot_sentinel
+ )
except AttributeError:
self.fail("AppController is missing method 'save_edited_image'")
diff --git a/faststack/tests/test_editor_reopening.py b/faststack/tests/test_editor_reopening.py
new file mode 100644
index 0000000..20db42a
--- /dev/null
+++ b/faststack/tests/test_editor_reopening.py
@@ -0,0 +1,224 @@
+import unittest
+import sys
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+
+# Ensure we can import faststack
+sys.path.append(str(Path(__file__).parents[2]))
+
+from faststack.app import AppController
+
+
+class TestEditorReopening(unittest.TestCase):
+ def setUp(self):
+ # 1. Heavily patch all external-touching classes
+ self.patchers = [
+ patch("faststack.app.Watcher"),
+ patch("faststack.app.SidecarManager"),
+ patch("faststack.app.Prefetcher"),
+ patch("faststack.app.ByteLRUCache"),
+ patch("faststack.app.ThumbnailProvider"),
+ patch("faststack.app.config"),
+ patch("faststack.app.setup_logging"),
+ patch("faststack.app.UIState"),
+ patch(
+ "faststack.app.QTimer"
+ ), # <-- Fix QObject/Timer issues in headless tests
+ patch("faststack.app.create_daemon_threadpool_executor"),
+ patch("concurrent.futures.ThreadPoolExecutor"),
+ ]
+ for p in self.patchers:
+ p.start()
+
+ # 2. Instantiate controller
+ self.controller = AppController(Path("."), MagicMock())
+
+ # 3. Setup mocks for editor session logic
+ self.controller.image_editor = MagicMock()
+ self.controller.image_editor.current_filepath = Path("test.jpg")
+ self.controller.image_editor.current_mtime = 123.4
+ self.controller.image_editor.session_id = "test-session"
+
+ # Setup files
+ mock_file = MagicMock()
+ mock_file.path = Path("test.jpg")
+ self.controller.image_files = [mock_file]
+ self.controller.current_index = 0
+
+ def tearDown(self):
+ for p in self.patchers:
+ p.stop()
+
+ def test_save_failure_retains_editor_state(self):
+ # Simulate background worker callback firing with a failure.
+ # _on_save_finished takes a single dict that contains both the
+ # result and the context fields produced by save_edited_image().
+ save_result = {
+ "success": False,
+ "error": "Disk full",
+ "save_image_key": str(Path("test.jpg").resolve()),
+ "session_token": ("key", None, "test-session"),
+ "editor_was_open": True,
+ }
+
+ self.controller._on_save_finished(save_result)
+
+ # VERIFY: Clear must NOT be called on failure
+ self.controller.image_editor.clear.assert_not_called()
+
+ def test_reopen_hits_reuse_path_on_matching_file(self):
+ target = Path("test.jpg")
+ self.controller.image_editor.current_filepath = target
+ self.controller.image_editor.current_mtime = 123.4
+
+ with patch("pathlib.Path.resolve", return_value=target.absolute()):
+ with patch("pathlib.Path.stat") as mock_stat:
+ mock_stat.return_value.st_mtime = 123.4
+
+ # REOPEN
+ res = self.controller.load_image_for_editing()
+
+ self.assertTrue(res)
+ # VERIFY: reuse signals
+ self.controller.ui_state.editorImageChanged.emit.assert_called_once()
+ # VERIFY: no reload performed
+ self.controller.image_editor.load_image.assert_not_called()
+
+ def test_load_failure_closes_dialog(self):
+ # Case 1: load_image fails
+ self.controller.ui_state.isEditorOpen = True
+ self.controller.image_editor.current_filepath = None # Ensure no reuse
+ self.controller.image_editor.load_image.return_value = False
+
+ res = self.controller.load_image_for_editing()
+ self.assertFalse(res)
+ self.assertFalse(
+ self.controller.ui_state.isEditorOpen, "Dialog should close on failure"
+ )
+
+ # Case 2: exception throws
+ self.controller.ui_state.isEditorOpen = True
+ self.controller.image_editor.load_image.side_effect = RuntimeError("IO error")
+ res = self.controller.load_image_for_editing()
+ self.assertFalse(res)
+ self.assertFalse(
+ self.controller.ui_state.isEditorOpen, "Dialog should close on error"
+ )
+
+ def test_reuse_returns_REUSED_not_True(self):
+ """The reuse path must return _REUSED (truthy, but ``is True`` is False)
+ so _prepare_darken_image_state can distinguish reuse from reload."""
+ target = Path("test.jpg")
+ self.controller.image_editor.current_filepath = target
+ self.controller.image_editor.current_mtime = 123.4
+ self.controller.image_editor.current_edits = {}
+
+ with patch("pathlib.Path.resolve", return_value=target.absolute()):
+ with patch("pathlib.Path.stat") as mock_stat:
+ mock_stat.return_value.st_mtime = 123.4
+
+ res = self.controller.load_image_for_editing()
+
+ # Must be truthy (success)…
+ self.assertTrue(res)
+ # …but not exactly True (so ``is True`` check in
+ # _prepare_darken_image_state correctly skips darken reset)
+ self.assertIsNot(res, True)
+ self.assertEqual(res, AppController._REUSED)
+
+ def test_prepare_darken_skips_reset_on_reuse(self):
+ """_prepare_darken_image_state must NOT call _reset_darken_on_navigation
+ when load_image_for_editing returns _REUSED."""
+ target = Path("test.jpg")
+ self.controller.image_editor.current_filepath = None # Force a load
+ self.controller.image_editor.float_image = None # Force needs_load=True
+ self.controller.image_editor.current_edits = {}
+
+ with patch.object(
+ self.controller,
+ "load_image_for_editing",
+ return_value=AppController._REUSED,
+ ):
+ with patch.object(
+ self.controller, "_reset_darken_on_navigation"
+ ) as mock_reset:
+ result = self.controller._prepare_darken_image_state()
+ self.assertTrue(result)
+ mock_reset.assert_not_called()
+
+ def test_prepare_darken_resets_on_real_reload(self):
+ """_prepare_darken_image_state MUST call _reset_darken_on_navigation
+ when load_image_for_editing returns True (real reload)."""
+ self.controller.image_editor.current_filepath = None
+ self.controller.image_editor.float_image = None
+
+ with patch.object(self.controller, "load_image_for_editing", return_value=True):
+ with patch.object(
+ self.controller, "_reset_darken_on_navigation"
+ ) as mock_reset:
+ result = self.controller._prepare_darken_image_state()
+ self.assertTrue(result)
+ mock_reset.assert_called_once()
+
+ def test_prepare_darken_aborts_on_failure(self):
+ """_prepare_darken_image_state must return False when load fails."""
+ self.controller.image_editor.current_filepath = None
+ self.controller.image_editor.float_image = None
+
+ with patch.object(
+ self.controller, "load_image_for_editing", return_value=False
+ ):
+ result = self.controller._prepare_darken_image_state()
+ self.assertFalse(result)
+
+ def test_save_closes_ui_immediately_but_keeps_memory(self):
+ # 1. Setup
+ target = Path("test.jpg")
+ target_abs = self.controller._key(target)
+ self.controller.ui_state.isEditorOpen = True
+ self.controller.image_editor.current_filepath = target
+ self.controller.image_editor.session_id = "sess-1"
+ self.controller.image_editor.current_mtime = 123.4
+
+ # Mock snapshot
+ self.controller.image_editor.snapshot_for_export.return_value = MagicMock()
+
+ with patch.object(self.controller, "_save_executor") as mock_executor:
+ # 2. CALL SAVE
+ self.controller.save_edited_image()
+
+ # VERIFY: UI closed immediately in controller state
+ self.assertFalse(self.controller.ui_state.isEditorOpen)
+
+ # 3. SIMULATE SIGNAL TRIGGERED BY UI CLOSURE
+ # In the real app, setting isEditorOpen=False emits signal -> calls _on_editor_open_changed(False)
+ self.controller._on_editor_open_changed(False)
+
+ # VERIFY: Clear was NOT called (because save is in flight for this key)
+ self.controller.image_editor.clear.assert_not_called()
+
+ # VERIFY: Save in-flight markers are present
+ self.assertIn(target_abs, self.controller._saving_keys)
+
+ # 4. RE-OPEN (Simulation)
+ # Should be REUSED since memory wasn't cleared
+ with patch("pathlib.Path.resolve", return_value=target.absolute()):
+ with patch("pathlib.Path.stat") as mock_stat:
+ mock_stat.return_value.st_mtime = 123.4
+ res = self.controller.load_image_for_editing()
+ self.assertEqual(res, AppController._REUSED)
+ self.controller.image_editor.load_image.assert_not_called()
+
+ def test_editor_close_clears_memory_if_no_save_active(self):
+ # 1. Setup
+ self.controller.image_editor.current_filepath = Path("no_save.jpg")
+
+ # 2. Simulate closure via signal while NO save is in flight
+ self.controller._on_editor_open_changed(False)
+
+ # VERIFY: Clear IS called because no save active for this file
+ self.controller.image_editor.clear.assert_called_once()
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/faststack/tests/test_mask.py b/faststack/tests/test_mask.py
new file mode 100644
index 0000000..dad1134
--- /dev/null
+++ b/faststack/tests/test_mask.py
@@ -0,0 +1,738 @@
+"""Tests for the reusable mask subsystem and background darkening tool."""
+
+import math
+import unittest
+
+import numpy as np
+
+from faststack.imaging.mask import DarkenSettings, MaskData, MaskStroke
+from faststack.imaging.mask_engine import (
+ MaskRasterCache,
+ forward_transform,
+ inverse_transform,
+ rasterize_strokes,
+ resolve_mask,
+)
+from faststack.imaging.masked_ops import apply_masked_darken
+
+
+class TestMaskStroke(unittest.TestCase):
+ def test_create_stroke(self):
+ s = MaskStroke(
+ points=[(0.1, 0.2), (0.3, 0.4)],
+ radius=0.05,
+ stroke_type="add",
+ )
+ self.assertEqual(len(s.points), 2)
+ self.assertEqual(s.stroke_type, "add")
+ self.assertIsNone(s.pressure)
+
+ def test_serialise_round_trip(self):
+ s = MaskStroke(
+ points=[(0.5, 0.5)],
+ radius=0.1,
+ stroke_type="protect",
+ pressure=[0.8],
+ )
+ d = s.to_dict()
+ s2 = MaskStroke.from_dict(d)
+ self.assertEqual(s2.stroke_type, "protect")
+ self.assertAlmostEqual(s2.radius, 0.1)
+ self.assertEqual(s2.pressure, [0.8])
+
+
+class TestMaskData(unittest.TestCase):
+ def test_revision_tracking(self):
+ md = MaskData()
+ self.assertEqual(md.revision, 0)
+ self.assertFalse(md.has_strokes())
+
+ md.add_stroke(MaskStroke([(0.5, 0.5)], 0.05, "add"))
+ self.assertEqual(md.revision, 1)
+ self.assertTrue(md.has_strokes())
+
+ md.add_stroke(MaskStroke([(0.2, 0.2)], 0.05, "protect"))
+ self.assertEqual(md.revision, 2)
+
+ removed = md.undo_last_stroke()
+ self.assertIsNotNone(removed)
+ self.assertEqual(removed.stroke_type, "protect")
+ self.assertEqual(md.revision, 3)
+ self.assertEqual(len(md.strokes), 1)
+
+ md.clear_strokes()
+ self.assertEqual(md.revision, 4)
+ self.assertFalse(md.has_strokes())
+
+ def test_serialise_round_trip(self):
+ md = MaskData()
+ md.add_stroke(MaskStroke([(0.1, 0.2)], 0.03, "add"))
+ md.overlay_color = (255, 0, 0)
+ md.overlay_opacity = 0.6
+
+ d = md.to_dict()
+ md2 = MaskData.from_dict(d)
+ self.assertEqual(len(md2.strokes), 1)
+ self.assertEqual(md2.overlay_color, (255, 0, 0))
+ self.assertAlmostEqual(md2.overlay_opacity, 0.6)
+
+ def test_default_overlay(self):
+ md = MaskData()
+ self.assertEqual(md.overlay_color, (80, 120, 255))
+ self.assertAlmostEqual(md.overlay_opacity, 0.4)
+
+
+class TestDarkenSettings(unittest.TestCase):
+ def test_separation_from_mask_data(self):
+ """DarkenSettings and MaskData are fully independent."""
+ md = MaskData()
+ ds = DarkenSettings(mask_id="darken", enabled=True)
+ # MaskData has no reference to DarkenSettings fields
+ self.assertFalse(hasattr(md, "darken_amount"))
+ # DarkenSettings has no strokes
+ self.assertFalse(hasattr(ds, "strokes"))
+
+ def test_params_tuple(self):
+ ds = DarkenSettings()
+ t = ds.params_tuple()
+ self.assertIsInstance(t, tuple)
+ self.assertEqual(len(t), 9)
+
+ def test_serialise_round_trip(self):
+ ds = DarkenSettings(darken_amount=0.7, mode="border_auto", enabled=True)
+ d = ds.to_dict()
+ ds2 = DarkenSettings.from_dict(d)
+ self.assertAlmostEqual(ds2.darken_amount, 0.7)
+ self.assertEqual(ds2.mode, "border_auto")
+ self.assertTrue(ds2.enabled)
+
+
+class TestCoordinateTransforms(unittest.TestCase):
+    """Round-trip tests for forward_transform / inverse_transform.
+
+    Strokes live in normalised base-image coordinates; these tests verify
+    that mapping a point into display space and back recovers the original
+    point under rotation, straighten and crop edits.
+
+    NOTE(review): judging by the divisors used below for 90/270 rotation,
+    the ``shape`` argument appears to be the (H, W) of the *display-space*
+    array (i.e. already swapped relative to the base image) — confirm
+    against the mask_engine implementation.
+    """
+
+    def test_identity_no_geometry(self):
+        """No rotation, no crop → coords pass through."""
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+        # (0.5, 0.5) on a (H=100, W=200) image → pixel (x=100, y=50).
+        px, py = forward_transform(0.5, 0.5, edits, (100, 200))
+        self.assertAlmostEqual(px, 100.0, places=1)
+        self.assertAlmostEqual(py, 50.0, places=1)
+
+    def test_round_trip_no_geometry(self):
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+        xn, yn = 0.3, 0.7
+        shape = (100, 200)  # (H, W)
+        px, py = forward_transform(xn, yn, edits, shape)
+        # Normalise pixel coords back to [0,1] for inverse_transform
+        xr, yr = inverse_transform(px / shape[1], py / shape[0], edits, shape)
+        self.assertAlmostEqual(xr, xn, places=5)
+        self.assertAlmostEqual(yr, yn, places=5)
+
+    def test_round_trip_with_crop(self):
+        edits = {
+            "rotation": 0,
+            "straighten_angle": 0.0,
+            "crop_box": (250, 250, 750, 750),  # center 50%
+        }
+        # A point at (0.5, 0.5) in base image should map to (0.5, 0.5) in display
+        # because crop is centred
+        display_x, display_y = 0.5, 0.5
+        base_x, base_y = inverse_transform(display_x, display_y, edits, (100, 100))
+        self.assertAlmostEqual(base_x, 0.5, places=3)
+        self.assertAlmostEqual(base_y, 0.5, places=3)
+
+    def test_round_trip_with_straighten(self):
+        edits = {
+            "rotation": 0,
+            "straighten_angle": 5.0,
+            "crop_box": None,
+        }
+        # Round-trip: base → display → base
+        xn, yn = 0.3, 0.7
+        # Forward to display coords (normalised)
+        px, py = forward_transform(xn, yn, edits, (100, 100))
+        # Normalise back
+        disp_x, disp_y = px / 100, py / 100
+        # Inverse
+        xr, yr = inverse_transform(disp_x, disp_y, edits, (100, 100))
+        # Slightly looser tolerance (places=3): straighten involves trig.
+        self.assertAlmostEqual(xr, xn, places=3)
+        self.assertAlmostEqual(yr, yn, places=3)
+
+    def test_round_trip_with_rotation_90(self):
+        edits = {"rotation": 90, "straighten_angle": 0.0, "crop_box": None}
+        xn, yn = 0.3, 0.7
+        # After 90 CCW rotation, target array has swapped dimensions
+        px, py = forward_transform(xn, yn, edits, (200, 100))
+        # Note the swapped divisors: x is normalised by 100, y by 200.
+        disp_x, disp_y = px / 100, py / 200
+        xr, yr = inverse_transform(disp_x, disp_y, edits, (200, 100))
+        self.assertAlmostEqual(xr, xn, places=3)
+        self.assertAlmostEqual(yr, yn, places=3)
+
+    def test_round_trip_with_rotation_180(self):
+        edits = {"rotation": 180, "straighten_angle": 0.0, "crop_box": None}
+        xn, yn = 0.3, 0.7
+        # 180° keeps the aspect, so a square shape needs no special handling.
+        px, py = forward_transform(xn, yn, edits, (100, 100))
+        disp_x, disp_y = px / 100, py / 100
+        xr, yr = inverse_transform(disp_x, disp_y, edits, (100, 100))
+        self.assertAlmostEqual(xr, xn, places=3)
+        self.assertAlmostEqual(yr, yn, places=3)
+
+    def test_round_trip_with_rotation_270(self):
+        edits = {"rotation": 270, "straighten_angle": 0.0, "crop_box": None}
+        xn, yn = 0.3, 0.7
+        # Same swapped-divisor convention as the 90° case above.
+        px, py = forward_transform(xn, yn, edits, (200, 100))
+        disp_x, disp_y = px / 100, py / 200
+        xr, yr = inverse_transform(disp_x, disp_y, edits, (200, 100))
+        self.assertAlmostEqual(xr, xn, places=3)
+        self.assertAlmostEqual(yr, yn, places=3)
+
+    def test_round_trip_rotation_plus_crop(self):
+        """Combined 90-degree rotation + crop — the most realistic scenario."""
+        edits = {
+            "rotation": 90,
+            "straighten_angle": 0.0,
+            "crop_box": (250, 250, 750, 750),  # centre 50%
+        }
+        # Centre point should survive the round trip
+        xn, yn = 0.5, 0.5
+        px, py = forward_transform(xn, yn, edits, (100, 100))
+        disp_x, disp_y = px / 100, py / 100
+        xr, yr = inverse_transform(disp_x, disp_y, edits, (100, 100))
+        self.assertAlmostEqual(xr, xn, places=3)
+        self.assertAlmostEqual(yr, yn, places=3)
+
+        # Off-centre point
+        xn, yn = 0.4, 0.6
+        px, py = forward_transform(xn, yn, edits, (100, 100))
+        disp_x, disp_y = px / 100, py / 100
+        xr, yr = inverse_transform(disp_x, disp_y, edits, (100, 100))
+        self.assertAlmostEqual(xr, xn, places=3)
+        self.assertAlmostEqual(yr, yn, places=3)
+
+
+class TestStrokeRasterisation(unittest.TestCase):
+    """Tests for rasterize_strokes: normalised strokes → pixel maps.
+
+    rasterize_strokes returns an (add_map, protect_map) pair of arrays.
+    When indexing the result, remember numpy order is [row, col], i.e.
+    [y, x]: a stroke at normalised (x=0.5, y=0.5) on a 100×100 canvas
+    lands at map[50, 50].
+    """
+
+    def test_basic_rasterisation(self):
+        md = MaskData()
+        md.add_stroke(MaskStroke([(0.5, 0.5)], 0.1, "add"))
+
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+        add_map, protect_map = rasterize_strokes(md, (100, 100), edits)
+
+        self.assertEqual(add_map.shape, (100, 100))
+        self.assertEqual(protect_map.shape, (100, 100))
+        # Centre should be painted
+        self.assertGreater(add_map[50, 50], 0.5)
+        # Protect map should be empty
+        self.assertAlmostEqual(protect_map.max(), 0.0)
+
+    def test_protect_stroke(self):
+        # A "protect" stroke must paint only the protect map, never the add map.
+        md = MaskData()
+        md.add_stroke(MaskStroke([(0.5, 0.5)], 0.1, "protect"))
+
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+        add_map, protect_map = rasterize_strokes(md, (100, 100), edits)
+
+        self.assertAlmostEqual(add_map.max(), 0.0)
+        self.assertGreater(protect_map[50, 50], 0.5)
+
+    def test_different_resolutions(self):
+        """Same strokes rasterised at different sizes produce different arrays."""
+        md = MaskData()
+        md.add_stroke(MaskStroke([(0.5, 0.5)], 0.05, "add"))
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+
+        add_small, _ = rasterize_strokes(md, (50, 50), edits)
+        add_large, _ = rasterize_strokes(md, (200, 200), edits)
+
+        self.assertEqual(add_small.shape, (50, 50))
+        self.assertEqual(add_large.shape, (200, 200))
+        # Both should have paint near centre
+        self.assertGreater(add_small[25, 25], 0.0)
+        self.assertGreater(add_large[100, 100], 0.0)
+
+    def test_rasterisation_with_rotation_90(self):
+        """A stroke at (0.8, 0.5) should move after 90 CCW rotation."""
+        md = MaskData()
+        md.add_stroke(MaskStroke([(0.8, 0.5)], 0.1, "add"))
+        edits = {"rotation": 90, "straighten_angle": 0.0, "crop_box": None}
+        # 90 CCW: (0.8, 0.5) → (0.5, 0.2) in rotated space
+        add_map, _ = rasterize_strokes(md, (100, 100), edits)
+        # Rotated (x=0.5, y=0.2) → add_map[20, 50].
+        self.assertGreater(add_map[20, 50], 0.3)
+        # Original position (50, 80) should have low/no paint
+        self.assertLess(add_map[50, 80], 0.1)
+
+
+class TestMaskResolution(unittest.TestCase):
+    """Tests for resolve_mask: strokes + settings + image → final soft mask.
+
+    The resolved mask is a float array in [0, 1] at the display resolution,
+    where higher values mean "more darkening applied here".
+    """
+
+    def test_resolve_produces_valid_mask(self):
+        md = MaskData()
+        md.add_stroke(MaskStroke([(0.5, 0.5)], 0.15, "add"))
+
+        ds = DarkenSettings(enabled=True, mode="paint_only")
+        img = np.full((100, 100, 3), 0.5, dtype=np.float32)
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+
+        mask = resolve_mask(md, ds, img, (100, 100), edits)
+        # Shape matches the request and all values stay within [0, 1].
+        self.assertEqual(mask.shape, (100, 100))
+        self.assertTrue(np.all(mask >= 0.0))
+        self.assertTrue(np.all(mask <= 1.0))
+        # Centre should have high mask value
+        self.assertGreater(mask[50, 50], 0.3)
+
+    def test_protect_resists_masking(self):
+        """Protected areas should have lower mask values."""
+        md = MaskData()
+        # Paint entire image as background
+        md.add_stroke(MaskStroke([(0.5, 0.5)], 0.5, "add"))
+        # Protect the centre
+        md.add_stroke(MaskStroke([(0.5, 0.5)], 0.1, "protect"))
+
+        ds = DarkenSettings(enabled=True, mode="paint_only", subject_protection=1.0)
+        img = np.full((100, 100, 3), 0.5, dtype=np.float32)
+        edits = {"rotation": 0, "straighten_angle": 0.0, "crop_box": None}
+
+        mask = resolve_mask(md, ds, img, (100, 100), edits)
+        # Centre (protected) should be lower than edges (unprotected)
+        centre = mask[50, 50]
+        edge = mask[5, 5]
+        self.assertLess(centre, edge)
+
+
+class TestMaskedDarken(unittest.TestCase):
+ def test_darken_only_affects_masked_areas(self):
+ arr = np.full((100, 100, 3), 0.6, dtype=np.float32)
+ # Mask: left half = background, right half = subject
+ mask = np.zeros((100, 100), dtype=np.float32)
+ mask[:, :50] = 1.0
+
+ original_right = arr[50, 75].copy()
+ result = apply_masked_darken(arr, mask, darken_amount=0.8, edge_protection=0.0)
+
+ # Right half (unmasked) should be unchanged
+ np.testing.assert_array_almost_equal(result[50, 75], original_right, decimal=3)
+ # Left half (masked) should be darker
+ self.assertTrue(np.all(result[50, 25] < original_right))
+
+ def test_zero_amount_is_noop(self):
+ arr = np.full((50, 50, 3), 0.5, dtype=np.float32)
+ original = arr.copy()
+ mask = np.ones((50, 50), dtype=np.float32)
+
+ result = apply_masked_darken(arr, mask, darken_amount=0.0, edge_protection=0.0)
+ np.testing.assert_array_equal(result, original)
+
+ def test_output_clamped(self):
+ arr = np.full((50, 50, 3), 0.1, dtype=np.float32)
+ mask = np.ones((50, 50), dtype=np.float32)
+
+ result = apply_masked_darken(arr, mask, darken_amount=1.0, edge_protection=0.0)
+ self.assertTrue(np.all(result >= 0.0))
+ self.assertTrue(np.all(result <= 1.0))
+
+
+class TestMaskRasterCache(unittest.TestCase):
+ def test_stroke_cache_hit(self):
+ cache = MaskRasterCache()
+ maps = (
+ np.zeros((10, 10), dtype=np.float32),
+ np.zeros((10, 10), dtype=np.float32),
+ )
+ cache.put_strokes(1, (10, 10), 42, maps)
+
+ result = cache.get_strokes(1, (10, 10), 42)
+ self.assertIsNotNone(result)
+
+ def test_stroke_cache_miss_different_revision(self):
+ cache = MaskRasterCache()
+ maps = (
+ np.zeros((10, 10), dtype=np.float32),
+ np.zeros((10, 10), dtype=np.float32),
+ )
+ cache.put_strokes(1, (10, 10), 42, maps)
+
+ result = cache.get_strokes(2, (10, 10), 42)
+ self.assertIsNone(result)
+
+ def test_stroke_cache_miss_different_shape(self):
+ """Different resolution = different cache key."""
+ cache = MaskRasterCache()
+ maps = (
+ np.zeros((10, 10), dtype=np.float32),
+ np.zeros((10, 10), dtype=np.float32),
+ )
+ cache.put_strokes(1, (10, 10), 42, maps)
+
+ result = cache.get_strokes(1, (200, 200), 42)
+ self.assertIsNone(result)
+
+ def test_resolved_cache(self):
+ cache = MaskRasterCache()
+ mask = np.zeros((10, 10), dtype=np.float32)
+ params = (0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, 0.0, "assisted")
+ img_key = 12345
+ cache.put_resolved(1, (10, 10), 42, params, img_key, mask)
+
+ result = cache.get_resolved(1, (10, 10), 42, params, img_key)
+ self.assertIsNotNone(result)
+
+ # Different params = miss
+ params2 = (0.7, 0.5, 0.5, 0.5, 0.5, 0.5, 0.0, 0.0, "assisted")
+ result2 = cache.get_resolved(1, (10, 10), 42, params2, img_key)
+ self.assertIsNone(result2)
+
+ # Different image content = miss
+ img_key2 = 99999
+ result3 = cache.get_resolved(1, (10, 10), 42, params, img_key2)
+ self.assertIsNone(result3)
+
+ def test_clear(self):
+ cache = MaskRasterCache()
+ maps = (
+ np.zeros((10, 10), dtype=np.float32),
+ np.zeros((10, 10), dtype=np.float32),
+ )
+ cache.put_strokes(1, (10, 10), 42, maps)
+ cache.clear()
+ self.assertIsNone(cache.get_strokes(1, (10, 10), 42))
+
+
+class TestEditorIntegration(unittest.TestCase):
+ """Test that the editor pipeline integrates the darken step correctly."""
+
+ def test_darken_settings_in_initial_edits(self):
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+ self.assertIn("darken_settings", editor.current_edits)
+ self.assertIsNone(editor.current_edits["darken_settings"])
+
+ def test_mask_assets_dict_exists(self):
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+ self.assertIsInstance(editor._mask_assets, dict)
+ self.assertEqual(len(editor._mask_assets), 0)
+
+ def test_clear_resets_mask_state(self):
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+ editor._mask_assets["darken"] = MaskData()
+ editor.clear()
+ self.assertEqual(len(editor._mask_assets), 0)
+
+ def test_apply_edits_with_darken(self):
+ """Darken step runs when settings and strokes are present."""
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+ # Create a small test image
+ img = PILImage.new("RGB", (50, 50), color=(128, 128, 128))
+ import tempfile
+ from pathlib import Path
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ img.save(f.name)
+ editor.load_image(f.name)
+
+ # Set up darken
+ md = MaskData()
+ md.add_stroke(MaskStroke([(0.5, 0.5)], 0.3, "add"))
+ editor._mask_assets["darken"] = md
+ ds = DarkenSettings(enabled=True, darken_amount=0.8, mode="paint_only")
+ editor.current_edits["darken_settings"] = ds
+
+ # Apply edits
+ arr = editor.float_preview.copy()
+ result = editor._apply_edits(arr, for_export=False)
+
+ # Result should be darker in the centre vs a version without darken
+ editor.current_edits["darken_settings"] = None
+ arr2 = editor.float_preview.copy()
+ result_no_darken = editor._apply_edits(arr2, for_export=False)
+
+ # The darkened version should have lower values in the masked area
+ centre_dark = result[25, 25].mean()
+ centre_normal = result_no_darken[25, 25].mean()
+ self.assertLess(centre_dark, centre_normal)
+
+ # Clean up
+ import os
+
+ os.unlink(f.name)
+
+ def test_load_image_clears_mask_state(self):
+ """Loading a new image must clear mask assets and raster cache."""
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+
+ # First load
+ import tempfile
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ PILImage.new("RGB", (50, 50), color=(128, 128, 128)).save(f.name)
+ editor.load_image(f.name)
+
+ # Add darken state
+ editor._mask_assets["darken"] = MaskData()
+ editor._mask_assets["darken"].add_stroke(MaskStroke([(0.5, 0.5)], 0.1, "add"))
+ editor._mask_raster_cache.put_strokes(
+ 1,
+ (50, 50),
+ 0,
+ (
+ np.zeros((50, 50), dtype=np.float32),
+ np.zeros((50, 50), dtype=np.float32),
+ ),
+ )
+
+ # Second load — should clear mask state
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f2:
+ PILImage.new("RGB", (50, 50), color=(200, 200, 200)).save(f2.name)
+ editor.load_image(f2.name)
+
+ self.assertEqual(len(editor._mask_assets), 0)
+ self.assertIsNone(editor._mask_raster_cache.get_strokes(1, (50, 50), 0))
+
+ import os
+
+ os.unlink(f.name)
+ os.unlink(f2.name)
+
+ def test_toggle_off_disables_darken_effect(self):
+ """Turning the darken tool off must disable the effect in the render pipeline."""
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+ import tempfile
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ PILImage.new("RGB", (50, 50), color=(128, 128, 128)).save(f.name)
+ editor.load_image(f.name)
+
+ # Set up darken with strokes
+ md = MaskData()
+ md.add_stroke(MaskStroke([(0.5, 0.5)], 0.3, "add"))
+ editor._mask_assets["darken"] = md
+ ds = DarkenSettings(enabled=True, darken_amount=0.8, mode="paint_only")
+ editor.current_edits["darken_settings"] = ds
+
+ # Render with darken ON
+ arr_on = editor.float_preview.copy()
+ result_on = editor._apply_edits(arr_on, for_export=False)
+ centre_on = result_on[25, 25].mean()
+
+ # Simulate toggle off: set enabled=False (what toggle_darken_mode does)
+ ds.enabled = False
+
+ # Render with darken OFF
+ arr_off = editor.float_preview.copy()
+ result_off = editor._apply_edits(arr_off, for_export=False)
+ centre_off = result_off[25, 25].mean()
+
+ # Effect must be gone — centre should be brighter when disabled
+ self.assertGreater(centre_off, centre_on)
+
+ import os
+
+ os.unlink(f.name)
+
+ def test_snapshot_captures_immutable_darken_state(self):
+ """snapshot_for_export deep-copies darken state — mutations after
+ snapshot do not affect the export data."""
+ import tempfile
+
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ PILImage.new("RGB", (50, 50), color=(128, 128, 128)).save(f.name)
+ editor.load_image(f.name)
+
+ # Set up darken with strokes
+ md = MaskData()
+ md.add_stroke(MaskStroke([(0.5, 0.5)], 0.3, "add"))
+ editor._mask_assets["darken"] = md
+ ds = DarkenSettings(enabled=True, darken_amount=0.8, mode="paint_only")
+ editor.current_edits["darken_settings"] = ds
+
+ # Take snapshot
+ snapshot = editor.snapshot_for_export()
+
+ # Verify deep-copy: snapshot objects are NOT the live ones
+ snap_ds = snapshot["edits"].get("darken_settings")
+ self.assertIsNotNone(snap_ds)
+ self.assertIsNot(snap_ds, ds, "DarkenSettings should be deep-copied")
+
+ snap_mask = snapshot["mask_override"]
+ self.assertIsNotNone(snap_mask)
+ self.assertIsNot(
+ snap_mask.get("darken"),
+ md,
+ "MaskData should be deep-copied",
+ )
+
+ # Verify fresh export cache
+ self.assertIsNotNone(snapshot["export_cache"])
+ self.assertIsNot(
+ snapshot["export_cache"],
+ editor._mask_raster_cache,
+ "Export should use a fresh cache, not the shared preview cache",
+ )
+
+ # Verify EXIF is captured
+ self.assertIn("main_exif", snapshot)
+ self.assertIn("source_exif", snapshot)
+
+ # Verify filepath is captured
+ self.assertIsNotNone(snapshot["filepath_snapshot"])
+
+ import os
+
+ os.unlink(f.name)
+
+ def test_snapshot_without_darken_no_override(self):
+ """snapshot_for_export with no darken should not produce mask overrides."""
+ import tempfile
+
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ PILImage.new("RGB", (50, 50), color=(128, 128, 128)).save(f.name)
+ editor.load_image(f.name)
+
+ snapshot = editor.snapshot_for_export()
+ self.assertIsNone(snapshot["mask_override"])
+ self.assertIsNone(snapshot["export_cache"])
+
+ import os
+
+ os.unlink(f.name)
+
+ def test_mutation_after_snapshot_does_not_affect_export(self):
+ """Modifying editor state after snapshot must not change saved output."""
+ import tempfile
+
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ PILImage.new("RGB", (50, 50), color=(128, 128, 128)).save(f.name)
+ editor.load_image(f.name)
+
+ # Set up darken
+ md = MaskData()
+ md.add_stroke(MaskStroke([(0.5, 0.5)], 0.3, "add"))
+ editor._mask_assets["darken"] = md
+ ds = DarkenSettings(enabled=True, darken_amount=0.5, mode="paint_only")
+ editor.current_edits["darken_settings"] = ds
+
+ # Snapshot at darken_amount=0.5
+ snapshot = editor.snapshot_for_export()
+
+ # Mutate live state AFTER snapshot
+ ds.darken_amount = 1.0
+ md.add_stroke(MaskStroke([(0.1, 0.1)], 0.5, "add"))
+ editor._mask_assets.clear()
+
+ # Snapshot should still have the original values
+ snap_ds = snapshot["edits"]["darken_settings"]
+ self.assertAlmostEqual(snap_ds.darken_amount, 0.5)
+
+ snap_mask = snapshot["mask_override"]["darken"]
+ self.assertEqual(len(snap_mask.strokes), 1) # only the original stroke
+
+ import os
+
+ os.unlink(f.name)
+
+ def test_navigation_after_snapshot_does_not_affect_export(self):
+ """Clearing editor state (simulating navigation) after snapshot must
+ not prevent save_from_snapshot from working."""
+ import tempfile
+
+ from PIL import Image as PILImage
+
+ from faststack.imaging.editor import ImageEditor
+
+ editor = ImageEditor()
+
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f:
+ PILImage.new("RGB", (50, 50), color=(128, 128, 128)).save(f.name)
+ editor.load_image(f.name)
+
+ # Set up darken
+ md = MaskData()
+ md.add_stroke(MaskStroke([(0.5, 0.5)], 0.3, "add"))
+ editor._mask_assets["darken"] = md
+ ds = DarkenSettings(enabled=True, darken_amount=0.8, mode="paint_only")
+ editor.current_edits["darken_settings"] = ds
+
+ # Snapshot captures all state
+ snapshot = editor.snapshot_for_export()
+
+ # Simulate navigation clearing all editor state
+ editor.clear()
+ self.assertIsNone(editor.float_image)
+ self.assertEqual(len(editor._mask_assets), 0)
+
+ # Simulate loading a second temporary image which will repopulate current_filepath
+ # and cached state, creating a potential cross-image race context.
+ with tempfile.NamedTemporaryFile(suffix=".jpg", delete=False) as f2:
+ PILImage.new("RGB", (50, 50), color=(150, 150, 150)).save(f2.name)
+ editor.load_image(f2.name)
+
+ # save_from_snapshot should still work with the snapshot
+ result = editor.save_from_snapshot(snapshot)
+ # save_from_snapshot uses _apply_edits which uses the passed cache_context
+ # to avoid polluting or depending on live editor state.
+ self.assertIsNotNone(result)
+
+ import os
+
+ os.unlink(f.name)
+ os.unlink(f2.name)
+
+
+class TestOverlayFallback(unittest.TestCase):
+ def test_mask_overlay_returns_transparent_when_no_overlay(self):
+ """Verify that requesting mask_overlay with no image returns a
+ transparent QImage, not an opaque placeholder."""
+ try:
+ from PySide6.QtGui import QImage
+ from PySide6.QtCore import Qt
+ from faststack.ui.provider import ImageProvider
+ from unittest.mock import Mock
+ except ImportError:
+ self.skipTest("PySide6 not available")
+
+ # Mock app_controller to return no overlay image
+ mock_controller = Mock()
+ mock_controller.ui_state._darken_overlay_image = None
+
+ provider = ImageProvider(mock_controller)
+ transparent = provider.requestImage("mask_overlay/test", None, None)
+
+ # Verify it has zero alpha (i.e. fully transparent)
+ pixel = transparent.pixelColor(0, 0)
+ self.assertEqual(pixel.alpha(), 0)
+
+ # Verify it is NOT RGB888 format (the old placeholder was)
+ self.assertEqual(transparent.format(), QImage.Format.Format_ARGB32)
+
+
+# Allow running this test module directly (e.g. `python test_mask.py`).
+if __name__ == "__main__":
+    unittest.main()
diff --git a/faststack/ui/keystrokes.py b/faststack/ui/keystrokes.py
index 21ed5af..48488e4 100644
--- a/faststack/ui/keystrokes.py
+++ b/faststack/ui/keystrokes.py
@@ -21,9 +21,7 @@ def __init__(self, controller):
# View switching
Qt.Key_Escape: "switch_to_grid_view",
# Navigation
- Qt.Key_J: "next_image",
Qt.Key_Right: "next_image",
- Qt.Key_K: "prev_image",
Qt.Key_Left: "prev_image",
Qt.Key_G: "show_jump_to_image_dialog",
# Stacking
diff --git a/faststack/ui/provider.py b/faststack/ui/provider.py
index 9f74476..d31ec79 100644
--- a/faststack/ui/provider.py
+++ b/faststack/ui/provider.py
@@ -30,6 +30,9 @@ def __init__(self, app_controller):
self._app_controller = app_controller # Backward compatibility alias
self.placeholder = QImage(256, 256, QImage.Format.Format_RGB888)
self.placeholder.fill(Qt.GlobalColor.darkGray)
+ # Transparent 1x1 fallback for mask overlays (prevents grey-screen bug)
+ self._transparent = QImage(1, 1, QImage.Format.Format_ARGB32)
+ self._transparent.fill(Qt.GlobalColor.transparent)
# Keepalive queue to prevent GC of buffers currently in use by QImage
# Increased to 128 to prevent crashes during rapid scrolling/thrashing where
# QML might hold onto textures slightly longer than the Python GC expects.
@@ -50,6 +53,15 @@ def requestImage(self, id: str, size: object, requestedSize: object) -> QImage:
return self.placeholder
try:
+ # Handle mask overlay requests
+ if id.startswith("mask_overlay/"):
+ overlay = getattr(
+ self.app_controller.ui_state, "_darken_overlay_image", None
+ )
+ if overlay is not None:
+ return overlay
+ return self._transparent
+
# Parse index and optional generation
parts = id.split("/")
index = int(parts[0])
@@ -226,6 +238,21 @@ class UIState(QObject):
clarity_changed = Signal(float)
texture_changed = Signal(float)
+ # Background Darkening Signals
+ is_darkening_changed = Signal(bool)
+ darken_overlay_generation_changed = Signal()
+ darken_overlay_visible_changed = Signal(bool)
+ darken_amount_changed = Signal(float)
+ darken_edge_protection_changed = Signal(float)
+ darken_subject_protection_changed = Signal(float)
+ darken_feather_changed = Signal(float)
+ darken_dark_range_changed = Signal(float)
+ darken_neutrality_changed = Signal(float)
+ darken_expand_contract_changed = Signal(float)
+ darken_auto_edges_changed = Signal(float)
+ darken_mode_changed = Signal(str)
+ darken_brush_radius_changed = Signal(float)
+
# Debug Cache Signals
debugCacheChanged = Signal(bool)
cacheStatsChanged = Signal(str)
@@ -290,6 +317,22 @@ def __init__(self, app_controller, clock_func=None):
self._clarity = 0.0
self._texture = 0.0
+ # Background Darkening State
+ self._is_darkening = False
+ self._darken_overlay_visible = True
+ self._darken_overlay_generation = 0
+ self._darken_overlay_image = None # QImage for mask overlay
+ self._darken_amount = 0.5
+ self._darken_edge_protection = 0.5
+ self._darken_subject_protection = 0.5
+ self._darken_feather = 0.5
+ self._darken_dark_range = 0.5
+ self._darken_neutrality = 0.5
+ self._darken_expand_contract = 0.0
+ self._darken_auto_edges = 0.0
+ self._darken_mode = "assisted"
+ self._darken_brush_radius = 0.03
+
# Debug Cache State
self._debug_cache = False
self._cache_stats = ""
@@ -996,6 +1039,19 @@ def reset_editor_state(self):
self.cropRotation = 0.0
self.currentCropBox = (0, 0, 1000, 1000)
self.currentAspectRatioIndex = 0
+ # Darken tool — use property setters so QML bindings update
+ self.isDarkening = False
+ self.darkenOverlayVisible = True
+ self.darkenAmount = 0.5
+ self.darkenEdgeProtection = 0.5
+ self.darkenSubjectProtection = 0.5
+ self.darkenFeather = 0.5
+ self.darkenDarkRange = 0.5
+ self.darkenNeutrality = 0.5
+ self.darkenExpandContract = 0.0
+ self.darkenAutoEdges = 0.0
+ self.darkenMode = "assisted"
+ self.darkenBrushRadius = 0.03
@Property("QVariant", notify=histogram_data_changed)
def histogramData(self):
@@ -1296,6 +1352,132 @@ def texture(self, new_value: float):
self._texture = new_value
self.texture_changed.emit(new_value)
+    # --- Background Darkening Properties ---
+    # Each property follows the same pattern: a plain backing field set in
+    # __init__, a getter exposed to QML via @Property, and a setter that
+    # emits the paired change signal only when the value actually changes
+    # (guards against redundant notifications / QML binding loops).
+
+    @Property(bool, notify=is_darkening_changed)
+    def isDarkening(self) -> bool:
+        return self._is_darkening
+
+    @isDarkening.setter
+    def isDarkening(self, new_value: bool):
+        if self._is_darkening != new_value:
+            self._is_darkening = new_value
+            self.is_darkening_changed.emit(new_value)
+
+    @Property(bool, notify=darken_overlay_visible_changed)
+    def darkenOverlayVisible(self) -> bool:
+        return self._darken_overlay_visible
+
+    @darkenOverlayVisible.setter
+    def darkenOverlayVisible(self, new_value: bool):
+        if self._darken_overlay_visible != new_value:
+            self._darken_overlay_visible = new_value
+            self.darken_overlay_visible_changed.emit(new_value)
+
+    # Read-only from QML (no setter) — presumably the controller bumps
+    # _darken_overlay_generation and emits the signal itself; TODO confirm.
+    @Property(int, notify=darken_overlay_generation_changed)
+    def darkenOverlayGeneration(self) -> int:
+        return self._darken_overlay_generation
+
+    @Property(float, notify=darken_amount_changed)
+    def darkenAmount(self) -> float:
+        return self._darken_amount
+
+    @darkenAmount.setter
+    def darkenAmount(self, new_value: float):
+        if self._darken_amount != new_value:
+            self._darken_amount = new_value
+            self.darken_amount_changed.emit(new_value)
+
+    @Property(float, notify=darken_edge_protection_changed)
+    def darkenEdgeProtection(self) -> float:
+        return self._darken_edge_protection
+
+    @darkenEdgeProtection.setter
+    def darkenEdgeProtection(self, new_value: float):
+        if self._darken_edge_protection != new_value:
+            self._darken_edge_protection = new_value
+            self.darken_edge_protection_changed.emit(new_value)
+
+    @Property(float, notify=darken_subject_protection_changed)
+    def darkenSubjectProtection(self) -> float:
+        return self._darken_subject_protection
+
+    @darkenSubjectProtection.setter
+    def darkenSubjectProtection(self, new_value: float):
+        if self._darken_subject_protection != new_value:
+            self._darken_subject_protection = new_value
+            self.darken_subject_protection_changed.emit(new_value)
+
+    @Property(float, notify=darken_feather_changed)
+    def darkenFeather(self) -> float:
+        return self._darken_feather
+
+    @darkenFeather.setter
+    def darkenFeather(self, new_value: float):
+        if self._darken_feather != new_value:
+            self._darken_feather = new_value
+            self.darken_feather_changed.emit(new_value)
+
+    @Property(float, notify=darken_dark_range_changed)
+    def darkenDarkRange(self) -> float:
+        return self._darken_dark_range
+
+    @darkenDarkRange.setter
+    def darkenDarkRange(self, new_value: float):
+        if self._darken_dark_range != new_value:
+            self._darken_dark_range = new_value
+            self.darken_dark_range_changed.emit(new_value)
+
+    @Property(float, notify=darken_neutrality_changed)
+    def darkenNeutrality(self) -> float:
+        return self._darken_neutrality
+
+    @darkenNeutrality.setter
+    def darkenNeutrality(self, new_value: float):
+        if self._darken_neutrality != new_value:
+            self._darken_neutrality = new_value
+            self.darken_neutrality_changed.emit(new_value)
+
+    @Property(float, notify=darken_expand_contract_changed)
+    def darkenExpandContract(self) -> float:
+        return self._darken_expand_contract
+
+    @darkenExpandContract.setter
+    def darkenExpandContract(self, new_value: float):
+        if self._darken_expand_contract != new_value:
+            self._darken_expand_contract = new_value
+            self.darken_expand_contract_changed.emit(new_value)
+
+    @Property(float, notify=darken_auto_edges_changed)
+    def darkenAutoEdges(self) -> float:
+        return self._darken_auto_edges
+
+    @darkenAutoEdges.setter
+    def darkenAutoEdges(self, new_value: float):
+        if self._darken_auto_edges != new_value:
+            self._darken_auto_edges = new_value
+            self.darken_auto_edges_changed.emit(new_value)
+
+    @Property(str, notify=darken_mode_changed)
+    def darkenMode(self) -> str:
+        return self._darken_mode
+
+    @darkenMode.setter
+    def darkenMode(self, new_value: str):
+        if self._darken_mode != new_value:
+            self._darken_mode = new_value
+            self.darken_mode_changed.emit(new_value)
+
+    @Property(float, notify=darken_brush_radius_changed)
+    def darkenBrushRadius(self) -> float:
+        return self._darken_brush_radius
+
+    @darkenBrushRadius.setter
+    def darkenBrushRadius(self, new_value: float):
+        if self._darken_brush_radius != new_value:
+            self._darken_brush_radius = new_value
+            self.darken_brush_radius_changed.emit(new_value)
+
+
# --- Debug Cache Properties ---
@Property(bool, notify=debugCacheChanged)
diff --git a/lightroom-catalog-import/README.lightroom-catalog-import.md b/lightroom-catalog-import/README.lightroom-catalog-import.md
new file mode 100644
index 0000000..1803f7d
--- /dev/null
+++ b/lightroom-catalog-import/README.lightroom-catalog-import.md
@@ -0,0 +1,470 @@
+# Lightroom Classic to FastStack Migration Tools
+
+This directory contains a small set of Python scripts that were developed to answer one practical question:
+
+> Can data from an Adobe Lightroom Classic catalog (`.lrcat`) be used to mark files as uploaded in FastStack?
+
+When I was using Lightroom, I marked every file that I uploaded as Green — this tool successfully migrated that flag to FastStack.
+
+In this case, the Lightroom workflow was:
+
+- images that had been uploaded were marked **Green** in Lightroom Classic
+- FastStack stores upload state in a per-directory `faststack.json`
+- the goal was to carry that historical Lightroom information into FastStack
+
+These scripts were created on **April 1, 2026** while working with a real Lightroom Classic catalog from that date. They worked for that catalog and are **likely to work on various modern Lightroom Classic catalogs**, but there are **no guarantees**. Adobe does not publish a stable public schema for every internal Lightroom table, and catalog structure may vary across versions.
+
+Use these tools carefully and keep backups. They can also serve as a starting point for extracting other data from your Lightroom catalog for your own workflows.
+
+---
+
+## Overview of the four scripts
+
+There are four scripts:
+
+1. `lrcat_diff.py`
+2. `inspect_lrcat_photo.py`
+3. `test_lrcat_join.py`
+4. `green2faststack.py`
+
+They were written in that order of discovery:
+
+- first, identify which Lightroom fields change when a photo is marked Green
+- second, inspect one known photo and discover how it connects to file-path tables
+- third, test the exact join needed to reconstruct file paths
+- fourth, build a practical export-and-import tool for FastStack
+
+The first three scripts are best understood as **reverse-engineering helpers**.
+The fourth script, `green2faststack.py`, is the **main end-user tool**.
+
+---
+
+# Thought process and discovery path
+
+## Problem statement
+
+Lightroom Classic stores a great deal of metadata in a SQLite catalog file ending in `.lrcat`.
+FastStack stores per-directory metadata in a JSON file named `faststack.json`.
+
+To migrate data from Lightroom to FastStack, we needed to answer two questions:
+
+1. Where does Lightroom store the fact that an image was marked Green?
+2. How do we reconstruct the full file path for each green-labeled image?
+
+## Step 1: prove where the Green label lives
+
+The first experiment was:
+
+1. close Lightroom
+2. back up the catalog
+3. open Lightroom
+4. mark exactly one known image Green
+5. close Lightroom
+6. diff the before/after catalogs
+
+That led to the key observation that in the tested catalog, the relevant field was:
+
+- `Adobe_images.colorLabels = 'Green'`
+
+## Step 2: prove how to recover the file path
+
+Once one known green-labeled image was identified, the next task was to inspect the row and the surrounding tables to learn how Lightroom connects image metadata to filenames and folders.
+
+The tested catalog showed a join chain that worked:
+
+- `Adobe_images.rootFile -> AgLibraryFile.id_local`
+- `AgLibraryFile.folder -> AgLibraryFolder.id_local`
+- `AgLibraryFolder.rootFolder -> AgLibraryRootFolder.id_local`
+
+Using that chain, plus the filename stem and extension, the scripts were able to reconstruct the full Lightroom path for each green-labeled file.
+
+## Step 3: use the recovered paths to update FastStack
+
+FastStack stores metadata in `faststack.json`, with image entries keyed by lowercase filename stem, for example:
+
+- `P3270037.JPG` becomes `p3270037`
+
+That means once a Lightroom-exported path is known, it can be converted into a lowercase stem and matched against existing FastStack JSON entries.
+
+This is helpful for RAW/JPG pairs too. If Lightroom recorded a RAW file like `foo.ORF` and FastStack has an entry keyed as `foo`, the stem still matches.
+
+---
+
+# Script-by-script documentation
+
+## 1. `lrcat_diff.py`
+
+### Purpose
+
+`lrcat_diff.py` compares two Lightroom Classic catalogs and reports changed rows and columns.
+
+It exists to answer questions like:
+
+- What changed in the catalog when I marked a photo Green?
+- Which tables are relevant to color labels?
+- Did Lightroom store text, numeric state, or something more complex?
+
+### Why this script matters
+
+Without it, you are guessing.
+With it, you can make a controlled edit in Lightroom and inspect what really changed.
+
+This is the best starting point when adapting the workflow to a new Lightroom version.
+
+### Typical usage
+
+```bash
+/usr/bin/python3 lrcat_diff.py before.lrcat after.lrcat --match "IMG_1234"
+```
+
+You can also run without `--match` to see all changed rows.
+
+### What it helped discover
+
+On the tested Lightroom catalog, this script showed that marking one image Green changed:
+
+- `Adobe_images.colorLabels: '' -> 'Green'`
+
+That was the breakthrough that made the rest of the process possible.
+
+### Caveats
+
+- It reads a lot of data and may be memory-heavy on very large catalogs.
+- It is a discovery tool, not a migration tool.
+- It works best when you make **one controlled Lightroom change at a time**.
+
+---
+
+## 2. `inspect_lrcat_photo.py`
+
+### Purpose
+
+`inspect_lrcat_photo.py` takes a catalog and an `Adobe_images.id_local` value and prints:
+
+- the `Adobe_images` row
+- rows from other tables that appear to reference the same image
+- tables that contain likely path-related columns
+
+### Why this script matters
+
+Once `lrcat_diff.py` shows that a particular image row changed, the next task is figuring out how that image connects to file-path tables. This script helps explore those relationships.
+
+### Typical usage
+
+```bash
+/usr/bin/python3 inspect_lrcat_photo.py catalog.lrcat 32638618
+```
+
+### What it helped discover
+
+It showed that the image row had a `rootFile` field and that the catalog contained promising path-related tables such as:
+
+- `AgLibraryFile`
+- `AgLibraryFolder`
+- `AgLibraryRootFolder`
+
+That gave the next script a clear join target.
+
+### Caveats
+
+- This is exploratory output and can be noisy.
+- It does not prove the final join by itself.
+- It is intended for reverse engineering, not batch processing.
+
+---
+
+## 3. `test_lrcat_join.py`
+
+### Purpose
+
+`test_lrcat_join.py` tests the likely Lightroom join chain for one known image and prints the reconstructed path fields.
+
+### Why this script matters
+
+This is the bridge between “we think these tables connect” and “yes, this join reconstructs the expected file path.”
+
+### Typical usage
+
+```bash
+/usr/bin/python3 test_lrcat_join.py catalog.lrcat 32638618
+```
+
+### What it helped discover
+
+On the tested catalog, it confirmed that this join worked:
+
+- `Adobe_images.rootFile -> AgLibraryFile.id_local`
+- `AgLibraryFile.folder -> AgLibraryFolder.id_local`
+- `AgLibraryFolder.rootFolder -> AgLibraryRootFolder.id_local`
+
+That was sufficient to reconstruct full paths for green-labeled files.
+
+### Caveats
+
+- This is still a schema-discovery helper.
+- A future Lightroom version could use different relationships.
+- If the join stops working on your catalog, use the earlier helper scripts to rediscover the correct one.
+
+---
+
+## 4. `green2faststack.py`
+
+### Purpose
+
+`green2faststack.py` is the main tool.
+
+It supports two modes:
+
+1. **Export mode**: read a Lightroom Classic `.lrcat` file and write all Green-labeled paths to a text file
+2. **JSON mode**: read that exported text file and update one existing `faststack.json`
+
+### Why it is designed this way
+
+The design is intentionally two-step.
+
+Instead of reading Lightroom every time you want to update FastStack, the script lets you:
+
+1. read the catalog once
+2. save the extracted paths in a simple text file
+3. reuse that exported file as often as you want when updating different FastStack directories
+
+This is useful if:
+
+- you are done using Lightroom
+- you want an auditable intermediate file
+- you want to update FastStack later without touching the catalog again
+
+### Export mode example
+
+```bash
+/usr/bin/python3 green2faststack.py -i backup.lrcat -o green.txt
+```
+
+This writes one Lightroom path per line.
+
+### JSON mode example
+
+```bash
+/usr/bin/python3 green2faststack.py --paths green.txt --json /path/to/faststack.json
+```
+
+### Dry-run example
+
+```bash
+/usr/bin/python3 green2faststack.py --paths green.txt --json /path/to/faststack.json --dry-run --verbose
+```
+
+### What JSON mode does
+
+In JSON mode, the script:
+
+1. reads all exported Lightroom paths from the text file
+2. derives lowercase filename stems from those paths
+3. loads one existing `faststack.json`
+4. finds matching `entries` keys in the JSON
+5. marks matching entries as uploaded
+6. preserves existing `uploaded_date` values when already present
+7. creates an automatic backup before writing
+
+### Matching strategy
+
+Matching is **stem-based**, not extension-based.
+
+That means:
+
+- `foo.jpg`
+- `foo.JPG`
+- `foo.ORF`
+- `foo.NEF`
+
+all map to the same stem key:
+
+- `foo`
+
+This is intentional and is what makes the tool useful for Lightroom catalogs that may reference RAW files while FastStack tracks same-stem JPGs.
+
+### Uploaded date behavior
+
+If a FastStack entry is newly marked uploaded and has no existing `uploaded_date`, the script uses a default date unless the user provides:
+
+```bash
+--uploaded-date YYYY-MM-DD
+```
+
+This project intentionally did **not** assume Lightroom preserved a trustworthy historical “date Green was applied” field for general use. That might exist in some form in some catalogs, but it was not established well enough to rely on.
+
+### Backup behavior
+
+Before modifying a JSON file, the script creates a backup such as:
+
+- `faststack.json.bak`
+- `faststack.json.bak1`
+- `faststack.json.bak2`
+
+and so on.
+
+### File existence checks
+
+The script can report how many exported image paths currently exist on disk.
+
+This is useful but imperfect.
+
+A Lightroom catalog may store Windows-style paths such as:
+
+- `C:/Users/...`
+
+If the script is run under WSL or Linux, those exact strings may not pass an existence check even when the files are actually present at:
+
+- `/mnt/c/Users/...`
+
+So existence checks should be understood as **best-effort diagnostics**, not the core matching method.
+The real FastStack update logic is based on stems.
+
+---
+
+# Recommended workflow
+
+## If you want to adapt this to your own Lightroom catalog
+
+### 1. Back up everything first
+
+Make backups of:
+
+- your `.lrcat`
+- any `faststack.json` files you care about
+
+Even though `green2faststack.py` creates JSON backups automatically, do not skip manual backups.
+
+### 2. Confirm the schema on your catalog
+
+Use the helper scripts first.
+
+A safe discovery workflow is:
+
+1. close Lightroom
+2. copy the catalog to a backup file
+3. open Lightroom
+4. mark exactly one known image Green
+5. close Lightroom
+6. run `lrcat_diff.py`
+7. verify that your catalog also uses `Adobe_images.colorLabels = 'Green'` or discover the equivalent in your version
+
+Example:
+
+```bash
+/usr/bin/python3 lrcat_diff.py before.lrcat after.lrcat --match "IMG_1234"
+```
+
+### 3. Inspect one known changed image
+
+Use `inspect_lrcat_photo.py` and `test_lrcat_join.py` to verify that the same join strategy works on your catalog.
+
+Examples:
+
+```bash
+/usr/bin/python3 inspect_lrcat_photo.py after.lrcat 32638618
+/usr/bin/python3 test_lrcat_join.py after.lrcat 32638618
+```
+
+### 4. Export Green-labeled paths
+
+Once you are confident the schema matches, export all Green-labeled paths:
+
+```bash
+/usr/bin/python3 green2faststack.py -i after.lrcat -o green.txt
+```
+
+### 5. Update FastStack directories one at a time
+
+Use the exported path list to update one `faststack.json` at a time.
+
+Dry run first:
+
+```bash
+/usr/bin/python3 green2faststack.py --paths green.txt --json /path/to/faststack.json --dry-run --verbose
+```
+
+Then real run:
+
+```bash
+/usr/bin/python3 green2faststack.py --paths green.txt --json /path/to/faststack.json
+```
+
+### 6. Repeat for other directories as needed
+
+Because JSON mode reads from the exported text file instead of the Lightroom catalog, you can reuse the same `green.txt` again and again.
+
+---
+
+# Why this worked for the tested catalog
+
+This project worked because three separate observations lined up:
+
+1. The Green state was stored plainly enough to discover.
+2. The file path could be reconstructed from catalog tables.
+3. FastStack tracks entries by lowercase stem, which made RAW/JPG pair handling practical.
+
+That combination may hold for many modern Lightroom Classic catalogs, but it may not hold forever.
+
+---
+
+## Catalog version differences
+
+A newer or older Lightroom Classic catalog may:
+
+- rename tables
+- move fields
+- use different join relationships
+- store label state differently
+
+## OS path differences
+
+Paths recorded by Lightroom may not match your current runtime environment exactly.
+
+Examples:
+
+- Windows path in catalog, script run in WSL
+- moved drives
+- offline volumes
+- different mount letters or mount points
+
+## FastStack assumptions
+
+The scripts assume FastStack JSON behavior based on observed sample files, especially:
+
+- lowercase stem keys
+- `entries` dictionary
+- `uploaded` and `uploaded_date` fields
+
+If FastStack changes its JSON structure in the future, the migration script may need to be updated.
+
+## Best-effort existence checks
+
+A file may fail the existence check and still be a valid match for FastStack stem-based import.
+
+---
+
+# What “worked” means here
+
+On the tested April 1, 2026 Lightroom Classic catalog, the workflow successfully:
+
+- identified Green-labeled images in the catalog
+- reconstructed their paths
+- exported tens of thousands of Green-labeled paths to a text file
+- used those exported paths to mark matching FastStack entries as uploaded in a target `faststack.json`
+
+That is good evidence that the approach is practical.
+It is **not** a promise that every Lightroom catalog will behave the same way.
+
+---
+
+# Recommendations for anyone using this on their own data
+
+- work on copies first
+- verify the schema with one controlled edit before bulk export
+- use dry runs before writing JSON
+- inspect the backup files the tool creates
+- test on one directory before touching many
+- treat helper-script output as discovery evidence, not gospel
+
+---
diff --git a/lightroom-catalog-import/green-sample.paths.txt b/lightroom-catalog-import/green-sample.paths.txt
new file mode 100644
index 0000000..b84aa9a
--- /dev/null
+++ b/lightroom-catalog-import/green-sample.paths.txt
@@ -0,0 +1,15 @@
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-01-04 12-52-55 (C,S4) mycena tenerrima droplet.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-01-22 23-29-10 (A,R8,S4) apple inocybe 121323.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-01-23 00-00-50 (C,S4) apple grey tricholoma 121323.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-03-31 12-46-17 (C,S4) Ecuador Thismia 032724.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-04-02 09-58-16 (A,R8,S4) Ecuador Cordyceps 032924.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-04-09 14-07-03 (C,S4) Columbia Ceratiomyxa 040723.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-04-20 21-22-30 (A,R8,S4) Ecuador Trechispora thelephora 040124.dng
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-05-24 00-00-50 (C,S4) NZ Psilocybe weraroa 060523.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-05-30 17-45-12 (C,S4) Shasta Galeropsis 052624.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-06-09 00-42-13 (C,S4) New Mexico Helotiales 060824.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-07-21 23-55-36 (C,S4) Rhodophana 122323.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-08-02 19-13-48 (C,S4) Xalapa Mycena margarita 090723.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-08-11 01-58-04 (A,R8,S4) Colorado flower 081024 bright.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-08-12 10-15-54 (A,R8,S4) Colorado Springs yellow myc 081024.tif
+C:/Users/alanr/OneDrive/Documents/stacked images/2024-08-12 10-19-08 (C,S4) Colorado Springs yellow myc 081024.tif
diff --git a/lightroom-catalog-import/green2faststack.py b/lightroom-catalog-import/green2faststack.py
new file mode 100644
index 0000000..aace7e3
--- /dev/null
+++ b/lightroom-catalog-import/green2faststack.py
@@ -0,0 +1,717 @@
+#!/usr/bin/env python3
+"""
+green2faststack.py — Migrate Lightroom Classic green labels into FastStack
+
+PURPOSE:
+ Export Lightroom Classic green-labeled image paths from a .lrcat catalog,
+ and optionally update an existing FastStack faststack.json using the exported
+ paths file.
+
+ This is the main user-facing migration tool.
+
+DESIGN GOALS:
+ - Read the Lightroom catalog once and export all green-labeled paths to a text file.
+ - Later, read that text file as many times as desired to update FastStack JSON files.
+ - Match FastStack entries by lowercase stem only, so RAW/JPG pairs naturally align.
+ - Never create new JSON files implicitly.
+ - Be safe by default: dry-run support, automatic backups, verbose help, and summaries.
+
+WORKFLOW:
+ Step 1 — Export green-labeled paths from the catalog:
+ python green2faststack.py -i catalog.lrcat -o green.txt
+
+ Step 2 — Update an existing FastStack JSON from the exported paths:
+ python green2faststack.py --paths green.txt --json /path/to/faststack.json
+
+ You can repeat step 2 for different faststack.json files as needed.
+
+OBSERVED SCHEMA:
+ The Lightroom catalog join chain for path reconstruction:
+ Adobe_images.rootFile -> AgLibraryFile.id_local
+ AgLibraryFile.folder -> AgLibraryFolder.id_local
+ AgLibraryFolder.rootFolder -> AgLibraryRootFolder.id_local
+
+ Green labels are stored as Adobe_images.colorLabels = 'Green'.
+
+ These observations are from specific Lightroom Classic catalogs. Other
+ versions may differ. Use the helper scripts (inspect_lrcat_photo.py,
+ lrcat_diff.py, test_lrcat_join.py) to verify against your own catalog.
+"""
+
+from __future__ import annotations
+
+import argparse
+import json
+import os
+import platform
+import re
+import shutil
+import sqlite3
+import sys
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Iterable
+
+PROGRAM_NAME = "green2faststack"
+DEFAULT_UPLOADED_DATE = "1970-01-01"
+
+
+@dataclass(frozen=True)
+class GreenPathRecord:
+ """A single green-labeled image path extracted from the Lightroom catalog."""
+
+ image_id: int
+ full_path: str
+ stem_key: str
+
+
+class Logger:
+ """Simple leveled logger that writes to stdout/stderr."""
+
+ def __init__(self, verbose: bool = False, debug: bool = False) -> None:
+ self.verbose_enabled = verbose or debug
+ self.debug_enabled = debug
+
+ def info(self, msg: str) -> None:
+ print(msg)
+
+ def verbose(self, msg: str) -> None:
+ if self.verbose_enabled:
+ print(msg)
+
+ def debug(self, msg: str) -> None:
+ if self.debug_enabled:
+ print(f"[debug] {msg}")
+
+ def warn(self, msg: str) -> None:
+ print(f"[warn] {msg}", file=sys.stderr)
+
+ def error(self, msg: str) -> None:
+ print(f"[error] {msg}", file=sys.stderr)
+
+
+def verbose_description() -> str:
+ return f"""
+{PROGRAM_NAME} bridges old Lightroom Classic "green label means uploaded"
+workflows into FastStack's per-directory JSON tracking.
+
+This tool has two modes:
+
+1) Export mode
+ Read a Lightroom Classic .lrcat catalog (a SQLite database), find all photos
+ where Adobe_images.colorLabels = 'Green', reconstruct their full catalog paths,
+ and write those paths to a plain text file, one path per line.
+
+ Example:
+ {PROGRAM_NAME} -i catalog.lrcat -o green.txt
+
+2) JSON mode
+ Read a previously exported text file of green-labeled paths and update one
+ existing FastStack faststack.json file by matching entries on lowercase stem.
+ This is intended for cases where Lightroom is no longer part of the workflow,
+ and you want to apply the exported upload knowledge to FastStack as needed.
+
+ Example:
+ {PROGRAM_NAME} --paths green.txt --json /path/to/faststack.json
+
+What this tool hopes to accomplish:
+- Preserve historical "uploaded" decisions you made in Lightroom Classic.
+- Let FastStack reflect those decisions without needing to reopen Lightroom.
+- Work naturally with RAW/JPG pairs by matching same-stem entries.
+
+Important limitations and behavior:
+- Export mode reads the Lightroom catalog only; it does not require the image files
+ to be mounted or present on disk.
+- JSON mode reads only from the exported text file, not from the .lrcat.
+- JSON mode updates only one existing faststack.json at a time.
+- This tool does not create a new faststack.json. The JSON must already exist.
+- Matching is stem-based only. For example, IMG_0001.ORF and IMG_0001.JPG both map
+ to the same FastStack key img_0001 if that is how FastStack tracks the entry.
+- JSON mode sanity-checks whether the exported file paths currently exist on disk
+ and includes those counts in the summary. These existence checks are best-effort
+ and do not affect the stem-based matching.
+- If an entry is already uploaded in faststack.json, it is left unchanged and
+ counted as already present.
+- Existing uploaded_date values are preserved. If a matching entry is newly marked
+ uploaded and has no uploaded_date, the default date is {DEFAULT_UPLOADED_DATE}
+ unless overridden with --uploaded-date YYYY-MM-DD.
+- Before any JSON write, an automatic rotating backup is created:
+ faststack.json.bak
+ faststack.json.bak1
+ faststack.json.bak2
+ ...
+- Use --dry-run to preview changes without writing anything.
+- Use --verbose for normal detailed progress.
+- Use --debug for very chatty troubleshooting output.
+
+Cross-platform path resolution:
+ Lightroom catalogs store Windows-style paths (e.g. C:/Users/...). When running
+ in WSL or other environments, these paths may not exist at their stored location.
+ The existence check tries several strategies:
+ 1. The path exactly as stored in the catalog.
+ 2. On WSL/Linux: if the path looks like a Windows drive letter (C:/... or C:\\...),
+ try /mnt/c/... (lowercase drive letter).
+ 3. On Windows: if the path looks like /mnt/c/..., try C:/... instead.
+ These are best-effort for the summary counts only. The actual stem-based matching
+ into FastStack does NOT depend on file existence.
+
+Examples:
+ Export green paths from a catalog:
+ {PROGRAM_NAME} -i "Alan Rockefeller-v13-3.lrcat" -o green.txt
+
+ Update an existing FastStack JSON from an exported paths file:
+ {PROGRAM_NAME} --paths green.txt --json /mnt/c/.../faststack.json
+
+ Preview JSON changes without writing:
+ {PROGRAM_NAME} --paths green.txt --json /mnt/c/.../faststack.json --dry-run --verbose
+
+ Use a specific uploaded date for newly marked entries:
+ {PROGRAM_NAME} --paths green.txt --json /mnt/c/.../faststack.json --uploaded-date 2026-04-01
+
+Helper scripts:
+ inspect_lrcat_photo.py — Inspect a single image record across all catalog tables.
+ lrcat_diff.py — Compare two catalog snapshots to discover schema changes.
+ test_lrcat_join.py — Test the 4-table path-reconstruction join for one image.
+""".strip()
+
+
+class RichHelpFormatter(argparse.RawDescriptionHelpFormatter):
+ pass
+
+
+def build_parser() -> argparse.ArgumentParser:
+ """Build the argument parser with comprehensive help text."""
+ parser = argparse.ArgumentParser(
+ prog=PROGRAM_NAME,
+ description=verbose_description(),
+ formatter_class=RichHelpFormatter,
+ )
+ parser.add_argument(
+ "-i",
+ "--input",
+ help="Path to Lightroom Classic .lrcat file for export mode.",
+ )
+ parser.add_argument(
+ "-o",
+ "--output",
+ help="Output text file for export mode; one green-labeled path per line.",
+ )
+ parser.add_argument(
+ "--paths",
+ help="Previously exported text file of green-labeled paths for JSON mode.",
+ )
+ parser.add_argument(
+ "--json",
+ help="Path to an existing FastStack faststack.json to update in JSON mode.",
+ )
+ parser.add_argument(
+ "--uploaded-date",
+ default=DEFAULT_UPLOADED_DATE,
+ help=f"Date to use for newly marked uploaded entries with no date (default: {DEFAULT_UPLOADED_DATE}).",
+ )
+ parser.add_argument(
+ "--dry-run",
+ action="store_true",
+ help="Preview JSON changes without writing any file.",
+ )
+ parser.add_argument(
+ "--verbose",
+ action="store_true",
+ help="Print normal detailed progress information.",
+ )
+ parser.add_argument(
+ "--debug",
+ action="store_true",
+ help="Print very verbose debugging information.",
+ )
+ return parser
+
+
+def validate_args(parser: argparse.ArgumentParser, args: argparse.Namespace) -> str:
+ """Validate argument combinations and return the selected mode ('export' or 'json')."""
+ export_selected = bool(args.input or args.output)
+ json_selected = bool(args.paths or args.json)
+
+ if not export_selected and not json_selected:
+ parser.print_help()
+ raise SystemExit(1)
+
+ if export_selected and json_selected:
+ parser.error(
+ "Choose either export mode (-i/-o) or JSON mode (--paths/--json), not both."
+ )
+
+ if export_selected:
+ if not args.input or not args.output:
+ parser.error("Export mode requires both -i/--input and -o/--output.")
+ return "export"
+
+ if not args.paths or not args.json:
+ parser.error("JSON mode requires both --paths and --json.")
+ return "json"
+
+
+def connect_ro_sqlite(path: str) -> sqlite3.Connection:
+ """Open a read-only SQLite connection to the given file."""
+ uri = f"file:{os.path.abspath(path)}?mode=ro"
+ conn = sqlite3.connect(uri, uri=True)
+ conn.row_factory = sqlite3.Row
+ return conn
+
+
+def normalize_catalog_path(
+ root: str, subdir: str, base_name: str, extension: str
+) -> str:
+ """Assemble a full path from the Lightroom join components.
+
+ The components come from:
+ root = AgLibraryRootFolder.absolutePath (typically ends with /)
+ subdir = AgLibraryFolder.pathFromRoot (typically ends with /)
+ base_name = AgLibraryFile.baseName (no extension)
+ extension = AgLibraryFile.extension (no leading dot)
+ """
+ filename = f"{base_name}.{extension}" if extension else base_name
+ return os.path.normpath((root or "") + (subdir or "") + filename)
+
+
+def stem_key_from_path(path_str: str) -> str:
+ """Extract the lowercase stem (filename without extension) for matching.
+
+ FastStack uses lowercase stems as entry keys, so IMG_0001.ORF and
+ IMG_0001.JPG both map to 'img_0001'.
+ """
+ return Path(path_str).stem.lower()
+
+
+# ---------------------------------------------------------------------------
+# Cross-platform path existence checking
+# ---------------------------------------------------------------------------
+#
+# Lightroom catalogs store paths in the format they were added on the original
+# OS — typically Windows paths like "C:/Users/alan/Photos/IMG_0001.ORF".
+#
+# When running this tool in different environments, those stored paths may not
+# resolve directly:
+# - In WSL: C:/Users/... does not exist, but /mnt/c/Users/... does.
+# - On native Windows: paths should work as-is.
+# - On macOS/Linux: Windows paths will not exist unless the drive is mounted.
+#
+# The functions below try multiple path forms to give a best-effort existence
+# check. This is used ONLY for the summary counts ("X files exist on disk").
+# The actual stem-based matching into FastStack does NOT depend on whether the
+# file is found on disk.
+# ---------------------------------------------------------------------------
+
+# Matches Windows drive-letter paths like "C:/..." or "C:\..."
+_WINDOWS_DRIVE_RE = re.compile(r"^([A-Za-z]):[/\\]")
+
+# Matches WSL mount paths like "/mnt/c/..."
+_WSL_MOUNT_RE = re.compile(r"^/mnt/([a-z])/", re.IGNORECASE)
+
+
+def _is_wsl() -> bool:
+ """Detect whether we are running inside Windows Subsystem for Linux.
+
+ Checks for 'microsoft' or 'WSL' in the kernel release string, which is
+ the standard detection method. Returns False on non-Linux platforms.
+ """
+ if platform.system() != "Linux":
+ return False
+ try:
+ release = platform.release().lower()
+ return "microsoft" in release or "wsl" in release
+ except Exception:
+ return False
+
+
+# Cache the WSL check at module load time so we don't re-check per path.
+_RUNNING_IN_WSL = _is_wsl()
+
+
+def check_file_exists(path_str: str, logger: Logger) -> bool:
+ """Best-effort check whether a file exists, trying cross-platform path forms.
+
+ Strategy:
+ 1. Try the path exactly as given.
+ 2. If it looks like a Windows drive path (C:/...) and we're on WSL/Linux,
+ normalize slashes and try /mnt/<drive>/... (lowercase drive letter).
+ 3. If it looks like a WSL /mnt/<drive>/... path and we're on Windows,
+ try <Drive>:/... instead.
+
+ Returns True if the file is found via any of these strategies.
+ """
+ # Normalize forward/back slashes for consistent matching.
+ normalized = path_str.replace("\\", "/")
+
+ # Strategy 1: try the path exactly as stored (after slash normalization).
+ if os.path.exists(normalized):
+ logger.debug(f"Exists (direct): {normalized}")
+ return True
+
+ # Strategy 2: Windows drive path -> WSL mount path.
+ # e.g. "C:/Users/alan/file.jpg" -> "/mnt/c/Users/alan/file.jpg"
+ drive_match = _WINDOWS_DRIVE_RE.match(normalized)
+ if drive_match and (platform.system() == "Linux"):
+ drive_letter = drive_match.group(1).lower()
+ rest = normalized[2:] # strip "C:" prefix, keep the leading "/"
+ wsl_path = f"/mnt/{drive_letter}{rest}"
+ if os.path.exists(wsl_path):
+ logger.debug(f"Exists (WSL mount): {wsl_path}")
+ return True
+
+ # Strategy 3: WSL mount path -> Windows drive path.
+ # e.g. "/mnt/c/Users/alan/file.jpg" -> "C:/Users/alan/file.jpg"
+ wsl_match = _WSL_MOUNT_RE.match(normalized)
+ if wsl_match and (platform.system() == "Windows" or os.name == "nt"):
+ drive_letter = wsl_match.group(1).upper()
+ rest = normalized[len(wsl_match.group(0)) - 1 :] # keep leading "/"
+ win_path = f"{drive_letter}:{rest}"
+ if os.path.exists(win_path):
+ logger.debug(f"Exists (Windows drive): {win_path}")
+ return True
+
+ logger.debug(f"Not found on disk: {path_str}")
+ return False
+
+
+# ---------------------------------------------------------------------------
+# Export mode
+# ---------------------------------------------------------------------------
+
+
def export_green_paths(catalog_path: str, output_path: str, logger: Logger) -> int:
    """Export all green-labeled image paths from a Lightroom catalog to a text file.

    Opens the catalog read-only, reconstructs each file's absolute path via the
    observed 4-table join, and writes one path per line (UTF-8, LF endings).

    Returns the number of paths written.
    """
    if not os.path.exists(catalog_path):
        raise FileNotFoundError(f"Catalog not found: {catalog_path}")

    logger.verbose(f"Opening Lightroom catalog read-only: {catalog_path}")
    conn = connect_ro_sqlite(catalog_path)
    try:
        # Observed join chain for path reconstruction:
        #   Adobe_images.rootFile      -> AgLibraryFile.id_local
        #   AgLibraryFile.folder       -> AgLibraryFolder.id_local
        #   AgLibraryFolder.rootFolder -> AgLibraryRootFolder.id_local
        # Green labels were observed stored as colorLabels = 'Green'.
        query = """
        SELECT
            i.id_local AS image_id,
            r.absolutePath,
            d.pathFromRoot,
            f.baseName,
            f.extension
        FROM Adobe_images i
        JOIN AgLibraryFile f
            ON i.rootFile = f.id_local
        JOIN AgLibraryFolder d
            ON f.folder = d.id_local
        JOIN AgLibraryRootFolder r
            ON d.rootFolder = r.id_local
        WHERE i.colorLabels = 'Green'
        ORDER BY r.absolutePath, d.pathFromRoot, f.baseName
        """
        result_rows = conn.execute(query).fetchall()
        logger.verbose(f"Found {len(result_rows)} Lightroom rows where colorLabels = 'Green'.")

        records: list[GreenPathRecord] = []
        for row in result_rows:
            full_path = normalize_catalog_path(
                row["absolutePath"] or "",
                row["pathFromRoot"] or "",
                row["baseName"] or "",
                row["extension"] or "",
            )
            rec = GreenPathRecord(
                image_id=int(row["image_id"]),
                full_path=full_path,
                stem_key=stem_key_from_path(full_path),
            )
            records.append(rec)
            logger.debug(f"Export row image_id={rec.image_id} path={rec.full_path}")

        destination = Path(output_path)
        destination.parent.mkdir(parents=True, exist_ok=True)
        with destination.open("w", encoding="utf-8", newline="\n") as handle:
            handle.writelines(rec.full_path + "\n" for rec in records)

        logger.info(f"Wrote {len(records)} green-labeled paths to {destination}")
        return len(records)
    finally:
        conn.close()
+
+
+# ---------------------------------------------------------------------------
+# JSON mode
+# ---------------------------------------------------------------------------
+
+
@dataclass
class JsonUpdateSummary:
    """Tracks counts and outcomes for a JSON update operation.

    Populated incrementally by update_faststack_json and rendered for
    humans by human_summary.
    """

    # Total non-empty lines read from the exported paths file.
    paths_read: int = 0
    # Distinct lowercase stems derived from those paths.
    unique_stems_in_paths: int = 0
    # Best-effort on-disk existence counts (informational only).
    existing_files: int = 0
    missing_files: int = 0
    # Optional explanation of why some files may appear missing.
    existence_check_note: str = ""
    # Size of the JSON "entries" dictionary.
    json_entries_total: int = 0
    # Stems present in both the export and the JSON.
    matching_entries_found: int = 0
    # Entries flipped to uploaded=True in this run.
    newly_marked_uploaded: int = 0
    # Matching entries that were already uploaded before this run.
    already_uploaded: int = 0
    # Exported stems that have no corresponding JSON entry.
    stems_not_present_in_json: int = 0
    # Path of the .bak copy made before writing, if any.
    backup_path: str | None = None
    # True once the updated JSON has been written to disk.
    json_written: bool = False
+
+
# Default shape for FastStack entries. Used to fill in missing fields when
# updating entries that may have been created by an older FastStack version.
# Booleans default to False; ids/dates default to None (meaning "never set").
DEFAULT_FASTSTACK_ENTRY_SHAPE = {
    "stack_id": None,
    "stacked": False,
    "stacked_date": None,
    "uploaded": False,
    "uploaded_date": None,
    "edited": False,
    "edited_date": None,
    "restacked": False,
    "restacked_date": None,
    "favorite": False,
    "todo": False,
    "todo_date": None,
}
+
+
+def load_paths_file(paths_path: str, logger: Logger) -> list[str]:
+ """Load non-empty lines from a previously exported paths text file."""
+ if not os.path.exists(paths_path):
+ raise FileNotFoundError(f"Paths file not found: {paths_path}")
+
+ result: list[str] = []
+ with open(paths_path, "r", encoding="utf-8") as handle:
+ for line_number, raw_line in enumerate(handle, start=1):
+ line = raw_line.strip()
+ if not line:
+ logger.debug(f"Skipping empty line {line_number} in paths file.")
+ continue
+ result.append(line)
+ logger.verbose(f"Loaded {len(result)} non-empty paths from {paths_path}")
+ return result
+
+
def next_backup_path(json_path: Path) -> Path:
    """Find the next available backup filename (faststack.json.bak, .bak1, .bak2, ...)."""
    plain = json_path.with_name(json_path.name + ".bak")
    if not plain.exists():
        return plain

    MAX_BACKUP_ATTEMPTS = 1000
    for suffix_number in range(1, MAX_BACKUP_ATTEMPTS + 1):
        numbered = json_path.with_name(f"{json_path.name}.bak{suffix_number}")
        if not numbered.exists():
            return numbered
    raise RuntimeError(
        f"Could not find an available backup path for {json_path.name} "
        f"within {MAX_BACKUP_ATTEMPTS} attempts."
    )
+
+
def ensure_faststack_entry_shape(entry: dict) -> dict:
    """Fill in any missing fields with defaults. Does not overwrite existing values."""
    for field_name, default_value in DEFAULT_FASTSTACK_ENTRY_SHAPE.items():
        if field_name not in entry:
            entry[field_name] = default_value
    return entry
+
+
def load_json(path: Path) -> dict:
    """Load and return the parsed JSON from the given file."""
    with open(path, "r", encoding="utf-8") as handle:
        return json.load(handle)
+
+
def save_json(path: Path, data: dict) -> None:
    """Atomically write JSON data to the given path (write to .tmp, then rename)."""
    scratch = path.with_suffix(path.suffix + ".tmp")
    with scratch.open("w", encoding="utf-8", newline="\n") as handle:
        handle.write(json.dumps(data, indent=2, ensure_ascii=False))
        handle.write("\n")
    scratch.replace(path)
+
+
+def human_summary(summary: JsonUpdateSummary, json_path: str) -> str:
+ """Format the update summary as a human-readable string."""
+ lines = [
+ f"Summary for {json_path}",
+ f" Paths read: {summary.paths_read}",
+ f" Unique stems in paths file: {summary.unique_stems_in_paths}",
+ f" Exported image paths that currently exist on disk: {summary.existing_files}",
+ f" Exported image paths missing on disk: {summary.missing_files}",
+ ]
+ if summary.existence_check_note:
+ lines.append(f" Note: {summary.existence_check_note}")
+ lines.extend(
+ [
+ f" FastStack entries present in JSON: {summary.json_entries_total}",
+ f" Matching FastStack entries found: {summary.matching_entries_found}",
+ f" Newly marked uploaded: {summary.newly_marked_uploaded}",
+ f" Already uploaded: {summary.already_uploaded}",
+ f" Exported stems not present in this JSON: {summary.stems_not_present_in_json}",
+ ]
+ )
+ if summary.backup_path:
+ lines.append(f" Backup created: {summary.backup_path}")
+ if summary.json_written:
+ lines.append(" JSON file written: yes")
+ else:
+ lines.append(" JSON file written: no")
+ return "\n".join(lines)
+
+
def update_faststack_json(
    paths_file: str,
    json_path_str: str,
    uploaded_date: str,
    dry_run: bool,
    logger: Logger,
) -> JsonUpdateSummary:
    """Update a FastStack JSON file based on a previously exported paths file.

    Matches exported paths to FastStack entries by lowercase stem. Sets
    uploaded=True on matching entries that are not already marked.

    Args:
        paths_file: Text file produced by export mode, one path per line.
        json_path_str: Path to an existing faststack.json (never created here).
        uploaded_date: Date string stored on newly marked entries that have
            no uploaded_date yet.
        dry_run: When True, report what would change but do not write.
        logger: Message sink for info/verbose/debug/warn output.

    Raises:
        FileNotFoundError: If the JSON file or the paths file does not exist.
        ValueError: If the JSON lacks an 'entries' dictionary.
    """
    json_path = Path(json_path_str)
    if not json_path.exists():
        raise FileNotFoundError(
            f"FastStack JSON not found: {json_path}. This tool does not create new JSON files."
        )

    path_lines = load_paths_file(paths_file, logger)
    summary = JsonUpdateSummary(paths_read=len(path_lines))

    # Build a mapping from lowercase stem to the list of catalog paths that
    # share that stem. Multiple paths can share a stem (e.g. RAW + JPG pairs).
    #
    # BUG FIX: the existence check below previously sat *after* this loop at
    # function level, so it ran once on the leftover loop variable and only
    # the last path was ever checked. It now runs once per path, so
    # existing_files/missing_files count every exported path.
    stem_to_paths: dict[str, list[str]] = {}
    for path_str in path_lines:
        stem = stem_key_from_path(path_str)
        stem_to_paths.setdefault(stem, []).append(path_str)

        # Best-effort existence check using cross-platform path resolution.
        # This is for informational summary only — matching is stem-based
        # and does not depend on whether the file is found on disk.
        if check_file_exists(path_str, logger):
            summary.existing_files += 1
        else:
            summary.missing_files += 1

    summary.unique_stems_in_paths = len(stem_to_paths)

    # Add a note explaining the existence check strategy.
    if summary.missing_files > 0 and _RUNNING_IN_WSL:
        summary.existence_check_note = (
            "Existence checks tried both catalog paths and WSL /mnt/ paths. "
            "Missing files may be on unmounted drives or external storage."
        )
    elif summary.missing_files > 0:
        summary.existence_check_note = (
            "Some exported paths were not found on disk. This is expected if "
            "files are on unmounted drives, external storage, or a different OS."
        )

    logger.verbose(f"Opening FastStack JSON: {json_path}")
    data = load_json(json_path)
    entries = data.get("entries")
    if not isinstance(entries, dict):
        raise ValueError(f"FastStack JSON missing an 'entries' dictionary: {json_path}")

    summary.json_entries_total = len(entries)
    changed = False

    json_stems = set(entries.keys())
    exported_stems = set(stem_to_paths.keys())
    summary.stems_not_present_in_json = len(exported_stems - json_stems)

    for stem in sorted(exported_stems & json_stems):
        summary.matching_entries_found += 1
        entry = entries[stem]
        if not isinstance(entry, dict):
            logger.warn(
                f"Skipping malformed FastStack entry for stem {stem!r}: not an object"
            )
            continue

        ensure_faststack_entry_shape(entry)
        source_paths = stem_to_paths[stem]
        logger.debug(f"Matching stem {stem!r} with source paths: {source_paths}")

        if entry.get("uploaded") is True:
            summary.already_uploaded += 1
            logger.verbose(f"Already uploaded: {stem}")
            continue

        entry["uploaded"] = True
        # Preserve an uploaded_date that a prior run may have set.
        if not entry.get("uploaded_date"):
            entry["uploaded_date"] = uploaded_date
        summary.newly_marked_uploaded += 1
        changed = True
        logger.verbose(f"Marking uploaded: {stem}")

    if changed and not dry_run:
        # Back up the current JSON before overwriting it, then write atomically.
        backup_path = next_backup_path(json_path)
        shutil.copy2(json_path, backup_path)
        summary.backup_path = str(backup_path)
        save_json(json_path, data)
        summary.json_written = True
    elif changed and dry_run:
        logger.info("[dry-run] Changes detected; JSON was not written.")
    else:
        logger.info("No JSON changes were needed.")

    return summary
+
+
+# ---------------------------------------------------------------------------
+# CLI entry point
+# ---------------------------------------------------------------------------
+
+
def main(argv: Iterable[str] | None = None) -> int:
    """CLI entry point. Returns 0 on success, 2 on an expected failure."""
    parser = build_parser()
    parsed = parser.parse_args(None if argv is None else list(argv))
    mode = validate_args(parser, parsed)
    logger = Logger(verbose=parsed.verbose, debug=parsed.debug)

    try:
        if mode == "export":
            written = export_green_paths(parsed.input, parsed.output, logger)
            logger.info(f"Export complete. {written} paths written.")
        else:
            summary = update_faststack_json(
                paths_file=parsed.paths,
                json_path_str=parsed.json,
                uploaded_date=parsed.uploaded_date,
                dry_run=parsed.dry_run,
                logger=logger,
            )
            logger.info("")
            logger.info(human_summary(summary, parsed.json))
        return 0
    except (
        FileNotFoundError,
        ValueError,
        sqlite3.DatabaseError,
        json.JSONDecodeError,
    ) as exc:
        logger.error(str(exc))
        return 2
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/lightroom-catalog-import/inspect_lrcat_photo.py b/lightroom-catalog-import/inspect_lrcat_photo.py
new file mode 100644
index 0000000..eac9ce9
--- /dev/null
+++ b/lightroom-catalog-import/inspect_lrcat_photo.py
@@ -0,0 +1,241 @@
+#!/usr/bin/env python3
+"""
+inspect_lrcat_photo.py — Exploratory helper for reverse-engineering Lightroom catalogs
+
+PURPOSE:
+ Given a Lightroom Classic .lrcat catalog and an Adobe_images.id_local value,
+ print the Adobe_images row and scan all tables for columns that might reference
+ that image ID. Also lists tables containing path-related columns.
+
+ This is a reverse-engineering/exploration helper, NOT the main migration tool.
+ Use green2faststack.py for the actual green-label migration workflow.
+
+WHEN TO USE:
+ - You want to understand the schema of a specific Lightroom catalog.
+ - You want to trace how a specific image is referenced across tables.
+ - You're investigating which tables/columns store file paths, folder
+ structures, or other metadata for a given photo.
+
+HOW TO FIND AN IMAGE ID:
+ Open the .lrcat file in a SQLite browser (e.g. DB Browser for SQLite) and
+ query Adobe_images. Or use lrcat_diff.py to find rows that changed after
+ a known edit.
+
+EXAMPLES:
+ python inspect_lrcat_photo.py catalog.lrcat 12345
+ python inspect_lrcat_photo.py "Alan Rockefeller-v13-3.lrcat" 99
+
+WHAT IT DOES:
+ 1. Prints all columns from the Adobe_images row for the given id_local.
+ 2. Scans every table in the catalog for columns whose names suggest they
+ might be foreign keys pointing to an image (e.g. "image", "rootFile").
+ For each such column, queries for rows matching the given image ID.
+ This is a heuristic scan — it may produce false positives or miss
+ columns with unusual naming.
+ 3. Lists all tables that have columns with path-related names (path,
+ filename, folder, volume, root, etc.) to help identify where file
+ locations are stored.
+
+NOTES:
+ - The catalog is opened read-only; no changes are made.
+ - The candidate-reference scan checks column names heuristically.
+ Lightroom's schema is not publicly documented, so column naming
+ conventions were inferred from observation and may vary by version.
+ - Lightroom Classic catalogs are SQLite databases. This script requires
+ only the Python standard library (sqlite3).
+"""
+
+from __future__ import annotations
+
+import argparse
+import os
+import sqlite3
+import sys
+
+
def quote_ident(name: str) -> str:
    """Quote a SQL identifier to prevent injection. Doubles internal quotes."""
    escaped = name.replace('"', '""')
    return f'"{escaped}"'
+
+
def connect_ro(path: str) -> sqlite3.Connection:
    """Open a read-only SQLite connection to the given file."""
    uri = f"file:{os.path.abspath(path)}?mode=ro"
    connection = sqlite3.connect(uri, uri=True)
    connection.row_factory = sqlite3.Row
    return connection
+
+
def get_tables(conn: sqlite3.Connection) -> list[str]:
    """Return all user table names in the database, sorted."""
    cursor = conn.execute(
        """
        SELECT name
        FROM sqlite_master
        WHERE type='table'
          AND name NOT LIKE 'sqlite_%'
        ORDER BY name
        """
    )
    return [row["name"] for row in cursor]
+
+
def get_columns(conn: sqlite3.Connection, table: str) -> list[str]:
    """Return column names for the given table."""
    # Identifier quoting inlined (same rule as quote_ident: double any
    # embedded quotes, then wrap in double quotes).
    safe_table = '"' + table.replace('"', '""') + '"'
    return [row["name"] for row in conn.execute(f"PRAGMA table_info({safe_table})")]
+
+
+def print_row(title: str, row: sqlite3.Row | None) -> None:
+ """Pretty-print a single database row with a section title."""
+ print(f"\n=== {title} ===")
+ if row is None:
+ print(" ")
+ return
+ for key in row.keys():
+ print(f"{key} = {row[key]!r}")
+
+
# Column names (lowercased) that might be foreign keys pointing to an image.
# These were observed in Lightroom Classic catalogs; other versions may differ.
# Compared case-insensitively in main(), so keep every entry lowercase here.
CANDIDATE_IMAGE_COLUMN_NAMES = {
    "image",
    "imageid",
    "id_image",
    "image_id",
    "rootfile",
    "rootfileid",
    "id_rootfile",
}
+
+
def main() -> int:
    """CLI entry point: inspect one Adobe_images row and scan for references.

    Returns 0 on success, 1 for bare invocation or an unknown image id,
    and 2 when the catalog file is missing.
    """
    parser = argparse.ArgumentParser(
        prog="inspect_lrcat_photo",
        description=(
            "Inspect a single image record in a Lightroom Classic .lrcat catalog.\n"
            "\n"
            "Prints the Adobe_images row for the given id_local, scans all tables\n"
            "for columns that might reference this image, and lists tables with\n"
            "path-related columns.\n"
            "\n"
            "This is an exploratory/reverse-engineering helper. For the actual\n"
            "green-label migration, use green2faststack.py instead."
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=(
            "examples:\n"
            " %(prog)s catalog.lrcat 12345\n"
            ' %(prog)s "Alan Rockefeller-v13-3.lrcat" 99\n'
            "\n"
            "The catalog is opened read-only; no changes are made.\n"
            "Requires only the Python standard library."
        ),
    )
    parser.add_argument(
        "catalog",
        help="Path to a Lightroom Classic .lrcat file (a SQLite database).",
    )
    parser.add_argument(
        "image_id",
        type=int,
        help="The Adobe_images.id_local value to inspect.",
    )

    # With no arguments at all, show the full help instead of argparse's
    # terse usage error.
    if len(sys.argv) == 1:
        parser.print_help()
        return 1

    args = parser.parse_args()

    if not os.path.exists(args.catalog):
        print(f"error: catalog not found: {args.catalog}", file=sys.stderr)
        return 2

    conn = connect_ro(args.catalog)
    try:
        # Step 1: Print the Adobe_images row for this id_local.
        row = conn.execute(
            "SELECT * FROM Adobe_images WHERE id_local = ?",
            (args.image_id,),
        ).fetchone()
        print_row("Adobe_images", row)

        if row is None:
            print(f"\nNo Adobe_images row found for id_local = {args.image_id}.")
            print("Check that the image_id is correct. You can find valid IDs by")
            print("querying Adobe_images in a SQLite browser or using lrcat_diff.py.")
            return 1

        image_id = args.image_id
        # rootFile links to AgLibraryFile; used below when probing columns
        # whose names suggest a library-file (rather than image) foreign key.
        root_file_id = row["rootFile"]
        tables = get_tables(conn)

        # Step 2: Scan all tables for columns that might reference this image.
        # This is a heuristic: we look for column names that suggest foreign keys
        # to image-related tables (e.g. "image", "rootFile", etc.).
        # This can produce false positives if unrelated columns happen to have
        # matching names or values.
        candidates = []
        for table in tables:
            cols = get_columns(conn, table)
            matching_cols = [
                col for col in cols if col.lower() in CANDIDATE_IMAGE_COLUMN_NAMES
            ]
            if matching_cols:
                candidates.append((table, matching_cols))

        print("\n=== Candidate references to this image ===")
        print("(Scanning tables for columns whose names suggest image foreign keys.)")
        print("(This is heuristic — column naming may vary by Lightroom version.)")
        found_any = False
        for table, cols in candidates:
            for col in cols:
                # If column name specifically suggests it's a "rootFile" foreign key,
                # use the library-file ID instead of the image ID.
                probe_id = root_file_id if "rootfile" in col.lower() else image_id
                sql = f"SELECT * FROM {quote_ident(table)} WHERE {quote_ident(col)} = ? LIMIT 5"
                rows = conn.execute(sql, (probe_id,)).fetchall()
                if rows:
                    found_any = True
                    print(f"\n--- {table}.{col} ---")
                    for i, r in enumerate(rows, 1):
                        print(f"[row {i}]")
                        for key in r.keys():
                            print(f"{key} = {r[key]!r}")

        if not found_any:
            print("No obvious direct references found.")

        # Step 3: List tables that have columns with path-related names.
        # This helps identify where Lightroom stores file locations.
        print("\n=== Tables with likely path columns ===")
        print("(Tables containing columns named like path, filename, folder, etc.)")
        for table in tables:
            cols = get_columns(conn, table)
            interesting = [
                c
                for c in cols
                if any(
                    x in c.lower()
                    for x in [
                        "path",
                        "filename",
                        "basename",
                        "folder",
                        "volume",
                        "root",
                    ]
                )
            ]
            if interesting:
                print(f"{table}: {', '.join(interesting)}")

        return 0
    finally:
        conn.close()
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/lightroom-catalog-import/lrcat_diff.py b/lightroom-catalog-import/lrcat_diff.py
new file mode 100644
index 0000000..53adfe5
--- /dev/null
+++ b/lightroom-catalog-import/lrcat_diff.py
@@ -0,0 +1,452 @@
+#!/usr/bin/env python3
+"""
+lrcat_diff.py — Reverse-engineering helper for discovering Lightroom catalog changes
+
+PURPOSE:
+ Compare two Lightroom Classic .lrcat files (before and after a known edit)
+ and report which rows and columns changed. This is how we discovered that
+ green labels are stored in Adobe_images.colorLabels = 'Green'.
+
+ This is a reverse-engineering/exploration helper, NOT the main migration tool.
+ Use green2faststack.py for the actual green-label migration workflow.
+
+WHEN TO USE:
+ - You want to discover which table/column stores a specific piece of metadata
+ (color labels, ratings, keywords, develop settings, etc.).
+ - You want to verify that a known Lightroom edit changed what you expected.
+ - You want to understand the Lightroom catalog schema by observing changes.
+
+TYPICAL WORKFLOW:
+ 1. Close Lightroom Classic.
+ 2. Copy the catalog file to a backup:
+ cp "My Catalog.lrcat" before.lrcat
+ 3. Open Lightroom, make one small known change (e.g., mark one photo green),
+ then close Lightroom.
+ 4. Run the diff:
+ python lrcat_diff.py before.lrcat "My Catalog.lrcat" --match "IMG_1234"
+
+ The --match flag filters output to rows containing the given substring,
+ which is very helpful for isolating the specific photo you changed.
+
+EXAMPLES:
+ # Compare two catalogs, show all changes
+ python lrcat_diff.py before.lrcat after.lrcat
+
+ # Show only changes related to a specific filename
+ python lrcat_diff.py before.lrcat after.lrcat --match "DSC_0042"
+
+ # Compare only specific tables
+ python lrcat_diff.py before.lrcat after.lrcat --tables Adobe_images AgLibraryFile
+
+ # Increase the per-table row limit
+ python lrcat_diff.py before.lrcat after.lrcat --max-rows 100
+
+MEMORY NOTE:
+ This tool loads all rows for each compared table into memory (keyed by
+ primary key) to compute diffs. For very large catalogs with hundreds of
+ thousands of photos, this may use significant memory. If you hit memory
+ limits, use --tables to compare specific tables one at a time.
+
+NOTES:
+ - Both catalogs are opened read-only; no changes are made.
+ - Tables without a primary key are skipped (cannot reliably match rows).
+ - BLOB values are shown as their SHA-1 hash and byte length.
+ - Lightroom Classic catalogs are SQLite databases. This script requires
+ only the Python standard library (sqlite3).
+"""
+
+from __future__ import annotations
+
+import argparse
+import hashlib
+import os
+import sqlite3
+import sys
+from typing import Iterable, Sequence
+
+
def quote_ident(name: str) -> str:
    """Quote a SQL identifier to prevent injection."""
    return '"{}"'.format(name.replace('"', '""'))
+
+
def connect_ro(path: str) -> sqlite3.Connection:
    """Open a read-only SQLite connection to the given file."""
    absolute = os.path.abspath(path)
    conn = sqlite3.connect(f"file:{absolute}?mode=ro", uri=True)
    conn.row_factory = sqlite3.Row
    return conn
+
+
def get_tables(conn: sqlite3.Connection) -> set[str]:
    """Return all user table names in the database."""
    cursor = conn.execute(
        """
        SELECT name
        FROM sqlite_master
        WHERE type = 'table'
          AND name NOT LIKE 'sqlite_%'
        """
    )
    return {row["name"] for row in cursor}
+
+
def get_columns(conn: sqlite3.Connection, table: str) -> list[sqlite3.Row]:
    """Return PRAGMA table_info rows for the given table."""
    # Identifier quoting inlined (doubles embedded quotes, then wraps).
    quoted = '"' + table.replace('"', '""') + '"'
    return conn.execute(f"PRAGMA table_info({quoted})").fetchall()
+
+
def get_pk_columns(conn: sqlite3.Connection, table: str) -> list[str]:
    """Return primary key column names for the given table, in PK order."""
    info_rows = get_columns(conn, table)
    # PRAGMA table_info marks PK members with pk >= 1 (their 1-based position
    # in the key); non-key columns have pk == 0 and are filtered out.
    ordered = sorted(info_rows, key=lambda row: row["pk"])
    return [row["name"] for row in ordered if row["pk"] > 0]
+
+
def get_all_columns(conn: sqlite3.Connection, table: str) -> list[str]:
    """Return all column names for the given table."""
    return [info_row["name"] for info_row in get_columns(conn, table)]
+
+
def row_to_key(row: sqlite3.Row, pk_cols: Sequence[str]) -> tuple:
    """Extract primary key values from a row as a hashable tuple."""
    return tuple(map(row.__getitem__, pk_cols))
+
+
def stable_repr(value: object) -> str:
    """Human-readable representation of a value, with stable BLOB hashing.

    None renders as "NULL"; bytes render as a short "<BLOB sha1=... len=N>"
    tag (hashing keeps the output deterministic without dumping binary data);
    everything else uses repr().
    """
    if value is None:
        return "NULL"
    if isinstance(value, bytes):
        digest = hashlib.sha1(value).hexdigest()
        # BUG FIX: this branch previously returned an empty f-string,
        # discarding the computed hash and rendering every BLOB as "".
        return f"<BLOB sha1={digest} len={len(value)}>"
    return repr(value)
+
+
+def row_matches(row: sqlite3.Row, needle: str | None) -> bool:
+ """Return True if any non-BLOB column value contains the needle substring."""
+ if not needle:
+ return True
+ needle = needle.lower()
+ for value in row:
+ if value is None:
+ continue
+ if isinstance(value, bytes):
+ continue
+ if needle in str(value).lower():
+ return True
+ return False
+
+
def fetch_rows_by_pk(
    conn: sqlite3.Connection,
    table: str,
    pk_cols: Sequence[str],
) -> dict[tuple, sqlite3.Row]:
    """Fetch all rows from a table, indexed by primary key tuple."""
    order_by = ", ".join(quote_ident(col) for col in pk_cols)
    query = f"SELECT * FROM {quote_ident(table)} ORDER BY {order_by}"
    indexed: dict[tuple, sqlite3.Row] = {}
    for row in conn.execute(query):
        indexed[row_to_key(row, pk_cols)] = row
    return indexed
+
+
def compare_rows(
    before: sqlite3.Row,
    after: sqlite3.Row,
    columns: Sequence[str],
) -> dict[str, tuple[object, object]]:
    """Return a dict of {column: (old_value, new_value)} for columns that differ."""
    return {
        col: (before[col], after[col])
        for col in columns
        if before[col] != after[col]
    }
+
+
def summarize_table_counts(
    conn_before: sqlite3.Connection,
    conn_after: sqlite3.Connection,
    tables: Iterable[str],
) -> list[tuple[str, int, int]]:
    """Return (table, before_count, after_count) for tables with differing row counts."""
    differing: list[tuple[str, int, int]] = []
    for table in sorted(tables):
        count_sql = f"SELECT COUNT(*) FROM {quote_ident(table)}"
        try:
            count_before = conn_before.execute(count_sql).fetchone()[0]
            count_after = conn_after.execute(count_sql).fetchone()[0]
        except sqlite3.DatabaseError as exc:
            # An unreadable table should not abort the whole summary.
            print(f"[warn] could not count table {table}: {exc}", file=sys.stderr)
            continue
        if count_before != count_after:
            differing.append((table, count_before, count_after))
    return differing
+
+
def compare_table(
    conn_before: sqlite3.Connection,
    conn_after: sqlite3.Connection,
    table: str,
    match: str | None,
    max_rows: int,
) -> dict[str, list]:
    """Compare a single table between two catalogs. Returns a result dict.

    Result shapes:
      - {"skipped": [reason]} when the table has no primary key (rows cannot
        be matched reliably between catalogs);
      - {"schema_changed": [...]} when the column lists differ;
      - otherwise {"table", "pk_cols", "changed", "inserted", "deleted"}.

    `match` (optional substring, see row_matches) filters which rows are
    reported; `max_rows` caps each of the changed/inserted/deleted lists
    independently.
    """
    pk_cols = get_pk_columns(conn_before, table)
    if not pk_cols:
        return {"skipped": [f"{table}: no primary key"]}

    cols_before = get_all_columns(conn_before, table)
    cols_after = get_all_columns(conn_after, table)
    if cols_before != cols_after:
        # Column-level diffs are meaningless across a schema change, so just
        # report the two column lists.
        return {
            "schema_changed": [
                {
                    "table": table,
                    "before_columns": cols_before,
                    "after_columns": cols_after,
                }
            ]
        }

    # NOTE: This loads all rows for both catalogs into memory.
    # For very large tables this may be expensive; use --tables to limit scope.
    before_rows = fetch_rows_by_pk(conn_before, table, pk_cols)
    after_rows = fetch_rows_by_pk(conn_after, table, pk_cols)

    before_keys = set(before_rows)
    after_keys = set(after_rows)

    # Keys only in "after" are inserts; filter by match first, then cap.
    inserted_keys = sorted(after_keys - before_keys)
    if match:
        inserted_keys = [k for k in inserted_keys if row_matches(after_rows[k], match)]
    inserted_keys = inserted_keys[:max_rows]

    # Keys only in "before" are deletions.
    deleted_keys = sorted(before_keys - after_keys)
    if match:
        deleted_keys = [k for k in deleted_keys if row_matches(before_rows[k], match)]
    deleted_keys = deleted_keys[:max_rows]
    common_keys = before_keys & after_keys

    changed = []
    for key in sorted(common_keys):
        b = before_rows[key]
        a = after_rows[key]
        # Keep a common row if EITHER side matches, so an edit that changes
        # the matched text itself is still reported.
        if match and not (row_matches(b, match) or row_matches(a, match)):
            continue
        diffs = compare_rows(b, a, cols_before)
        if diffs:
            changed.append(
                {
                    "pk": dict(zip(pk_cols, key, strict=True)),
                    "diffs": diffs,
                }
            )
        if len(changed) >= max_rows:
            break

    inserted = []
    for key in inserted_keys:
        row = after_rows[key]
        inserted.append({"pk": dict(zip(pk_cols, key, strict=True)), "row": row})

    deleted = []
    for key in deleted_keys:
        row = before_rows[key]
        deleted.append({"pk": dict(zip(pk_cols, key, strict=True)), "row": row})

    return {
        "table": table,
        "pk_cols": pk_cols,
        "changed": changed,
        "inserted": inserted,
        "deleted": deleted,
    }
+
+
def print_row(row: sqlite3.Row, prefix: str = " ") -> None:
    """Print all columns of a row with a prefix indent."""
    for column_name in row.keys():
        rendered = stable_repr(row[column_name])
        print(f"{prefix}{column_name} = {rendered}")
+
+
def main() -> int:
    """CLI entry point: diff two .lrcat catalogs and print table/row changes.

    Returns 0 on success, 1 for bare invocation (help shown), 2 when either
    catalog file is missing.
    """
    parser = argparse.ArgumentParser(
        prog="lrcat_diff",
        description=(
            "Compare two Lightroom Classic .lrcat files and report changed rows/columns.\n"
            "\n"
            "This is a reverse-engineering helper for discovering which tables and\n"
            "columns store specific Lightroom metadata. It is NOT the main migration\n"
            "tool — use green2faststack.py for that.\n"
            "\n"
            "Typical workflow:\n"
            " 1. Close Lightroom Classic.\n"
            " 2. Copy the catalog: cp catalog.lrcat before.lrcat\n"
            " 3. Open Lightroom, make one known change, close Lightroom.\n"
            " 4. Run: %(prog)s before.lrcat catalog.lrcat --match 'IMG_1234'"
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=(
            "examples:\n"
            " %(prog)s before.lrcat after.lrcat\n"
            ' %(prog)s before.lrcat after.lrcat --match "DSC_0042"\n'
            " %(prog)s before.lrcat after.lrcat --tables Adobe_images\n"
            " %(prog)s before.lrcat after.lrcat --max-rows 100\n"
            "\n"
            "memory note:\n"
            " This tool loads all rows per table into memory. For very large\n"
            " catalogs, use --tables to compare specific tables one at a time.\n"
            "\n"
            "Both catalogs are opened read-only; no changes are made.\n"
            "Requires only the Python standard library."
        ),
    )
    parser.add_argument(
        "before",
        help="Path to the backup/original .lrcat file.",
    )
    parser.add_argument(
        "after",
        help="Path to the modified/current .lrcat file.",
    )
    parser.add_argument(
        "--match",
        help=(
            "Only show rows where any text column contains this substring. "
            "Useful for isolating changes to a specific photo (e.g. a filename "
            "like IMG_1234 or DSC_0042)."
        ),
    )
    parser.add_argument(
        "--max-rows",
        type=int,
        default=20,
        help="Maximum inserted/deleted/changed rows to print per table (default: 20).",
    )
    parser.add_argument(
        "--tables",
        nargs="*",
        help="Compare only these specific tables (default: all common tables).",
    )

    # With no arguments at all, show the full help instead of argparse's
    # terse usage error.
    if len(sys.argv) == 1:
        parser.print_help()
        return 1

    args = parser.parse_args()

    if not os.path.exists(args.before):
        print(f"error: not found: {args.before}", file=sys.stderr)
        return 2
    if not os.path.exists(args.after):
        print(f"error: not found: {args.after}", file=sys.stderr)
        return 2

    conn_before = connect_ro(args.before)
    conn_after = connect_ro(args.after)

    try:
        tables_before = get_tables(conn_before)
        tables_after = get_tables(conn_after)

        only_before = sorted(tables_before - tables_after)
        only_after = sorted(tables_after - tables_before)
        common = sorted(tables_before & tables_after)

        # --tables narrows the comparison but never adds tables that are not
        # present in both catalogs.
        if args.tables:
            wanted = set(args.tables)
            common = [t for t in common if t in wanted]

        print("=== Catalog table overview ===")
        print(f"Tables in BEFORE only: {len(only_before)}")
        for t in only_before:
            print(f" - {t}")
        print(f"Tables in AFTER only: {len(only_after)}")
        for t in only_after:
            print(f" - {t}")
        print(f"Common tables: {len(common)}")

        print("\n=== Tables with changed row counts ===")
        changed_counts = summarize_table_counts(conn_before, conn_after, common)
        if not changed_counts:
            print(" none")
        else:
            for table, b, a in changed_counts:
                print(f" {table}: {b} -> {a}")

        print("\n=== Row/column diffs ===")
        any_output = False

        for table in common:
            try:
                result = compare_table(
                    conn_before=conn_before,
                    conn_after=conn_after,
                    table=table,
                    match=args.match,
                    max_rows=args.max_rows,
                )
            except sqlite3.DatabaseError as exc:
                # An unreadable table should not abort the whole diff run.
                print(f"\n--- {table} ---")
                print(f"[warn] could not compare table: {exc}")
                continue

            if "skipped" in result:
                # No primary key: rows cannot be matched reliably.
                continue

            if "schema_changed" in result:
                print(f"\n--- {table} ---")
                print("Schema changed between catalogs.")
                any_output = True
                continue

            changed = result["changed"]
            inserted = result["inserted"]
            deleted = result["deleted"]

            if not changed and not inserted and not deleted:
                continue

            any_output = True
            print(f"\n--- {table} ---")
            print(f"Primary key columns: {', '.join(result['pk_cols'])}")

            if changed:
                print(f"Changed rows: {len(changed)}")
                for item in changed:
                    print(f" PK: {item['pk']}")
                    for col, (old, new) in item["diffs"].items():
                        print(f" {col}: {stable_repr(old)} -> {stable_repr(new)}")

            if inserted:
                print(f"Inserted rows: {len(inserted)}")
                for item in inserted:
                    print(f" PK: {item['pk']}")
                    print_row(item["row"])

            if deleted:
                print(f"Deleted rows: {len(deleted)}")
                for item in deleted:
                    print(f" PK: {item['pk']}")
                    print_row(item["row"])

        if not any_output:
            print("No matching row-level diffs found.")
            if args.match:
                print(
                    "Try rerunning without --match, or with a different filename/path fragment."
                )

        return 0
    finally:
        conn_before.close()
        conn_after.close()
+ conn_before.close()
+ conn_after.close()
+
+
+if __name__ == "__main__":
+ raise SystemExit(main())
diff --git a/lightroom-catalog-import/test_lrcat_join.py b/lightroom-catalog-import/test_lrcat_join.py
new file mode 100644
index 0000000..7a60b5b
--- /dev/null
+++ b/lightroom-catalog-import/test_lrcat_join.py
@@ -0,0 +1,185 @@
+#!/usr/bin/env python3
+"""
+test_lrcat_join.py — Schema-discovery helper for Lightroom catalog path reconstruction
+
+PURPOSE:
+ Given a Lightroom Classic .lrcat catalog and an Adobe_images.id_local value,
+ run the 4-table join that reconstructs the full file path and print all
+ columns from the join result.
+
+ This is the join chain we observed for path reconstruction:
+ Adobe_images.rootFile -> AgLibraryFile.id_local
+ AgLibraryFile.folder -> AgLibraryFolder.id_local
+ AgLibraryFolder.rootFolder -> AgLibraryRootFolder.id_local
+
+ The full path is assembled as:
+ AgLibraryRootFolder.absolutePath + AgLibraryFolder.pathFromRoot + baseName + "." + extension
+
+ This is a schema-discovery/verification helper, NOT the main migration tool.
+ Use green2faststack.py for the actual green-label migration workflow.
+
+WHEN TO USE:
+ - You want to verify that the 4-table join produces the correct file path
+ for a specific image in your catalog.
+ - You're investigating how Lightroom stores paths and want to see all
+ the intermediate column values (rootFile, folder, rootFolder, etc.).
+ - You want to confirm the join chain before trusting the export in
+ green2faststack.py.
+
+HOW TO FIND AN IMAGE ID:
+ Open the .lrcat file in a SQLite browser and query Adobe_images, or use
+ lrcat_diff.py to find rows that changed after a known edit.
+
+EXAMPLES:
+ python test_lrcat_join.py catalog.lrcat 12345
+ python test_lrcat_join.py "Alan Rockefeller-v13-3.lrcat" 99
+
+OBSERVED SCHEMA NOTES:
+ - AgLibraryFile.baseName is the filename without extension.
+ - AgLibraryFile.extension is the file extension without a leading dot.
+ - AgLibraryFile.originalFilename includes the extension.
+ - AgLibraryRootFolder.absolutePath typically ends with a trailing slash.
+ - AgLibraryFolder.pathFromRoot typically ends with a trailing slash.
+ - These observations are from specific Lightroom Classic catalogs and may
+ vary by Lightroom version.
+
+NOTES:
+ - The catalog is opened read-only; no changes are made.
+ - LEFT JOINs are used so partial results are shown even if the join chain
+ is incomplete (e.g., missing folder or root folder records).
+ - Requires only the Python standard library (sqlite3).
+"""
+
+from __future__ import annotations
+
+import argparse
+import os
+import sqlite3
+import sys
+
+
def connect_ro(path: str) -> sqlite3.Connection:
    """Open the SQLite database at *path* strictly read-only.

    A ``file:...?mode=ro`` URI is used so the catalog can never be modified,
    and ``sqlite3.Row`` is installed as the row factory so columns are
    addressable by name.
    """
    absolute = os.path.abspath(path)
    connection = sqlite3.connect(f"file:{absolute}?mode=ro", uri=True)
    connection.row_factory = sqlite3.Row
    return connection
+
+
def main() -> int:
    """Run the 4-table path-reconstruction join for one image and print it.

    Returns 0 on success, 1 when no matching row exists (or when invoked
    with no arguments, after printing help), and 2 when the catalog file
    does not exist.
    """
    ap = argparse.ArgumentParser(
        prog="test_lrcat_join",
        description=(
            "Test the 4-table join that reconstructs file paths from a Lightroom\n"
            "Classic .lrcat catalog.\n"
            "\n"
            "Joins: Adobe_images -> AgLibraryFile -> AgLibraryFolder -> AgLibraryRootFolder\n"
            "Prints all intermediate columns and the reconstructed full path.\n"
            "\n"
            "This is a schema-discovery helper. For the actual green-label\n"
            "migration, use green2faststack.py instead."
        ),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=(
            "examples:\n"
            "  %(prog)s catalog.lrcat 12345\n"
            '  %(prog)s "Alan Rockefeller-v13-3.lrcat" 99\n'
            "\n"
            "The catalog is opened read-only; no changes are made.\n"
            "Requires only the Python standard library."
        ),
    )
    ap.add_argument(
        "catalog",
        help="Path to a Lightroom Classic .lrcat file (a SQLite database).",
    )
    ap.add_argument(
        "image_id",
        type=int,
        help="The Adobe_images.id_local value to look up.",
    )

    # With no arguments at all, show the full help text instead of the
    # terse argparse usage error.
    if len(sys.argv) == 1:
        ap.print_help()
        return 1

    opts = ap.parse_args()

    if not os.path.exists(opts.catalog):
        print(f"error: catalog not found: {opts.catalog}", file=sys.stderr)
        return 2

    # Open the catalog read-only (file: URI) with named-column rows.
    db = sqlite3.connect(f"file:{os.path.abspath(opts.catalog)}?mode=ro", uri=True)
    db.row_factory = sqlite3.Row
    try:
        # The 4-table join chain for path reconstruction.
        # LEFT JOINs are used so we still see partial results if some tables
        # are missing rows (which would indicate an unusual catalog state).
        query = """
            SELECT
                i.id_local AS image_id,
                i.colorLabels,
                i.rootFile,
                f.id_local AS file_id,
                f.baseName,
                f.originalFilename,
                f.extension,
                f.folder AS folder_id,
                d.id_local AS agfolder_id,
                d.pathFromRoot,
                d.rootFolder AS rootfolder_id,
                r.id_local AS root_id,
                r.absolutePath,
                r.relativePathFromCatalog
            FROM Adobe_images i
            LEFT JOIN AgLibraryFile f
                ON i.rootFile = f.id_local
            LEFT JOIN AgLibraryFolder d
                ON f.folder = d.id_local
            LEFT JOIN AgLibraryRootFolder r
                ON d.rootFolder = r.id_local
            WHERE i.id_local = ?
        """
        record = db.execute(query, (opts.image_id,)).fetchone()
        if record is None:
            print(f"No row found for Adobe_images.id_local = {opts.image_id}.")
            print("Check that the image_id is correct. You can find valid IDs by")
            print("querying Adobe_images in a SQLite browser or using lrcat_diff.py.")
            return 1

        # Dump every column of the joined row, repr'd so NULLs and strings
        # are unambiguous.
        print("=== Join result ===")
        for column in record.keys():
            print(f"{column} = {record[column]!r}")

        # Reconstruct the full path from the join components.
        # absolutePath and pathFromRoot typically include trailing slashes.
        root_dir = record["absolutePath"] or ""
        subfolder = record["pathFromRoot"] or ""
        # AgLibraryFile.baseName is the filename without extension.
        # AgLibraryFile.originalFilename is a fallback that already includes
        # the extension.
        stem = record["baseName"]
        ext = record["extension"] or ""

        if not stem:
            leaf = record["originalFilename"] or ""
        elif ext:
            leaf = f"{stem}.{ext}"
        else:
            leaf = stem

        guess = os.path.normpath(root_dir + subfolder + leaf)
        print(f"\nfull_path_guess = {guess!r}")

        # Show what each component contributed, for debugging.
        print(f"\n  absolutePath = {root_dir!r}")
        print(f"  pathFromRoot = {subfolder!r}")
        print(f"  baseName = {stem!r}")
        print(f"  extension = {ext!r}")
        print(f"  -> filename = {leaf!r}")
        return 0
    finally:
        db.close()
+
+
# Script entry point: propagate main()'s integer status code to the shell.
if __name__ == "__main__":
    raise SystemExit(main())
diff --git a/pyproject.toml b/pyproject.toml
index e6562ed..60b4d31 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
[project]
name = "faststack"
-version = "1.6.1"
+version = "1.6.2"
authors = [
{ name = "Alan Rockefeller" },
]