diff --git a/.gitignore b/.gitignore index c364b5a..2b5dea3 100644 --- a/.gitignore +++ b/.gitignore @@ -88,6 +88,8 @@ test_results.txt smoke_test_output.txt verify_result.txt *test_output.txt +test_image* + **/*fail*.txt **/*final*.txt diff --git a/ChangeLog.md b/ChangeLog.md index 7a07064..c77e639 100644 --- a/ChangeLog.md +++ b/ChangeLog.md @@ -1,6 +1,16 @@ # ChangeLog -Todo: Make it work on Linux / Mac. Create Windows .exe. Write better documentation / help. Add splash screen / icon. Fix raw image support. +Todo: More testing on Linux / Mac. Create Windows .exe. Write better documentation / help. Add splash screen / icon. Fix raw image support. + +## 1.5.9 (2026-02-16) + +- Full-Screen Mode: Press F11 to toggle fullscreen in loupe view. +- Spark Line Display: Grid view now shows upload progress indicators per folder. +- Optimized grid view performance and prefetch behavior. +- Added EXIF brief display in status bar showing ISO, aperture, shutter speed, and capture time. +- Enhanced metadata display with camera-style shutter speed formatting. +- Added new thumbnail badges for Backups (Bk) and Developed (D) variants. +- Improved cache eviction handling and thread-safety for concurrent operations. ## 1.5.8 (2026-02-13) diff --git a/README.md b/README.md index 184fe69..cb1383d 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ This tool is optimized for speed, using `libjpeg-turbo` for decoding, aggressive - **Crop:** Added the ability to crop and rotate images via the cr(O)p hotkey (or right mouse click). It can be a freeform crop, or constrained to several popular aspect ratios. - **Zoom & Pan:** Smooth zooming and panning. - **Stack Selection:** Group images into stacks (`[`, `]`) and select them for processing (`S`). -- **Speak Line**: In grid view, a spark line is visible on each folder, so you can see how far you have gotten in uploading photos in each directory. +- **Spark Line**: In grid view, a spark line is visible on each folder, so you can see how far you have gotten in uploading photos in each directory. - **Helicon Focus Integration:** Launch Helicon Focus with your selected RAW files with a single keypress (`Enter`). - **Instant Navigation:** Sub-10ms next/previous image switching, high performance decoding via `PyTurboJPEG`. 
- **Image Editor:** Built-in editor with exposure, contrast, white balance, sharpness, and more (E key) diff --git a/faststack/app.py b/faststack/app.py index 9d53e5b..d94daff 100644 --- a/faststack/app.py +++ b/faststack/app.py @@ -54,7 +54,9 @@ from faststack.models import ImageFile, DecodedImage from faststack.io.indexer import find_images, find_images_with_variants, image_sort_key from faststack.io.variants import ( - VariantGroup, build_badge_list, get_group_key_for_path, + VariantGroup, + build_badge_list, + get_group_key_for_path, norm_path, ) from faststack.io.sidecar import SidecarManager @@ -156,7 +158,7 @@ class AppController(QObject): _thumbnailReadySignal = Signal(str) MAX_FAILED_RESTORATIONS_TO_LOG = 10 - + @staticmethod def _key(p: Optional[Path]) -> Optional[str]: """Normalize path for consistent comparison without slow resolve().""" @@ -175,21 +177,27 @@ class ProgressReporter(QObject): _deleteFinished = Signal( object ) # Signal for async delete completion (result dict from worker) + _exifBriefReady = Signal(str, str) # (path_str, brief) from background thread def __init__( - self, image_dir: Path, engine: QQmlApplicationEngine, - debug_cache: bool = False, + self, + image_dir: Path, + engine: QQmlApplicationEngine, + debug_cache: bool = False, debug_thumb_timing: bool = False, - debug_thumb_trace: bool = False + debug_thumb_trace: bool = False, ): super().__init__() self.debug_thumb_timing = debug_thumb_timing self.debug_thumb_trace = debug_thumb_trace import faststack.util.thumb_debug as thumb_debug + thumb_debug.init(timing=self.debug_thumb_timing, trace=self.debug_thumb_trace) # Histogram Offloading Setup - self._hist_executor = create_daemon_threadpool_executor(max_workers=1, thread_name_prefix="Histogram") + self._hist_executor = create_daemon_threadpool_executor( + max_workers=1, thread_name_prefix="Histogram" + ) self._hist_inflight = False self._hist_pending = None self._hist_token = 0 @@ -214,7 +222,13 @@ def __init__( self._next_delete_job_id = 0 # Preview Offloading Setup - self._preview_executor = create_daemon_threadpool_executor(max_workers=1, thread_name_prefix="Preview") + self._preview_executor = create_daemon_threadpool_executor( + max_workers=1, thread_name_prefix="Preview" + ) + # EXIF Brief Offloading Setup (dedicated executor to avoid blocking histograms) + self._exif_executor = create_daemon_threadpool_executor( + max_workers=2, thread_name_prefix="EXIF" + ) self._preview_inflight = False self._preview_pending = False self._preview_token = 0 @@ -223,12 +237,14 @@ def __init__( self._shutting_down = False # Flag to gate async callbacks during shutdown self._refresh_scheduled = False # Coalesce guard for deferred disk refresh self._opencv_warning_shown = False # Only show OpenCV warning once per session - self._last_auto_levels_msg: str = "" # Detail message from last auto_levels() call + self._last_auto_levels_msg: str = ( + "" # Detail message from last auto_levels() call + ) # Variant state self._variant_map: Dict[str, VariantGroup] = {} self.view_override_path: Optional[str] = None # normalized absolute string - self.view_override_kind: Optional[str] = None # "main"|"developed"|"backup" + self.view_override_kind: Optional[str] = None # "main"|"developed"|"backup" self.image_dir = image_dir self.image_files: List[ImageFile] = [] # Filtered list for display @@ -267,7 +283,9 @@ def __init__( self.sidecar = SidecarManager(self.image_dir, self.watcher, debug=_debug_mode) self.image_editor = ImageEditor() # Initialize the editor self._dialog_open_count 
= 0 # Track nested dialogs - self._temp_files_to_clean: List[Path] = [] # Track temp files for cleanup on shutdown + self._temp_files_to_clean: List[Path] = ( + [] + ) # Track temp files for cleanup on shutdown # -- Caching & Prefetching -- cache_size_gb = config.getfloat("core", "cache_size_gb", 1.5) @@ -367,11 +385,15 @@ def __init__( self.batches: List[List[int]] = [] # List of [start, end] ranges self._filter_string: str = "" # Default filter - self._filter_flags: list = [] # Active flag filters (e.g. ["uploaded", "stacked"]) + self._filter_flags: list = ( + [] + ) # Active flag filters (e.g. ["uploaded", "stacked"]) self._filter_enabled: bool = False self._metadata_cache = {} self._metadata_cache_index = (-1, -1) + self._exif_brief_cache: dict = {} # normalized path key → formatted EXIF string + self._exif_pending_path: Optional[str] = None # path currently awaiting EXIF read with self._last_image_lock: self.last_displayed_image = None self._logged_empty_metadata = False @@ -419,16 +441,26 @@ def __init__( if self.debug_thumb_timing or self.debug_thumb_trace: self._thumb_summary_timer = QTimer(self) self._thumb_summary_timer.setInterval(1000) # Check every second - self._thumb_summary_timer.timeout.connect(thumb_debug.check_periodic_summary) + self._thumb_summary_timer.timeout.connect( + thumb_debug.check_periodic_summary + ) self._thumb_summary_timer.start() - # Debounce timer for metadata/highlight signals during rapid navigation # Only emits these signals once user stops navigating (16ms = 1 frame debounce) self._metadata_debounce_timer = QTimer(self) self._metadata_debounce_timer.setSingleShot(True) self._metadata_debounce_timer.setInterval(16) # 16ms debounce (1 frame) - self._metadata_debounce_timer.timeout.connect(self._emit_debounced_metadata_signals) + self._metadata_debounce_timer.timeout.connect( + self._emit_debounced_metadata_signals + ) + + # Debounce timer for EXIF reads — only fires after user stops scrolling + self._exif_debounce_timer = QTimer(self) + self._exif_debounce_timer.setSingleShot(True) + self._exif_debounce_timer.setInterval(150) # 150ms debounce + self._exif_debounce_timer.timeout.connect(self._read_exif_deferred) + self._exifBriefReady.connect(self._on_exif_brief_ready) # Track if any dialog is open to disable keybindings self._dialog_open = False @@ -576,7 +608,11 @@ def get_filter_flags(self): @Slot() def clear_filter(self): - if not self._filter_enabled and not self._filter_string and not self._filter_flags: + if ( + not self._filter_enabled + and not self._filter_string + and not self._filter_flags + ): return self._filter_enabled = False self._filter_string = "" @@ -764,7 +800,11 @@ def eventFilter(self, watched, event) -> bool: return super().eventFilter(watched, event) def _do_prefetch( - self, index: int, is_navigation: bool = False, direction: Optional[int] = None, override_path: Optional[Path] = None + self, + index: int, + is_navigation: bool = False, + direction: Optional[int] = None, + override_path: Optional[Path] = None, ): """Helper to defer prefetch until display size is stable. 
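Both debounce timers above use the same single-shot idiom: restart the timer on every event, and let the slot fire only once the quiet period elapses. A minimal standalone sketch of the pattern, assuming PySide6 (`Debouncer` is a hypothetical helper, not part of the codebase):

```python
from PySide6.QtCore import QObject, QTimer


class Debouncer(QObject):
    """Coalesce a burst of calls into one callback after `interval_ms` of quiet."""

    def __init__(self, interval_ms, callback, parent=None):
        super().__init__(parent)
        self._timer = QTimer(self)
        self._timer.setSingleShot(True)
        self._timer.setInterval(interval_ms)
        self._timer.timeout.connect(callback)

    def poke(self):
        # start() on a running single-shot timer restarts it from zero,
        # so the callback keeps sliding forward until the burst ends.
        self._timer.start()
```

With a 150 ms interval, calling `poke()` on every navigation keypress means the EXIF read is submitted once, after scrolling stops, rather than once per image.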
@@ -802,7 +842,7 @@ def _do_prefetch( index, is_navigation=is_navigation, direction=direction ) - def load(self): + def load(self, skip_thumbnail_refresh: bool = False): """Loads images, sidecar data, and starts services.""" # Reset instrumentation for this load operation self._scan_count_variant = 0 @@ -828,10 +868,14 @@ def load(self): self._folder_loaded = True self.ui_state.isFolderLoadedChanged.emit() - if self._is_grid_view_active: - + if self._is_grid_view_active and not skip_thumbnail_refresh: + # Ensure grid model is populated if starting in grid mode - if self._thumbnail_model and self._grid_model_dirty and self._thumbnail_model.rowCount() == 0: + if ( + self._thumbnail_model + and self._grid_model_dirty + and self._thumbnail_model.rowCount() == 0 + ): self._grid_refreshes += 1 self._thumbnail_model.refresh_from_controller(self.image_files) self._path_resolver.update_from_model(self._thumbnail_model) @@ -861,7 +905,10 @@ def _request_watcher_refresh(self, path=None): if expiry: if now < expiry: if _debug_mode: - log.debug("Suppressing watcher refresh for recently deleted path: %s", path) + log.debug( + "Suppressing watcher refresh for recently deleted path: %s", + path, + ) return else: # Cleanup expired entry @@ -934,7 +981,7 @@ def _apply_filter_to_cached_list(self): meta = entries.get(stem) if not meta: continue - + # Check if all flags are present # EntryMetadata is a simple object, getattr is fast if all(getattr(meta, flag, False) for flag in flags): @@ -981,9 +1028,7 @@ def _reindex_after_save(self, saved_path: str) -> bool: self.current_index = i return True - log.warning( - "_reindex_after_save: could not find %s in list", saved_path - ) + log.warning("_reindex_after_save: could not find %s in list", saved_path) return False # --- Variant Badge Logic --- @@ -1011,7 +1056,7 @@ def get_variant_badges(self) -> list: result_badges = [] for badge in badges: b_copy = badge.copy() - b_copy["active"] = (badge["path"] == active_norm) + b_copy["active"] = badge["path"] == active_norm result_badges.append(b_copy) return result_badges @@ -1041,10 +1086,12 @@ def set_variant_override(self, path_str: str): # Bump generation to bust cache, trigger re-render self.ui_refresh_generation += 1 - + # Trigger decode for the new variant override_p = Path(self.view_override_path) if self.view_override_path else None - self._maybe_decode_current_image(reason="variant-switch", override_path=override_p) + self._maybe_decode_current_image( + reason="variant-switch", override_path=override_p + ) if self.ui_state: self.ui_state.variantBadgesChanged.emit() @@ -1065,11 +1112,13 @@ def _loupe_decode_allowed(self) -> bool: # Only decode full-res when we are in loupe mode and the folder is loaded. 
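The watcher-refresh suppression checked above (`if now < expiry: ... return`) is the read half of a small TTL map: deletion code stamps each path with `time.monotonic() + ttl` before moving files, and watcher events arriving before the stamp expires are ignored. A reduced sketch of the idea (`PathSuppressor` is hypothetical; the app keeps the map inline on the controller):

```python
import threading
import time


class PathSuppressor:
    """Ignore filesystem events for paths the app itself just touched."""

    def __init__(self, ttl: float = 2.0):
        self._ttl = ttl
        self._expiry: dict = {}  # normalized path -> monotonic deadline
        self._lock = threading.Lock()

    def suppress(self, key: str) -> None:
        # Stamp BEFORE starting I/O; watcher events can arrive immediately.
        with self._lock:
            self._expiry[key] = time.monotonic() + self._ttl

    def is_suppressed(self, key: str) -> bool:
        now = time.monotonic()
        with self._lock:
            deadline = self._expiry.get(key)
            if deadline is None:
                return False
            if now < deadline:
                return True
            del self._expiry[key]  # lazily drop the expired entry
            return False
```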
return (not self._is_grid_view_active) and self._folder_loaded - def _maybe_decode_current_image(self, reason: str = "", override_path: Optional[Path] = None) -> None: + def _maybe_decode_current_image( + self, reason: str = "", override_path: Optional[Path] = None + ) -> None: """Trigger decode of current image if allowed.""" if not self._loupe_decode_allowed(): return - + # Log reason for debugging if _debug_mode and reason: log.debug(f"Triggering decode: {reason}") @@ -1086,12 +1135,17 @@ def get_decoded_image(self, index: int) -> Optional[DecodedImage]: """ if not self._loupe_decode_allowed(): if _debug_mode: - log.debug("get_decoded_image called but loupe decode not allowed (index=%d)", index) + log.debug( + "get_decoded_image called but loupe decode not allowed (index=%d)", + index, + ) return None if self.debug_cache: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} get_decoded_image: START index={index}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} get_decoded_image: START index={index}" + ) if not self.image_files or index < 0 or index >= len(self.image_files): log.warning( @@ -1129,12 +1183,13 @@ def get_decoded_image(self, index: int) -> Optional[DecodedImage]: # Variant override: use override path for current index current_override = None - if ( - self.view_override_path - and index == self.current_index - ): + if self.view_override_path and index == self.current_index: if _debug_mode: - log.debug("DISPLAY OVERRIDE kind=%s path=%s", self.view_override_kind, self.view_override_path) + log.debug( + "DISPLAY OVERRIDE kind=%s path=%s", + self.view_override_kind, + self.view_override_path, + ) image_path = Path(self.view_override_path) current_override = image_path else: @@ -1152,7 +1207,9 @@ def get_decoded_image(self, index: int) -> Optional[DecodedImage]: if self.debug_cache: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} get_decoded_image: CACHE HIT index={index} total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} get_decoded_image: CACHE HIT index={index} total={(_t_end - _t_start)*1000:.2f}ms" + ) return decoded @@ -1167,7 +1224,9 @@ def get_decoded_image(self, index: int) -> Optional[DecodedImage]: ] cache_usage_gb = self.image_cache.currsize / (1024**3) _t_miss = time.perf_counter() - print(f"[DBGCACHE] {_t_miss*1000:.3f} get_decoded_image: CACHE MISS index={index} gen={display_gen} (after {(_t_miss - _t_start)*1000:.2f}ms)") + print( + f"[DBGCACHE] {_t_miss*1000:.3f} get_decoded_image: CACHE MISS index={index} gen={display_gen} (after {(_t_miss - _t_start)*1000:.2f}ms)" + ) log.info( "Cache miss for %s (index=%d gen=%d). Cached gens: %s. Cache usage=%.2fGB entries=%d", image_path.name, @@ -1238,7 +1297,9 @@ def get_decoded_image(self, index: int) -> Optional[DecodedImage]: log.info("Decoded image %d in %.3fs", index, elapsed) if self.debug_cache: _t_decoded = time.perf_counter() - print(f"[DBGCACHE] {_t_decoded*1000:.3f} get_decoded_image: DECODED index={index} total={(_t_decoded - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_decoded*1000:.3f} get_decoded_image: DECODED index={index} total={(_t_decoded - _t_start)*1000:.2f}ms" + ) return decoded else: if _debug_mode: @@ -1320,14 +1381,16 @@ def _get_decoded_image_safe(self, index: int) -> Optional[DecodedImage]: def sync_ui_state(self): """Forces the UI to update by emitting all state change signals. - + Essential signals (currentIndexChanged, currentImageSourceChanged) are emitted immediately. 
Non-essential signals (highlightStateChanged, metadataChanged) are debounced to reduce overhead during rapid navigation. """ if self.debug_cache: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} sync_ui_state: START gen={self.ui_refresh_generation + 1}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} sync_ui_state: START gen={self.ui_refresh_generation + 1}" + ) self.ui_refresh_generation += 1 self._metadata_cache_index = (-1, -1) # Invalidate cache @@ -1342,7 +1405,9 @@ def sync_ui_state(self): if self.debug_cache: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} sync_ui_state: DONE signals emitted, total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} sync_ui_state: DONE signals emitted, total={(_t_end - _t_start)*1000:.2f}ms" + ) log.debug( "UI State Synced: Index=%d, Count=%d", @@ -1352,9 +1417,14 @@ def sync_ui_state(self): def _emit_debounced_metadata_signals(self): """Emit deferred metadata/highlight signals after navigation stops.""" + if not self.ui_state: + return + if self.debug_cache: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} _emit_debounced_metadata_signals: emitting deferred signals") + print( + f"[DBGCACHE] {_t_start*1000:.3f} _emit_debounced_metadata_signals: emitting deferred signals" + ) self.ui_state.highlightStateChanged.emit() self.ui_state.metadataChanged.emit() @@ -1363,7 +1433,9 @@ def _emit_debounced_metadata_signals(self): if self.debug_cache: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} _emit_debounced_metadata_signals: DONE total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} _emit_debounced_metadata_signals: DONE total={(_t_end - _t_start)*1000:.2f}ms" + ) log.debug( "Metadata Synced: Filename=%s, Uploaded=%s, StackInfo='%s', BatchInfo='%s'", @@ -1390,15 +1462,17 @@ def _get_save_target_path_for_current_view(self) -> Optional[Path]: """ if not self.view_override_path: return None - + # Policy Change: When editing a "developed" variant (e.g. from RawTherapee), - # we want to save IN-PLACE (overwrite the developed file) rather than + # we want to save IN-PLACE (overwrite the developed file) rather than # overwriting the Main source file. This prevents accidental data loss/confusion. # Editing a "backup" still targets the Main file (restore behavior). 
if self.view_override_kind == "developed": return None - if self.current_index is not None and 0 <= self.current_index < len(self.image_files): + if self.current_index is not None and 0 <= self.current_index < len( + self.image_files + ): return self.image_files[self.current_index].path return None @@ -1424,7 +1498,7 @@ def save_edited_image(self): # Determine save_target_path for variant saves save_target_path = self._get_save_target_path_for_current_view() - + # Store save token to prevent "surprise close" if user navigates away during save self._save_initiated_path = self.image_editor.current_filepath @@ -1551,7 +1625,9 @@ def _set_current_index( """Centralized method to change current image index and reset state.""" if self.debug_cache: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} _set_current_index: START index={index} dir={direction}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} _set_current_index: START index={index} dir={direction}" + ) if index < 0 or index >= len(self.image_files): return @@ -1581,7 +1657,9 @@ def _set_current_index( if self.debug_cache: _t_prefetch = time.perf_counter() - print(f"[DBGCACHE] {_t_prefetch*1000:.3f} _set_current_index: calling _do_prefetch") + print( + f"[DBGCACHE] {_t_prefetch*1000:.3f} _set_current_index: calling _do_prefetch" + ) self._do_prefetch( self.current_index, is_navigation=is_navigation, direction=direction @@ -1589,7 +1667,9 @@ def _set_current_index( if self.debug_cache: _t_sync = time.perf_counter() - print(f"[DBGCACHE] {_t_sync*1000:.3f} _set_current_index: calling sync_ui_state (prefetch took {(_t_sync - _t_prefetch)*1000:.2f}ms)") + print( + f"[DBGCACHE] {_t_sync*1000:.3f} _set_current_index: calling sync_ui_state (prefetch took {(_t_sync - _t_prefetch)*1000:.2f}ms)" + ) self.sync_ui_state() @@ -1599,31 +1679,41 @@ def _set_current_index( if self.debug_cache: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} _set_current_index: DONE total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} _set_current_index: DONE total={(_t_end - _t_start)*1000:.2f}ms" + ) def next_image(self): if self.debug_cache: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} next_image: START from index={self.current_index}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} next_image: START from index={self.current_index}" + ) if self.current_index < len(self.image_files) - 1: self._set_current_index(self.current_index + 1, direction=1) if self.debug_cache: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} next_image: DONE total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} next_image: DONE total={(_t_end - _t_start)*1000:.2f}ms" + ) def prev_image(self): if self.debug_cache: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} prev_image: START from index={self.current_index}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} prev_image: START from index={self.current_index}" + ) if self.current_index > 0: self._set_current_index(self.current_index - 1, direction=-1) if self.debug_cache: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} prev_image: DONE total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} prev_image: DONE total={(_t_end - _t_start)*1000:.2f}ms" + ) @Slot(int) def jump_to_image(self, index: int): @@ -1680,7 +1770,6 @@ def jump_to_last_uploaded(self): else: self.update_status_message("No uploaded images found in this folder") - def 
show_jump_to_image_dialog(self): """Shows the jump to image dialog (called from keybinder).""" if self.main_window and hasattr(self.main_window, "show_jump_to_image_dialog"): @@ -1732,7 +1821,7 @@ def toggle_grid_view(self): @Slot() def refresh_grid(self): """Full manual refresh: Rescans the disk directory and rebuilds the grid view. - + This is a heavy operation that clears caches and rescans the filesystem. Use this only when you need to pick up external changes that the watcher might have missed. @@ -1763,13 +1852,15 @@ def _set_grid_view_active(self, active: bool): self._thumbnail_prefetcher.cancel_all() # Refresh the model if dirty or empty - needs_refresh = self._grid_model_dirty or self._thumbnail_model.rowCount() == 0 + needs_refresh = ( + self._grid_model_dirty or self._thumbnail_model.rowCount() == 0 + ) if needs_refresh: self._grid_refreshes += 1 - + # Always use controller's list, even if empty. self._thumbnail_model.refresh_from_controller(self.image_files) - + # Update path resolver for the current directory self._path_resolver.update_from_model(self._thumbnail_model) self._grid_model_dirty = False @@ -2188,11 +2279,26 @@ def get_current_metadata(self) -> Dict: filename = self.image_files[self.current_index].path.name if self.image_files[self.current_index].has_raw: # e.g. "image.JPG + ORF" - raw_ext = self.image_files[self.current_index].raw_path.suffix.lstrip(".").upper() + raw_ext = ( + self.image_files[self.current_index].raw_path.suffix.lstrip(".").upper() + ) filename += f" + {raw_ext}" + # EXIF brief: instant from cache, otherwise schedule deferred read. + # Always read EXIF from the variant group's main image (not backup/developed). + exif_key = self._exif_source_key(self.current_index) + exif_brief = self._exif_brief_cache.get(exif_key, None) + if exif_brief is None: + exif_brief = "" + # Only restart timer when the pending path actually changes, + # so repeated QML property evaluations don't keep resetting it. + if self._exif_pending_path != exif_key: + self._exif_pending_path = exif_key + self._exif_debounce_timer.start() + self._metadata_cache = { "filename": filename, + "exif_brief": exif_brief, "stacked": meta.stacked, "stacked_date": meta.stacked_date or "", "uploaded": meta.uploaded, @@ -2208,6 +2314,88 @@ def get_current_metadata(self) -> Dict: self._metadata_cache_index = cache_key return self._metadata_cache + _EXIF_SUFFIXES = frozenset( + {".jpg", ".jpeg", ".jpe", ".tif", ".tiff", ".heif", ".heic"} + ) + + def _exif_source_key(self, index: int) -> str: + """Return a normalized cache key for the EXIF source of image at *index*. + + Resolves to the variant group's main image path when available, + so backups/developed files use the same EXIF as the original. + Uses ``normalize_path_key`` for Windows case-insensitivity. + """ + img = self.image_files[index] + source_path = img.path + key_cf = get_group_key_for_path(img.path, self._variant_map) + if key_cf is not None: + group = self._variant_map[key_cf] + if group.main_path is not None: + source_path = group.main_path + return normalize_path_key(source_path) + + def _read_exif_deferred(self): + """Called after 150ms of no navigation. 
Submits EXIF read to background.""" + if getattr(self, "_shutting_down", False): + return + if not self.image_files or self.current_index >= len(self.image_files): + return + exif_key = self._exif_source_key(self.current_index) + if self._exif_pending_path is not None and self._exif_pending_path != exif_key: + self._exif_pending_path = None + return # Different image path pending, or we aren't tracking this key. + + if exif_key in self._exif_brief_cache: + self._exif_pending_path = None + return # already cached + # Resolve the actual file path for the EXIF source + img = self.image_files[self.current_index] + source_path = img.path + key_cf = get_group_key_for_path(img.path, self._variant_map) + if key_cf is not None: + group = self._variant_map[key_cf] + if group.main_path is not None: + source_path = group.main_path + # Early return for formats without EXIF support + if source_path.suffix.lower() not in self._EXIF_SUFFIXES: + self._exif_brief_cache[exif_key] = "" + self._exif_pending_path = None + return + # Submit to dedicated EXIF executor to avoid blocking histograms + signal = self._exifBriefReady + + def _worker(key=exif_key, p=str(source_path)): + from faststack.imaging.metadata import get_exif_brief + + try: + brief = get_exif_brief(p) + except Exception as e: + log.error(f"Failed to get EXIF brief for {p}: {e}", exc_info=True) + brief = "" + signal.emit(key, brief) + + try: + self._exif_executor.submit(_worker) + except RuntimeError: + pass # executor shut down, ignore + + def _on_exif_brief_ready(self, exif_key: str, brief: str): + """Slot called on main thread when background EXIF read completes.""" + if getattr(self, "_shutting_down", False) or not self.ui_state: + return + self._exif_brief_cache[exif_key] = brief + if self._exif_pending_path == exif_key: + self._exif_pending_path = None + # Only refresh UI if we're still looking at this image + if ( + self.image_files + and self.current_index < len(self.image_files) + and self._exif_source_key(self.current_index) == exif_key + ): + self._metadata_cache_index = (-1, -1) + if self.ui_state: + self.ui_state.metadataChanged.emit() + def begin_new_stack(self): self.stack_start_index = self.current_index log.info("Stack start marked at index %d", self.stack_start_index) @@ -2279,7 +2467,7 @@ def grid_add_selection_to_batch(self): # Build path -> index map for the main image list # 1. Rebuild index mapping self._rebuild_path_to_index() - + # 2. 
Find indices for selected paths indices_to_add = [] for path in selected_paths: @@ -2287,8 +2475,6 @@ def grid_add_selection_to_batch(self): if idx is not None: indices_to_add.append(idx) - - if not indices_to_add: self.update_status_message("Selected images not found in current list.") return @@ -2403,7 +2589,11 @@ def add_uploaded_to_batch(self): meta = self.sidecar.get_metadata(img.path.stem) if not meta: continue - uploaded = meta.get("uploaded", False) if isinstance(meta, dict) else getattr(meta, "uploaded", False) + uploaded = ( + meta.get("uploaded", False) + if isinstance(meta, dict) + else getattr(meta, "uploaded", False) + ) if uploaded: indices_to_add.append(i) @@ -2838,8 +3028,6 @@ def _launch_helicon_with_files(self, files: List[Path]) -> bool: return success - - def clear_all_stacks(self): log.info("Clearing all defined stacks.") self.stacks = [] @@ -3311,6 +3499,8 @@ def _switch_to_directory( self.display_generation += 1 self._metadata_cache = {} self._metadata_cache_index = (-1, -1) + self._exif_brief_cache.clear() + self._exif_pending_path = None # Clear batch indices cache (avoids stale batch membership checks) if hasattr(self, "_batch_indices_cache"): @@ -3520,42 +3710,6 @@ def _move_to_recycle(src: Path, _created_bins: set | None = None) -> Optional[Pa log.error("Failed to recycle %s: %s", src.name, e) return None - def _shutdown_executors(self) -> None: - """Shutdown thread pools and clean up pending jobs.""" - log.info("Shutting down executors...") - self._shutting_down = True - - # Clear pending jobs and remove associated undo placeholders - if self._pending_delete_jobs: - log.info("Clearing %d pending delete jobs on shutdown", len(self._pending_delete_jobs)) - pending_ids = set(self._pending_delete_jobs.keys()) - self._pending_delete_jobs.clear() - self.undo_history = [ - entry for entry in self.undo_history - if not (entry[0] == "pending_delete" and entry[1] in pending_ids) - ] - - # Shutdown all known executors - # Use wait=False to avoid hanging UI shutdown on long operations - for executor in [ - self._delete_executor, - self._hist_executor, - self._save_executor, - self._preview_executor - ]: - if executor: - executor.shutdown(wait=False, cancel_futures=True) - - # Shutdown prefetchers (they own their own thread pools) - try: - self.prefetcher.shutdown() - except Exception: - pass - try: - if getattr(self, "_thumbnail_prefetcher", None): - self._thumbnail_prefetcher.shutdown() - except Exception: - pass @staticmethod def _perm_delete_worker( @@ -3625,26 +3779,34 @@ def _delete_worker( # images_to_delete MUST be List[Tuple[Path, Optional[Path]]] # If item is (0, (path, raw)), it's a nested structure from incorrect calling code. if not isinstance(item, (tuple, list)) or len(item) != 2: - log.error("CRITICAL: _delete_worker received invalid item format: %r", item) - failures.append({ - "jpg": None, - "raw": None, - "code": DeletionErrorCodes.INVALID_WORK_ITEM.value, - }) + log.error( + "CRITICAL: _delete_worker received invalid item format: %r", item + ) + failures.append( + { + "jpg": None, + "raw": None, + "code": DeletionErrorCodes.INVALID_WORK_ITEM.value, + } + ) continue jpg_path, raw_path = item - + # Robustness: if raw_path is a tuple/list, we have a nested structure error. # This is a hard error — record failure and skip rather than silently recovering, # which would mask upstream bugs. 
if isinstance(raw_path, (tuple, list)): - log.error("CRITICAL: _delete_worker received nested tuple item: %r", item) - failures.append({ - "jpg": str(jpg_path) if jpg_path else None, - "raw": None, - "code": DeletionErrorCodes.INVALID_WORK_ITEM.value, - }) + log.error( + "CRITICAL: _delete_worker received nested tuple item: %r", item + ) + failures.append( + { + "jpg": str(jpg_path) if jpg_path else None, + "raw": None, + "code": DeletionErrorCodes.INVALID_WORK_ITEM.value, + } + ) continue processed_count += 1 @@ -3653,61 +3815,71 @@ def _delete_worker( try: recycled_jpg = AppController._move_to_recycle(jpg_path, created_bins) if not recycled_jpg: - failures.append({ - "jpg": jpg_path, - "raw": raw_path, - "code": DeletionErrorCodes.RECYCLE_FAILED.value - }) + failures.append( + { + "jpg": jpg_path, + "raw": raw_path, + "code": DeletionErrorCodes.RECYCLE_FAILED.value, + } + ) continue recycled_raw = None if actual_raw_exists: try: - recycled_raw = AppController._move_to_recycle(raw_path, created_bins) + recycled_raw = AppController._move_to_recycle( + raw_path, created_bins + ) if not recycled_raw: raise OSError("RAW move failed") except OSError as e: log.warning("RAW recycle failed for %s: %s", raw_path.name, e) - warnings.append({ - "jpg": jpg_path, - "raw": raw_path, - "message": str(e) - }) - - successes.append({ - "jpg": jpg_path, - "recycled_jpg": recycled_jpg, - "raw": raw_path, - "recycled_raw": recycled_raw - }) + warnings.append( + {"jpg": jpg_path, "raw": raw_path, "message": str(e)} + ) + + successes.append( + { + "jpg": jpg_path, + "recycled_jpg": recycled_jpg, + "raw": raw_path, + "recycled_raw": recycled_raw, + } + ) except PermissionError: log.warning("Permission denied deleting %s", jpg_path.name) - failures.append({ - "jpg": jpg_path, - "raw": raw_path, - "code": DeletionErrorCodes.PERMISSION_DENIED.value - }) + failures.append( + { + "jpg": jpg_path, + "raw": raw_path, + "code": DeletionErrorCodes.PERMISSION_DENIED.value, + } + ) except OSError as e: # Check for "trash full" or similar OS errors if distinguishable, # otherwise treat as generic recycle failure or unknown. # Windows "trash full" is hard to detect reliably without win32 api, # but we can at least capture the message. 
log.warning("OSError deleting %s: %s", jpg_path.name, e) - failures.append({ - "jpg": jpg_path, - "raw": raw_path, - "code": DeletionErrorCodes.RECYCLE_FAILED.value, # Fallback to recycle failed - "message": str(e) - }) + failures.append( + { + "jpg": jpg_path, + "raw": raw_path, + "code": DeletionErrorCodes.RECYCLE_FAILED.value, # Fallback to recycle failed + "message": str(e), + } + ) except Exception as e: log.warning("Recycle exception for %s: %s", jpg_path.name, e) - failures.append({ - "jpg": jpg_path, - "raw": raw_path, - "code": DeletionErrorCodes.UNKNOWN.value, - "message": str(e) - }) + failures.append( + { + "jpg": jpg_path, + "raw": raw_path, + "code": DeletionErrorCodes.UNKNOWN.value, + "message": str(e), + } + ) # Record unprocessed items (skipped due to cancellation) if did_cancel and cancel_index >= 0: @@ -3716,13 +3888,15 @@ def _delete_worker( # Re-validate shape to prevent crashes on invalid items if not isinstance(item, (tuple, list)) or len(item) != 2: continue - + jpg_path, raw_path = item - failures.append({ - "jpg": jpg_path, - "raw": raw_path, - "code": DeletionErrorCodes.CANCELLED.value - }) + failures.append( + { + "jpg": jpg_path, + "raw": raw_path, + "code": DeletionErrorCodes.CANCELLED.value, + } + ) # Convert all Path objects to str before crossing signal boundary. # _normalize_worker_results converts back to Path on the UI thread. @@ -3759,21 +3933,25 @@ def _on_delete_finished(self, result_dict: dict) -> None: if job: # Remove pending_delete placeholders from undo history self.undo_history = [ - entry for entry in self.undo_history + entry + for entry in self.undo_history if not (entry[0] == "pending_delete" and entry[1] == job.job_id) ] else: # Job might have been popped by undo_delete logic already? # Or this is a stray signal. - log.warning("Delete job %d completed but not found in pending jobs", result.job_id) + log.warning( + "Delete job %d completed but not found in pending jobs", result.job_id + ) return - # --- Phase 1.5: Handle Perm Delete Result --- if result.is_perm_result: if result.perm_success: - self.update_status_message(f"Permanently deleted {len(result.perm_success)} images") - + self.update_status_message( + f"Permanently deleted {len(result.perm_success)} images" + ) + # Update suppression for permanent deletes (prevent watcher re-scans) ttl = 2.0 now = time.monotonic() @@ -3783,7 +3961,7 @@ def _on_delete_finished(self, result_dict: dict) -> None: self._suppressed_paths[self._key(img.path)] = now + ttl if img.raw_pair: self._suppressed_paths[self._key(img.raw_pair)] = now + ttl - + if result.perm_fail: # Rollback failures (they have original indices) # Note: job context is required for rollback (restores index/batches/focus) @@ -3793,9 +3971,9 @@ def _on_delete_finished(self, result_dict: dict) -> None: self.sync_ui_state() self._schedule_delete_refresh() return - + # --- Phase 2: Apply Results --- - + # 2a. Update suppression (prevent watcher loops for moved files) # Opportunistic cleanup of expired suppression entries ttl = 2.0 @@ -3817,25 +3995,28 @@ def _on_delete_finished(self, result_dict: dict) -> None: # If user hit Undo while worker was running, we must restore any moved files immediately. 
if job.undo_requested: if result.successes: - log.info("Job %d was undone mid-flight; auto-restoring %d moved files", - job.job_id, len(result.successes)) + log.info( + "Job %d was undone mid-flight; auto-restoring %d moved files", + job.job_id, + len(result.successes), + ) self._auto_restore_moved_files(result.successes) # Do NOT record history. # Failures/Cancelled items are already handled by the undo_delete logic (restored to UI) # or simply ignored because they never moved. - + # Update status self.update_status_message("Deletion cancelled (files restored)") self._schedule_delete_refresh() return # 2c. Normal Completion: Record History & Handle Failures - + # Track recycle bins for s in result.successes: if s.recycled_jpg: self.active_recycle_bins.add(s.recycled_jpg.parent) - + # Log warnings for w in result.warnings: log.warning("Partial delete warning for %s: %s", w.jpg, w.message) @@ -3853,7 +4034,7 @@ def _on_delete_finished(self, result_dict: dict) -> None: self._handle_delete_failures(result, job) # --- Phase 3: Post Actions --- - + # Status Message count = len(result.successes) if count > 0: @@ -3864,7 +4045,9 @@ def _on_delete_finished(self, result_dict: dict) -> None: msg = "Image moved to recycle bin" self.update_status_message(msg) elif result.failures: - self.update_status_message("Deletion cancelled" if result.cancelled else "Delete failed") + self.update_status_message( + "Deletion cancelled" if result.cancelled else "Delete failed" + ) self._schedule_delete_refresh() @@ -3875,13 +4058,16 @@ def _auto_restore_moved_files(self, successes: List[DeleteRecord]) -> None: # Restore JPG if s.jpg and s.recycled_jpg: ok, reason = self._restore_from_recycle_bin_safe(s.jpg, s.recycled_jpg) - if ok: restored += 1 - else: log.error("Failed to auto-restore JPG %s: %s", s.jpg, reason) - + if ok: + restored += 1 + else: + log.error("Failed to auto-restore JPG %s: %s", s.jpg, reason) + # Restore RAW if s.raw and s.recycled_raw: ok, reason = self._restore_from_recycle_bin_safe(s.raw, s.recycled_raw) - if not ok: log.error("Failed to auto-restore RAW %s: %s", s.raw, reason) + if not ok: + log.error("Failed to auto-restore RAW %s: %s", s.raw, reason) def _handle_delete_failures(self, result: DeleteResult, job: DeleteJob) -> None: """Handle items that failed to delete. 
Rollback UI or prompt for perm delete.""" @@ -3891,7 +4077,7 @@ def _handle_delete_failures(self, result: DeleteResult, job: DeleteJob) -> None: # Identify which UI items failed (map back using paths) # Note: We use the _key() mapping to ensure we match robustly failed_keys = {self._key(f.jpg) for f in result.failures if f.jpg} - + failed_indices_and_imgs = [] for idx, img in job.removed_items: if self._key(img.path) in failed_keys: @@ -3901,48 +4087,54 @@ def _handle_delete_failures(self, result: DeleteResult, job: DeleteJob) -> None: return # Check if we should offer permanent delete (recycle bin error) - perm_candidates = [] # List of (idx, img) - + perm_candidates = [] # List of (idx, img) + # Helper to find if a specific failure code warrants perm delete recycle_codes = { DeletionErrorCodes.RECYCLE_FAILED.value, DeletionErrorCodes.PERMISSION_DENIED.value, - DeletionErrorCodes.TRASH_FULL.value + DeletionErrorCodes.TRASH_FULL.value, } - + # Map failure code by key for easy lookup failure_map = {self._key(f.jpg): f for f in result.failures if f.jpg} for idx, img in failed_indices_and_imgs: - f = failure_map.get(self._key(img.path)) - if f and f.code in recycle_codes: - perm_candidates.append((idx, img)) + f = failure_map.get(self._key(img.path)) + if f and f.code in recycle_codes: + perm_candidates.append((idx, img)) if perm_candidates: # Prompt user for permanent delete - + # 1. Rollback non-candidates first candidate_keys = {self._key(img.path) for _, img in perm_candidates} - to_rollback = [(i, img) for i, img in failed_indices_and_imgs if self._key(img.path) not in candidate_keys] - + to_rollback = [ + (i, img) + for i, img in failed_indices_and_imgs + if self._key(img.path) not in candidate_keys + ] + if to_rollback: self._rollback_ui_items(to_rollback, job) # 2. 
Ask user candidate_imgs = [img for _, img in perm_candidates] - + reason = "Recycle bin failure" confirmed = False if len(candidate_imgs) == 1: confirmed = confirm_permanent_delete(candidate_imgs[0], reason=reason) else: - confirmed = confirm_batch_permanent_delete(candidate_imgs, reason=reason) + confirmed = confirm_batch_permanent_delete( + candidate_imgs, reason=reason + ) if confirmed: # ASYNC permanent delete # Put job back in pending map so _on_delete_finished can find it again self._pending_delete_jobs[job.job_id] = job - + # Define callback to bridge back to main thread def _on_perm_done(future): try: @@ -3953,12 +4145,10 @@ def _on_perm_done(future): log.error("Perm delete worker exception: %s", e) fut = self._delete_executor.submit( - self._perm_delete_worker, - job.job_id, - perm_candidates + self._perm_delete_worker, job.job_id, perm_candidates ) fut.add_done_callback(_on_perm_done) - + self.update_status_message("Permanently deleting files...") # Return EARLY so we don't rebuild index/sync UI yet return @@ -3980,19 +4170,19 @@ def _rollback_ui_items(self, items: List[Tuple[int, Any]], job: DeleteJob) -> No # Access attributes of DeleteJob for idx, img in sorted(items, key=lambda x: x[0], reverse=True): self.image_files.insert(min(idx, len(self.image_files)), img) - + # Restore selection/focus (approximated) self.current_index = min(job.previous_index, len(self.image_files) - 1) self.display_generation += 1 # Targeted cache invalidation instead of full clear if self.image_cache is not None: - paths_to_invalidate = [] - for _, img in items: - paths_to_invalidate.append(img.path) - if img.raw_pair: - paths_to_invalidate.append(img.raw_pair) - self.image_cache.evict_paths(paths_to_invalidate) + paths_to_invalidate = [] + for _, img in items: + paths_to_invalidate.append(img.path) + if img.raw_pair: + paths_to_invalidate.append(img.raw_pair) + self.image_cache.evict_paths(paths_to_invalidate) if self._thumbnail_model: # Restore model rows (simple refresh for correctness) @@ -4007,16 +4197,13 @@ def _rollback_ui_items(self, items: List[Tuple[int, Any]], job: DeleteJob) -> No self.batch_start_index = job.saved_batch_start_index self._invalidate_batch_cache() - - - - def _schedule_delete_refresh(self) -> None: """Debounce post-delete refresh: coalesce rapid deletes into one refresh.""" if self._refresh_scheduled: return self._refresh_scheduled = True from PySide6.QtCore import QTimer + QTimer.singleShot(200, self._fire_delete_refresh) def _fire_delete_refresh(self) -> None: @@ -4026,27 +4213,29 @@ def _fire_delete_refresh(self) -> None: def _do_delete_refresh(self) -> None: """Perform user-interface refresh (debounce ended). - + Optimized: No longer performs a full disk scan (refresh_image_list). Relies on optimistic UI updates already performed in _delete_indices. Watcher events handle any true drift (external changes). """ t_start = time.perf_counter() - + # Coalesce with watcher: if we are doing a delete refresh, we don't # need a separate watcher refresh immediately after. 
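`_schedule_delete_refresh` above pairs a guard flag with `QTimer.singleShot` so any number of rapid deletes collapse into one deferred refresh; the flag is cleared when the shot fires, allowing the next burst to re-arm it. The same pattern in isolation, assuming PySide6 (`CoalescedRefresh` is a hypothetical name):

```python
from PySide6.QtCore import QObject, QTimer


class CoalescedRefresh(QObject):
    """Collapse rapid refresh requests into a single deferred call."""

    def __init__(self, refresh_fn, delay_ms=200, parent=None):
        super().__init__(parent)
        self._refresh_fn = refresh_fn
        self._delay_ms = delay_ms
        self._scheduled = False

    def request(self):
        if self._scheduled:
            return  # a refresh is already queued; this request rides along
        self._scheduled = True
        QTimer.singleShot(self._delay_ms, self._fire)

    def _fire(self):
        self._scheduled = False  # clear first so the refresh itself may re-arm
        self._refresh_fn()
```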
self._watcher_debounce_timer.stop() - + clear_raw_count_cache() self._rebuild_path_to_index() # Update the path resolver to reflect current model state if self._thumbnail_model and hasattr(self, "_path_resolver"): self._path_resolver.update_from_model(self._thumbnail_model) - + dt = time.perf_counter() - t_start if _debug_mode: - log.info("delete_refresh took %.4fs for %d images", dt, len(self.image_files)) + log.info( + "delete_refresh took %.4fs for %d images", dt, len(self.image_files) + ) def _delete_indices(self, indices: List[int], action_type: str) -> dict: """Unified core deletion engine for FastStack. @@ -4120,7 +4309,7 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict: paths_to_evict.append(img.path) if img.raw_pair: paths_to_evict.append(img.raw_pair) - + # Use new targeted eviction with tombstones self.image_cache.evict_paths(paths_to_evict) @@ -4136,28 +4325,30 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict: if self._thumbnail_model: del_paths = [img.path for img in images_to_delete] self._thumbnail_model.remove_rows_by_path(del_paths) - + # Diagnostic: check synchronization between controller and model if _debug_mode: img_count = len(self.image_files) model_rows = self._thumbnail_model.rowCount() folder_count = getattr(self._thumbnail_model, "folder_count", 0) - + log.debug( "Sync Check (delete): controller=%d, model=%d", img_count, - model_rows + model_rows, ) log.debug( "Sync Breakdown: images=%d, folders=%d, model_rows=%d", img_count, folder_count, - model_rows + model_rows, ) # Pre-suppress watcher events for these soon-to-be-moved/deleted paths. # Must happen BEFORE the worker starts I/O, because watchdog events can arrive immediately. - ttl = 2.0 # seconds; plenty to cover os.replace/shutil.move and watchdog delivery + ttl = ( + 2.0 # seconds; plenty to cover os.replace/shutil.move and watchdog delivery + ) now = time.monotonic() with self._suppressed_paths_lock: for img in images_to_delete: @@ -4191,7 +4382,9 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict: log.info( "Delete enqueued: job_id=%d, type='%s', count=%d", - job_id, action_type, len(images_to_delete), + job_id, + action_type, + len(images_to_delete), ) # Submit to background executor @@ -4202,18 +4395,27 @@ def _on_worker_done(fut): except Exception as e: log.error("Delete worker failed: %s", e) # Emit a failure result so completion handler can rollback - self._deleteFinished.emit({ - "job_id": job_id, - "successes": [], - "failures": [ - {"jpg": str(p) if p else None, "raw": str(r) if r else None, "code": str(e)} - for p, r in worker_items - ], - "cancelled": False, - }) + self._deleteFinished.emit( + { + "job_id": job_id, + "successes": [], + "failures": [ + { + "jpg": str(p) if p else None, + "raw": str(r) if r else None, + "code": str(e), + } + for p, r in worker_items + ], + "cancelled": False, + } + ) fut = self._delete_executor.submit( - self._delete_worker, job_id, worker_items, cancel_event, + self._delete_worker, + job_id, + worker_items, + cancel_event, ) fut.add_done_callback(_on_worker_done) @@ -4275,7 +4477,9 @@ def delete_batch_images(self): job_id = summary["job_id"] if job_id in self._pending_delete_jobs: self._pending_delete_jobs[job_id].saved_batches = saved_batches - self._pending_delete_jobs[job_id].saved_batch_start_index = saved_batch_start + self._pending_delete_jobs[job_id].saved_batch_start_index = ( + saved_batch_start + ) self.batches = [] self.batch_start_index = None @@ -4455,12 +4659,12 @@ def 
undo_delete(self): self.display_generation += 1 # Targeted eviction instead of full clear if self.image_cache is not None: - paths_to_evict = [] - for _, img in removed_items: - paths_to_evict.append(img.path) - if img.raw_pair: - paths_to_evict.append(img.raw_pair) - self.image_cache.evict_paths(paths_to_evict) + paths_to_evict = [] + for _, img in removed_items: + paths_to_evict.append(img.path) + if img.raw_pair: + paths_to_evict.append(img.raw_pair) + self.image_cache.evict_paths(paths_to_evict) self.prefetcher.cancel_all() if self.image_files: self.prefetcher.update_prefetch(self.current_index) @@ -4471,7 +4675,9 @@ def undo_delete(self): self.update_status_message( f"Cancel requested... restoring view ({count} item{'s' if count > 1 else ''})" ) - log.info("Undo cancelled pending delete job %d (%d items)", job_id, count) + log.info( + "Undo cancelled pending delete job %d (%d items)", job_id, count + ) else: # Job already completed — find the corresponding "delete" entries # in undo_history and undo the last one @@ -4610,15 +4816,21 @@ def undo_delete(self): def shutdown_qt(self): """Shutdown Qt objects only - MUST run on main/Qt thread.""" self._shutting_down = True # set EARLY to make all slots no-op + self._exif_pending_path = None log.info("Application shutting down (Qt cleanup).") # Stop Qt timers try: self._metadata_debounce_timer.stop() + self._exif_debounce_timer.stop() + self._watcher_debounce_timer.stop() + self.histogram_timer.stop() + self.resize_timer.stop() + if hasattr(self, "_thumb_summary_timer"): + self._thumb_summary_timer.stop() except Exception: pass - # Stop QFileSystemWatcher if it's Qt-based try: self.watcher.stop() @@ -4662,11 +4874,33 @@ def shutdown_nonqt(self): """Shutdown non-Qt resources - safe to run in background thread.""" log.info("Shutting down background resources.") + self._shutting_down = True # gate async callbacks during shutdown_nonqt too + self._exif_pending_path = None # optional but consistent with shutdown_qt + + # Clear pending delete jobs and remove associated undo placeholders + if self._pending_delete_jobs: + log.info( + "Clearing %d pending delete jobs on shutdown", + len(self._pending_delete_jobs), + ) + pending_ids = set(self._pending_delete_jobs.keys()) + self._pending_delete_jobs.clear() + self.undo_history = [ + entry + for entry in self.undo_history + if not (entry[0] == "pending_delete" and entry[1] in pending_ids) + ] + # Shutdown thread pool executors try: log.info("Shutting down background executors...") self._hist_executor.shutdown(wait=False, cancel_futures=True) self._preview_executor.shutdown(wait=False, cancel_futures=True) + + exif_exec = getattr(self, "_exif_executor", None) + if exif_exec: + exif_exec.shutdown(wait=False, cancel_futures=True) + # wait=True ensures pending saves/deletes complete to avoid data loss/corruption self._save_executor.shutdown(wait=True, cancel_futures=False) self._delete_executor.shutdown(wait=True, cancel_futures=False) @@ -4697,7 +4932,9 @@ def shutdown_nonqt(self): # Clean up temporary files (e.g. Helicon Focus lists) if self._temp_files_to_clean: - log.debug("Cleaning up %d temporary files...", len(self._temp_files_to_clean)) + log.debug( + "Cleaning up %d temporary files...", len(self._temp_files_to_clean) + ) for tmp_path in self._temp_files_to_clean: try: tmp_path.unlink(missing_ok=True) @@ -4743,7 +4980,9 @@ def _on_cache_evict(self, key, value): # 1. 
Record eviction timestamp / prune self._eviction_timestamps.append(now) cutoff = now - CACHE_THRASH_WINDOW_SECS - self._eviction_timestamps = [t for t in self._eviction_timestamps if t > cutoff] + self._eviction_timestamps = [ + t for t in self._eviction_timestamps if t > cutoff + ] # 2. Check for thrashing (e.g., > threshold evictions in window) if len(self._eviction_timestamps) > CACHE_THRASH_THRESHOLD: @@ -4755,8 +4994,17 @@ def _on_cache_evict(self, key, value): # UI update logic used_gb = self.image_cache.currsize / (1024**3) max_gb = self.image_cache.max_bytes / (1024**3) - msg = f"Cache thrashing! {len(self._eviction_timestamps)} evictions in {CACHE_THRASH_WINDOW_SECS}s. Usage: {used_gb:.1f}GB / {max_gb:.1f}GB." - + + # Include key/value summary to fix lint error and provide context + val_summary = "" + if hasattr(value, "width") and hasattr(value, "height"): + val_summary = f" ({value.width}x{value.height})" + elif hasattr(value, "__len__"): + val_summary = f" (len={len(value)})" + + key_name = getattr(key, "name", str(key)) + msg = f"Cache thrashing! {len(self._eviction_timestamps)} evictions in {CACHE_THRASH_WINDOW_SECS}s. Evicted {key_name}{val_summary}. Usage: {used_gb:.1f}GB / {max_gb:.1f}GB." + # Schedule UI work safely on main thread # QTimer.singleShot(0, ...) is thread-safe entry to main loop QTimer.singleShot(0, lambda: self.update_status_message(msg)) @@ -6162,9 +6410,13 @@ def auto_levels(self): # Build detail message if p_high >= 255.0: - msg = f"Auto levels: highlights clipped; shadows only (blacks {blacks:+.1f})" + msg = ( + f"Auto levels: highlights clipped; shadows only (blacks {blacks:+.1f})" + ) elif p_low <= 0.0: - msg = f"Auto levels: shadows clipped; highlights only (whites {whites:+.1f})" + msg = ( + f"Auto levels: shadows clipped; highlights only (whites {whites:+.1f})" + ) else: gain = 255.0 / dynamic_range msg = ( @@ -6523,9 +6775,7 @@ def quick_auto_white_balance(self): int((t_list - t_save) * 1000), total_ms, ) - self.update_status_message( - f"{detail_msg} \u2014 saved ({total_ms} ms)" - ) + self.update_status_message(f"{detail_msg} \u2014 saved ({total_ms} ms)") log.info("Quick auto white balance applied to %s", filepath) else: self.update_status_message("Failed to save image") @@ -6647,11 +6897,17 @@ def auto_white_balance_lab(self) -> Optional[str]: h, w = img_arr.shape[:2] TARGET_PIXELS = 2_000_000 stride = max(1, int(np.sqrt(h * w / TARGET_PIXELS))) - sub = np.ascontiguousarray(img_arr[::stride, ::stride]) # contiguous for cv2 + sub = np.ascontiguousarray( + img_arr[::stride, ::stride] + ) # contiguous for cv2 arr = (np.clip(sub, 0.0, 1.0) * 255).astype(np.uint8) log.debug( "AWB: subsampled %dx%d -> %dx%d (stride %d)", - w, h, arr.shape[1], arr.shape[0], stride, + w, + h, + arr.shape[1], + arr.shape[0], + stride, ) else: # Fallback: use original_image (full PIL Image) @@ -6758,7 +7014,8 @@ def auto_white_balance_lab(self) -> Optional[str]: int((t_awb_mask - t_awb_subsample) * 1000), int((t_awb_end - t_awb_mask) * 1000), int((t_awb_end - t_awb_start) * 1000), - arr.shape[1], arr.shape[0], + arr.shape[1], + arr.shape[0], ) self.update_status_message(msg) return msg @@ -6905,9 +7162,10 @@ def main( debug_thumb_trace: bool = False, ): """FastStack Application Entry Point""" - global _debug_mode, _debug_thumb_timing + global _debug_mode, _debug_thumb_timing, _debug_thumb_trace _debug_mode = debug _debug_thumb_timing = debug_thumb_timing + _debug_thumb_trace = debug_thumb_trace t0 = time.perf_counter() setup_logging(debug) @@ -6924,6 +7182,7 @@ def main( 
# Enable Ctrl-C to terminate the application import signal + signal.signal(signal.SIGINT, lambda *args: app.quit()) # Ensure Python's signal handler runs (Qt blocks main thread) timer = QTimer() @@ -7013,7 +7272,10 @@ def main( # all of which can run after the first event loop iteration. QTimer.singleShot(0, controller.load) if debug: - log.info("Startup: controller.load() deferred to event loop (%.3fs to window)", time.perf_counter() - t0) + log.info( + "Startup: controller.load() deferred to event loop (%.3fs to window)", + time.perf_counter() - t0, + ) # Graceful shutdown with timeout fallback import threading @@ -7023,10 +7285,9 @@ def _log_live_threads(tag: str): """Log non-daemon threads for debugging shutdown hangs.""" threads = threading.enumerate() alive = [ - t for t in threads - if t.is_alive() - and not t.daemon - and t.name != "MainThread" + t + for t in threads + if t.is_alive() and not t.daemon and t.name != "MainThread" ] if not alive: return @@ -7057,11 +7318,11 @@ def _shutdown_with_timeout(): # Run Qt cleanup on main thread controller.shutdown_qt() - + # Consolidated shutdown for all thread pools and pending jobs # This replaces previous ad-hoc shutdown logic controller.shutdown_nonqt() - + _log_live_threads("after shutdown_executors") finally: diff --git a/faststack/check_daemon.py b/faststack/check_daemon.py index 8e03bc6..e8a3de3 100644 --- a/faststack/check_daemon.py +++ b/faststack/check_daemon.py @@ -1,6 +1,7 @@ import threading from concurrent.futures import ThreadPoolExecutor + def set_daemon(): try: threading.current_thread().daemon = True @@ -8,8 +9,12 @@ def set_daemon(): except Exception as e: print(f"Failed to set daemon for {threading.current_thread().name}: {e}") + def check_daemon(): return threading.current_thread().daemon -with ThreadPoolExecutor(max_workers=1, initializer=set_daemon) as executor: - print(f"Result: {executor.submit(check_daemon).result()}") + +if __name__ == "__main__": + with ThreadPoolExecutor(max_workers=1, initializer=set_daemon) as executor: + print(f"Result: {executor.submit(check_daemon).result()}") + diff --git a/faststack/debug_path_norm.py b/faststack/debug_path_norm.py deleted file mode 100644 index 40f8107..0000000 --- a/faststack/debug_path_norm.py +++ /dev/null @@ -1,22 +0,0 @@ - -import os -from pathlib import Path - -def norm_path(p: Path) -> str: - return os.path.normcase(os.path.abspath(str(p))) - -p1 = Path("C:/Test/File.JPG") -p2 = Path("C:/Test/file.jpg") - -print(f"p1: {p1}") -print(f"p2: {p2}") -print(f"p1 == p2: {p1 == p2}") - -n1 = Path(norm_path(p1)) -n2 = Path(norm_path(p2)) - -print(f"n1: {n1}") -print(f"n2: {n2}") -print(f"n1 == n2: {n1 == n2}") -print(f"n1 == p1: {n1 == p1}") - diff --git a/faststack/deletion_types.py b/faststack/deletion_types.py index 77afa42..67940a5 100644 --- a/faststack/deletion_types.py +++ b/faststack/deletion_types.py @@ -11,11 +11,12 @@ from typing import Any, List, Optional, Tuple - from enum import Enum + class DeletionErrorCodes(str, Enum): """Standardized error codes for deletion failures.""" + RECYCLE_FAILED = "recycle_failed" PERMISSION_DENIED = "permission_denied" TRASH_FULL = "trash_full" @@ -115,29 +116,35 @@ def _to_path(v): successes = [] for s in raw.get("successes", []): - successes.append(DeleteRecord( - jpg=_to_path(s.get("jpg")), - recycled_jpg=_to_path(s.get("recycled_jpg")), - raw=_to_path(s.get("raw")), - recycled_raw=_to_path(s.get("recycled_raw")), - )) + successes.append( + DeleteRecord( + jpg=_to_path(s.get("jpg")), + 
recycled_jpg=_to_path(s.get("recycled_jpg")), + raw=_to_path(s.get("raw")), + recycled_raw=_to_path(s.get("recycled_raw")), + ) + ) warnings = [] for w in raw.get("warnings", []): - warnings.append(DeleteWarning( - jpg=_to_path(w.get("jpg")), - raw=_to_path(w.get("raw")), - message=w.get("message", ""), - )) + warnings.append( + DeleteWarning( + jpg=_to_path(w.get("jpg")), + raw=_to_path(w.get("raw")), + message=w.get("message", ""), + ) + ) failures = [] for f in raw.get("failures", []): - failures.append(DeleteFailure( - jpg=_to_path(f.get("jpg")), - raw=_to_path(f.get("raw")), - code=f.get("code", ""), - message=f.get("message", ""), - )) + failures.append( + DeleteFailure( + jpg=_to_path(f.get("jpg")), + raw=_to_path(f.get("raw")), + code=f.get("code", ""), + message=f.get("message", ""), + ) + ) return cls( job_id=raw.get("job_id", 0), diff --git a/faststack/imaging/cache.py b/faststack/imaging/cache.py index 8e9581e..1e190e0 100644 --- a/faststack/imaging/cache.py +++ b/faststack/imaging/cache.py @@ -5,18 +5,15 @@ from typing import Any, Callable, Optional, Union import time import threading -from cachetools import LRUCache +from cachetools import Cache, LRUCache log = logging.getLogger(__name__) def get_decoded_image_size(item) -> int: - """Calculates the size of a DecodedImage object.""" - # In this simplified example, we only store the buffer. - # In the full app, this would also account for the QImage/QTexture. - from faststack.models import DecodedImage - - if isinstance(item, DecodedImage): + """Calculates the size of a DecodedImage object or similar buffer-holding object.""" + # Use duck typing to support DecodedImage and similar objects (e.g. in tests) + if hasattr(item, "buffer"): # Handle both numpy arrays and memoryview buffers if hasattr(item.buffer, "nbytes"): return item.buffer.nbytes @@ -24,8 +21,22 @@ def get_decoded_image_size(item) -> int: return len(item.buffer) else: # Fallback: estimate from dimensions (more accurate for image buffers than sys.getsizeof) - bytes_per_pixel = getattr(item, "channels", 4) # Default to RGBA - return item.width * item.height * bytes_per_pixel + width = getattr(item, "width", 0) + height = getattr(item, "height", 0) + if width <= 0 or height <= 0: + return 1 # No usable dimensions + + if hasattr(item, "bytes_per_line") and item.bytes_per_line > 0: + bytes_per_pixel = item.bytes_per_line // width + else: + bytes_per_pixel = 4 # Default to RGBA + + # Guard against 0 (e.g. bytes_per_line=0) which would yield size 0 + # and break cache accounting. Don't clamp to 4 — that overcounts + # legitimate RGB (3 Bpp) buffers and causes premature evictions. + bytes_per_pixel = max(1, min(bytes_per_pixel, 16)) + + return width * height * bytes_per_pixel log.warning( f"Unexpected item type in cache: {type(item)}. Returning estimated size of 1." @@ -81,7 +92,9 @@ def __setitem__(self, key, value): # Fast check: iterate tombstones (usually very few) # Remove expired tombstones lazily now = time.monotonic() - expired = [p for p, expiry in self._tombstone_expiry.items() if now > expiry] + expired = [ + p for p, expiry in self._tombstone_expiry.items() if now > expiry + ] for p in expired: self._tombstones.discard(p) del self._tombstone_expiry[p] @@ -91,18 +104,44 @@ def __setitem__(self, key, value): log.debug(f"Refusing to cache tombstoned key: {key}") return + # Check if this is a replacement to handle its callback if __delitem__ isn't called. 
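Earlier in this hunk, `get_decoded_image_size` estimates a buffer's footprint from `bytes_per_line // width`, clamped to a sane bytes-per-pixel range. A standalone restatement of that fallback arithmetic (`estimate_bytes` is illustrative, not app code):

```python
def estimate_bytes(width: int, height: int, bytes_per_line: int = 0) -> int:
    # Mirror of the fallback estimate: derive bytes-per-pixel from the row
    # stride when available, default to RGBA, and clamp so a zero or absurd
    # stride cannot zero out (or blow up) cache size accounting.
    if width <= 0 or height <= 0:
        return 1  # no usable dimensions
    bpp = bytes_per_line // width if bytes_per_line > 0 else 4
    bpp = max(1, min(bpp, 16))
    return width * height * bpp


assert estimate_bytes(4000, 3000, 12000) == 36_000_000  # RGB, 3 bytes/pixel
assert estimate_bytes(4000, 3000) == 48_000_000         # default RGBA
```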
+ _MISSING = object() + old_value = _MISSING + if self.on_evict and key in self: + try: + # Cache.__getitem__ reads the value without updating LRU order, + # avoiding a subtle issue where peeking could change which item + # gets evicted when the subsequent __setitem__ triggers LRU eviction. + old_value = Cache.__getitem__(self, key) + except KeyError: + old_value = _MISSING + # Before adding a new item, we might need to evict others # This is handled by the parent class, which will call popitem if needed. - # We override popitem to capture callbacks if they occur during this call. + # We wrap the call to super().__setitem__ to capture all eviction + # callbacks triggered by popitem() -> __delitem__(). self._pending_callbacks = pending_callbacks self._pending_callbacks_owner = threading.get_ident() try: super().__setitem__(key, value) + + # If it was a replacement, we must ensure an eviction callback is added + # for the old value, because cachetools.__setitem__ for replacements + # does not call __delitem__ (it just overwrites the dict entry). + if old_value is not _MISSING and self.on_evict: + + def _replace_cb(k=key, v=old_value): + if self.on_evict: + self.on_evict(k, v) + + pending_callbacks.append(_replace_cb) finally: self._pending_callbacks = None self._pending_callbacks_owner = None - - log.debug(f"Cached item '{key}'. Cache size: {self.currsize / 1024**2:.2f} MB") + + log.debug( + f"Cached item '{key}'. Cache size: {self.currsize / 1024**2:.2f} MB" + ) # Execute all captured eviction callbacks OUTSIDE the lock for callback in pending_callbacks: @@ -121,52 +160,50 @@ def __contains__(self, key): with self._lock: return super().__contains__(key) - def get(self, key, default=None): - """Thread-safe get.""" + def __delitem__(self, key): + """Thread-safe delete with eviction callback.""" + callback = None with self._lock: - return super().get(key, default) - - def popitem(self): - """Extend popitem to log eviction. + # Peek at value before deletion for the callback. + # Use Cache.__getitem__ to avoid LRU order mutation (harmless since + # the key is about to be deleted, but avoids surprising side-effects + # on eviction order of remaining items). + try: + value = Cache.__getitem__(self, key) + except KeyError: + raise KeyError(key) from None - Lock note: acquires _lock, which is safe because _lock is an RLock. - When called from __setitem__ -> super().__setitem__() (LRU eviction), - the lock is already held by the same thread; RLock allows re-entry. - Eviction callbacks are deferred via _pending_callbacks when inside - __setitem__, and always execute OUTSIDE _lock. - """ - with self._lock: - key, value = super().popitem() + super().__delitem__(key) log.debug( - f"Evicted item '{key}'. Cache size after eviction: {self.currsize / 1024**2:.2f} MB" + f"Removed item '{key}'. Cache size: {self.currsize / 1024**2:.2f} MB" ) - - # Create a bound callback for this specific item - callback = None + if self.on_evict: - # Capture key/value in closure - # We use a default arg to bind immediate values + def _callback_func(k=key, v=value): if self.on_evict: self.on_evict(k, v) - - # If we are inside a __setitem__ call on the SAME thread, defer the callback - if self._pending_callbacks is not None and threading.get_ident() == self._pending_callbacks_owner: + + # If we are inside a call that defers callbacks (like __setitem__ or evict_paths), + # append to the shared list. 
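# General shape of the deferral pattern used here (illustrative sketch, not
# part of the patch): collect callbacks while holding the lock, invoke them
# only after release, so a callback that re-enters the cache cannot deadlock.
import threading

_lock = threading.RLock()

def remove_many(store: dict, keys, on_evict):
    deferred = []
    with _lock:
        for k in keys:
            v = store.pop(k, None)
            if v is not None:
                deferred.append((k, v))
    for k, v in deferred:  # executed outside the lock
        on_evict(k, v)

remove_many({"a": 1, "b": 2}, ["a", "b"], lambda k, v: print("evicted", k, v))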
+ if ( + self._pending_callbacks is not None + and threading.get_ident() == self._pending_callbacks_owner + ): self._pending_callbacks.append(_callback_func) - callback = None # Already deferred else: callback = _callback_func - # Execute callback OUTSIDE the lock to avoid deadlocks/reentrancy if callback: try: callback() except Exception: log.exception("Error in eviction callback") - # In a real Qt app, `value` would be a tuple like (numpy_buffer, qtexture_id) - # and we would explicitly free the GPU texture here. - return key, value + def get(self, key, default=None): + """Thread-safe get.""" + with self._lock: + return super().get(key, default) def clear(self): """Clear cache without triggering eviction callbacks.""" @@ -195,6 +232,7 @@ def pop_path(self, path: Union[Path, str]): pass keys_to_remove = [] + pending_callbacks = [] with self._lock: # Use list(self.keys()) to avoid mutation during iteration for key in list(self.keys()): @@ -206,8 +244,21 @@ def pop_path(self, path: Union[Path, str]): keys_to_remove.append(key) break - for k in keys_to_remove: - self.pop(k, None) + self._pending_callbacks = pending_callbacks + self._pending_callbacks_owner = threading.get_ident() + try: + for k in keys_to_remove: + self.pop(k, None) + finally: + self._pending_callbacks = None + self._pending_callbacks_owner = None + + # Execute all captured eviction callbacks OUTSIDE the lock + for callback in pending_callbacks: + try: + callback() + except Exception: + log.exception("Error in eviction callback") if keys_to_remove: log.debug( @@ -216,7 +267,7 @@ def pop_path(self, path: Union[Path, str]): def evict_paths(self, paths: list[Union[Path, str]]): """Targeted eviction of all keys starting with given paths. - + Args: paths: List of Path objects or strings. """ @@ -232,7 +283,7 @@ def evict_paths(self, paths: list[Union[Path, str]]): else: # String might be Windows-style, normalize to forward slashes prefix = str(p).replace("\\", "/") - + # Append separator to ensure we match directory/file boundary # e.g. "foo.jpg" -> "foo.jpg::" prefixes.append(f"{prefix}::") @@ -251,7 +302,7 @@ def evict_paths(self, paths: list[Union[Path, str]]): # 3. Optimistic scan: iterate keys once and collect matches # Convert prefixes to tuple for fast startswith check prefix_tuple = tuple(prefixes) - + keys_to_remove = [] for key in list(self.keys()): # Keys are strings like "path/to/file.jpg::0" @@ -260,15 +311,30 @@ def evict_paths(self, paths: list[Union[Path, str]]): # 4. Remove keys removed_bytes = 0 - for k in keys_to_remove: - # Use super().pop to avoid re-acquiring our lock / calling our overridden pop logic. - val = super().pop(k, None) - if val is not None: - try: - size = get_decoded_image_size(val) - except Exception: - size = 0 # Fallback - removed_bytes += size + pending_callbacks = [] + self._pending_callbacks = pending_callbacks + self._pending_callbacks_owner = threading.get_ident() + try: + for k in keys_to_remove: + # Use self.pop (which calls __delitem__) to trigger eviction callbacks. + # It will re-acquire our RLock safely. 
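# Standalone sketch of the RLock property relied on above: the owning thread
# may re-acquire without deadlocking, whereas a plain threading.Lock would
# hang forever at the inner acquire:
import threading

rlock = threading.RLock()
with rlock:
    with rlock:  # same thread, second acquire: RLock counts recursion depth
        reentered = True
assert reentered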
+ val = self.pop(k, None) + if val is not None: + try: + size = get_decoded_image_size(val) + except Exception: + size = 0 # Fallback + removed_bytes += size + finally: + self._pending_callbacks = None + self._pending_callbacks_owner = None + + # Execute all captured eviction callbacks OUTSIDE the lock + for callback in pending_callbacks: + try: + callback() + except Exception: + log.exception("Error in eviction callback") if keys_to_remove: log.info( diff --git a/faststack/imaging/editor.py b/faststack/imaging/editor.py index 164564a..511f0af 100644 --- a/faststack/imaging/editor.py +++ b/faststack/imaging/editor.py @@ -578,14 +578,17 @@ def load_image( float_image_orientation_applied = orientation > 1 log.warning( "OpenCV loaded unexpected channel count, falling back to Pillow: %s", - load_filepath + load_filepath, ) loaded_float_image = arr if loaded_bit_depth == 16: log.info("Loaded 16-bit image via OpenCV: %s", load_filepath) else: - log.info("Loaded 8-bit image via Pillow (OpenCV fallback): %s", load_filepath) + log.info( + "Loaded 8-bit image via Pillow (OpenCV fallback): %s", + load_filepath, + ) else: # Fallback to Pillow logic for 8-bit or if OpenCV failed/returned 8-bit loaded_bit_depth = 8 @@ -609,12 +612,14 @@ def load_image( if float_image_orientation_applied: log.debug( "EXIF orientation %d already applied during PIL load: %s", - orientation, load_filepath, + orientation, + load_filepath, ) else: log.info( "Applying EXIF orientation %d to float buffer (CV2 path): %s", - orientation, load_filepath, + orientation, + load_filepath, ) loaded_original = ImageOps.exif_transpose(loaded_original) if loaded_float_image is not None: @@ -956,7 +961,9 @@ def _apply_edits( # cv2.convertScaleAbs is very fast for saturation casting [0,1]*255 to uint8 srgb_u8_stride = cv2.convertScaleAbs(arr_stride, alpha=255.0) else: - srgb_u8_stride = (np.clip(arr_stride, 0.0, 1.0) * 255).astype(np.uint8) + srgb_u8_stride = (np.clip(arr_stride, 0.0, 1.0) * 255).astype( + np.uint8 + ) arr = _srgb_to_linear(arr) @@ -1300,7 +1307,9 @@ def _extract_2d(blur_result): if _skip_linear: arr = np.clip(arr, 0.0, 1.0) - return arr # May exceed 1.0 in preview/non-export; clipped for skip_linear export. + return ( + arr # May exceed 1.0 in preview/non-export; clipped for skip_linear export. + ) def auto_levels( self, threshold_percent: float = 0.1 @@ -1418,7 +1427,8 @@ def auto_levels( int((t_u8 - t_arr) * 1000), int((t_end - t_u8) * 1000), int((t_end - t0) * 1000), - w, h, + w, + h, "preview" if self.float_preview is not None else "full", ) return blacks, whites, float(p_low), float(p_high) @@ -1576,7 +1586,8 @@ def set_edit_param(self, key: str, value: Any) -> bool: if abs(val_deg - rounded_deg) > 1.0: log.warning( "'rotation' received %s. Rounding to %d. 
Use 'straighten_angle' for free rotation.", - value, final_val + value, + final_val, ) self.current_edits[key] = final_val @@ -1614,7 +1625,7 @@ def _apply_highlights_shadows( shadows: float, *, srgb_u8_stride: Optional[np.ndarray] = None, - srgb_u8: Optional[np.ndarray] = None, + srgb_u8: Optional[np.ndarray] = None, # Planned future alias for srgb_u8_stride analysis_state: Optional[Dict[str, float]] = None, edits: Optional[Dict[str, Any]] = None, ) -> np.ndarray: @@ -1634,7 +1645,7 @@ def _apply_highlights_shadows( shadows: -1.0 to 1.0, positive lifts shadows, negative crushes srgb_u8_stride: Optional uint8 sRGB array (strided) for accurate JPEG clipping detection (should be the image BEFORE linearization) - srgb_u8: Keyword-only alias for srgb_u8_stride (preferred if provided). + srgb_u8: Keyword-only alias for srgb_u8_stride (not yet active in call site). analysis_state: Optional pre-computed analysis state to avoid re-work. Returns: @@ -1648,7 +1659,7 @@ def _apply_highlights_shadows( state = analysis_state if state is None: # Re-compute locally if not provided - # We assume effective_srgb_u8 is ALREADY STRIDED if passed + # We assume effective_srgb_u8 is ALREADY STRIDED if passed arr_stride = arr[::4, ::4, :] # If effective_srgb_u8 was passed, use it directly (it's already small). # If it wasn't passed, we can't easily recreate the source state here without the original source buffer. @@ -1805,8 +1816,6 @@ def set_crop_box(self, crop_box: Tuple[int, int, int, int]): self.current_edits["crop_box"] = crop_box self._edits_rev += 1 - - def _write_tiff_16bit(self, path: Path, arr_float: np.ndarray): """ Writes a float32 (0-1) numpy array as an uncompressed 16-bit RGB TIFF using OpenCV. @@ -1890,7 +1899,7 @@ def _get_sanitized_exif_bytes(self) -> Optional[bytes]: except Exception as e: log.warning( "Failed to serialize sanitized EXIF: %s. 
Dropping EXIF to prevent rotation issues.", - e + e, ) return None except Exception as e: @@ -1919,8 +1928,6 @@ def _ensure_float_image(self) -> None: if self.float_image is None: self.float_image = float_arr - - def save_image( self, write_developed_jpg: bool = False, @@ -1951,10 +1958,7 @@ def save_image( raise RuntimeError("No image loaded") # Ensure float master exists (preview_only loads may not have it) - try: - self._ensure_float_image() - except RuntimeError: - raise + self._ensure_float_image() _debug = log.isEnabledFor(logging.DEBUG) if _debug: @@ -1966,11 +1970,13 @@ def save_image( # Re-check float image existence under lock (though _ensure calls it too) # Previously returned None, now raising to be explicit about failure if self.float_image is None: - raise RuntimeError("save_image called with no float_image (race condition?)") - + raise RuntimeError( + "save_image called with no float_image (race condition?)" + ) + # Determine if we can skip copy _safe_no_copy = self._edits_can_share_input(self.current_edits) - + # Snapshot the source data # If safe to share (read-only), we just grab the reference # If not safe, we MUST copy it here while holding the lock @@ -2015,9 +2021,7 @@ def save_image( else: # Check for geometric transforms rotation = edits_snapshot.get("rotation", 0) - straighten_angle = float( - edits_snapshot.get("straighten_angle", 0.0) - ) + straighten_angle = float(edits_snapshot.get("straighten_angle", 0.0)) transforms_applied = (rotation != 0) or (abs(straighten_angle) > 0.001) # Determine EXIF bytes to write @@ -2059,9 +2063,7 @@ def save_image( # Check for geometric transforms (re-check not strictly needed but for clarity) rotation = edits_snapshot.get("rotation", 0) - straighten_angle = float( - edits_snapshot.get("straighten_angle", 0.0) - ) + straighten_angle = float(edits_snapshot.get("straighten_angle", 0.0)) transforms_applied = (rotation != 0) or (abs(straighten_angle) > 0.001) # Determine EXIF for sidecar - prefer source EXIF (from paired JPEG) @@ -2103,7 +2105,8 @@ def save_image( int((t_backup - t_edits) * 1000), int((t_write - t_backup) * 1000), int((t_write - t0) * 1000), - w, h, + w, + h, original_path.name, ) return original_path, backup_path @@ -2113,7 +2116,8 @@ def save_image( raise RuntimeError("Save failed: %s" % str(e)) from e def save_image_uint8_levels( - self, save_target_path: Optional[Path] = None, + self, + save_target_path: Optional[Path] = None, ) -> Optional[Tuple[Path, Path]]: """Fast-path save using a uint8 LUT for levels-only edits. 
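# Standalone sketch of the LUT fast path implemented in the hunk below, using
# the same constants (blacks/whites scaled by 0.15, a 256-entry ramp tripled
# so PIL applies it to R, G and B in one C call). Sign conventions and the
# degenerate-range guard follow this patch; treat the exact values as
# illustrative.
import numpy as np
from PIL import Image

def levels_lut(blacks: float, whites: float) -> list:
    bp = -blacks * 0.15           # black point in [0, 1]
    wp = 1.0 - (whites * 0.15)    # white point in [0, 1]
    if abs(wp - bp) < 0.0001:     # avoid division by ~zero
        wp = bp + 0.0001
    ramp = np.linspace(0.0, 1.0, 256)
    ramp = np.clip((ramp - bp) / (wp - bp), 0.0, 1.0)
    return (ramp * 255.0).astype(np.uint8).tolist() * 3  # 768 entries

img = Image.new("RGB", (8, 8), (128, 128, 128))
out = img.point(levels_lut(blacks=-0.5, whites=0.0))
print(out.getpixel((0, 0)))  # darker than (128, 128, 128): blacks crushed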
@@ -2138,7 +2142,9 @@ def save_image_uint8_levels( return None # Only applicable when blacks/whites are the sole active edits - edits = self.current_edits + with self._lock: + edits = self.current_edits.copy() + for key, default in self._initial_edits().items(): if key in ("blacks", "whites"): continue @@ -2168,10 +2174,14 @@ def save_image_uint8_levels( # Build 768-entry LUT matching _apply_edits step 13 (cached by rounded key) cache_key = (round(blacks, 3), round(whites, 3)) - cached = self._cached_u8_lut - if cached is not None and cached[0] == cache_key: - lut_rgb = cached[1] - else: + with self._lock: + cached = self._cached_u8_lut + if cached is not None and cached[0] == cache_key: + lut_rgb = cached[1] + else: + lut_rgb = None + + if lut_rgb is None: bp = -blacks * 0.15 wp = 1.0 - (whites * 0.15) if abs(wp - bp) < 0.0001: @@ -2180,7 +2190,8 @@ def save_image_uint8_levels( lut = (lut - bp) / (wp - bp) lut = np.clip(lut, 0.0, 1.0) lut_rgb = (lut * 255.0).astype(np.uint8).tolist() * 3 # 768 entries - self._cached_u8_lut = (cache_key, lut_rgb) + with self._lock: + self._cached_u8_lut = (cache_key, lut_rgb) # Apply LUT via Pillow .point() — single C call, no large NumPy allocation rgb_img = self.original_image @@ -2224,7 +2235,9 @@ def save_image_uint8_levels( os.replace(tmp_path, original_path) except OSError as e: # Windows: destination may be held open by another process - log.warning("Atomic replace failed (%s); falling back to direct save", e) + log.warning( + "Atomic replace failed (%s); falling back to direct save", e + ) try: img_u8.save(original_path, **save_kwargs) except Exception: @@ -2248,7 +2261,8 @@ def save_image_uint8_levels( int((t_backup - t_lut) * 1000), int((t_write - t_backup) * 1000), int((t_write - t0) * 1000), - w, h, + w, + h, original_path.name, ) return original_path, backup_path diff --git a/faststack/imaging/metadata.py b/faststack/imaging/metadata.py index a1c06e8..a736326 100644 --- a/faststack/imaging/metadata.py +++ b/faststack/imaging/metadata.py @@ -1,6 +1,8 @@ import logging +import math +from fractions import Fraction from pathlib import Path -from typing import Dict, Any, Union +from typing import Any, Dict, Optional, Union from PIL import Image, ExifTags log = logging.getLogger(__name__) @@ -37,6 +39,168 @@ def clean_exif_value(value: Any) -> str: return str(value) +# Camera-style 1/3-stop shutter speed labels (Nikon/Canon convention) +_SHUTTER_TABLE = [ + (30.0, "30s"), (25.0, "25s"), (20.0, "20s"), (15.0, "15s"), (13.0, "13s"), + (10.0, "10s"), (8.0, "8s"), (6.0, "6s"), (5.0, "5s"), (4.0, "4s"), + (3.2, "3.2s"), (2.5, "2.5s"), (2.0, "2s"), (1.6, "1.6s"), (1.3, "1.3s"), + (1.0, "1s"), (0.8, "0.8s"), (0.6, "0.6s"), (0.5, "0.5s"), (0.4, "0.4s"), (0.3, "0.3s"), + (1/4, "1/4s"), (1/5, "1/5s"), (1/6, "1/6s"), (1/8, "1/8s"), + (1/10, "1/10s"), (1/13, "1/13s"), (1/15, "1/15s"), + (1/20, "1/20s"), (1/25, "1/25s"), (1/30, "1/30s"), + (1/40, "1/40s"), (1/50, "1/50s"), (1/60, "1/60s"), + (1/80, "1/80s"), (1/100, "1/100s"), (1/125, "1/125s"), + (1/160, "1/160s"), (1/200, "1/200s"), (1/250, "1/250s"), + (1/320, "1/320s"), (1/400, "1/400s"), (1/500, "1/500s"), + (1/640, "1/640s"), (1/800, "1/800s"), (1/1000, "1/1000s"), + (1/1250, "1/1250s"), (1/1600, "1/1600s"), (1/2000, "1/2000s"), + (1/2500, "1/2500s"), (1/3200, "1/3200s"), (1/4000, "1/4000s"), + (1/5000, "1/5000s"), (1/6400, "1/6400s"), (1/8000, "1/8000s"), +] +_SHUTTER_SECONDS = [t for (t, _) in _SHUTTER_TABLE] +_SHUTTER_LOG_SECONDS = [math.log(t) for t in _SHUTTER_SECONDS] + + +def 
_exif_rational_to_seconds(x: Any) -> Optional[float]: + """Convert various EXIF rational-ish representations to seconds.""" + if x is None: + return None + if hasattr(x, "numerator") and hasattr(x, "denominator"): + try: + n, d = int(x.numerator), int(x.denominator) + if d != 0: + return float(Fraction(n, d)) + except Exception as e: + log.debug( + "_exif_rational_to_seconds failed for rational object %r (%s): %s", + x, + type(x).__name__, + e, + ) + if isinstance(x, (tuple, list)) and len(x) == 2: + try: + n, d = int(x[0]), int(x[1]) + if d != 0: + return float(Fraction(n, d)) + except Exception as e: + log.debug( + "_exif_rational_to_seconds failed for tuple/list %r (%s): %s", + x, + type(x).__name__, + e, + ) + try: + return float(x) + except Exception as e: + if x is not None: + log.debug( + "_exif_rational_to_seconds failed for value %r (%s): %s", + x, + type(x).__name__, + e, + ) + return None + + +def format_shutter_speed_camera_style(exposure_value: Any) -> str: + """Format ExposureTime like a camera UI (nearest standard 1/3-stop step).""" + sec = _exif_rational_to_seconds(exposure_value) + if sec is None or not math.isfinite(sec) or sec <= 0: + return "" + if sec >= _SHUTTER_SECONDS[0]: + return _SHUTTER_TABLE[0][1] + if sec <= _SHUTTER_SECONDS[-1]: + return _SHUTTER_TABLE[-1][1] + log_sec = math.log(sec) + best_i = 0 + best_err = float("inf") + for i, log_t in enumerate(_SHUTTER_LOG_SECONDS): + err = abs(log_t - log_sec) + if err < best_err: + best_err = err + best_i = i + return _SHUTTER_TABLE[best_i][1] + + +def get_exif_brief(path: Union[str, Path]) -> str: + """Return a compact EXIF summary for the status bar. + + Opens only the image header (Pillow lazy-loads), extracts ISO, aperture, + shutter speed, and capture time. Returns a pipe-separated string like + ``"ISO 800 | f/2.8 | 1/500s | 14:30:25"`` or ``""`` if no EXIF is found. + + Supported formats: JPEG, TIFF, HEIF. + """ + path = Path(path) + if path.suffix.lower() not in {".jpg", ".jpeg", ".jpe", ".tif", ".tiff", ".heif", ".heic"}: + return "" + if not path.exists(): + return "" + + try: + with Image.open(path) as img: + exif = img.getexif() + # getexif() nests EXIF sub-IFD tags; merge them for flat access + # Read them while file is open to avoid "I/O on closed file" + exif_ifd = dict(exif.get_ifd(ExifTags.IFD.Exif) if hasattr(ExifTags, "IFD") else {}) + + if not exif: + return "" + except Exception: + return "" + + tags = dict(exif) + tags.update(exif_ifd) + + parts: list[str] = [] + + # ISO (tag 0x8827 / ISOSpeedRatings) + iso = tags.get(0x8827) + if iso is not None: + # Some cameras return a list/tuple for ISO + if isinstance(iso, (list, tuple)) and len(iso) > 0: + iso = iso[0] + try: + parts.append(f"ISO {int(iso)}") + except (ValueError, TypeError): + parts.append(f"ISO {iso}") + + # Aperture / FNumber (tag 0x829D) + f_number = tags.get(0x829D) + if f_number is not None: + try: + val = float(f_number) + parts.append(f"f/{val:.1f}") + except (ValueError, TypeError) as e: + log.debug(f"Failed to convert f_number {f_number!r}: {e}", exc_info=True) + + # Shutter speed / ExposureTime (tag 0x829A) + # Note: Pillow's ExifTags maps 0x829A to ExposureTime. 
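# Quick self-check of the formatter defined above (illustrative): the nearest
# standard step is chosen in log space, so ratios rather than absolute
# differences decide which label wins.
from fractions import Fraction

assert format_shutter_speed_camera_style(Fraction(1, 500)) == "1/500s"
assert format_shutter_speed_camera_style(0.0021) == "1/500s"  # ~1/476 -> 1/500
assert format_shutter_speed_camera_style((10, 1250)) == "1/125s"
assert format_shutter_speed_camera_style(0) == ""  # non-positive: no label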
+ exposure = tags.get(0x829A) + if exposure is not None: + try: + s = format_shutter_speed_camera_style(exposure) + if s: + parts.append(s) + except (ValueError, TypeError) as e: + log.debug(f"Failed to convert exposure {exposure!r}: {e}", exc_info=True) + + # Capture time / DateTimeOriginal (tag 0x9003), fallback DateTime (tag 0x0132) + dt = tags.get(0x9003) or tags.get(0x0132) + if dt: + try: + cleaned = clean_exif_value(dt) + # Format is "YYYY:MM:DD HH:MM:SS" — extract time portion + if " " in cleaned: + parts.append(cleaned.split(" ", 1)[1]) + else: + parts.append(cleaned) + except Exception as e: + log.error(f"Failed to parse EXIF datetime {dt!r}: {e}", exc_info=True) + + return " | ".join(parts) + + def get_exif_data(path: Union[str, Path]) -> Dict[str, Any]: """ Extracts EXIF data from an image file. @@ -50,14 +214,22 @@ def get_exif_data(path: Union[str, Path]) -> Dict[str, Any]: return {"summary": {}, "full": {}} try: - img = Image.open(path) - try: - exif = img._getexif() - finally: - img.close() - - if not exif: - return {"summary": {}, "full": {}} + with Image.open(path) as img: + exif_obj = img.getexif() + if not exif_obj: + return {"summary": {}, "full": {}} + + # Merge sub-IFD tags (ISO, Lens, etc.) + exif_ifd = dict(exif_obj.get_ifd(ExifTags.IFD.Exif) if hasattr(ExifTags, "IFD") else {}) + + # Fetch GPS sub-IFD while image is still open (Pillow ≥8.2 + # stores GPSInfo as an integer IFD offset, not a dict) + gps_ifd = dict(exif_obj.get_ifd(0x8825)) if hasattr(ExifTags, "IFD") else {} + + # Normalize to a dict for consistency + exif = dict(exif_obj) + exif.update(exif_ifd) + except Exception as e: log.warning(f"Failed to extract EXIF from {path}: {e}") return {"summary": {}, "full": {}} @@ -76,7 +248,10 @@ def get_val(key): # Date Taken date_taken = get_val("DateTimeOriginal") or get_val("DateTime") if date_taken: - summary["Date Taken"] = clean_exif_value(date_taken) + try: + summary["Date Taken"] = clean_exif_value(date_taken) + except Exception as e: + log.debug("failed parsing EXIF date %r: %s", date_taken, e) # Camera Model make = get_val("Make") @@ -106,7 +281,12 @@ def get_val(key): # ISO iso = get_val("ISOSpeedRatings") if iso: - summary["ISO"] = clean_exif_value(iso) + if isinstance(iso, (list, tuple)) and len(iso) > 0: + iso = iso[0] + try: + summary["ISO"] = str(int(iso)) + except (ValueError, TypeError): + summary["ISO"] = clean_exif_value(iso) # Aperture (FNumber) f_number = get_val("FNumber") @@ -157,8 +337,10 @@ def get_val(key): # We can just clean it for now. summary["Flash"] = clean_exif_value(flash) - # GPS - gps_info = get_val("GPSInfo") + # GPS — prefer the resolved sub-IFD dict; fall back to decoded tag only + # if it is already a mapping (older Pillow versions). + gps_raw = get_val("GPSInfo") + gps_info = gps_ifd if gps_ifd else (gps_raw if isinstance(gps_raw, dict) else None) if gps_info: try: @@ -168,9 +350,6 @@ def convert_to_degrees(value): s = float(value[2]) return d + (m / 60.0) + (s / 3600.0) - lat = None - lon = None - # GPSInfo keys are integers. # 1: GPSLatitudeRef, 2: GPSLatitude # 3: GPSLongitudeRef, 4: GPSLongitude @@ -187,10 +366,6 @@ def convert_to_degrees(value): summary["GPS"] = f"{lat:.5f}, {lon:.5f}" except Exception as e: log.warning(f"Failed to parse GPS info: {e}") - # Fallback to cleaning the raw info if parsing fails - # But user specifically asked for decimal, so maybe just don't show if it fails or show raw? 
- # Let's show raw if parsing fails but cleaned - # summary["GPS"] = clean_exif_value(gps_info) pass # Convert all values in full dict to string to ensure JSON serializability for QML diff --git a/faststack/imaging/prefetch.py b/faststack/imaging/prefetch.py index 851c39f..bde10a3 100644 --- a/faststack/imaging/prefetch.py +++ b/faststack/imaging/prefetch.py @@ -120,8 +120,6 @@ def get_monitor_profile() -> Optional[ImageCms.ImageCmsProfile]: # apply_orientation_to_np and apply_exif_orientation imported from orientation.py - - def apply_saturation_compensation( arr: np.ndarray, width: int, @@ -181,7 +179,9 @@ def __init__( self.debug = debug # Use CPU count for I/O-bound JPEG decoding # Rule of thumb: 2x CPU cores for I/O bound, 1x for CPU bound - optimal_workers = min((os.cpu_count() or 1) * 2, 8) # Cap at 8 for fast navigation + optimal_workers = min( + (os.cpu_count() or 1) * 2, 8 + ) # Cap at 8 for fast navigation self.executor = create_daemon_threadpool_executor( max_workers=optimal_workers, @@ -226,7 +226,9 @@ def update_prefetch( """ if self.debug: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} update_prefetch: START index={current_index} dir={direction}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} update_prefetch: START index={current_index} dir={direction}" + ) # NOTE: Generation is NOT incremented here. It only changes when display size, # zoom state, or color mode changes - events that actually invalidate cached images. @@ -274,7 +276,8 @@ def update_prefetch( # Snapshoting and range computation inside lock with self._futures_lock: - n = len(self.image_files) + image_files = self.image_files + n = len(image_files) if n == 0: return # Ensure current_index is clamped @@ -305,13 +308,15 @@ def update_prefetch( # Cancel stale futures and remove from scheduled tasks_submitted = 0 # Clean up old generation entries to prevent memory leak - old_generations = [g for g in list(self._scheduled.keys()) if g < self.generation] + old_generations = [ + g for g in list(self._scheduled.keys()) if g < self.generation + ] for g in old_generations: self._scheduled.pop(g, None) # Get scheduled set for current generation scheduled = self._scheduled.setdefault(self.generation, set()) - + for index, future in list(self.futures.items()): if index < start or index >= end: if future.cancel(): @@ -328,29 +333,39 @@ def update_prefetch( if self.debug: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} update_prefetch: DONE submitted={tasks_submitted} total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} update_prefetch: DONE submitted={tasks_submitted} total={(_t_end - _t_start)*1000:.2f}ms" + ) def submit_task( - self, index: int, generation: int, priority: bool = False, override_path: Optional[Path] = None + self, + index: int, + generation: int, + priority: bool = False, + override_path: Optional[Path] = None, ) -> Optional[Future]: """Submits a decoding task for a given index.""" if self._stop_event.is_set(): return None - # Capture list snapshot and check bounds before accessing path - image_files = self.image_files - if index < 0 or index >= len(image_files): - return None - - requested_path = override_path if override_path is not None else image_files[index].path - if self.debug and priority: _t_start = time.perf_counter() - print(f"[DBGCACHE] {_t_start*1000:.3f} submit_task: PRIORITY index={index} gen={generation} override={override_path}") + print( + f"[DBGCACHE] {_t_start*1000:.3f} submit_task: PRIORITY index={index} 
gen={generation} override={override_path}" + ) with self._futures_lock: - # We track by index. If we already have a job for this index, - # we must cancel it if the requested path is different + # Bounds check must happen inside the lock to stay consistent + # with self.image_files (which set_image_files can replace under lock). + if index < 0 or index >= len(self.image_files): + return None + + requested_path = ( + override_path if override_path is not None else self.image_files[index].path + ) + + # We track by index. If we already have a job for this index, + # we must cancel it if the requested path is different # (e.g. switching between main and variants). if index in self.futures and not self.futures[index].done(): current_path = self.future_paths.get(index) @@ -398,8 +413,6 @@ def submit_task( future.add_done_callback(lambda f, idx=index: self._cleanup_future(idx, f)) return future - - def _decode_and_cache( self, image_file: ImageFile, @@ -435,33 +448,56 @@ def _decode_and_cache( orientation = 1 if color_mode == "icc": monitor_profile = get_monitor_profile() - monitor_icc_path = config.get("color", "monitor_icc_path", fallback="").strip() + monitor_icc_path = config.get( + "color", "monitor_icc_path", fallback="" + ).strip() if monitor_profile is not None: if is_jpeg: try: with open(target_path, "rb") as f: - with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mmapped: + with mmap.mmap( + f.fileno(), 0, access=mmap.ACCESS_READ + ) as mmapped: if use_resized and should_resize: - buffer = decode_jpeg_resized(mmapped, display_width, display_height, fast_dct=fast_dct) + buffer = decode_jpeg_resized( + mmapped, + display_width, + display_height, + fast_dct=fast_dct, + ) else: - buffer = decode_jpeg_rgb(mmapped, fast_dct=fast_dct) + buffer = decode_jpeg_rgb( + mmapped, fast_dct=fast_dct + ) if buffer is not None and should_resize: img = PILImage.fromarray(buffer) - img.thumbnail((display_width, display_height), PILImage.Resampling.LANCZOS) + img.thumbnail( + (display_width, display_height), + PILImage.Resampling.LANCZOS, + ) buffer = np.array(img) - + if buffer is not None: try: # Seek to beginning of mmapped file for PIL mmapped.seek(0) with PILImage.open(mmapped) as pil_img: - icc_bytes = pil_img.info.get("icc_profile") - orientation = pil_img.getexif().get(274, 1) + icc_bytes = pil_img.info.get( + "icc_profile" + ) + orientation = pil_img.getexif().get( + 274, 1 + ) except Exception: - pass + log.debug("Failed to read EXIF from mmap for %s", target_path, exc_info=True) except Exception as e: - log.warning("Decode failed (ICC path) index=%d path=%s: %s", index, target_path, e) + log.warning( + "Decode failed (ICC path) index=%d path=%s: %s", + index, + target_path, + e, + ) buffer = None if buffer is None: @@ -470,14 +506,22 @@ def _decode_and_cache( orientation = img.getexif().get(274, 1) img = img.convert("RGB") if should_resize: - img.thumbnail((display_width, display_height), PILImage.Resampling.LANCZOS) + img.thumbnail( + (display_width, display_height), + PILImage.Resampling.LANCZOS, + ) buffer = np.array(img) except Exception as e: - log.warning("Decode failed (ICC fallback) index=%d path=%s: %s", index, target_path, e) + log.warning( + "Decode failed (ICC fallback) index=%d path=%s: %s", + index, + target_path, + e, + ) return None img = PILImage.fromarray(buffer) - + if icc_bytes is None: try: # Try to get ICC if we don't have it yet (but orientation should be set) @@ -486,13 +530,17 @@ def _decode_and_cache( if orientation == 1: orientation = orig.getexif().get(274, 1) 
except Exception as e: - log.warning("Failed to read metadata from %s: %s", target_path, e) + log.warning( + "Failed to read metadata from %s: %s", target_path, e + ) src_profile = None src_profile_key = None if icc_bytes: try: - src_profile = ImageCms.ImageCmsProfile(io.BytesIO(icc_bytes)) + src_profile = ImageCms.ImageCmsProfile( + io.BytesIO(icc_bytes) + ) src_profile_key = hashlib.sha256(icc_bytes).hexdigest() except Exception as e: log.warning("Failed to parse ICC profile: %s", e) @@ -502,26 +550,41 @@ def _decode_and_cache( src_profile_key = "srgb_builtin" try: - transform = get_icc_transform(src_profile, monitor_profile, src_profile_key, monitor_icc_path) + transform = get_icc_transform( + src_profile, + monitor_profile, + src_profile_key, + monitor_icc_path, + ) ImageCms.applyTransform(img, transform, inPlace=True) buffer = np.array(img, dtype=np.uint8) except Exception as e: log.warning("ICC conversion failed: %s", e) - + if buffer is None: if is_jpeg: try: with open(target_path, "rb") as f: - with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mmapped: + with mmap.mmap( + f.fileno(), 0, access=mmap.ACCESS_READ + ) as mmapped: if use_resized and should_resize: - buffer = decode_jpeg_resized(mmapped, display_width, display_height, fast_dct=fast_dct) + buffer = decode_jpeg_resized( + mmapped, + display_width, + display_height, + fast_dct=fast_dct, + ) else: buffer = decode_jpeg_rgb(mmapped, fast_dct=fast_dct) if buffer is not None and should_resize: img = PILImage.fromarray(buffer) - img.thumbnail((display_width, display_height), PILImage.Resampling.LANCZOS) + img.thumbnail( + (display_width, display_height), + PILImage.Resampling.LANCZOS, + ) buffer = np.array(img) - + # Capture orientation if we have a buffer if buffer is not None: try: @@ -529,7 +592,7 @@ def _decode_and_cache( with PILImage.open(mmapped) as pil_img: orientation = pil_img.getexif().get(274, 1) except Exception: - pass + log.debug("Failed to read EXIF from mmap for %s", target_path, exc_info=True) except Exception: buffer = None @@ -539,10 +602,15 @@ def _decode_and_cache( orientation = img.getexif().get(274, 1) img = img.convert("RGB") if should_resize: - img.thumbnail((display_width, display_height), PILImage.Resampling.LANCZOS) + img.thumbnail( + (display_width, display_height), + PILImage.Resampling.LANCZOS, + ) buffer = np.array(img) except Exception as e: - log.warning("Decode failed index=%d path=%s: %s", index, target_path, e) + log.warning( + "Decode failed index=%d path=%s: %s", index, target_path, e + ) return None if buffer is None: @@ -564,7 +632,13 @@ def _decode_and_cache( val = config.get("color", "saturation_factor", fallback="1.0") saturation_factor = float(val) if val is not None else 1.0 if saturation_factor != 1.0: - apply_saturation_compensation(buffer.ravel(), buffer.shape[1], buffer.shape[0], bytes_per_line, saturation_factor) + apply_saturation_compensation( + buffer.ravel(), + buffer.shape[1], + buffer.shape[0], + bytes_per_line, + saturation_factor, + ) mv = memoryview(buffer).cast("B") decoded = DecodedImage( @@ -584,7 +658,7 @@ def _decode_and_cache( except Exception as e: # Downgraded from ERROR to prevent log noise on bad files - log.warning("Error in _decode_and_cache: %s", e) + log.warning("Error in _decode_and_cache: %s", e, exc_info=True) return None def _cleanup_future(self, index: int, future: Future): @@ -614,7 +688,6 @@ def cancel_all(self): with self._futures_lock: self._cancel_all_locked() - def shutdown(self): """Initiates a clean shutdown of the prefetcher.""" 
log.info("Shutting down Prefetcher...") diff --git a/faststack/io/indexer.py b/faststack/io/indexer.py index dc0fc31..059ee60 100644 --- a/faststack/io/indexer.py +++ b/faststack/io/indexer.py @@ -8,7 +8,12 @@ from typing import List, Dict, Tuple from faststack.models import ImageFile -from faststack.io.variants import VariantGroup, build_variant_map, parse_variant_stem +from faststack.io.variants import ( + VariantGroup, + build_variant_map, + norm_path, + parse_variant_stem, +) log = logging.getLogger(__name__) @@ -88,7 +93,9 @@ def _build_image_list( effective_name = base_name.casefold() break img = ImageFile( - path=p, raw_pair=None, timestamp=effective_ts, + path=p, + raw_pair=None, + timestamp=effective_ts, sort_name_cf=effective_name, ) image_entries.append((image_sort_key(img), img)) @@ -124,15 +131,19 @@ def find_images(directory: Path) -> List[ImageFile]: elapsed = time.perf_counter() - t_start paired_count = sum( - 1 for im in image_files + 1 + for im in image_files if im.raw_pair and im.path.suffix.lower() in JPG_EXTENSIONS ) raw_only_count = sum( 1 for im in image_files if im.path.suffix.lower() not in JPG_EXTENSIONS ) log.info( - "Found %d images (%d paired, %d raw-only).", - len(image_files), paired_count, raw_only_count, + "Found %d images (%d paired, %d raw-only) in %.2fs.", + len(image_files), + paired_count, + raw_only_count, + elapsed, ) return image_files @@ -176,15 +187,20 @@ def find_images_with_variants( group_key, _, _ = parse_variant_stem(img.path.stem) key_cf = group_key.casefold() group = variant_map.get(key_cf) + img_norm = Path(norm_path(img.path)) if group is None or len(group.all_files) <= 1: # No variant group or singleton: keep as-is filtered.append(img) - elif group.main_path == img.path: - # This IS the main: keep it + elif group.main_path == img_norm: + # This IS the main: keep it (normalize to match build_variant_map) filtered.append(img) else: # This is a developed file reachable via badge: remove from visible list - log.debug("Filtering out variant %s (main=%s)", img.path.name, group.main_path.name if group.main_path else "?") + log.debug( + "Filtering out variant %s (main=%s)", + img.path.name, + group.main_path.name if group.main_path else "?", + ) # Annotate images with variant flags for img in filtered: @@ -196,7 +212,10 @@ def find_images_with_variants( group = variant_map.get(key_cf) if group: img.has_backups = bool(group.backup_paths) - img.has_developed = group.developed_path is not None and group.developed_path != group.main_path + img.has_developed = ( + group.developed_path is not None + and group.developed_path != group.main_path + ) image_files = filtered @@ -210,21 +229,13 @@ def find_images_with_variants( 1 for im in image_files if im.path.suffix.lower() not in JPG_EXTENSIONS ) - if log.isEnabledFor(logging.DEBUG): - log.info( - "Found %d total, %d paired, %d raw-only in %.3fs", - len(image_files), - paired_count, - raw_only_count, - elapsed, - ) - else: - log.info( - "Found %d images (%d paired, %d raw-only).", - len(image_files), - paired_count, - raw_only_count, - ) + log.info( + "Found %d images (%d paired, %d raw-only) in %.2fs.", + len(image_files), + paired_count, + raw_only_count, + elapsed, + ) return image_files, variant_map diff --git a/faststack/io/utils.py b/faststack/io/utils.py index 15d488c..6c0d11f 100644 --- a/faststack/io/utils.py +++ b/faststack/io/utils.py @@ -5,9 +5,10 @@ from pathlib import Path from typing import Union + def normalize_path_key(path: Union[Path, str]) -> str: """Normalize a path for use as a 
stable dictionary key. - + Handles Windows case-insensitivity by case-folding, and standardizes separators. This is critical for ensuring that paths from scanners match paths from resolved logic. """ @@ -18,17 +19,19 @@ def normalize_path_key(path: Union[Path, str]) -> str: # os.path.normcase on Windows: lowercases and converts / to \ # os.path.normcase on Linux: returns as-is # os.path.abspath: ensures absolute path and collapses .. + # Note: abspath does NOT resolve symlinks (that's realpath). return os.path.normcase(os.path.abspath(p_str)) + def compute_path_hash(path: Union[Path, str]) -> str: """Compute a fast, stable hash of the path for UI/Thumbnail IDs. - + Uses MD5 of the normalized path string. CRITICAL: Does NOT access the filesystem (no .resolve() calls). """ # normalize_path_key handles the canonicalization pure-string wise norm_path = normalize_path_key(path) - + # MD5 is used for ID generation, not security. # It must map the same path to the same ID across app restarts. return hashlib.md5(norm_path.encode("utf-8")).hexdigest()[:16] # noqa: S324 diff --git a/faststack/io/variants.py b/faststack/io/variants.py index 2c9f6f5..aa44b9c 100644 --- a/faststack/io/variants.py +++ b/faststack/io/variants.py @@ -3,18 +3,17 @@ Pure-logic module with no Qt dependencies. """ -import logging -import os import re from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple - -log = logging.getLogger(__name__) +from faststack.io.utils import normalize_path_key as norm_path # Token-boundary regex: match `-developed` as a real dash-delimited token. # Ensures "undeveloped" or "mydeveloped" do NOT match. -_DEVELOPED_TOKEN_RE = re.compile(r"(?:^|(?<=-))[Dd][Ee][Vv][Ee][Ll][Oo][Pp][Ee][Dd](?=$|(?=-))") +_DEVELOPED_TOKEN_RE = re.compile( + r"(?:^|(?<=-))[Dd][Ee][Vv][Ee][Ll][Oo][Pp][Ee][Dd](?=$|(?=-))" +) # Trailing `-backup(\d+)?` token at end of (stripped) stem. _BACKUP_TRAILING_RE = re.compile(r"(?:^|-)([Bb][Aa][Cc][Kk][Uu][Pp])(\d+)?$") @@ -55,12 +54,12 @@ def parse_variant_stem(stem: str) -> Tuple[str, bool, Optional[int]]: - backup_number: None if not a backup, 1 for `-backup`, N for `-backupN` """ # 1. Check for -developed token - is_developed = bool(_DEVELOPED_TOKEN_RE.search(stem)) + m = _DEVELOPED_TOKEN_RE.search(stem) + is_developed = m is not None # 2. Remove exactly one -developed token (first occurrence) -> stripped if is_developed: # Find the match and remove it, handling leading/trailing dashes - m = _DEVELOPED_TOKEN_RE.search(stem) start, end = m.start(), m.end() # Remove the token and any resulting double-dash or leading/trailing dash before = stem[:start] @@ -101,7 +100,11 @@ def build_variant_map( # 1. Parse all files groups: Dict[str, VariantGroup] = {} # keyed by group_key.casefold() - for path in all_jpg_paths: + # Sort paths to ensure deterministic behavior for group_key casing + # (The first-seen case preserved stem sets the group's display key). 
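# Expected parses per the token rules above (illustrative; the combined case
# assumes -developed is stripped before the trailing -backupN check, as the
# docstring describes):
assert parse_variant_stem("IMG_0042") == ("IMG_0042", False, None)
assert parse_variant_stem("IMG_0042-developed") == ("IMG_0042", True, None)
assert parse_variant_stem("IMG_0042-backup") == ("IMG_0042", False, 1)
assert parse_variant_stem("IMG_0042-backup3") == ("IMG_0042", False, 3)
assert parse_variant_stem("IMG_0042-developed-backup2") == ("IMG_0042", True, 2)
assert parse_variant_stem("undeveloped") == ("undeveloped", False, None)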
+ sorted_paths = sorted(all_jpg_paths, key=lambda p: str(p).casefold()) + + for path in sorted_paths: group_key, is_developed, backup_number = parse_variant_stem(path.stem) key_cf = group_key.casefold() @@ -185,7 +188,8 @@ def _select_backups(group: VariantGroup) -> None: def get_group_key_for_path( - path: Path, variant_map: Dict[str, VariantGroup], + path: Path, + variant_map: Dict[str, VariantGroup], ) -> Optional[str]: """Look up the casefolded group key for a file path.""" group_key, _, _ = parse_variant_stem(path.stem) @@ -202,21 +206,24 @@ def build_badge_list(group: VariantGroup) -> List[Dict]: Each badge is a dict: {"label": str, "path": str, "kind": str}. """ badges = [] - norm = norm_path if group.main_path is not None: - badges.append({ - "label": "Main", - "path": norm(group.main_path), - "kind": "main", - }) + badges.append( + { + "label": "Main", + "path": norm_path(group.main_path), + "kind": "main", + } + ) if group.developed_path is not None and group.developed_path != group.main_path: - badges.append({ - "label": "D", - "path": norm(group.developed_path), - "kind": "developed", - }) + badges.append( + { + "label": "D", + "path": norm_path(group.developed_path), + "kind": "developed", + } + ) for n in sorted(group.backup_paths.keys()): bp = group.backup_paths[n] @@ -224,15 +231,14 @@ def build_badge_list(group: VariantGroup) -> List[Dict]: if bp == group.main_path or bp == group.developed_path: continue label = "Bk" if n == 1 else f"Bk{n}" - badges.append({ - "label": label, - "path": norm(bp), - "kind": "backup", - }) + badges.append( + { + "label": label, + "path": norm_path(bp), + "kind": "backup", + } + ) return badges -def norm_path(p: Path) -> str: - """Normalize a path for consistent comparison.""" - return os.path.normcase(os.path.abspath(str(p))) diff --git a/faststack/io/watcher.py b/faststack/io/watcher.py index 8eb12d9..a9aa74d 100644 --- a/faststack/io/watcher.py +++ b/faststack/io/watcher.py @@ -54,7 +54,11 @@ def on_deleted(self, event): def on_moved(self, event): if _is_ignored_path(event.src_path) or _is_ignored_path(event.dest_path): return - log.info("Detected file move: %s -> %s. Requesting refresh.", event.src_path, event.dest_path) + log.info( + "Detected file move: %s -> %s. Requesting refresh.", + event.src_path, + event.dest_path, + ) self.callback(event.src_path) self.callback(event.dest_path) diff --git a/faststack/qml/Components.qml b/faststack/qml/Components.qml index 07c31f6..ad49162 100644 --- a/faststack/qml/Components.qml +++ b/faststack/qml/Components.qml @@ -31,12 +31,6 @@ Item { } Keys.onEscapePressed: (event) => { - if (root.fullScreenLoupe) { - root.exitFullScreenLoupe() - event.accepted = true - return - } - if (uiState && uiState.isCropping) { if (mainMouseArea.isRotating) { // Revert rotation diff --git a/faststack/qml/Main.qml b/faststack/qml/Main.qml index 526aec9..ab535ca 100644 --- a/faststack/qml/Main.qml +++ b/faststack/qml/Main.qml @@ -913,12 +913,6 @@ ApplicationWindow { // Key bindings implemented in old Main.qml Keys.onPressed: function(event) { - if (root.fullScreenLoupe && event.key === Qt.Key_Escape) { - root.exitFullScreenLoupe() - event.accepted = true - return - } - if (!uiState || !controller) { return } @@ -1001,37 +995,43 @@ ApplicationWindow { } Label { text: (uiState && uiState.imageCount > 0) - ? ` | File: ${uiState.currentFilename || 'N/A'}` - : " | File: N/A" + ? 
(uiState.currentFilename || "N/A") + : "N/A" + color: root.currentTextColor + } + Label { + visible: (uiState && uiState.imageCount > 0 && uiState.exifBrief && uiState.exifBrief.length > 0) + text: uiState ? (uiState.exifBrief || "") : "" color: root.currentTextColor } + Item { Layout.fillWidth: true } Label { - text: uiState ? ` | Stacked: ${uiState.stackedDate}` : "" + text: uiState ? ` Stacked: ${uiState.stackedDate}` : "" color: "lightgreen" visible: uiState ? (uiState.imageCount > 0 && uiState.isStacked) : false } Label { - text: uiState ? ` | Uploaded on ${uiState.uploadedDate}` : "" + text: uiState ? ` Uploaded on ${uiState.uploadedDate}` : "" color: "lightgreen" visible: uiState ? (uiState.imageCount > 0 && uiState.isUploaded) : false } Label { - text: uiState ? ` | Edited on ${uiState.editedDate}` : "" + text: uiState ? ` Edited on ${uiState.editedDate}` : "" color: "lightgreen" visible: uiState ? (uiState.imageCount > 0 && uiState.isEdited) : false } Label { - text: uiState ? ` | Restacked on ${uiState.restackedDate}` : "" + text: uiState ? ` Restacked on ${uiState.restackedDate}` : "" color: "cyan" visible: uiState ? (uiState.imageCount > 0 && uiState.isRestacked) : false } Label { - text: " | Favorite" + text: " Favorite" color: "gold" visible: uiState ? (uiState.imageCount > 0 && uiState.isFavorite) : false } Label { - text: uiState ? ` | Filter: "${uiState.filterString}"` : "" + text: uiState ? ` Filter: "${uiState.filterString}"` : "" color: "yellow" font.bold: true visible: uiState ? (uiState.filterString !== "") : false diff --git a/faststack/qml/ThumbnailGridView.qml b/faststack/qml/ThumbnailGridView.qml index f896583..bff21ce 100644 --- a/faststack/qml/ThumbnailGridView.qml +++ b/faststack/qml/ThumbnailGridView.qml @@ -162,7 +162,7 @@ Item { if (currentIndex >= count) { currentIndex = count - 1 } - prefetchTimer.restart() + if (prefetchEnabled) prefetchTimer.restart() } // Empty state diff --git a/faststack/qml/ThumbnailTile.qml b/faststack/qml/ThumbnailTile.qml index c5e2c2e..be2de0c 100644 --- a/faststack/qml/ThumbnailTile.qml +++ b/faststack/qml/ThumbnailTile.qml @@ -49,6 +49,8 @@ Item { property color restackedColor: "#FF9800" // Orange for restacked (R) property color favoriteColor: "#FFD700" // Gold for favorite (F) property color batchColor: "#2196F3" // Blue for batch (B) + property color backupsColor: "#9C27B0" // Purple for backups (Bk) + property color developedColor: "#009688" // Teal for developed (D) property color cursorColor: "#00BFFF" // Cyan for keyboard cursor property color loadingColor: tile.isDarkTheme ? 
"#3c3c3c" : "#e0e0e0" property color counterUploadedCol: "#7BBF7F" // Muted green @@ -259,15 +261,36 @@ Item { visible: !tile.tileIsFolder layoutDirection: Qt.RightToLeft + // Backups badge (Bk) - Purple Rectangle { visible: tile.tileHasBackups - width: 18; height: 18; radius: 3; color: "#9C27B0" - Text { anchors.centerIn: parent; text: "Bk"; font.pixelSize: 9; font.bold: true; color: "white" } + width: 18 + height: 18 + radius: 3 + color: backupsColor + Text { + anchors.centerIn: parent + text: "Bk" + font.pixelSize: 9 + font.bold: true + color: "white" + } } + + // Developed badge (D) - Teal Rectangle { visible: tile.tileHasDeveloped - width: 18; height: 18; radius: 3; color: "#009688" - Text { anchors.centerIn: parent; text: "D"; font.pixelSize: 11; font.bold: true; color: "white" } + width: 18 + height: 18 + radius: 3 + color: developedColor + Text { + anchors.centerIn: parent + text: "D" + font.pixelSize: 11 + font.bold: true + color: "white" + } } } @@ -500,7 +523,9 @@ Item { } Component.onCompleted: { - if (tile.tileIndex === 0 && uiState && uiState.debugThumbTiming) + // Use robust check for uiState which might not be defined in all contexts + var hasUiState = (typeof uiState !== 'undefined' && uiState !== null); + if (tile.tileIndex === 0 && hasUiState && uiState.debugThumbTiming) console.log("[THUMB-TIMING] first delegate created (index 0) t=" + Date.now() + "ms") } diff --git a/faststack/repro_cache_lock.py b/faststack/repro_cache_lock.py index a276c17..0361007 100644 --- a/faststack/repro_cache_lock.py +++ b/faststack/repro_cache_lock.py @@ -1,47 +1,48 @@ - import threading from faststack.imaging.cache import ByteLRUCache + def repro_lock_contention(): lock_held_during_callback = False - + def on_evict_callback(key, value): nonlocal lock_held_during_callback - # Try to acquire the same lock. If it's held by the current thread (RLock), + # Try to acquire the same lock. If it's held by the current thread (RLock), # we can check if it would block others or if we can detect it's held. # Since it's an RLock, current thread can re-acquire it. - # But we can check if the lock is "locked" by looking at internal state + # But we can check if the lock is "locked" by looking at internal state # or just by the fact that we know we are in the callback. - + # A better way to check if the lock is held: # Since it's an RLock, it doesn't expose a simple "is_locked" that works across threads easily # but we can try to acquire it in a DIFFERENT thread. 
- + def check_lock(): nonlocal lock_held_during_callback if not cache._lock.acquire(blocking=False): lock_held_during_callback = True else: cache._lock.release() - + t = threading.Thread(target=check_lock) t.start() t.join() cache = ByteLRUCache(max_bytes=100, size_of=lambda x: x, on_evict=on_evict_callback) - + print("Adding item 'a' (50 bytes)") cache["a"] = 50 print("Adding item 'b' (50 bytes)") cache["b"] = 50 - + print("Adding item 'c' (50 bytes) -> should trigger eviction of 'a'") cache["c"] = 50 - + if lock_held_during_callback: print("FAILED: Lock was HELD during on_evict callback!") else: print("SUCCESS: Lock was NOT held during on_evict callback.") + if __name__ == "__main__": repro_lock_contention() diff --git a/faststack/repro_daemon_bug.py b/faststack/repro_daemon_bug.py index 816641b..1a8ec92 100644 --- a/faststack/repro_daemon_bug.py +++ b/faststack/repro_daemon_bug.py @@ -2,25 +2,32 @@ import threading import time + def check_daemon(): - print(f"Thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}") + print( + f"Thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}" + ) + def test_failure_mimic(): print("Main thread daemon:", threading.current_thread().daemon) executor_container = {} - + def creator(): - executor_container['executor'] = concurrent.futures.ThreadPoolExecutor(max_workers=1) - + executor_container["executor"] = concurrent.futures.ThreadPoolExecutor( + max_workers=1 + ) + t = threading.Thread(target=creator, name="CreatorThread") t.daemon = True t.start() - t.join() # Creator thread dies - - executor = executor_container['executor'] + t.join() # Creator thread dies + + executor = executor_container["executor"] # If the executor spawns worker threads when submit is called, # it might inherit from the CURRENT thread (main) instead of the creator thread. 
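# Sketch of the underlying CPython rule the repro relies on: Thread() with
# daemon=None inherits the *creating* thread's daemon flag, so a worker
# spawned lazily from the main thread comes out non-daemonic.
import threading

def spawn_child(results):
    child = threading.Thread(
        target=lambda: results.append(threading.current_thread().daemon)
    )
    child.start()  # daemon flag inherited from *this* (daemon) thread
    child.join()

results = []
parent = threading.Thread(target=spawn_child, args=(results,), daemon=True)
parent.start()
parent.join()
print(results)  # [True] -- the grandchild inherited daemon=True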
executor.submit(check_daemon).result() + if __name__ == "__main__": test_failure_mimic() diff --git a/faststack/repro_imports.py b/faststack/repro_imports.py index 812cbae..94b1f4f 100644 --- a/faststack/repro_imports.py +++ b/faststack/repro_imports.py @@ -1,11 +1,13 @@ try: from unittest.mock import MagicMock + print("Success: from unittest.mock import MagicMock") except ImportError as e: print(f"Failed: {e}") try: import faststack.app + print("Success: import faststack.app") except ImportError as e: print(f"Failed: import faststack.app: {e}") diff --git a/faststack/test_prespawn_strategy.py b/faststack/test_prespawn_strategy.py index 1bb183d..2f0e25a 100644 --- a/faststack/test_prespawn_strategy.py +++ b/faststack/test_prespawn_strategy.py @@ -2,20 +2,26 @@ import threading import time + def check_daemon(): - print(f"Thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}") + print( + f"Thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}" + ) + def test_prespawn(): print("Main thread daemon:", threading.current_thread().daemon) executor_container = {} max_workers = 4 - + def creator(): - print(f"Creator thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}") + print( + f"Creator thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}" + ) executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) - executor_container['executor'] = executor + executor_container["executor"] = executor # Force spawn all workers while we are in this daemon thread - # We need to submit at least 'max_workers' tasks and wait for them to be + # We need to submit at least 'max_workers' tasks and wait for them to be # picked up by separate threads. 
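# Why the prespawn below must submit max_workers tasks (sketch): CPython's
# ThreadPoolExecutor creates worker threads lazily, roughly one per submit
# until max_workers is reached. Worker naming is an implementation detail.
import concurrent.futures
import threading
import time

ex = concurrent.futures.ThreadPoolExecutor(max_workers=4)
ex.submit(time.sleep, 0.2)
time.sleep(0.05)  # let the first worker start
workers = [
    t for t in threading.enumerate() if t.name.startswith("ThreadPoolExecutor")
]
print(len(workers))  # 1, not 4 -- the remaining workers don't exist yet
ex.shutdown(wait=True)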
futures = [executor.submit(time.sleep, 0.05) for _ in range(max_workers)] concurrent.futures.wait(futures) @@ -25,10 +31,11 @@ def creator(): t.daemon = True t.start() t.join() - - executor = executor_container['executor'] + + executor = executor_container["executor"] print("Main thread calling submit (which should reuse a daemon worker)...") executor.submit(check_daemon).result() + if __name__ == "__main__": test_prespawn() diff --git a/faststack/tests/conftest.py b/faststack/tests/conftest.py index 8e94d85..a5af71f 100644 --- a/faststack/tests/conftest.py +++ b/faststack/tests/conftest.py @@ -4,6 +4,7 @@ import signal import sys + def _dump_usr2(signum, frame): sys.stderr.write(f"\n\n=== SIGUSR2: pid={os.getpid()} ===\n") sys.stderr.flush() @@ -11,6 +12,7 @@ def _dump_usr2(signum, frame): sys.stderr.write("=== end SIGUSR2 dump ===\n\n") sys.stderr.flush() + def pytest_configure(config): # Enable faulthandler for crashes too faulthandler.enable(all_threads=True) diff --git a/faststack/tests/debug_app_init.py b/faststack/tests/debug_app_init.py index e0c0156..6c06fbc 100644 --- a/faststack/tests/debug_app_init.py +++ b/faststack/tests/debug_app_init.py @@ -9,25 +9,29 @@ if not QApplication.instance(): _qapp = QApplication(sys.argv) + def test_app_init_only(): """Verify AppController can be instantiated with mocks.""" - with patch("faststack.app.ByteLRUCache"), \ - patch("faststack.app.ThumbnailModel"), \ - patch("faststack.app.Prefetcher"), \ - patch("faststack.app.PathResolver"), \ - patch("faststack.app.Watcher"), \ - patch("faststack.app.uuid"), \ - patch("faststack.app.QTimer"), \ - patch("faststack.app.concurrent.futures.ThreadPoolExecutor"): - + with ( + patch("faststack.app.ByteLRUCache"), + patch("faststack.app.ThumbnailModel"), + patch("faststack.app.Prefetcher"), + patch("faststack.app.PathResolver"), + patch("faststack.app.Watcher"), + patch("faststack.app.uuid"), + patch("faststack.app.QTimer"), + patch("faststack.app.concurrent.futures.ThreadPoolExecutor"), + ): + # Create QApplication instance from PySide6.QtWidgets import QApplication import sys + if not QApplication.instance(): - qapp = QApplication(sys.argv) + _ = QApplication(sys.argv) else: - qapp = QApplication.instance() - + _ = QApplication.instance() + mock_engine = MagicMock() try: app = AppController(Path("."), mock_engine) diff --git a/faststack/tests/debug_editor_error.py b/faststack/tests/debug_editor_error.py index 14328b0..6ffb2d4 100644 --- a/faststack/tests/debug_editor_error.py +++ b/faststack/tests/debug_editor_error.py @@ -1,4 +1,3 @@ - import unittest from unittest.mock import MagicMock, patch import numpy as np @@ -9,6 +8,7 @@ from faststack.imaging.editor import ImageEditor from PIL import Image + class TestDebugError(unittest.TestCase): def test_debug_save_image(self): editor = ImageEditor() @@ -19,7 +19,10 @@ def test_debug_save_image(self): editor.original_image = MagicMock() # Patch create_backup_file to succeed - with patch("faststack.imaging.editor.create_backup_file", return_value=Path("backup.jpg")): + with patch( + "faststack.imaging.editor.create_backup_file", + return_value=Path("backup.jpg"), + ): # Patch Image.fromarray at the module level where it's used with patch("faststack.imaging.editor.Image.fromarray") as mock_fromarray: # Configure the mock object returned by fromarray @@ -36,15 +39,18 @@ def test_save_image_raises_on_missing_float(self): editor.float_image = None editor.current_filepath = Path("fake.jpg") editor.original_image = MagicMock() - - # Simulate race: _ensure_float_image 
"thinks" it succeeded (or was raced), + + # Simulate race: _ensure_float_image "thinks" it succeeded (or was raced), # but float_image is actually None when we enter the lock. # We achieve this by silencing _ensure_float_image so it doesn't populate float_image. editor._ensure_float_image = MagicMock() # Should raise RuntimeError explicitly now (instead of returning None) - with self.assertRaisesRegex(RuntimeError, "save_image called with no float_image"): + with self.assertRaisesRegex( + RuntimeError, "save_image called with no float_image" + ): editor.save_image() + if __name__ == "__main__": unittest.main() diff --git a/faststack/tests/debug_exif.py b/faststack/tests/debug_exif.py index 3840ec3..8c41427 100644 --- a/faststack/tests/debug_exif.py +++ b/faststack/tests/debug_exif.py @@ -1,33 +1,69 @@ - +import shutil +import tempfile import unittest -import sys from pathlib import Path -from PIL import Image, ExifTags + +from PIL import ExifTags, Image + from faststack.imaging.editor import ImageEditor, sanitize_exif_orientation + class TestDebugExif(unittest.TestCase): - def test_debug_exif(self): - # Create source image with Orientation 6 + def test_sanitize_exif_orientation(self): + """Verify sanitize_exif_orientation resets the orientation tag to 1.""" + # Create source image with Orientation 6 (Rotated 90 CW) img = Image.new("RGB", (100, 50), color="red") exif = img.getexif() exif[ExifTags.Base.Orientation] = 6 exif_bytes = exif.tobytes() - - print(f"DEBUG: Source EXIF bytes len: {len(exif_bytes)}") - + # Test sanitize_exif_orientation directly sanitized = sanitize_exif_orientation(exif_bytes) - print(f"DEBUG: Sanitized bytes: {sanitized is not None}") - - if sanitized: - chk = Image.Exif() - chk.load(sanitized) - print(f"DEBUG: Sanitized Orientation: {chk.get(ExifTags.Base.Orientation)}") + self.assertIsNotNone(sanitized) + + chk = Image.Exif() + chk.load(sanitized) + self.assertEqual(chk.get(ExifTags.Base.Orientation), 1) + + def test_editor_full_workflow_exif(self): + """Verify ImageEditor workflow preserves and sanitizes EXIF in real file I/O.""" + tmp_dir = tempfile.mkdtemp() + try: + tmp_path = Path(tmp_dir) + img_path = tmp_path / "test_exif_workflow.jpg" - # Helper to simulate editor flow - editor = ImageEditor() - editor.float_image = ImageEditor()._initial_edits() # Dummy - # ... actually need real flow - + # 1. Create source file with EXIF Orientation 6 + img = Image.new("RGB", (100, 50), color="blue") + exif = img.getexif() + exif[ExifTags.Base.Orientation] = 6 + img.save(img_path, exif=exif) + + # 2. Load into editor + editor = ImageEditor() + self.assertTrue(editor.load_image(str(img_path))) + + # 3. Verify editor state + self.assertIsNotNone(editor.float_image) + # ImageEditor.load_image bakes orientation, so original (100x50) [WxH] orient 6 [90 CW] + # becomes (50x100) [WxH]. In NumPy (H, W, C), this is (100, 50, 3). + self.assertEqual(editor.float_image.shape[0], 100) # Height + self.assertEqual(editor.float_image.shape[1], 50) # Width + + # 4. Apply edit and save + editor.set_edit_param("brightness", 0.5) + # This triggers backup and save + saved = editor.save_image() + self.assertIsNotNone(saved) + saved_path, _ = saved + + # 5. 
Verify saved file has sanitized orientation
+            with Image.open(saved_path) as out_img:
+                out_exif = out_img.getexif()
+                # Orientation should be 1 because we baked the rotation into the pixels
+                self.assertEqual(out_exif.get(ExifTags.Base.Orientation), 1)
+        finally:
+            shutil.rmtree(tmp_dir)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/faststack/tests/repro_futures_cleanup.py b/faststack/tests/repro_futures_cleanup.py
index 3f3137a..c39960a 100644
--- a/faststack/tests/repro_futures_cleanup.py
+++ b/faststack/tests/repro_futures_cleanup.py
@@ -10,6 +10,7 @@
 sys.modules["faststack.config"] = MagicMock()
 from faststack.imaging.prefetch import Prefetcher
 
+
 class ReproFuturesCleanup(unittest.TestCase):
     def test_newer_future_is_not_deleted_by_older_task(self):
         # Dependencies
@@ -32,36 +33,45 @@ def test_newer_future_is_not_deleted_by_older_task(self):
 
         future_a = MagicMock(spec=Future)
         future_a.done.return_value = False
-        
+
         future_b = MagicMock(spec=Future)
         future_b.done.return_value = False
 
         index = 1
         prefetcher.futures[index] = future_a
-        
+
         # Simulate Task A's finally block running with its 'future' reference
         # but the actual prefetcher.futures[index] has been replaced by future_b
         prefetcher.futures[index] = future_b
-        
+
         # Now simulate Task A completing its cleanup
         # This is what _decode_and_cache does in its finally block:
         # with self._futures_lock:
         #     if self.futures.get(index) is future:
         #         del self.futures[index]
-        
+
         def simulate_cleanup(prefetcher, idx, fut):
             with prefetcher._futures_lock:
                 if prefetcher.futures.get(idx) is fut:
                     del prefetcher.futures[idx]
 
         simulate_cleanup(prefetcher, index, future_a)
-        
-        self.assertIn(index, prefetcher.futures, "Newer future was deleted by older task cleanup!")
-        self.assertIs(prefetcher.futures[index], future_b, "The future in self.futures is not the newer one!")
-        
+        self.assertIn(
+            index, prefetcher.futures, "Newer future was deleted by older task cleanup!"
+        )
+        self.assertIs(
+            prefetcher.futures[index],
+            future_b,
+            "The future in self.futures is not the newer one!",
+        )
+
         # Now simulate Task B cleanup
         simulate_cleanup(prefetcher, index, future_b)
 
-        self.assertNotIn(index, prefetcher.futures, "Future was not deleted after its OWN cleanup!")
+        self.assertNotIn(
+            index, prefetcher.futures, "Future was not deleted after its OWN cleanup!"
+ ) + if __name__ == "__main__": unittest.main() diff --git a/faststack/tests/test_cache.py b/faststack/tests/test_cache.py index 94c7ee9..6ede503 100644 --- a/faststack/tests/test_cache.py +++ b/faststack/tests/test_cache.py @@ -63,3 +63,54 @@ def test_cache_update_item(): # Replace with a smaller item cache["a"] = MockItem(10) assert cache.currsize == 10 + + +def test_get_decoded_image_size_with_nbytes(): + """Tests when buffer has nbytes.""" + from faststack.imaging.cache import get_decoded_image_size + from faststack.models import DecodedImage + + class MockBuffer: + def __init__(self, nbytes): + self.nbytes = nbytes + + buffer = MockBuffer(nbytes=100) + item = DecodedImage( + buffer=buffer, width=10, height=10, bytes_per_line=40, format=None + ) + assert get_decoded_image_size(item) == 100 + + +def test_get_decoded_image_size_fallback_metadata(): + """Tests fallback when buffer lacks nbytes but has metadata.""" + from faststack.imaging.cache import get_decoded_image_size + from faststack.models import DecodedImage + + class MockBuffer: + pass + + buffer = MockBuffer() + item = DecodedImage( + buffer=buffer, width=10, height=10, bytes_per_line=30, format=None + ) + # bytes_per_pixel = 30 // 10 = 3 (RGB, no overcounting) + # size = 10 * 10 * 3 = 300 + assert get_decoded_image_size(item) == 300 + + +def test_get_decoded_image_size_fallback_default(): + """Tests fallback when metadata is missing (should default to 4).""" + from faststack.imaging.cache import get_decoded_image_size + from types import SimpleNamespace + + class MockBuffer: + pass + + buffer = MockBuffer() + # Use SimpleNamespace to build a minimal object that lacks bytes_per_line + item = SimpleNamespace( + buffer=buffer, width=10, height=10 + ) + + # size = 10 * 10 * 4 = 400 + assert get_decoded_image_size(item) == 400 diff --git a/faststack/tests/test_cache_replacement_callback.py b/faststack/tests/test_cache_replacement_callback.py new file mode 100644 index 0000000..faacb7e --- /dev/null +++ b/faststack/tests/test_cache_replacement_callback.py @@ -0,0 +1,233 @@ +"""Tests for ByteLRUCache eviction callbacks.""" + +import threading +from faststack.imaging.cache import ByteLRUCache + + +def _make_cache(max_bytes, on_evict): + """Helper: integer-sized cache with eviction callback.""" + return ByteLRUCache(max_bytes=max_bytes, size_of=lambda x: x, on_evict=on_evict) + + +# ── Replacement ────────────────────────────────────────────────────── + + +def test_replacement_fires_callback_exactly_once(): + """Put key A, then put key A again -> callback called once with old value.""" + evicted = [] + cache = _make_cache(200, lambda k, v: evicted.append((k, v))) + + cache["a"] = 40 + cache["a"] = 60 # replace + + assert evicted == [("a", 40)] + assert cache["a"] == 60 + assert cache.currsize == 60 + + +def test_replacement_plus_lru_eviction(): + """Replace key A with a larger value that forces eviction of key B.""" + evicted = [] + cache = _make_cache(100, lambda k, v: evicted.append((k, v))) + + cache["a"] = 40 + cache["b"] = 40 + # a(40) + b(40) = 80. Now replace a with 70 -> a(70) + b(40) = 110 > 100. 
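+    # The next assignment should produce two distinct callbacks (their
+    # relative order is not guaranteed, hence the defaultdict grouping
+    # below instead of an exact-sequence assert):
+    #   on_evict("a", 40)  - old value reported for the replaced key
+    #   on_evict("b", 40)  - evicted under LRU pressure to fit max_bytes=100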
+ cache["a"] = 70 + + from collections import defaultdict + evicted_map = defaultdict(list) + for k, v in evicted: + evicted_map[k].append(v) + + assert "a" in evicted_map, "old value of 'a' should be reported" + assert "b" in evicted_map, "'b' should be evicted by LRU pressure" + assert 40 in evicted_map["a"] + assert 40 in evicted_map["b"] + + +# ── LRU eviction ──────────────────────────────────────────────────── + + +def test_lru_eviction_fires_callback(): + """Fill cache past maxsize -> LRU item evicted with callback.""" + evicted = [] + cache = _make_cache(100, lambda k, v: evicted.append((k, v))) + + cache["a"] = 60 + cache["b"] = 60 # total 120 > 100: evicts "a" + + assert evicted == [("a", 60)] + assert "a" not in cache + assert "b" in cache + + +def test_multiple_evictions_in_one_put(): + """A single large insert can evict multiple items.""" + evicted = [] + cache = _make_cache(100, lambda k, v: evicted.append((k, v))) + + cache["a"] = 30 + cache["b"] = 30 + cache["c"] = 30 + # a+b+c = 90. Insert d=80 -> must evict a, b, c to make room. + cache["d"] = 80 + + evicted_keys = {k for k, _ in evicted} + assert {"a", "b", "c"} <= evicted_keys + + +# ── Eviction order ────────────────────────────────────────────────── + + +def test_replacement_peek_does_not_change_eviction_order(): + """Peeking at old value during replacement must not alter LRU eviction order. + + Scenario: a(LRU), b, c(MRU) near capacity. Replace c → old-c is peeked. + If the peek moved c to MRU, that's fine (c is being replaced anyway). + The critical thing: a must still be evicted first, not b. + """ + evicted = [] + cache = _make_cache(100, lambda k, v: evicted.append((k, v))) + + cache["a"] = 30 # LRU + cache["b"] = 30 + cache["c"] = 30 # MRU (total 90) + + # Touch "a" to make it NOT the LRU — now b is LRU + _ = cache["a"] + + # Replace "c" with larger value that forces an eviction. + # Total would be 30(a) + 30(b) + 50(c) = 110 > 100. + # b is LRU and should be evicted, NOT a. + cache["c"] = 50 + + evicted_keys = [k for k, _ in evicted] + # "c" old value reported as replacement + assert "c" in evicted_keys + # "b" evicted by LRU pressure (it was least recently used) + assert "b" in evicted_keys + # "a" must NOT be evicted (it was touched more recently than b) + assert "a" not in evicted_keys + assert "a" in cache + + +# ── Eviction path verification ────────────────────────────────────── + + +def test_eviction_goes_through_delitem(): + """Prove that LRU eviction during __setitem__ routes through __delitem__. + + If cachetools ever changes popitem() to bypass __delitem__, this test + will catch the regression and we'll need to reintroduce a popitem() override. + """ + delitem_keys = [] + + class TracingCache(ByteLRUCache): + def __delitem__(self, key): + delitem_keys.append(key) + super().__delitem__(key) + + cache = TracingCache(max_bytes=100, size_of=lambda x: x, on_evict=None) + cache["a"] = 60 + cache["b"] = 60 # total 120 > 100: must evict "a" via __delitem__ + + assert "a" in delitem_keys, ( + "LRU eviction did NOT route through __delitem__. " + "Reintroduce popitem() override to restore eviction callbacks." 
+ ) + + +def test_on_evict_fires_for_both_overflow_and_replacement(): + """Combined test: on_evict fires for both LRU overflow and key replacement.""" + evicted = [] + cache = _make_cache(100, lambda k, v: evicted.append((k, v))) + + # Phase 1: LRU overflow eviction + cache["a"] = 60 + cache["b"] = 60 # evicts "a" + assert ("a", 60) in evicted + + # Phase 2: replacement overwrite + evicted.clear() + cache["b"] = 50 # replaces old "b" (60) with new value (50) + assert ("b", 60) in evicted + assert cache["b"] == 50 + + +# ── evict_paths + tombstones ──────────────────────────────────────── + + +def test_evict_paths_fires_callbacks(): + """evict_paths() should trigger on_evict for each removed key.""" + evicted = [] + cache = _make_cache(10_000, lambda k, v: evicted.append((k, v))) + + cache["photo.jpg::0"] = 100 + cache["photo.jpg::1"] = 200 + cache["other.jpg::0"] = 300 + + from pathlib import Path + + cache.evict_paths([Path("photo.jpg")]) + + evicted_keys = {k for k, _ in evicted} + assert "photo.jpg::0" in evicted_keys + assert "photo.jpg::1" in evicted_keys + assert "other.jpg::0" not in evicted_keys + + +def test_evict_paths_tombstone_blocks_reinsert(): + """After evict_paths(), re-caching the same prefix is silently blocked.""" + cache = _make_cache(10_000, None) + + cache["photo.jpg::0"] = 100 + from pathlib import Path + + cache.evict_paths([Path("photo.jpg")]) + assert "photo.jpg::0" not in cache + + # Tombstone should block re-insertion + cache["photo.jpg::0"] = 100 + assert "photo.jpg::0" not in cache + + +def test_evict_paths_no_deadlock_under_contention(): + """evict_paths() must not deadlock when another thread is writing.""" + cache = _make_cache(1_000_000, None) + + # Pre-populate + for i in range(50): + cache[f"img_{i}.jpg::0"] = 100 + + errors = [] + barrier = threading.Barrier(2, timeout=5) + + def writer(): + try: + barrier.wait() + for i in range(50, 150): + cache[f"new_{i}.jpg::0"] = 100 + except Exception as e: + errors.append(e) + + def evictor(): + try: + barrier.wait() + from pathlib import Path + + paths = [Path(f"img_{i}.jpg") for i in range(50)] + cache.evict_paths(paths) + except Exception as e: + errors.append(e) + + t1 = threading.Thread(target=writer) + t2 = threading.Thread(target=evictor) + t1.start() + t2.start() + t1.join(timeout=10) + t2.join(timeout=10) + + assert not errors, f"Errors during concurrent access: {errors}" + assert not t1.is_alive(), "Writer thread deadlocked" + assert not t2.is_alive(), "Evictor thread deadlocked" diff --git a/faststack/tests/test_cache_size_clamping.py b/faststack/tests/test_cache_size_clamping.py new file mode 100644 index 0000000..1528e26 --- /dev/null +++ b/faststack/tests/test_cache_size_clamping.py @@ -0,0 +1,70 @@ +"""Test for get_decoded_image_size clamping logic.""" + +from faststack.imaging.cache import get_decoded_image_size +from faststack.models import DecodedImage + + +class MockBuffer: + pass + + +def test_zero_bytes_per_line_defaults_to_rgba(): + """bytes_per_line=0 is invalid metadata -> falls back to RGBA (4 bpp).""" + item = DecodedImage( + buffer=MockBuffer(), width=10, height=10, bytes_per_line=0, format=None + ) + # bytes_per_line=0 fails the > 0 guard, so bytes_per_pixel defaults to 4 (RGBA) + assert get_decoded_image_size(item) == 10 * 10 * 4 + + +def test_small_bytes_per_line_clamps_to_1(): + """bytes_per_line < width -> integer division gives 0, clamped to 1.""" + item = DecodedImage( + buffer=MockBuffer(), width=10, height=10, bytes_per_line=5, format=None + ) + assert 
get_decoded_image_size(item) == 10 * 10 * 1 + + +def test_rgb_3bpp_not_overcounted(): + """RGB buffers (3 bytes/pixel) must not be inflated to 4.""" + item = DecodedImage( + buffer=MockBuffer(), width=100, height=100, bytes_per_line=300, format=None + ) + # bytes_per_pixel = 300 // 100 = 3 + assert get_decoded_image_size(item) == 100 * 100 * 3 + + +def test_rgba_4bpp_unchanged(): + """RGBA buffers (4 bytes/pixel) pass through unchanged.""" + item = DecodedImage( + buffer=MockBuffer(), width=100, height=100, bytes_per_line=400, format=None + ) + assert get_decoded_image_size(item) == 100 * 100 * 4 + + +def test_high_bpp_clamped_to_16(): + """Absurdly large bytes_per_pixel clamped to 16.""" + item = DecodedImage( + buffer=MockBuffer(), width=10, height=10, bytes_per_line=500, format=None + ) + # bytes_per_pixel = 500 // 10 = 50 -> clamped to 16 + assert get_decoded_image_size(item) == 10 * 10 * 16 + + +def test_missing_dimensions_returns_1(): + """Buffer present but no width/height -> returns 1 (not AttributeError).""" + from types import SimpleNamespace + + item = SimpleNamespace(buffer=MockBuffer()) + assert get_decoded_image_size(item) == 1 + + +def test_zero_dimensions_returns_1(): + """Buffer present with zero width or height -> returns 1.""" + from types import SimpleNamespace + + item = SimpleNamespace(buffer=MockBuffer(), width=0, height=100) + assert get_decoded_image_size(item) == 1 + + item = SimpleNamespace(buffer=MockBuffer(), width=100, height=0) + assert get_decoded_image_size(item) == 1 diff --git a/faststack/tests/test_config_setters.py b/faststack/tests/test_config_setters.py index cb99274..7c84f1c 100644 --- a/faststack/tests/test_config_setters.py +++ b/faststack/tests/test_config_setters.py @@ -12,17 +12,17 @@ class TestConfigSetters(unittest.TestCase): def setUp(self): # Apply patches for all dependencies of AppController to isolate it # and prevent side effects (like Qt init or file I/O). - + # Patch the config object specifically in faststack.app # faststack.app imports config as: from faststack.config import config self.config_patch = patch("faststack.app.config") self.mock_config = self.config_patch.start() - + # Default mock config behavior self.mock_config.getfloat.return_value = 0.1 self.mock_config.getboolean.return_value = False self.mock_config.getint.return_value = 4 - + self.patches = [ # Qt classes patch("faststack.app.QTimer"), @@ -30,7 +30,6 @@ def setUp(self): patch("faststack.app.QPixmap"), patch("faststack.app.QMimeData"), patch("faststack.app.QFileDialog"), - # Application classes patch("faststack.app.DecodedImage"), patch("faststack.app.ImageEditor"), @@ -46,7 +45,6 @@ def setUp(self): patch("faststack.app.PathResolver"), patch("faststack.app.UIState"), patch("faststack.app.ImageProvider"), - # Standard lib/Other patch("faststack.app.Path"), patch("faststack.app.concurrent.futures.ThreadPoolExecutor"), @@ -55,7 +53,7 @@ def setUp(self): for p in self.patches: p.start() self.addCleanup(p.stop) - + self.addCleanup(self.config_patch.stop) # Initialize controller with mock engine and path @@ -99,11 +97,11 @@ def test_set_auto_level_strength(self): # Wait, if config.getfloat returned 0.1 for threshold, did it return 0.1 for strength too? # Yes, line 62 in original: mock_config_obj.getfloat.return_value = 0.1 # In setUp I set it to 0.1. - + # But wait, config.getfloat is called with default 1.0 for strength in app.py: # self.auto_level_strength = config.getfloat("core", "auto_level_strength", 1.0) # If I mock getfloat to always return 0.1, then it's 0.1. 
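        # Net effect of the mocking above: the controller starts with
        # strength 0.1 (the blanket getfloat mock wins over the 1.0
        # default), and the setter below must still persist and report
        # the new value.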
- + new_val = 0.8 self.controller.set_auto_level_strength(new_val) @@ -126,13 +124,17 @@ def test_set_auto_level_strength_auto(self): self.assertEqual(self.controller.get_auto_level_strength_auto(), new_val) # Should be normalized "true" string - self.mock_config.set.assert_called_with("core", "auto_level_strength_auto", "true") + self.mock_config.set.assert_called_with( + "core", "auto_level_strength_auto", "true" + ) self.mock_config.save.assert_called_once() # Test False self.mock_config.set.reset_mock() self.controller.set_auto_level_strength_auto(False) - self.mock_config.set.assert_called_with("core", "auto_level_strength_auto", "false") + self.mock_config.set.assert_called_with( + "core", "auto_level_strength_auto", "false" + ) if __name__ == "__main__": diff --git a/faststack/tests/test_delete_worker_edge_cases.py b/faststack/tests/test_delete_worker_edge_cases.py index 17d9a14..58e7c52 100644 --- a/faststack/tests/test_delete_worker_edge_cases.py +++ b/faststack/tests/test_delete_worker_edge_cases.py @@ -1,4 +1,3 @@ - import threading from unittest.mock import MagicMock, patch import pytest @@ -6,6 +5,7 @@ from faststack.app import AppController from faststack.deletion_types import DeletionErrorCodes + def test_delete_worker_invalid_item_shape(tmp_path): """Verify worker handles invalid item shapes gracefully (no crash).""" job_id = 999 @@ -13,59 +13,64 @@ def test_delete_worker_invalid_item_shape(tmp_path): images_to_delete = [ "not_a_tuple", ("only_one_item",), - (tmp_path / "ok.jpg", tmp_path / "ok.raw"), # Valid one to ensure continued processing + ( + tmp_path / "ok.jpg", + tmp_path / "ok.raw", + ), # Valid one to ensure continued processing ] - + # Create the valid files (tmp_path / "ok.jpg").touch() (tmp_path / "ok.raw").touch() - + cancel_event = threading.Event() - + result = AppController._delete_worker(job_id, images_to_delete, cancel_event) - + # Should complete without crashing assert result["status"] == "completed" - + # 2 invalid items should result in 2 failures with INVALID_WORK_ITEM # 1 valid item should be a success assert len(result["failures"]) == 2 assert result["failures"][0]["code"] == DeletionErrorCodes.INVALID_WORK_ITEM assert result["failures"][1]["code"] == DeletionErrorCodes.INVALID_WORK_ITEM - + assert len(result["successes"]) == 1 assert Path(result["successes"][0]["jpg"]) == tmp_path / "ok.jpg" + @patch("faststack.app.AppController._move_to_recycle") def test_delete_worker_permission_error(mock_recycle, tmp_path): """Verify PermissionError is mapped to PERMISSION_DENIED code.""" job_id = 888 img_path = tmp_path / "locked.jpg" img_path.touch() - + images_to_delete = [(img_path, None)] cancel_event = threading.Event() - + # Mock recycle to raise PermissionError mock_recycle.side_effect = PermissionError("Access denied") - + result = AppController._delete_worker(job_id, images_to_delete, cancel_event) - + assert len(result["failures"]) == 1 failure = result["failures"][0] assert failure["code"] == DeletionErrorCodes.PERMISSION_DENIED assert Path(failure["jpg"]) == img_path + def test_delete_worker_cancellation_safe_unpack(tmp_path): """Verify cancellation loop also handles invalid shapes safely.""" job_id = 777 # 1. First item valid (will be processed) # 2. Second item INVALID (will be skipped in main loop? No, we want to cancel BEFORE it) # Actually, to test cancellation loop, we need to set cancel_event. - + img1 = tmp_path / "1.jpg" img1.touch() - + # We want to simulate cancellation happening. 
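    # Two ways to get there: flip the event from a background thread
    # mid-run, or hand the worker an event that is already set.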
# We can't easily interrupt the loop from outside in a synchronous test without threading. # But we can pass a pre-set cancel_event! @@ -73,28 +78,28 @@ def test_delete_worker_cancellation_safe_unpack(tmp_path): # Let's check the code: # for i, item in enumerate(images_to_delete): # if cancel_event.is_set(): ... break - + # So if we set it immediately, item 0 triggers the break. # formatting: cancel_index = 0. # remaining = images_to_delete[0:] -> All items. - + cancel_event = threading.Event() cancel_event.set() - + images_to_delete = [ - (img1, None), # Valid - "invalid_shape", # Invalid - (1, 2, 3) # Invalid length + (img1, None), # Valid + "invalid_shape", # Invalid + (1, 2, 3), # Invalid length ] - + result = AppController._delete_worker(job_id, images_to_delete, cancel_event) - + assert result["cancelled"] is True - + # The cancellation loop should run for all 3 items. # It should record failures for valid items (as CANCELLED). # It should gracefully skip invalid items (no crash). - + # We expect 1 failure (the valid item, code=CANCELLED) # The invalid items are skipped in the cancellation loop with "continue" assert len(result["failures"]) == 1 diff --git a/faststack/tests/test_delete_worker_integration.py b/faststack/tests/test_delete_worker_integration.py index d8d6613..afd5f71 100644 --- a/faststack/tests/test_delete_worker_integration.py +++ b/faststack/tests/test_delete_worker_integration.py @@ -140,4 +140,3 @@ def move_side_effect(src, dst, *args, **kwargs): assert not jpg_path.exists() # Verify RAW is still there (failed to move) assert raw_path.exists() - diff --git a/faststack/tests/test_deletion_perf_structure.py b/faststack/tests/test_deletion_perf_structure.py index 246c61a..d476cc3 100644 --- a/faststack/tests/test_deletion_perf_structure.py +++ b/faststack/tests/test_deletion_perf_structure.py @@ -13,18 +13,21 @@ if not QApplication.instance(): _qapp = QApplication(sys.argv) + @pytest.fixture def mock_app(): """Create a partial mock of AppController for deletion testing.""" - with patch("faststack.app.ByteLRUCache") as MockCache, \ - patch("faststack.app.ThumbnailModel") as MockModel, \ - patch("faststack.app.Prefetcher") as MockPrefetcher, \ - patch("faststack.app.PathResolver") as MockResolver, \ - patch("faststack.app.Watcher"), \ - patch("faststack.app.uuid"), \ - patch("faststack.app.QTimer"), \ - patch("faststack.app.concurrent.futures.ThreadPoolExecutor"): - + with ( + patch("faststack.app.ByteLRUCache"), + patch("faststack.app.ThumbnailModel"), + patch("faststack.app.Prefetcher"), + patch("faststack.app.PathResolver"), + patch("faststack.app.Watcher"), + patch("faststack.app.uuid"), + patch("faststack.app.QTimer"), + patch("faststack.app.concurrent.futures.ThreadPoolExecutor"), + ): + # Pass mock engine mock_engine = MagicMock() app = AppController(Path("."), mock_engine) @@ -37,131 +40,117 @@ def mock_app(): app._path_to_index = {} app.sidecar = MagicMock() app._delete_executor = MagicMock() - + # Mock PathResolver update to verify no resolve calls return app -def test_delete_uses_targeted_eviction(): + +def test_delete_uses_targeted_eviction(mock_app): """Verify delete_indices calls evict_paths and NOT clear.""" - # Setup - use real controller with patched subsystems - with patch("faststack.app.ByteLRUCache") as MockCache, \ - patch("faststack.app.ThumbnailModel") as MockModel, \ - patch("faststack.app.Prefetcher") as MockPrefetcher, \ - patch("faststack.app.PathResolver") as MockResolver, \ - patch("faststack.app.Watcher"), \ - patch("faststack.app.uuid"), \ 
- patch("faststack.app.QTimer"), \ - patch("faststack.app.concurrent.futures.ThreadPoolExecutor") as MockExecutor: - - mock_engine = MagicMock() - app = AppController(Path("."), mock_engine) - - # Configure mocks - app.image_cache = MagicMock() - app.prefetcher = MagicMock() - # Ensure evict_paths is a method on the cache mock - app.image_cache.evict_paths = MagicMock() - - # Setup data - img1 = ImageFile(Path("c:/images/img1.jpg"), raw_pair=Path("c:/images/img1.CR2")) - img2 = ImageFile(Path("c:/images/img2.jpg")) - app.image_files = [img1, img2] - - # Manually populate path index since we bypassed load() - app._path_to_index = { - app._key(img1.path): 0, - app._key(img2.path): 1 - } - app.current_index = 0 - app.display_generation = 10 - - print("DEBUG: Calling _delete_indices") - # Act - summary = app._delete_indices([0], "test") - print("DEBUG: Returned from _delete_indices") - - # Assert - # 1. Should not clear entire cache - app.image_cache.clear.assert_not_called() - - # 2. Should not bump display generation (targeted update handled elsewhere) - # Note: optimistic deletion might bump generation if it triggers refresh, - # but here we check it doesn't do a full destructive clear that resets everything. - # Actually _delete_indices does NOT bump display_generation itself, - # that happens in undo or refresh. - assert app.display_generation == 10 - - # 3. Should call evict_paths with correct paths - app.image_cache.evict_paths.assert_called_once() - args, _ = app.image_cache.evict_paths.call_args - evicted = args[0] - assert len(evicted) == 2 - assert img1.path in evicted - assert img1.raw_pair in evicted - - # 4. Should cancel prefetch - app.prefetcher.cancel_all.assert_called_once() + app = mock_app + + # Setup data + img1 = ImageFile( + Path("c:/images/img1.jpg"), raw_pair=Path("c:/images/img1.CR2") + ) + img2 = ImageFile(Path("c:/images/img2.jpg")) + app.image_files = [img1, img2] + + # Manually populate path index since we bypassed load() + app._path_to_index = {app._key(img1.path): 0, app._key(img2.path): 1} + app.current_index = 0 + app.display_generation = 10 + + # Act + app._delete_indices([0], "test") + + # Assert + # 1. Should not clear entire cache + app.image_cache.clear.assert_not_called() + + # 2. Should not bump display generation (targeted update handled elsewhere) + # Note: optimistic deletion might bump generation if it triggers refresh, + # but here we check it doesn't do a full destructive clear that resets everything. + # Actually _delete_indices does NOT bump display_generation itself, + # that happens in undo or refresh. + assert app.display_generation == 10 + + # 3. Should call evict_paths with correct paths + app.image_cache.evict_paths.assert_called_once() + args, _ = app.image_cache.evict_paths.call_args + evicted = args[0] + assert len(evicted) == 2 + assert img1.path in evicted + assert img1.raw_pair in evicted + + # 4. 
Should cancel prefetch + app.prefetcher.cancel_all.assert_called_once() + def test_evict_paths_windows_handling(): """Verify ByteLRUCache.evict_paths handles Windows paths correctly.""" from faststack.imaging.cache import ByteLRUCache - + # Create a real cache instance (mocking LRUCache methods if needed, but ByteLRUCache is simple) # Pass a simple size_of function to avoid dependency on get_decoded_image_size - cache = ByteLRUCache(1000, size_of=lambda x: 1) - + cache = ByteLRUCache(1000, size_of=lambda _: 1) + # Add entries with forward slashes (as build_cache_key does) key1 = "C:/images/img1.jpg::0" key2 = "C:/images/img1.jpg::1" # Different generation key3 = "C:/images/img2.jpg::0" # Keep this - + cache[key1] = 1 cache[key2] = 1 cache[key3] = 1 - + # Act: Evict using Windows-style path string win_path = "C:\\images\\img1.jpg" cache.evict_paths([win_path]) - + # Assert assert key1 not in cache assert key2 not in cache assert key3 in cache - + # Act: Evict using Path object path_obj = Path("C:/images/img2.jpg") cache.evict_paths([path_obj]) - + # Assert assert key3 not in cache + def test_model_hashing_no_resolve(): """Verify PathResolver and ThumbnailModel do NOT call resolve().""" from faststack.thumbnail_view.model import ThumbnailModel from faststack.thumbnail_view.provider import PathResolver from faststack.models import ImageFile as ModelImageFile - + # Mock Path.resolve to raise exception - with patch("pathlib.Path.resolve", side_effect=Exception("Should not call resolve!")): - # Note: we need to patch wherever usage might occur or globally. - # Since we changed code to NOT use it, calling the methods should be safe. - - # Test Helper directly - from faststack.io.utils import compute_path_hash - p = Path("c:/foo/bar.jpg") - # This should NOT fail - h = compute_path_hash(p) - assert len(h) == 16 - - # Test Resolver update - resolver = PathResolver() - model = MagicMock() - model.rowCount.return_value = 1 - entry = MagicMock() - entry.path = p - entry.is_folder = False - model.get_entry.return_value = entry - - resolver.update_from_model(model) - # Should succeed and have entry - assert len(resolver._hash_to_path) == 1 + with patch( + "pathlib.Path.resolve", side_effect=Exception("Should not call resolve!") + ): + # Note: we need to patch wherever usage might occur or globally. + # Since we changed code to NOT use it, calling the methods should be safe. 
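+        # (compute_path_hash evidently hashes the textual path: the
+        # Exception-raising patch above would fire immediately if any of
+        # the helpers below still touched Path.resolve().)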
+ + # Test Helper directly + from faststack.io.utils import compute_path_hash + + p = Path("c:/foo/bar.jpg") + # This should NOT fail + h = compute_path_hash(p) + assert len(h) == 16 + + # Test Resolver update + resolver = PathResolver() + model = MagicMock() + model.rowCount.return_value = 1 + entry = MagicMock() + entry.path = p + entry.is_folder = False + model.get_entry.return_value = entry + + resolver.update_from_model(model) + # Should succeed and have entry + assert len(resolver._hash_to_path) == 1 diff --git a/faststack/tests/test_deletion_unification.py b/faststack/tests/test_deletion_unification.py index 96d62fd..7f63ac2 100644 --- a/faststack/tests/test_deletion_unification.py +++ b/faststack/tests/test_deletion_unification.py @@ -40,6 +40,7 @@ def mock_controller(tmp_path, qapp): # Mock the executor to prevent background jobs from running during tests from concurrent.futures import Future + controller._delete_executor = Mock() controller._delete_executor.submit.side_effect = lambda *a, **kw: Future() @@ -57,6 +58,7 @@ def mock_controller(tmp_path, qapp): # ── Optimistic UI tests ────────────────────────────────────────────── + def test_delete_batch_optimistic_removal(mock_controller): """Test that batch deletion optimistically removes images from the list.""" img1 = ImageFile(Path("test1.jpg")) @@ -214,6 +216,7 @@ def test_delete_schedules_refresh(mock_controller): # ── Undo tests ─────────────────────────────────────────────────────── + def test_undo_pending_delete_restores_items(mock_controller): """Test that undo during pending delete restores items without disk ops.""" img1 = ImageFile(Path("test1.jpg")) @@ -255,6 +258,7 @@ def test_undo_pending_batch_delete_restores_all(mock_controller): # ── Cancel mid-flight restores unprocessed items ────────────────────── + def test_cancel_midlight_restores_unprocessed(mock_controller): """Cancel mid-flight: completion with partial success restores unprocessed items.""" img1 = ImageFile(Path("img1.jpg")) @@ -271,12 +275,14 @@ def test_cancel_midlight_restores_unprocessed(mock_controller): # Simulate worker result: 1 success, 2 cancelled (unprocessed) result = { "job_id": job_id, - "successes": [{ - "jpg": img1.path.resolve(), - "recycled_jpg": Path("recycle/img1.jpg"), - "raw": None, - "recycled_raw": None - }], + "successes": [ + { + "jpg": img1.path.resolve(), + "recycled_jpg": Path("recycle/img1.jpg"), + "raw": None, + "recycled_raw": None, + } + ], "failures": [ {"jpg": img2.path.resolve(), "raw": None, "code": "cancelled"}, {"jpg": img3.path.resolve(), "raw": None, "code": "cancelled"}, @@ -298,6 +304,7 @@ def test_cancel_midlight_restores_unprocessed(mock_controller): # ── Undo pending prevents later bookkeeping ────────────────────────── + def test_undo_pending_auto_restores_moved_files(mock_controller): """Undo pending delete, then completion arrives: files are auto-restored (Policy 1).""" img1 = ImageFile(Path("img1.jpg")) @@ -316,12 +323,14 @@ def test_undo_pending_auto_restores_moved_files(mock_controller): # Simulate completion arriving AFTER undo (some files already moved) result = { "job_id": job_id, - "successes": [{ - "jpg": img1.path.resolve(), - "recycled_jpg": Path("recycle/img1.jpg"), - "raw": None, - "recycled_raw": None - }], + "successes": [ + { + "jpg": img1.path.resolve(), + "recycled_jpg": Path("recycle/img1.jpg"), + "raw": None, + "recycled_raw": None, + } + ], "failures": [ {"jpg": img2.path.resolve(), "raw": None, "code": "cancelled"}, ], @@ -336,21 +345,21 @@ def 
test_undo_pending_auto_restores_moved_files(mock_controller): # 2. UI list should still have both images assert len(mock_controller.image_files) == 2 - + # 3. Auto-restore should have been called for img1 (the success) mock_controller._restore_from_recycle_bin_safe.assert_called_with( img1.path.resolve(), Path("recycle/img1.jpg") ) # 4. Status message should update - mock_controller.update_status_message.assert_called_with("Deletion cancelled (files restored)") + mock_controller.update_status_message.assert_called_with( + "Deletion cancelled (files restored)" + ) # ── Permanent delete result handled ────────────────────────────────── - - def test_recycle_failure_prompts_perm_delete(mock_controller, tmp_path): """Verify that recycle failure triggers a permanent delete prompt.""" img_path = tmp_path / "test.jpg" @@ -360,42 +369,42 @@ def test_recycle_failure_prompts_perm_delete(mock_controller, tmp_path): summary = mock_controller._delete_indices([0], "test") job_id = summary["job_id"] - + # Simulate worker result: recycle failed result = { "job_id": job_id, "successes": [], - "failures": [{ - "jpg": img_path.resolve(), - "raw": None, - "code": "recycle_failed" - }], + "failures": [ + {"jpg": img_path.resolve(), "raw": None, "code": "recycle_failed"} + ], "cancelled": False, } # PATCH confirm_permanent_delete to say YES - with patch("faststack.app.confirm_permanent_delete", return_value=True) as mock_confirm: - mock_controller._on_delete_finished(result) - - # Should have prompted - mock_confirm.assert_called_once() - - # Should have submitted to executor (ASYNC) - # Called twice: 1. initial delete, 2. perm delete - assert mock_controller._delete_executor.submit.call_count == 2 - - # Verify the last call was for _perm_delete_worker - args, _ = mock_controller._delete_executor.submit.call_args - assert args[0] == AppController._perm_delete_worker - - # Simulate async worker completion - perm_result = { - "job_id": job_id, - "_perm_result": True, - "perm_success": [(0, img)], - "perm_fail": [] - } - mock_controller._on_delete_finished(perm_result) + with patch( + "faststack.app.confirm_permanent_delete", return_value=True + ) as mock_confirm: + mock_controller._on_delete_finished(result) + + # Should have prompted + mock_confirm.assert_called_once() + + # Should have submitted to executor (ASYNC) + # Called twice: 1. initial delete, 2. perm delete + assert mock_controller._delete_executor.submit.call_count == 2 + + # Verify the last call was for _perm_delete_worker + args, _ = mock_controller._delete_executor.submit.call_args + assert args[0] == AppController._perm_delete_worker + + # Simulate async worker completion + perm_result = { + "job_id": job_id, + "_perm_result": True, + "perm_success": [(0, img)], + "perm_fail": [], + } + mock_controller._on_delete_finished(perm_result) # Since it succeeded, item should be gone from UI (it was removed optimistically and confirmed) # Wait: optimistically removed -> failed -> perm prompt -> success. 
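    # Net result: the optimistic removal stands, exactly as if the recycle
    # had succeeded on the first try, so no rollback path is exercised.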
@@ -405,6 +414,7 @@ def test_recycle_failure_prompts_perm_delete(mock_controller, tmp_path): # ── Batch/selection clearing tests ──────────────────────────────────── + # @pytest.mark.skip(reason="Flaky in mock environment - logic verified manually") def test_batch_restored_on_rollback(mock_controller): """Batch state is restored when delete completion rolls back failed items.""" @@ -432,7 +442,7 @@ def test_batch_restored_on_rollback(mock_controller): ], "cancelled": False, } - + # Mock confirm_batch_permanent_delete to return False (User says NO) # We patch it where it is imported in app.py with patch("faststack.app.confirm_batch_permanent_delete", return_value=False): diff --git a/faststack/tests/test_editor_lifecycle_and_safety.py b/faststack/tests/test_editor_lifecycle_and_safety.py index 7127621..48b745f 100644 --- a/faststack/tests/test_editor_lifecycle_and_safety.py +++ b/faststack/tests/test_editor_lifecycle_and_safety.py @@ -51,7 +51,7 @@ def tearDown(self): self.qapp_patcher.stop() # Ensure we shutdown executors to avoid hanging tests - self.controller._shutdown_executors() + self.controller.shutdown_nonqt() def test_memory_cleanup_on_editor_close(self): """Verify that memory is cleared when the editor is closed.""" diff --git a/faststack/tests/test_editor_no_copy.py b/faststack/tests/test_editor_no_copy.py index 9cf5e54..925f7ab 100644 --- a/faststack/tests/test_editor_no_copy.py +++ b/faststack/tests/test_editor_no_copy.py @@ -8,7 +8,12 @@ def fingerprint(arr: np.ndarray): """Strong content + identity-ish fingerprint.""" - return (str(arr.dtype), arr.shape, arr.strides, hashlib.sha256(arr.tobytes()).hexdigest()) + return ( + str(arr.dtype), + arr.shape, + arr.strides, + hashlib.sha256(arr.tobytes()).hexdigest(), + ) def make_editor_with_image() -> ImageEditor: @@ -59,7 +64,9 @@ def test_apply_edits_no_copy_does_not_mutate_input(): _out = ed._apply_edits(ed.float_image, for_export=True) after = fingerprint(ed.float_image) - assert after == before, "float_image was mutated by _apply_edits on the no-copy path" + assert ( + after == before + ), "float_image was mutated by _apply_edits on the no-copy path" def test_save_image_passes_float_image_without_copy_when_safe(tmp_path): @@ -69,14 +76,14 @@ def test_save_image_passes_float_image_without_copy_when_safe(tmp_path): to avoid global Path patches. """ ed = make_editor_with_image() - + # Create a real dummy file so we don't need to patch Path.exists/stat globally dummy_file = tmp_path / "test.jpg" dummy_file.write_bytes(b"fake_jpg_content") - + # Set modify time to something non-zero for stat checks # (though typically write_bytes sets mtime) - + ed.current_filepath = dummy_file # Safe edits only (no vignette/geometry/linear edits) @@ -115,20 +122,27 @@ def spy_apply(arr, for_export=False, *args, **kwargs): # Mock all the save_image I/O edges locally or on the instance # We no longer patch Path.exists or Path.stat globally! - + # We still need to patch create_backup_file to avoid actual backup copying logic - # or just let it run if we don't care? + # or just let it run if we don't care? # The existing test patched it. Let's patch it to return a dummy path without side effects. 
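    # What actually matters here is spy_apply: it records whether the very
    # same ndarray object (identity, not equality) reaches _apply_edits,
    # which is the observable contract of the no-copy fast path.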
- - with patch.object(ed, "_apply_edits", side_effect=spy_apply), \ - patch("faststack.imaging.editor.create_backup_file", return_value=tmp_path / "backup.jpg"), \ - patch("PIL.Image.Image.save"), \ - patch.object(ed, "_restore_file_times"), \ - patch.object(ed, "_get_sanitized_exif_bytes", return_value=None): + + with ( + patch.object(ed, "_apply_edits", side_effect=spy_apply), + patch( + "faststack.imaging.editor.create_backup_file", + return_value=tmp_path / "backup.jpg", + ), + patch("PIL.Image.Image.save"), + patch.object(ed, "_restore_file_times"), + patch.object(ed, "_get_sanitized_exif_bytes", return_value=None), + ): ed.save_image() - assert seen["same_obj"] is True, "save_image did not pass self.float_image directly on safe no-copy path" + assert ( + seen["same_obj"] is True + ), "save_image did not pass self.float_image directly on safe no-copy path" def test_edits_can_share_input_exclusions(): diff --git a/faststack/tests/test_executor_shutdown.py b/faststack/tests/test_executor_shutdown.py index c3dc1c5..0f79a23 100644 --- a/faststack/tests/test_executor_shutdown.py +++ b/faststack/tests/test_executor_shutdown.py @@ -5,16 +5,17 @@ from faststack.util.executors import ( create_priority_executor, create_daemon_threadpool_executor, - PriorityExecutor + PriorityExecutor, ) + def test_shutdown_drains_queue_by_default(): """Test that shutdown(cancel_futures=False) allows queued tasks to run.""" executor = create_priority_executor(max_workers=1, thread_name_prefix="TestDrain") - + results = [] started_event = threading.Event() - + def task(val): started_event.set() time.sleep(0.1) @@ -24,19 +25,19 @@ def task(val): # Occupy the worker f1 = executor.submit(task, "head") started_event.wait(timeout=1.0) - + # Queue some items f2 = executor.submit(task, "queued1") f3 = executor.submit(task, "queued2") - + # Shutdown without cancelling futures (wait=True by default) # This should wait for f1, and then process f2 and f3 executor.shutdown(wait=True, cancel_futures=False) - + assert f1.result() == "head" assert f2.result() == "queued1" assert f3.result() == "queued2" - + # Since PriorityExecutor is LIFO for same priority: # queued2 is newer than queued1, so it runs first? 
# Let's check the implementation: @@ -47,13 +48,14 @@ def task(val): assert "queued2" in results assert len(results) == 3 + def test_shutdown_can_cancel_queued(): """Test that shutdown(cancel_futures=True) cancels queued tasks.""" executor = create_priority_executor(max_workers=1, thread_name_prefix="TestCancel") - + results = [] started_event = threading.Event() - + def task(val): started_event.set() time.sleep(0.1) @@ -63,52 +65,56 @@ def task(val): # Occupy the worker f1 = executor.submit(task, "head") started_event.wait(timeout=1.0) - + # Queue some items f2 = executor.submit(task, "queued1") f3 = executor.submit(task, "queued2") - + # Shutdown WITH cancelling futures executor.shutdown(wait=True, cancel_futures=True) - + # f1 should finish (it was running) assert f1.result() == "head" assert "head" in results - + # f2 and f3 should be cancelled with pytest.raises(concurrent.futures.CancelledError): f2.result() - + with pytest.raises(concurrent.futures.CancelledError): f3.result() - + assert "queued1" not in results assert "queued2" not in results + def test_daemon_threadpool_executor(): """Test that create_daemon_threadpool_executor creates daemon threads.""" - executor = create_daemon_threadpool_executor(max_workers=2, thread_name_prefix="TestDaemon") - + executor = create_daemon_threadpool_executor( + max_workers=2, thread_name_prefix="TestDaemon" + ) + def check_daemon(): return threading.current_thread().daemon - + futs = [executor.submit(check_daemon) for _ in range(4)] results = [f.result() for f in futs] - + assert all(results), "All worker threads should be daemon" executor.shutdown() + def test_spawn_overhead_and_error_handling(): """Test that the creator correctly propagates errors if something was broken.""" # This checks the defensive coding in create_daemon_threadpool_executor - - # Since we can't easily inject a failure into ThreadPoolExecutor constructor directly + + # Since we can't easily inject a failure into ThreadPoolExecutor constructor directly # without patching, we'll verify it works normally and has the expected structure. - + executor = create_daemon_threadpool_executor(max_workers=1) assert isinstance(executor, concurrent.futures.ThreadPoolExecutor) executor.shutdown() - + # Verify ValueError on invalid workers with pytest.raises(ValueError): create_daemon_threadpool_executor(max_workers=0) diff --git a/faststack/tests/test_exif_orientation.py b/faststack/tests/test_exif_orientation.py index 2c5bbee..45bfbe4 100644 --- a/faststack/tests/test_exif_orientation.py +++ b/faststack/tests/test_exif_orientation.py @@ -13,7 +13,7 @@ class TestExifOrientation(unittest.TestCase): def setUp(self): self.test_dir = tempfile.mkdtemp() self.editor = ImageEditor() - + # Patch cv2 in the editor module if needed for some tests, # but ImageEditor logic mainly uses PIL/numpy unless specialized. # If we really need to mock cv2, we should do it on the imported module attribute. @@ -160,7 +160,7 @@ def test_raw_mode_exif_preservation(self): # If we ARE developing a RAW, we usually want to bake in the orientation # or at least ensure the output is correct. - + # Current logic: We ALWAYS sanitize to 1 because we bake orientation on load. # This prevents "double rotation". 
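The orientation tests above all lean on one contract: load_image bakes the
EXIF rotation into the pixels, so the saved file must carry Orientation = 1.
A minimal standalone sketch of that contract (plain Pillow only, independent
of faststack; the values here are illustrative):

    from PIL import ExifTags, Image

    # Orientation 6 = "rotate 90 degrees CW to display". Baking applies that
    # rotation to the pixels themselves; afterwards the tag must be reset to 1,
    # or viewers would rotate the already-rotated pixels again ("double rotation").
    img = Image.new("RGB", (100, 50))                  # 100 wide, 50 tall
    baked = img.transpose(Image.Transpose.ROTATE_270)  # 90 CW -> 50 wide, 100 tall
    assert baked.size == (50, 100)

    exif = baked.getexif()
    exif[ExifTags.Base.Orientation] = 1                # pixels are now upright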
diff --git a/faststack/tests/test_feedback_fixes.py b/faststack/tests/test_feedback_fixes.py index 1869b4c..d1f6f23 100644 --- a/faststack/tests/test_feedback_fixes.py +++ b/faststack/tests/test_feedback_fixes.py @@ -7,114 +7,143 @@ from faststack.io.variants import build_variant_map, norm_path from faststack.io.indexer import find_images_with_variants + def test_cache_evict_callback_payload(): """Verify that ByteLRUCache passes (key, value) to on_evict.""" evicted_items = [] - + def on_evict(k, v): evicted_items.append((k, v)) - + # Small cache to trigger eviction cache = ByteLRUCache(max_bytes=100, on_evict=on_evict) - + # Create some test images - img1 = DecodedImage(buffer=np.zeros(60, dtype=np.uint8), width=60, height=1, bytes_per_line=60, format=1) - img2 = DecodedImage(buffer=np.zeros(60, dtype=np.uint8), width=60, height=1, bytes_per_line=60, format=1) - + img1 = DecodedImage( + buffer=np.zeros(60, dtype=np.uint8), + width=60, + height=1, + bytes_per_line=60, + format=1, + ) + img2 = DecodedImage( + buffer=np.zeros(60, dtype=np.uint8), + width=60, + height=1, + bytes_per_line=60, + format=1, + ) + cache["k1"] = img1 assert len(evicted_items) == 0 - + # Adding k2 should evict k1 cache["k2"] = img2 assert len(evicted_items) == 1 assert evicted_items[0][0] == "k1" assert evicted_items[0][1] == img1 + def test_cache_popitem_callback_payload(): """Verify popitem passes (key, value) to on_evict.""" evicted_items = [] + def on_evict(k, v): evicted_items.append((k, v)) - + cache = ByteLRUCache(max_bytes=1000, on_evict=on_evict) - img = DecodedImage(buffer=np.zeros(10, dtype=np.uint8), width=10, height=1, bytes_per_line=10, format=1) + img = DecodedImage( + buffer=np.zeros(10, dtype=np.uint8), + width=10, + height=1, + bytes_per_line=10, + format=1, + ) cache["k1"] = img - + cache.popitem() assert len(evicted_items) == 1 assert evicted_items[0] == ("k1", img) -def test_image_editor_save_exception(tmp_path): + +def test_image_editor_save_exception(): """Verify ImageEditor.save_image raises RuntimeError if no float_image.""" editor = ImageEditor() # No image loaded with pytest.raises(RuntimeError, match="No file path set"): editor.save_image() + def test_variant_normalization_consistency(): """Verify variant mapping handles mixed-case extensions and paths consistently.""" # Simulate mixed case extensions - paths = [ - Path("test.JPG"), - Path("test-backup.jpeg"), - Path("test-developed.jpg") - ] - + paths = [Path("test.JPG"), Path("test-backup.jpeg"), Path("test-developed.jpg")] + vmap = build_variant_map(paths) key_cf = "test".casefold() assert key_cf in vmap - + group = vmap[key_cf] assert group.main_path is not None assert norm_path(group.main_path) == norm_path(Path("test.JPG")) assert group.developed_path is not None assert group.developed_path.name.lower() == "test-developed.jpg" + def test_orphan_developed_preservation(): """Verify find_images_with_variants keeps developed JPGs even if no main JPG exists.""" import tempfile + with tempfile.TemporaryDirectory() as td: tdp = Path(td) # Create an orphan developed file orphan = tdp / "orphan-developed.jpg" orphan.write_text("dummy") - + # Also create a regular pair for comparison pair_main = tdp / "pair.jpg" pair_dev = tdp / "pair-developed.jpg" pair_main.write_text("dummy") pair_dev.write_text("dummy") - - visible, vmap = find_images_with_variants(tdp) - + + visible, _vmap = find_images_with_variants(tdp) + visible_names = [img.path.name for img in visible] assert "orphan-developed.jpg" in visible_names assert "pair.jpg" in visible_names 
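+        # The developed variant of an existing main image is folded into
+        # that image's variant group rather than listed as its own entry: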
assert "pair-developed.jpg" not in visible_names + def test_cache_evict_lock_not_held(): """Verify that on_evict is called OUTSIDE the cache lock.""" lock_held_during_callback = False - + def on_evict(k, v): nonlocal lock_held_during_callback import threading + def check_lock(): nonlocal lock_held_during_callback if not cache._lock.acquire(blocking=False): lock_held_during_callback = True else: cache._lock.release() - + t = threading.Thread(target=check_lock) t.start() t.join() # Small cache to trigger eviction on second set cache = ByteLRUCache(max_bytes=100, on_evict=on_evict) - img = DecodedImage(buffer=np.zeros(60, dtype=np.uint8), width=60, height=1, bytes_per_line=60, format=1) - + img = DecodedImage( + buffer=np.zeros(60, dtype=np.uint8), + width=60, + height=1, + bytes_per_line=60, + format=1, + ) + cache["k1"] = img cache["k2"] = img - + assert not lock_held_during_callback, "Lock was held during eviction callback!" diff --git a/faststack/tests/test_handle_failures.py b/faststack/tests/test_handle_failures.py index eb90a88..3b6cbe5 100644 --- a/faststack/tests/test_handle_failures.py +++ b/faststack/tests/test_handle_failures.py @@ -25,6 +25,7 @@ class DummyController: (_delete_executor, _pending_delete_jobs, etc.) are instance attributes that won't exist on the class spec. """ + pass @@ -68,7 +69,9 @@ def test_handle_delete_failures_recycle_codes(): controller._perm_delete_worker = MagicMock() # Bind the real method onto the dummy instance - controller._handle_delete_failures = AppController._handle_delete_failures.__get__(controller, AppController) + controller._handle_delete_failures = AppController._handle_delete_failures.__get__( + controller, AppController + ) # Create a result with a RECYCLE_FAILED failure fail_code = DeletionErrorCodes.RECYCLE_FAILED.value # "recycle_failed" @@ -88,13 +91,21 @@ def test_handle_delete_failures_recycle_codes(): ) # Patch confirm_permanent_delete in faststack.app (where it's used) - with patch("faststack.app.confirm_permanent_delete", return_value=True) as mock_confirm: + with patch( + "faststack.app.confirm_permanent_delete", return_value=True + ) as mock_confirm: controller._handle_delete_failures(result, job) assert mock_confirm.called, "Should have prompted for permanent delete" - assert 123 in controller._pending_delete_jobs, "Job should be stored in pending map" - assert controller._delete_executor.submit.called, "Should have submitted perm delete worker" - assert fut.add_done_callback.called, "Should have registered callback on the future" + assert ( + 123 in controller._pending_delete_jobs + ), "Job should be stored in pending map" + assert ( + controller._delete_executor.submit.called + ), "Should have submitted perm delete worker" + assert ( + fut.add_done_callback.called + ), "Should have registered callback on the future" # Non-recycle code: should rollback, not prompt controller._pending_delete_jobs.clear() @@ -103,13 +114,21 @@ def test_handle_delete_failures_recycle_codes(): fut.add_done_callback.reset_mock() result.failures[0].code = "some_other_error" - with patch("faststack.app.confirm_permanent_delete", return_value=True) as mock_confirm: + with patch( + "faststack.app.confirm_permanent_delete", return_value=True + ) as mock_confirm: controller._handle_delete_failures(result, job) assert not mock_confirm.called, "Should NOT have prompted for non-recycle error" - assert controller._rollback_ui_items.called, "Should have rolled back UI for non-recycle error" - assert 123 not in controller._pending_delete_jobs, "Job should NOT 
be kept pending for non-recycle error" - assert not controller._delete_executor.submit.called, "Should NOT submit perm delete for non-recycle error" + assert ( + controller._rollback_ui_items.called + ), "Should have rolled back UI for non-recycle error" + assert ( + 123 not in controller._pending_delete_jobs + ), "Job should NOT be kept pending for non-recycle error" + assert ( + not controller._delete_executor.submit.called + ), "Should NOT submit perm delete for non-recycle error" if __name__ == "__main__": @@ -118,6 +137,6 @@ def test_handle_delete_failures_recycle_codes(): print("Test passed!") except Exception: import traceback + traceback.print_exc() sys.exit(1) - diff --git a/faststack/tests/test_handle_failures_isolated.py b/faststack/tests/test_handle_failures_isolated.py index 3738873..6c634e0 100644 --- a/faststack/tests/test_handle_failures_isolated.py +++ b/faststack/tests/test_handle_failures_isolated.py @@ -1,13 +1,18 @@ - import sys from unittest.mock import MagicMock, patch from pathlib import Path -from faststack.deletion_types import DeletionErrorCodes, DeleteResult, DeleteFailure, DeleteJob +from faststack.deletion_types import ( + DeletionErrorCodes, + DeleteResult, + DeleteFailure, + DeleteJob, +) # Mocks for global functions that might be called confirm_permanent_delete = MagicMock(return_value=True) confirm_batch_permanent_delete = MagicMock(return_value=True) + class MockController: def __init__(self): self._pending_delete_jobs = {} @@ -25,40 +30,41 @@ def _key(self, p): # Dynamically bind the real method we want to test # This ensures we test the ACTUAL logic in app.py, not a copy from faststack.app import AppController + _handle_delete_failures = AppController._handle_delete_failures + @patch("faststack.app.confirm_permanent_delete", return_value=True) @patch("faststack.app.confirm_batch_permanent_delete", return_value=True) -def test_handle_delete_failures_recycle_codes_isolation(mock_confirm, mock_batch_confirm): +def test_handle_delete_failures_recycle_codes_isolation( + mock_confirm_batch_permanent_delete, mock_confirm_permanent_delete +): controller = MockController() - + # Create failure with RECYCLE_FAILED code fail_code = DeletionErrorCodes.RECYCLE_FAILED.value - + result = DeleteResult( - job_id=1, - failures=[ - DeleteFailure(jpg=Path("foo.jpg"), code=fail_code) - ] + job_id=1, failures=[DeleteFailure(jpg=Path("foo.jpg"), code=fail_code)] ) - + job = DeleteJob( job_id=1, removed_items=[(0, MagicMock(path=Path("foo.jpg")))], - action_type="loupe", # dummy + action_type="loupe", # dummy timestamp=0, cancel_event=None, previous_index=0, - images_to_delete=[] + images_to_delete=[], ) - + controller._handle_delete_failures(result, job) - - # Since we found a recycle code, we should NOT have called _rollback_ui_items + + # Since we found a recycle code, we should NOT have called _rollback_ui_items # for the full list (it would happen for non-candidates). # In this case, foo.jpg IS a candidate. 
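    # ("Candidate" here means a failure whose code is RECYCLE_FAILED,
    # i.e. one that is eligible for the permanent-delete prompt rather
    # than a plain UI rollback.)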
# So _rollback_ui_items should NOT be called for foo.jpg - + # The code path: # if perm_candidates: # to_rollback = [items NOT in candidates] -> empty @@ -66,22 +72,26 @@ def test_handle_delete_failures_recycle_codes_isolation(mock_confirm, mock_batch # # prompt logic (omitted in copy, satisfied by pass) # else: # call _rollback_ui_items(all) - - assert not controller._rollback_ui_items.called, "Should not rollback candidate for perm delete" + + assert ( + not controller._rollback_ui_items.called + ), "Should not rollback candidate for perm delete" # Now test with NON-recycle code controller._rollback_ui_items.reset_mock() result.failures[0].code = "some_other_error" - + controller._handle_delete_failures(result, job) - + assert controller._rollback_ui_items.called, "Should rollback non-recycle failure" + if __name__ == "__main__": try: test_handle_delete_failures_recycle_codes_isolation() print("Test passed!") except Exception as e: import traceback + traceback.print_exc() print(f"Test failed: {e}") diff --git a/faststack/tests/test_helicon_cleanup.py b/faststack/tests/test_helicon_cleanup.py index 6277ec5..c8f246c 100644 --- a/faststack/tests/test_helicon_cleanup.py +++ b/faststack/tests/test_helicon_cleanup.py @@ -6,57 +6,61 @@ from faststack.app import AppController from faststack.models import ImageFile + @pytest.fixture def mock_controller(): # Mock dependencies required by AppController init engine = MagicMock() - with patch("faststack.app.Watcher"), \ - patch("faststack.app.SidecarManager"), \ - patch("faststack.app.ImageEditor"), \ - patch("faststack.app.ByteLRUCache"), \ - patch("faststack.app.Prefetcher"), \ - patch("faststack.app.ThumbnailCache"), \ - patch("faststack.app.PathResolver"), \ - patch("faststack.app.ThumbnailPrefetcher"), \ - patch("faststack.app.ThumbnailModel"), \ - patch("faststack.app.ThumbnailProvider"), \ - patch("faststack.app.concurrent.futures.ThreadPoolExecutor"), \ - patch("faststack.app.QTimer"), \ - patch("faststack.app.QApplication"): # Mock QApplication to avoid segfaults + with ( + patch("faststack.app.Watcher"), + patch("faststack.app.SidecarManager"), + patch("faststack.app.ImageEditor"), + patch("faststack.app.ByteLRUCache"), + patch("faststack.app.Prefetcher"), + patch("faststack.app.ThumbnailCache"), + patch("faststack.app.PathResolver"), + patch("faststack.app.ThumbnailPrefetcher"), + patch("faststack.app.ThumbnailModel"), + patch("faststack.app.ThumbnailProvider"), + patch("faststack.app.concurrent.futures.ThreadPoolExecutor"), + patch("faststack.app.QTimer"), + patch("faststack.app.QApplication"), + ): # Mock QApplication to avoid segfaults controller = AppController(image_dir=Path("c:/images"), engine=engine) - + # Setup dummy images for the controller img1 = ImageFile(Path("c:/images/img1.jpg")) controller.image_files = [img1] - + return controller + def test_deferred_cleanup(mock_controller): """Verify that temp files are tracked and deleted on shutdown.""" - + # Create a real temporary file to verify deletion with tempfile.NamedTemporaryFile("w", delete=False) as tmp: tmp.write("test_file_list") tmp_path = Path(tmp.name) - + try: # Mock launch_helicon_focus to return success and our real temp path with patch("faststack.app.launch_helicon_focus", return_value=(True, tmp_path)): - + # Simulate launching helicon with some files # We bypass the stack logic and call _launch_helicon_with_files directly or via launch_helicon if valid # Let's call _launch_helicon_with_files directly for simplicity files = [Path("c:/images/img1.jpg")] success = 
mock_controller._launch_helicon_with_files(files) - + assert success is True assert tmp_path in mock_controller._temp_files_to_clean assert tmp_path.exists(), "File should still exist before shutdown" - + # Now simulate shutdown mock_controller.shutdown_nonqt() - + assert not tmp_path.exists(), "File should be deleted after shutdown" assert len(mock_controller._temp_files_to_clean) == 0 diff --git a/faststack/tests/test_helicon_launch.py b/faststack/tests/test_helicon_launch.py index 22fa6e7..a7b002c 100644 --- a/faststack/tests/test_helicon_launch.py +++ b/faststack/tests/test_helicon_launch.py @@ -12,6 +12,7 @@ @dataclass(frozen=True) class DummyImage: """Minimal stand-in for faststack.models.ImageFile used by launch_helicon().""" + path: Path raw_pair: Path | None = None @@ -21,22 +22,26 @@ def mock_controller(): # Mock dependencies required by AppController init engine = MagicMock() - with patch("faststack.app.Watcher"), \ - patch("faststack.app.SidecarManager"), \ - patch("faststack.app.ImageEditor"), \ - patch("faststack.app.ByteLRUCache"), \ - patch("faststack.app.Prefetcher"), \ - patch("faststack.app.ThumbnailCache"), \ - patch("faststack.app.PathResolver"), \ - patch("faststack.app.ThumbnailPrefetcher"), \ - patch("faststack.app.ThumbnailModel"), \ - patch("faststack.app.ThumbnailProvider"), \ - patch("faststack.app.concurrent.futures.ThreadPoolExecutor"): + with ( + patch("faststack.app.Watcher"), + patch("faststack.app.SidecarManager"), + patch("faststack.app.ImageEditor"), + patch("faststack.app.ByteLRUCache"), + patch("faststack.app.Prefetcher"), + patch("faststack.app.ThumbnailCache"), + patch("faststack.app.PathResolver"), + patch("faststack.app.ThumbnailPrefetcher"), + patch("faststack.app.ThumbnailModel"), + patch("faststack.app.ThumbnailProvider"), + patch("faststack.app.concurrent.futures.ThreadPoolExecutor"), + ): controller = AppController(image_dir=Path("c:/images"), engine=engine) # Provide image_files as simple objects with `.path` and `.raw_pair` - img1 = DummyImage(path=Path("c:/images/img1.jpg"), raw_pair=Path("c:/images/img1.CR2")) + img1 = DummyImage( + path=Path("c:/images/img1.jpg"), raw_pair=Path("c:/images/img1.CR2") + ) img2 = DummyImage(path=Path("c:/images/img2.jpg"), raw_pair=None) # No RAW fallback controller.image_files = [img1, img2] @@ -108,4 +113,3 @@ def test_uistate_delegation(mock_controller): ui_state.launch_helicon(False) files = _called_file_list(mock_controller) assert files[0].suffix.lower() == ".jpg" - diff --git a/faststack/tests/test_jump_to_last_uploaded.py b/faststack/tests/test_jump_to_last_uploaded.py index cc46b81..8610035 100644 --- a/faststack/tests/test_jump_to_last_uploaded.py +++ b/faststack/tests/test_jump_to_last_uploaded.py @@ -4,15 +4,18 @@ from faststack.app import AppController from faststack.models import ImageFile, EntryMetadata + @pytest.fixture(scope="session") def qapp(): """Ensure a QApplication exists for tests that might touch UI elements.""" from PySide6.QtWidgets import QApplication + app = QApplication.instance() if app is None: app = QApplication([]) return app + @pytest.fixture def mock_controller(tmp_path, qapp): """Creates an AppController with mocked dependencies.""" @@ -34,20 +37,22 @@ def mock_controller(tmp_path, qapp): patch("faststack.app.find_images", return_value=[]), ): controller = AppController(tmp_path, engine) - + # Additional mocks needed for jump_to_last_uploaded controller.ui_state = Mock() controller.sidecar = Mock() controller.update_status_message = Mock() controller.jump_to_image = 
Mock() - + # Make jump_to_image actually update the index to support state-based assertions def update_index(index): controller.current_index = index + controller.jump_to_image.side_effect = update_index - + return controller + def test_jump_to_last_uploaded_success(mock_controller): """Tests jumping to the last uploaded image in a list.""" img1 = ImageFile(Path("img1.jpg")) @@ -60,10 +65,10 @@ def test_jump_to_last_uploaded_success(mock_controller): meta1 = EntryMetadata(uploaded=True) meta2 = EntryMetadata(uploaded=False) meta3 = EntryMetadata(uploaded=True) - + def side_effect(stem): return {"img1": meta1, "img2": meta2, "img3": meta3}.get(stem, EntryMetadata()) - + mock_controller.sidecar.get_metadata.side_effect = side_effect mock_controller.jump_to_last_uploaded() @@ -73,6 +78,7 @@ def side_effect(stem): # Should emit grid scroll signal mock_controller.ui_state.gridScrollToIndex.emit.assert_called_once_with(2) + def test_jump_to_last_uploaded_already_there(mock_controller): """Tests behavior when already at the last uploaded image.""" img1 = ImageFile(Path("img1.jpg")) @@ -86,7 +92,10 @@ def test_jump_to_last_uploaded_already_there(mock_controller): # Should stay at index 0 assert mock_controller.current_index == 0 - mock_controller.update_status_message.assert_called_with("Already at last uploaded image") + mock_controller.update_status_message.assert_called_with( + "Already at last uploaded image" + ) + def test_jump_to_last_uploaded_none_found(mock_controller): """Tests behavior when no images are marked as uploaded.""" @@ -101,13 +110,19 @@ def test_jump_to_last_uploaded_none_found(mock_controller): # Should stay at index 0 assert mock_controller.current_index == 0 - mock_controller.update_status_message.assert_called_with("No uploaded images found in this folder") + mock_controller.update_status_message.assert_called_with( + "No uploaded images found in this folder" + ) + def test_jump_to_last_uploaded_empty_folder(mock_controller): """Tests behavior when the folder is empty.""" mock_controller.image_files = [] mock_controller.jump_to_last_uploaded() - mock_controller.update_status_message.assert_called_with("No images in current folder") + mock_controller.update_status_message.assert_called_with( + "No images in current folder" + ) + def test_jump_to_last_uploaded_one(mock_controller): """Tests jumping when only one uploaded image exists.""" @@ -115,19 +130,19 @@ def test_jump_to_last_uploaded_one(mock_controller): meta1 = EntryMetadata(uploaded=False) meta2 = EntryMetadata(uploaded=True) meta3 = EntryMetadata(uploaded=False) - + img1 = ImageFile(Path("img1.jpg")) img2 = ImageFile(Path("img2.jpg")) img3 = ImageFile(Path("img3.jpg")) mock_controller.image_files = [img1, img2, img3] mock_controller.current_index = 0 - + def side_effect(stem): return {"img1": meta1, "img2": meta2, "img3": meta3}.get(stem, EntryMetadata()) - + mock_controller.sidecar.get_metadata.side_effect = side_effect mock_controller.jump_to_last_uploaded() - + # Should jump to index 1 assert mock_controller.current_index == 1 diff --git a/faststack/tests/test_loupe_delete.py b/faststack/tests/test_loupe_delete.py index 43d93e7..9e5f894 100644 --- a/faststack/tests/test_loupe_delete.py +++ b/faststack/tests/test_loupe_delete.py @@ -9,6 +9,7 @@ @dataclass(frozen=True) class DummyImage: """Minimal stand-in for faststack.models.ImageFile.""" + path: Path raw_pair: Path | None = None @@ -47,6 +48,7 @@ def mock_controller(tmp_path, qapp): # Prevent background jobs from actually running from concurrent.futures import 
Future + controller._delete_executor = Mock() controller._delete_executor.submit.side_effect = lambda *a, **kw: Future() @@ -81,7 +83,7 @@ def _assert_cache_cleanup(mock_controller, deleted_paths): if hasattr(cache, "evict_paths") and cache.evict_paths.call_count: args, _kwargs = cache.evict_paths.call_args assert args, "evict_paths should receive at least one arg" - arg0 = list(args[0]) if not isinstance(args[0], (list, tuple, set)) else list(args[0]) + arg0 = list(args[0]) deleted_strs = {str(p) for p in deleted_paths} arg0_strs = {str(p) for p in arg0} assert deleted_strs & arg0_strs, "evict_paths should include deleted path(s)" @@ -148,12 +150,14 @@ def test_delete_async_completion(mock_controller, tmp_path): result = { "job_id": job_id, - "successes": [{ - "jpg": img_path_resolved, - "recycled_jpg": recycled, - "raw": None, - "recycled_raw": None - }], + "successes": [ + { + "jpg": img_path_resolved, + "recycled_jpg": recycled, + "raw": None, + "recycled_raw": None, + } + ], "failures": [], "cancelled": False, } @@ -161,10 +165,14 @@ def test_delete_async_completion(mock_controller, tmp_path): delete_entries = [e for e in mock_controller.undo_history if e[0] == "delete"] assert len(delete_entries) == 1 - pending_entries = [e for e in mock_controller.undo_history if e[0] == "pending_delete"] + pending_entries = [ + e for e in mock_controller.undo_history if e[0] == "pending_delete" + ] assert len(pending_entries) == 0 - mock_controller.update_status_message.assert_called_with("Image moved to recycle bin") + mock_controller.update_status_message.assert_called_with( + "Image moved to recycle bin" + ) def test_delete_current_image_cancel(mock_controller): @@ -212,16 +220,20 @@ def test_recycle_failure_restores_image_automatically(mock_controller): result = { "job_id": job_id, "successes": [], - "failures": [{ - "jpg": img1.path.resolve(), - "raw": None, - "code": "recycle_failed", - }], + "failures": [ + { + "jpg": img1.path.resolve(), + "raw": None, + "code": "recycle_failed", + } + ], "cancelled": False, } # User declines permanent delete -> expect rollback/restore - with patch("faststack.app.confirm_permanent_delete", return_value=False) as mock_confirm: + with patch( + "faststack.app.confirm_permanent_delete", return_value=False + ) as mock_confirm: mock_controller._on_delete_finished(result) mock_confirm.assert_called_once() diff --git a/faststack/tests/test_metadata.py b/faststack/tests/test_metadata.py index 54c6fdb..7904edf 100644 --- a/faststack/tests/test_metadata.py +++ b/faststack/tests/test_metadata.py @@ -1,7 +1,7 @@ import unittest from unittest.mock import MagicMock, patch from pathlib import Path -from faststack.imaging.metadata import get_exif_data, clean_exif_value +from faststack.imaging.metadata import get_exif_data, clean_exif_value, get_exif_brief from PIL import ExifTags @@ -36,8 +36,12 @@ def test_get_exif_data_success(self, mock_open, mock_exists): }, } - mock_img._getexif.return_value = exif_dict - mock_open.return_value = mock_img + class MockExif(dict): + def get_ifd(self, tag): + return {} + + mock_img.getexif.return_value = MockExif(exif_dict) + mock_open.return_value.__enter__.return_value = mock_img # Test result = get_exif_data(Path("dummy.jpg")) @@ -91,11 +95,12 @@ def test_clean_exif_value(self): # Test lists self.assertEqual(clean_exif_value([1, 2]), "['1', '2']") + @patch("pathlib.Path.exists", return_value=True) @patch("faststack.imaging.metadata.Image.open") - def test_get_exif_data_no_exif(self, mock_open): + def test_get_exif_data_no_exif(self, 
mock_open, mock_exists): mock_img = MagicMock() - mock_img._getexif.return_value = None - mock_open.return_value = mock_img + mock_img.getexif.return_value = None + mock_open.return_value.__enter__.return_value = mock_img result = get_exif_data(Path("dummy.jpg")) self.assertEqual(result["summary"], {}) @@ -107,6 +112,32 @@ def test_get_exif_data_real_file_not_found(self): self.assertEqual(result["summary"], {}) self.assertEqual(result["full"], {}) + @patch("pathlib.Path.exists", return_value=True) + @patch("faststack.imaging.metadata.log") + @patch("faststack.imaging.metadata.Image.open") + def test_get_exif_brief_failure(self, mock_open, mock_log, mock_exists): + # Setup mock image + mock_img = MagicMock() + + # Use MockExif similar to other tests + class MockExif(dict): + def get_ifd(self, tag): return {} + + # 36867 is DateTimeOriginal (0x9003) + mock_img.getexif.return_value = MockExif({36867: "2023:01:01 12:00:00"}) + + mock_open.return_value.__enter__.return_value = mock_img + + # Patch clean_exif_value to raise Exception + with patch("faststack.imaging.metadata.clean_exif_value", side_effect=ValueError("Forced Error")): + get_exif_brief(Path("dummy.jpg")) + + # Verify log.error was called + mock_log.error.assert_called() + call_args = mock_log.error.call_args[0] + self.assertIn("Failed to parse EXIF datetime", call_args[0]) + self.assertIn("Forced Error", call_args[0]) + if __name__ == "__main__": unittest.main() diff --git a/faststack/tests/test_new_features.py b/faststack/tests/test_new_features.py index 3aa953a..2b9fa87 100644 --- a/faststack/tests/test_new_features.py +++ b/faststack/tests/test_new_features.py @@ -43,7 +43,6 @@ def _to_gray_u8(result): ) - class TestNewFeatures(unittest.TestCase): def setUp(self): self.editor = ImageEditor() @@ -121,7 +120,6 @@ def test_highlights_recovery(self): # Allow small deviation due to float/int conversion self.assertTrue(abs(val_128 - 128) < 2) - def test_straighten_angle(self): # Set straighten angle self.editor.current_edits["straighten_angle"] = 45.0 diff --git a/faststack/tests/test_raw_pipeline.py b/faststack/tests/test_raw_pipeline.py index 10116f3..02f845b 100644 --- a/faststack/tests/test_raw_pipeline.py +++ b/faststack/tests/test_raw_pipeline.py @@ -25,6 +25,7 @@ class DummyImageFile: This avoids test-order issues where other tests may monkeypatch sys.modules["faststack.models"] and turn ImageFile into a MagicMock. 
""" + path: Path raw_pair: Path | None = None @@ -286,7 +287,9 @@ def fake_write_tiff_16bit(path: Path, arr_float: np.ndarray): with open(path, "wb") as f: f.write(b"II\x2a\x00") - with patch.object(editor, "_write_tiff_16bit", side_effect=fake_write_tiff_16bit): + with patch.object( + editor, "_write_tiff_16bit", side_effect=fake_write_tiff_16bit + ): res = editor.save_image(write_developed_jpg=True) self.assertIsNotNone(res) @@ -332,4 +335,3 @@ def test_editor_edit_float_logic(self): edits = {"brightness": 0.5} res = editor._apply_edits(arr.copy(), edits, for_export=True) np.testing.assert_allclose(res, 0.75, atol=0.01) - diff --git a/faststack/tests/test_reactive_delete.py b/faststack/tests/test_reactive_delete.py index b8edc46..c4aad5d 100644 --- a/faststack/tests/test_reactive_delete.py +++ b/faststack/tests/test_reactive_delete.py @@ -36,6 +36,7 @@ def app_controller(tmp_path): # Mock the executor to return a real Future we can control from concurrent.futures import Future + controller._delete_executor = MagicMock() controller._delete_executor.submit.side_effect = lambda *a, **kw: Future() @@ -113,12 +114,14 @@ def test_async_delete_completion(app_controller): # 3. Resolve future result = { "job_id": job_id, - "successes": [{ - "jpg": img_path, - "recycled_jpg": recycled_path, - "raw": None, - "recycled_raw": None - }], + "successes": [ + { + "jpg": img_path, + "recycled_jpg": recycled_path, + "raw": None, + "recycled_raw": None, + } + ], "failures": [], "cancelled": False, } @@ -160,11 +163,7 @@ def test_delete_rollback_on_cancel(app_controller): result = { "job_id": job_id, "successes": [], - "failures": [{ - "jpg": img_path, - "raw": None, - "code": "cancelled" - }], + "failures": [{"jpg": img_path, "raw": None, "code": "cancelled"}], "cancelled": True, } app_controller._on_delete_finished(result) @@ -231,12 +230,9 @@ def test_cancel_midlight_with_real_files(app_controller): result = { "job_id": job_id, - "successes": [{ - "jpg": p1, - "recycled_jpg": recycled_a, - "raw": None, - "recycled_raw": None - }], + "successes": [ + {"jpg": p1, "recycled_jpg": recycled_a, "raw": None, "recycled_raw": None} + ], "failures": [ {"jpg": p2, "raw": None, "code": "cancelled"}, {"jpg": p3, "raw": None, "code": "cancelled"}, @@ -279,12 +275,14 @@ def test_undo_midflight_auto_restores(app_controller, tmp_path): # Completion arrives (file was moved before cancel took effect) result = { "job_id": job_id, - "successes": [{ - "jpg": p1, - "recycled_jpg": Path("recycle/test.jpg"), - "raw": None, - "recycled_raw": None - }], + "successes": [ + { + "jpg": p1, + "recycled_jpg": Path("recycle/test.jpg"), + "raw": None, + "recycled_raw": None, + } + ], "failures": [], "cancelled": True, } @@ -298,4 +296,6 @@ def test_undo_midflight_auto_restores(app_controller, tmp_path): assert len(app_controller.image_files) == 1 # 3. 
Restore was attempted - app_controller._restore_from_recycle_bin_safe.assert_called_with(p1, Path("recycle/test.jpg")) + app_controller._restore_from_recycle_bin_safe.assert_called_with( + p1, Path("recycle/test.jpg") + ) diff --git a/faststack/tests/test_refresh_crash.py b/faststack/tests/test_refresh_crash.py index a536848..5b4b2fb 100644 --- a/faststack/tests/test_refresh_crash.py +++ b/faststack/tests/test_refresh_crash.py @@ -3,13 +3,14 @@ from unittest.mock import Mock, patch from faststack.thumbnail_view import ThumbnailModel + @pytest.fixture def model(tmp_path): # Mocking dependencies that might trigger complex I/O or UI logic with ( - patch('faststack.thumbnail_view.model.count_images_in_folder', return_value=0), - patch('faststack.thumbnail_view.model.read_folder_stats', return_value=None), - patch('faststack.thumbnail_view.model.find_images', return_value=[]), + patch("faststack.thumbnail_view.model.count_images_in_folder", return_value=0), + patch("faststack.thumbnail_view.model.read_folder_stats", return_value=None), + patch("faststack.thumbnail_view.model.find_images", return_value=[]), ): model = ThumbnailModel(tmp_path, tmp_path) # Mock Qt-specific calls that need a running event loop or app @@ -18,11 +19,13 @@ def model(tmp_path): model.selectionChanged = Mock() yield model + def test_refresh_no_name_error(model): """Verify that refresh() doesn't raise NameError (fix for regression).""" # This should not raise NameError for t0, t1, t2, t3 model.refresh() + def test_refresh_from_controller_no_name_error(model): """Verify that refresh_from_controller() doesn't raise NameError.""" # This should not raise NameError diff --git a/faststack/tests/test_refresh_optimization.py b/faststack/tests/test_refresh_optimization.py index e183858..ebba5dc 100644 --- a/faststack/tests/test_refresh_optimization.py +++ b/faststack/tests/test_refresh_optimization.py @@ -7,6 +7,7 @@ @pytest.fixture(scope="session") def qapp(): from PySide6.QtWidgets import QApplication + app = QApplication.instance() if app is None: app = QApplication([]) @@ -17,22 +18,23 @@ def qapp(): def controller(tmp_path, qapp): _ = qapp with ( - patch('faststack.app.Watcher'), - patch('faststack.app.SidecarManager'), - patch('faststack.app.setup_logging'), - patch('faststack.app.QQmlApplicationEngine'), - patch('faststack.app.ThumbnailModel'), + patch("faststack.app.Watcher"), + patch("faststack.app.SidecarManager"), + patch("faststack.app.setup_logging"), + patch("faststack.app.QQmlApplicationEngine"), + patch("faststack.app.ThumbnailModel"), ): ctrl = AppController(tmp_path, Mock()) ctrl._thumbnail_model = Mock() ctrl._path_resolver = Mock() return ctrl + def test_do_delete_refresh_updates_resolver(controller): """Verify that _do_delete_refresh updates the path resolver without full model rebuild.""" controller.image_files = [Mock(), Mock()] - with patch('faststack.app._debug_mode', True): + with patch("faststack.app._debug_mode", True): controller._do_delete_refresh() # Should NOT have called refresh_from_controller (trusts optimistic updates) diff --git a/faststack/tests/test_sidecar.py b/faststack/tests/test_sidecar.py index 5e9c351..2c97eae 100644 --- a/faststack/tests/test_sidecar.py +++ b/faststack/tests/test_sidecar.py @@ -147,4 +147,3 @@ def test_favorite_toggle_roundtrip(mock_sidecar_dir): sm2 = SidecarManager(d, None) meta2 = sm2.get_metadata("IMG_FAV") assert meta2.favorite is False - diff --git a/faststack/tests/test_skip_linear.py b/faststack/tests/test_skip_linear.py index cb99da3..8f7b466 100644 --- 
a/faststack/tests/test_skip_linear.py +++ b/faststack/tests/test_skip_linear.py @@ -17,7 +17,7 @@ def setUp(self): np.random.seed(42) self.editor = ImageEditor() # Deterministic float32 image in [0.1, 0.9] — avoids clip boundaries - self.arr = (np.random.rand(100, 100, 3).astype(np.float32) * 0.8 + 0.1) + self.arr = np.random.rand(100, 100, 3).astype(np.float32) * 0.8 + 0.1 def test_skip_linear_output_matches_full_pipeline(self): """Skip-linear uint8 output must match full-pipeline output within 1/255. @@ -32,7 +32,9 @@ def test_skip_linear_output_matches_full_pipeline(self): # Skip path: exposure == 0 → _skip_linear=True result_skip = self.editor._apply_edits( - self.arr.copy(), edits=edits_base, for_export=True, + self.arr.copy(), + edits=edits_base, + for_export=True, ) u8_skip = (np.clip(result_skip, 0.0, 1.0) * 255).astype(np.uint8) @@ -40,13 +42,18 @@ def test_skip_linear_output_matches_full_pipeline(self): edits_full = dict(edits_base) edits_full["exposure"] = 0.002 result_full = self.editor._apply_edits( - self.arr.copy(), edits=edits_full, for_export=True, + self.arr.copy(), + edits=edits_full, + for_export=True, ) u8_full = (np.clip(result_full, 0.0, 1.0) * 255).astype(np.uint8) - max_diff = int(np.max(np.abs(u8_skip.astype(np.int16) - u8_full.astype(np.int16)))) + max_diff = int( + np.max(np.abs(u8_skip.astype(np.int16) - u8_full.astype(np.int16))) + ) self.assertLessEqual( - max_diff, 1, + max_diff, + 1, f"Skip-linear output diverged from full pipeline by {max_diff}/255", ) @@ -75,7 +82,8 @@ def test_no_copy_path_does_not_mutate_float_image(self): # source must be identical (byte-for-byte) self.assertEqual( - source.data.tobytes().__hash__(), source_hash, + source.data.tobytes().__hash__(), + source_hash, "float_image was mutated by _apply_edits on the no-copy path", ) diff --git a/faststack/tests/test_startup_opt.py b/faststack/tests/test_startup_opt.py index 774db52..754be19 100644 --- a/faststack/tests/test_startup_opt.py +++ b/faststack/tests/test_startup_opt.py @@ -3,22 +3,25 @@ from unittest.mock import Mock, patch from faststack.app import AppController + @pytest.fixture(scope="session") def qapp(): from PySide6.QtWidgets import QApplication + app = QApplication.instance() if app is None: app = QApplication([]) return app + @pytest.fixture def controller(tmp_path, qapp): _ = qapp with ( - patch('faststack.app.Watcher'), - patch('faststack.app.SidecarManager'), - patch('faststack.app.setup_logging'), - patch('faststack.app.QQmlApplicationEngine'), + patch("faststack.app.Watcher"), + patch("faststack.app.SidecarManager"), + patch("faststack.app.setup_logging"), + patch("faststack.app.QQmlApplicationEngine"), ): ctrl = AppController(tmp_path, Mock()) ctrl._thumbnail_model = Mock() @@ -26,52 +29,57 @@ def controller(tmp_path, qapp): ctrl._path_resolver = Mock() return ctrl + def test_startup_only_one_scan(controller): - """Verify that startup performs exactly one variant scan and zero simple scans.""" - with patch('faststack.app.find_images_with_variants', return_value=([], {})) as mock_scan: + """Verify that startup performs exactly one variant scan.""" + with patch( + "faststack.app.find_images_with_variants", return_value=([], {}) + ) as mock_scan: controller.load() - + assert controller._scan_count_variant == 1 - assert controller._scan_count_simple == 0 assert controller._grid_refreshes == 1 assert mock_scan.call_count == 1 - + # refresh_from_controller should be used instead of refresh assert controller._thumbnail_model.refresh_from_controller.called assert not 
controller._thumbnail_model.refresh.called + def test_toggle_grid_avoids_redundant_refresh(controller): """Verify that toggling grid view does not trigger a refresh if the model is already current.""" # Setup: simulate a state where model is already current controller.image_files = [Mock()] controller._grid_model_dirty = False - controller._is_grid_view_active = False + controller._is_grid_view_active = False controller._thumbnail_model.rowCount.return_value = 1 controller._grid_refreshes = 0 - + controller._thumbnail_model.find_image_index.return_value = -1 - + # Toggle to grid controller._set_grid_view_active(True) - + # Should NOT have refreshed because it wasn't dirty and rowCount > 0 assert not controller._thumbnail_model.refresh_from_controller.called assert not controller._thumbnail_model.refresh.called assert controller._grid_refreshes == 0 + def test_apply_filter_uses_efficient_refresh(controller): """Verify that apply_filter uses refresh_from_controller and only one refresh.""" controller.image_files = [Mock()] controller._thumbnail_model.refresh_from_controller.reset_mock() controller._thumbnail_model.refresh.reset_mock() - + controller.apply_filter("test", []) - + assert controller._thumbnail_model.refresh_from_controller.called assert not controller._thumbnail_model.refresh.called # Check that it called set_filter with refresh=False controller._thumbnail_model.set_filter.assert_called_with("test", refresh=False) + def test_loupe_filter_handles_dirty_flag(controller): """Verify that filtering in loupe mode doesn't refresh the grid but marks it dirty.""" controller.image_files = [Mock()] @@ -79,21 +87,25 @@ def test_loupe_filter_handles_dirty_flag(controller): controller._grid_model_dirty = False controller._grid_refreshes = 0 controller._thumbnail_model.refresh_from_controller.reset_mock() - - print(f"DEBUG: before apply_filter: grid_active={controller._is_grid_view_active}, dirty={controller._grid_model_dirty}") + controller.apply_filter("test", []) - print(f"DEBUG: after apply_filter: grid_active={controller._is_grid_view_active}, dirty={controller._grid_model_dirty}") - + # Grid should NOT have refreshed - assert not controller._thumbnail_model.refresh_from_controller.called, "refresh_from_controller should NOT have been called" - assert controller._grid_refreshes == 0, f"grid_refreshes should be 0, got {controller._grid_refreshes}" + assert ( + not controller._thumbnail_model.refresh_from_controller.called + ), "refresh_from_controller should NOT have been called" + assert ( + controller._grid_refreshes == 0 + ), f"grid_refreshes should be 0, got {controller._grid_refreshes}" # Dirty flag should be set - assert controller._grid_model_dirty is True, "grid_model_dirty should be True after filtering in loupe mode" - + assert ( + controller._grid_model_dirty is True + ), "grid_model_dirty should be True after filtering in loupe mode" + # Toggle to grid should now trigger the refresh controller._thumbnail_model.find_image_index.return_value = -1 controller._set_grid_view_active(True) - + assert controller._thumbnail_model.refresh_from_controller.called assert controller._grid_refreshes == 1 assert controller._grid_model_dirty is False diff --git a/faststack/tests/test_startup_optimization.py b/faststack/tests/test_startup_optimization.py index 824c5cc..732a99a 100644 --- a/faststack/tests/test_startup_optimization.py +++ b/faststack/tests/test_startup_optimization.py @@ -9,12 +9,14 @@ # We mock AppController dependencies here # Assuming these tests are run in an environment 
where faststack is importable + @pytest.fixture(scope="session", autouse=True) def qapplication(): if not QApplication.instance(): app = QApplication(sys.argv) yield QApplication.instance() + @patch("faststack.app.UIState") @patch("faststack.app.Keybinder") @patch("faststack.app.ImageEditor") @@ -30,29 +32,38 @@ def qapplication(): @patch("faststack.app.config") def test_startup_optimization( MockConfig, - MockThumbnailModel, MockSidecarManager, mock_find_images, MockWatcher, - MockByteLRUCache, MockPrefetcher, MockThumbnailPrefetcher, MockThumbnailCache, - MockThumbnailProvider, MockImageEditor, MockKeybinder, MockUIState + MockThumbnailModel, + MockSidecarManager, + mock_find_images, + MockWatcher, + MockByteLRUCache, + MockPrefetcher, + MockThumbnailPrefetcher, + MockThumbnailCache, + MockThumbnailProvider, + MockImageEditor, + MockKeybinder, + MockUIState, ): """Verify that startup only triggers one disk scan and one model refresh.""" - + # Delayed import to ensure patches are active if AppController is imported at top level # (though typically patching modules works fine) from faststack.app import AppController # Setup mocks - mock_find_images.return_value = ([], {}) # Empty list of images - + mock_find_images.return_value = ([], {}) # Empty list of images + mock_model_instance = MockThumbnailModel.return_value - mock_model_instance.rowCount.return_value = 0 # Empty model initially + mock_model_instance.rowCount.return_value = 0 # Empty model initially mock_engine = MagicMock() - + controller = AppController(Path("."), mock_engine) - + # Simulate load() controller.load() - + # Assertions # 1. Exactly one scan variant (from refresh_image_list called by load) assert controller._scan_count_variant == 1 @@ -80,37 +91,46 @@ def test_startup_optimization( @patch("faststack.app.config") def test_filter_optimization( MockConfig, - MockThumbnailModel, MockSidecarManager, mock_find_images, MockWatcher, - MockByteLRUCache, MockPrefetcher, MockThumbnailPrefetcher, MockThumbnailCache, - MockThumbnailProvider, MockImageEditor, MockKeybinder, MockUIState + MockThumbnailModel, + MockSidecarManager, + mock_find_images, + MockWatcher, + MockByteLRUCache, + MockPrefetcher, + MockThumbnailPrefetcher, + MockThumbnailCache, + MockThumbnailProvider, + MockImageEditor, + MockKeybinder, + MockUIState, ): """Verify that filtering uses optimized refresh logic.""" from faststack.app import AppController - + # Setup mocks - mock_find_images.return_value = ([], {}) - + mock_find_images.return_value = ([], {}) + mock_model_instance = MockThumbnailModel.return_value - mock_model_instance.rowCount.return_value = 0 + mock_model_instance.rowCount.return_value = 0 mock_engine = MagicMock() controller = AppController(Path("."), mock_engine) - + # Initial load controller.load() - + # Reset mock_model_instance.reset_mock() controller._grid_refreshes = 0 controller._scan_count_variant = 0 - + # Apply filter controller.apply_filter("test", []) - + # Verify behavior mock_model_instance.set_filter.assert_called_with("test", refresh=False) mock_model_instance.set_filter_flags.assert_called_with([], refresh=False) - + # refresh_from_controller called (grid active by default) mock_model_instance.refresh_from_controller.assert_called_once() mock_model_instance.refresh.assert_not_called() @@ -121,8 +141,8 @@ def test_filter_optimization( mock_model_instance.reset_mock() controller._grid_refreshes = 0 controller._filter_enabled = True - + controller.clear_filter() - + mock_model_instance.refresh_from_controller.assert_not_called() 
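    # Both checks together pin down the deferred-refresh contract: clear_filter()
    # must not rebuild the thumbnail model eagerly; it only marks the grid dirty,
    # so the next _set_grid_view_active(True) performs a single coalesced refresh.
    # A sketch of the assumed shape (not the verified implementation):
    #
    #     def clear_filter(self):
    #         ...
    #         self._grid_model_dirty = True  # defer; no refresh_from_controller()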
assert controller._grid_model_dirty is True diff --git a/faststack/tests/test_thumbnail_ready_emits_datachanged.py b/faststack/tests/test_thumbnail_ready_emits_datachanged.py index 7dcece9..6413338 100644 --- a/faststack/tests/test_thumbnail_ready_emits_datachanged.py +++ b/faststack/tests/test_thumbnail_ready_emits_datachanged.py @@ -31,9 +31,24 @@ def thumbnail_model(qapp): # Manually add entries (bypass refresh which would scan disk) entries = [ - ThumbnailEntry(path=Path("/fake/dir/img001.jpg"), name="img001.jpg", is_folder=False, mtime_ns=1000), - ThumbnailEntry(path=Path("/fake/dir/img002.jpg"), name="img002.jpg", is_folder=False, mtime_ns=2000), - ThumbnailEntry(path=Path("/fake/dir/img003.jpg"), name="img003.jpg", is_folder=False, mtime_ns=3000), + ThumbnailEntry( + path=Path("/fake/dir/img001.jpg"), + name="img001.jpg", + is_folder=False, + mtime_ns=1000, + ), + ThumbnailEntry( + path=Path("/fake/dir/img002.jpg"), + name="img002.jpg", + is_folder=False, + mtime_ns=2000, + ), + ThumbnailEntry( + path=Path("/fake/dir/img003.jpg"), + name="img003.jpg", + is_folder=False, + mtime_ns=3000, + ), ] model.beginResetModel() @@ -100,5 +115,7 @@ def test_all_entries_have_mapping(thumbnail_model): for i, entry in enumerate(thumbnail_model._entries): if not entry.is_folder: tid = thumbnail_model._make_thumbnail_id(entry) - assert tid in thumbnail_model._id_to_row, f"Entry {i} ({entry.name}) not in _id_to_row" + assert ( + tid in thumbnail_model._id_to_row + ), f"Entry {i} ({entry.name}) not in _id_to_row" assert thumbnail_model._id_to_row[tid] == i diff --git a/faststack/tests/test_ui_prefetch_safety.py b/faststack/tests/test_ui_prefetch_safety.py index 0b9efc2..f8f0492 100644 --- a/faststack/tests/test_ui_prefetch_safety.py +++ b/faststack/tests/test_ui_prefetch_safety.py @@ -3,38 +3,39 @@ from pathlib import Path from faststack.ui.provider import UIState + class TestUIPrefetchSafety(unittest.TestCase): def setUp(self): self.app_controller = MagicMock() self.model = MagicMock() self.prefetcher = MagicMock() - + # Setup model and prefetcher on app_controller self.app_controller._thumbnail_model = self.model self.app_controller._thumbnail_prefetcher = self.prefetcher - + # Mock prefetcher constants self.prefetcher.PRIO_MED = 1 - + # Fake clock state self.current_time = 100.0 - + def fake_clock(): return self.current_time - + self.ui_state = UIState(self.app_controller, clock_func=fake_clock) - + # Default mock behavior self.model.rowCount.return_value = 5000 - self.model.thumbnail_size = (256, 256) - + self.model.thumbnail_size = 256 + def get_entry_mock(i): entry = MagicMock() entry.path = Path(f"image_{i}.jpg") entry.mtime_ns = 123456789 entry.is_folder = False return entry - + self.model.get_entry.side_effect = get_entry_mock def test_budget_trimming(self): @@ -55,18 +56,18 @@ def test_duplicate_suppression(self): self.ui_state.gridPrefetchRange(10, 50, maxCount=100) self.assertEqual(self.prefetcher.submit.call_count, 41) self.prefetcher.submit.reset_mock() - + # 2. Duplicate call at t+10ms - should be suppressed self.current_time += 0.010 self.ui_state.gridPrefetchRange(10, 50, maxCount=100) self.assertEqual(self.prefetcher.submit.call_count, 0) - + # 3. Duplicate call at t+40ms (from start) - should succeed (cooldown expired) self.current_time += 0.030 self.ui_state.gridPrefetchRange(10, 50, maxCount=100) self.assertEqual(self.prefetcher.submit.call_count, 41) self.prefetcher.submit.reset_mock() - + # 4. 
Different range at t+10ms (from last) - should succeed (not duplicate) self.current_time += 0.010 self.ui_state.gridPrefetchRange(10, 51, maxCount=100) @@ -79,12 +80,12 @@ def test_index_sanity(self): self.prefetcher.submit.reset_mock() self.ui_state.gridPrefetchRange(0, 10, 100) self.assertEqual(self.prefetcher.submit.call_count, 0) - + # Case 2: startIndex > endIndex self.model.rowCount.return_value = 5000 self.ui_state.gridPrefetchRange(100, 50, 100) self.assertEqual(self.prefetcher.submit.call_count, 0) - + # Case 3: Out-of-bounds clamping self.prefetcher.submit.reset_mock() self.ui_state.gridPrefetchRange(-10, 10000, 100) @@ -92,5 +93,6 @@ def test_index_sanity(self): self.ui_state.gridPrefetchRange(-10, 10000, 100) self.assertEqual(self.prefetcher.submit.call_count, 100) + if __name__ == "__main__": unittest.main() diff --git a/faststack/tests/test_ui_state_recycle.py b/faststack/tests/test_ui_state_recycle.py index 5401c20..e759b42 100644 --- a/faststack/tests/test_ui_state_recycle.py +++ b/faststack/tests/test_ui_state_recycle.py @@ -3,10 +3,11 @@ from pathlib import Path from faststack.ui.provider import UIState + def test_recycle_bin_detailed_text_formatting(): # Mock AppController mock_controller = Mock() - + # Define sample recycle bin stats sample_stats = [ { @@ -15,7 +16,7 @@ def test_recycle_bin_detailed_text_formatting(): "jpg_count": 1, "raw_count": 1, "other_count": 0, - "file_paths": ["image1.jpg", "image1.ARW"] + "file_paths": ["image1.jpg", "image1.ARW"], }, { "path": "D:/other/image recycle bin", @@ -23,32 +24,33 @@ def test_recycle_bin_detailed_text_formatting(): "jpg_count": 0, "raw_count": 0, "other_count": 1, - "file_paths": ["doc.txt"] - } + "file_paths": ["doc.txt"], + }, ] - + mock_controller.get_recycle_bin_stats.return_value = sample_stats - + # Initialize UIState ui_state = UIState(mock_controller) - + # Get detailed text detailed_text = ui_state.recycleBinDetailedText - + # Verify content assert "Directory: C:/images/image recycle bin" in detailed_text assert " - image1.jpg" in detailed_text assert " - image1.ARW" in detailed_text assert "Directory: D:/other/image recycle bin" in detailed_text assert " - doc.txt" in detailed_text - + # Verify separators (trailing newline) assert detailed_text.count(" - ") == 3 assert detailed_text.endswith("\n") or detailed_text.count("\n\n") > 0 + def test_recycle_bin_detailed_text_empty(): mock_controller = Mock() mock_controller.get_recycle_bin_stats.return_value = [] - + ui_state = UIState(mock_controller) assert ui_state.recycleBinDetailedText == "" diff --git a/faststack/tests/test_variants.py b/faststack/tests/test_variants.py index c4f6696..9e46ad0 100644 --- a/faststack/tests/test_variants.py +++ b/faststack/tests/test_variants.py @@ -17,6 +17,7 @@ # ── parse_variant_stem ────────────────────────────────────────────────────── + class TestParseVariantStem: """Tests for parse_variant_stem().""" @@ -65,7 +66,8 @@ def test_developed_and_backup(self): def test_backup_and_developed(self): """photo-backup2-developed → developed + backup 2. - Note: -developed token comes after -backup, but -backup is at end of stripped stem.""" + Note: -developed token comes after -backup, but -backup is at end of stripped stem. 
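+        Walk-through, per the parse order noted above: "-developed" is
+        stripped first, leaving "photo-backup2"; the trailing "-backup2"
+        then yields backup number 2 and group key "photo".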
+ """ key, dev, backup = parse_variant_stem("photo-backup2-developed") assert key == "photo" assert dev is True @@ -111,6 +113,7 @@ def test_stem_with_dashes_and_backup(self): # ── build_variant_map ──────────────────────────────────────────────────────── + class TestBuildVariantMap: """Tests for build_variant_map().""" @@ -205,6 +208,7 @@ def test_complex_variant_set(self): # ── build_badge_list ───────────────────────────────────────────────────────── + class TestBuildBadgeList: """Tests for build_badge_list().""" @@ -248,6 +252,7 @@ def test_singleton_no_badges(self): # ── Integration: indexer find_images_with_variants ─────────────────────────── + class TestFindImagesWithVariants: """Tests for find_images_with_variants() filtering.""" @@ -331,11 +336,13 @@ def test_variant_map_built(self, tmp_path): # ── Thumbnail model roles ─────────────────────────────────────────────────── + class TestThumbnailModelVariantRoles: """Tests that ThumbnailModel propagates variant flags.""" def test_entry_has_variant_fields(self): from faststack.thumbnail_view.model import ThumbnailEntry + entry = ThumbnailEntry( path=Path("/dir/photo.jpg"), name="photo.jpg", @@ -348,6 +355,7 @@ def test_entry_has_variant_fields(self): def test_entry_defaults_false(self): from faststack.thumbnail_view.model import ThumbnailEntry + entry = ThumbnailEntry( path=Path("/dir/photo.jpg"), name="photo.jpg", diff --git a/faststack/tests/test_variants_logic.py b/faststack/tests/test_variants_logic.py index fde69cf..193282f 100644 --- a/faststack/tests/test_variants_logic.py +++ b/faststack/tests/test_variants_logic.py @@ -1,13 +1,14 @@ - import unittest +import os from pathlib import Path -from faststack.io.variants import build_variant_map, VariantGroup +from faststack.io.variants import build_variant_map, VariantGroup, norm_path + class TestVariantsLogic(unittest.TestCase): def test_main_selection_priority(self): """Verify Main selection priority: Non-dev > Dev > Backup.""" - - # Cas 1: Pure Main exists + + # Case 1: Pure Main exists paths = [ Path("img.jpg"), Path("img-developed.jpg"), @@ -15,21 +16,21 @@ def test_main_selection_priority(self): ] groups = build_variant_map(paths) group = groups["img"] - + self.assertEqual(group.main_path.name, "img.jpg") self.assertEqual(group.developed_path.name, "img-developed.jpg") self.assertEqual(group.backup_paths[1].name, "img-backup.jpg") def test_main_selection_orphan_developed(self): """Verify Developed is chosen as Main if no pure Main exists.""" - + paths = [ Path("img-developed.jpg"), Path("img-backup.jpg"), ] groups = build_variant_map(paths) group = groups["img"] - + # developed becomes Main because it's Tier 1 (vs Backup Tier 2) self.assertEqual(group.main_path.name, "img-developed.jpg") # It is ALSO the developed path @@ -37,15 +38,15 @@ def test_main_selection_orphan_developed(self): def test_main_selection_orphan_backup(self): """Verify Backup is chosen as Main if nothing else exists.""" - + paths = [ Path("img-backup.jpg"), Path("img-backup2.jpg"), ] groups = build_variant_map(paths) group = groups["img"] - - self.assertEqual(group.main_path.name, "img-backup.jpg") # Lowest backup + + self.assertEqual(group.main_path.name, "img-backup.jpg") # Lowest backup self.assertIsNone(group.developed_path) def test_backup_sorting(self): @@ -54,11 +55,11 @@ def test_backup_sorting(self): Path("img-backup2.jpg"), Path("img.jpg"), Path("img-backup10.jpg"), - Path("img-backup.jpg"), # implies 1 + Path("img-backup.jpg"), # implies 1 ] groups = build_variant_map(paths) group = 
groups["img"] - + self.assertEqual(len(group.backup_paths), 3) self.assertEqual(group.backup_paths[1].name, "img-backup.jpg") self.assertEqual(group.backup_paths[2].name, "img-backup2.jpg") @@ -68,25 +69,45 @@ def test_developed_backup_handling(self): """Verify developed backups are handled correctly.""" # Policy: developed-backup is "developed" candidate, AND "backup" candidate. # But _select_developed prefers non-backup developed. - + paths = [ Path("img.jpg"), Path("img-developed.jpg"), - Path("img-backup-developed.jpg"), # This is a backup AND developed? + Path("img-backup-developed.jpg"), # This is a backup AND developed? # Note: parse_variant_stem handles "-developed" then "-backup". # "img-backup-developed" -> stem "img-backup". is_developed=True. # Then "img-backup" -> group "img", backup=1. ] - + groups = build_variant_map(paths) group = groups["img"] - + self.assertEqual(group.main_path.name, "img.jpg") self.assertEqual(group.developed_path.name, "img-developed.jpg") - + # Check that backup list contains the backup-developed file # It has backup_number=1. self.assertEqual(group.backup_paths[1].name, "img-backup-developed.jpg") -if __name__ == '__main__': + def test_path_normalization(self): + """Verify path normalization (case-insensitivity, absolute paths).""" + if os.name == "nt": + p1 = Path("C:/Test/File.JPG") + p2 = Path("C:/Test/file.jpg") + n1 = Path(norm_path(p1)) + n2 = Path(norm_path(p2)) + # On Windows, these should match after normalization + self.assertEqual(n1, n2) + self.assertTrue(n1.is_absolute()) + else: + # On POSIX, case is preserved + p1 = Path("/tmp/File.JPG") + p2 = Path("/tmp/file.jpg") + n1 = norm_path(p1) + n2 = norm_path(p2) + self.assertNotEqual(n1, n2) + self.assertTrue(Path(n1).is_absolute()) + + +if __name__ == "__main__": unittest.main() diff --git a/faststack/tests/thumbnail_view/test_prefetcher.py b/faststack/tests/thumbnail_view/test_prefetcher.py index 30c3819..fb28144 100644 --- a/faststack/tests/thumbnail_view/test_prefetcher.py +++ b/faststack/tests/thumbnail_view/test_prefetcher.py @@ -90,7 +90,9 @@ def _assert_ready_callback_called_once(callback: MagicMock): emit_calls = callback.emit.call_count if hasattr(callback, "emit") else 0 total = direct_calls + emit_calls - assert total == 1, f"Expected callback delivery once; direct={direct_calls}, emit={emit_calls}" + assert ( + total == 1 + ), f"Expected callback delivery once; direct={direct_calls}, emit={emit_calls}" if direct_calls == 1: args, _kwargs = callback.call_args @@ -210,7 +212,9 @@ def test_submit_schedules_job(self, prefetcher, test_image, cache, qt_app): # Wait for job to complete (cache filled) path_hash = compute_path_hash(test_image) cache_key = f"200/{path_hash}/{mtime_ns}" - assert _wait_until(lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app) + assert _wait_until( + lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app + ) assert cache.get(cache_key) is not None @@ -250,7 +254,9 @@ def test_callback_called_on_complete(self, cache, test_image, qt_app): def _single_shot_immediate(_ms, fn): fn() - from PySide6.QtCore import QTimer # import here so patch.object has the real type + from PySide6.QtCore import ( + QTimer, + ) # import here so patch.object has the real type with patch.object(QTimer, "singleShot", side_effect=_single_shot_immediate): prefetcher = ThumbnailPrefetcher( @@ -266,7 +272,11 @@ def _single_shot_immediate(_ms, fn): # Wait for decode completion (cache fill proves the worker finished) path_hash = 
compute_path_hash(test_image) cache_key = f"200/{path_hash}/{mtime_ns}" - assert _wait_until(lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app) + assert _wait_until( + lambda: cache.get(cache_key) is not None, + timeout_s=2.0, + qt_app=qt_app, + ) delivered_key = _assert_ready_callback_called_once(callback) assert "200/" in str(delivered_key) @@ -314,7 +324,9 @@ def test_decode_applies_exif_orientation(self, cache, temp_folder, qt_app): # Wait for completion (cache filled) path_hash = compute_path_hash(img_path) cache_key = f"100/{path_hash}/{mtime_ns}" - assert _wait_until(lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app) + assert _wait_until( + lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app + ) cached_bytes = cache.get(cache_key) assert cached_bytes is not None @@ -342,7 +354,9 @@ def test_decode_handles_png(self, cache, temp_folder, qt_app): path_hash = compute_path_hash(img_path) cache_key = f"200/{path_hash}/{mtime_ns}" - assert _wait_until(lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app) + assert _wait_until( + lambda: cache.get(cache_key) is not None, timeout_s=2.0, qt_app=qt_app + ) assert cache.get(cache_key) is not None finally: prefetcher.shutdown() diff --git a/faststack/tests/thumbnail_view/test_prefetcher_priority.py b/faststack/tests/thumbnail_view/test_prefetcher_priority.py index 1c25cac..46b7629 100644 --- a/faststack/tests/thumbnail_view/test_prefetcher_priority.py +++ b/faststack/tests/thumbnail_view/test_prefetcher_priority.py @@ -4,15 +4,17 @@ from pathlib import Path from faststack.thumbnail_view.prefetcher import ThumbnailCache, ThumbnailPrefetcher + @pytest.fixture def cache(): return ThumbnailCache(max_bytes=1024 * 1024, max_items=100) + def test_prefetcher_priority(cache): """Verify that high priority jobs jump ahead of medium priority ones.""" finished_jobs = [] - - def mock_decode(path, path_hash, mtime_ns, size): + + def mock_decode(path, path_hash, mtime_ns, size, *args, **kwargs): # Simulate some work time.sleep(0.1) finished_jobs.append(path.name) @@ -25,44 +27,45 @@ def mock_decode(path, path_hash, mtime_ns, size): max_workers=1, target_size=200, ) - + try: with patch.object(pf, "_decode_worker", side_effect=mock_decode): # 1. Submit 5 medium priority jobs # med_0 will start immediately on the single worker thread pf.submit(Path("med_0.jpg"), 1000, priority=pf.PRIO_MED) - + # small sleep to ensure med_0 is pulled by the worker time.sleep(0.02) - + for i in range(1, 5): pf.submit(Path(f"med_{i}.jpg"), 1000, priority=pf.PRIO_MED) - + # 2. Submit 1 high priority job pf.submit(Path("high_0.jpg"), 1000, priority=pf.PRIO_HIGH) - + # 3. 
Wait for all to finish deadline = time.time() + 2.0 while len(finished_jobs) < 6 and time.time() < deadline: time.sleep(0.1) - + # Verification: # - finished_jobs[0] should be med_0.jpg (started first) # - finished_jobs[1] should be high_0.jpg (jumped the queue) # - others should follow - + assert len(finished_jobs) == 6 assert finished_jobs[0] == "med_0.jpg" assert finished_jobs[1] == "high_0.jpg" - + finally: pf.shutdown() + def test_prefetcher_lifo_behavior(cache): """Verify that jobs within same priority have LIFO behavior (most recent first).""" finished_jobs = [] - - def mock_decode(path, path_hash, mtime_ns, size): + + def mock_decode(path, path_hash, mtime_ns, size, *args, **kwargs): time.sleep(0.05) finished_jobs.append(path.name) return b"fake_data" @@ -73,25 +76,25 @@ def mock_decode(path, path_hash, mtime_ns, size): max_workers=1, target_size=200, ) - + try: with patch.object(pf, "_decode_worker", side_effect=mock_decode): # Submit first job to busy the worker pf.submit(Path("job_0.jpg"), 1000) time.sleep(0.01) - + # Submit sequential jobs pf.submit(Path("job_1.jpg"), 1000) time.sleep(0.01) pf.submit(Path("job_2.jpg"), 1000) time.sleep(0.01) pf.submit(Path("job_3.jpg"), 1000) - + # Wait for all deadline = time.time() + 2.0 while len(finished_jobs) < 4 and time.time() < deadline: time.sleep(0.05) - + assert len(finished_jobs) == 4 assert finished_jobs[0] == "job_0.jpg" # job_3 should be second because it was submitted LAST (LIFO) diff --git a/faststack/tests/thumbnail_view/test_provider_logic.py b/faststack/tests/thumbnail_view/test_provider_logic.py new file mode 100644 index 0000000..5527b81 --- /dev/null +++ b/faststack/tests/thumbnail_view/test_provider_logic.py @@ -0,0 +1,152 @@ +"""Tests for _parse_id logic and cache-hit decode recovery in ThumbnailProvider.""" + +import pytest +from pathlib import Path +from unittest.mock import MagicMock + +from faststack.thumbnail_view.provider import ThumbnailProvider, PLACEHOLDER_COLOR, ERROR_COLOR +from faststack.thumbnail_view.prefetcher import ThumbnailCache +from PySide6.QtCore import QSize + + +class TestProviderLogic: + @pytest.fixture + def provider(self): + p = ThumbnailProvider.__new__(ThumbnailProvider) + p._default_size = 200 + return p + + def test_parse_id_file_success(self, provider): + id_str = "256/pathhash123/123456789?r=1&reason=scroll" + parsed = provider._parse_id(id_str) + + assert parsed.id_clean == "256/pathhash123/123456789" + assert parsed.parts == ["256", "pathhash123", "123456789"] + assert parsed.thumb_size == 256 + assert parsed.path_hash == "pathhash123" + assert parsed.mtime_ns == 123456789 + assert parsed.reason == "scroll" + assert parsed.is_folder is False + assert parsed.is_valid is True + + def test_parse_id_folder_success(self, provider): + id_str = "folder/pathhash456/987654321?r=2" + parsed = provider._parse_id(id_str) + + assert parsed.id_clean == "folder/pathhash456/987654321" + assert parsed.parts == ["folder", "pathhash456", "987654321"] + assert parsed.thumb_size == 200 # Default size + assert parsed.path_hash == "pathhash456" + assert parsed.mtime_ns == 987654321 + assert parsed.reason == "unknown" + assert parsed.is_folder is True + assert parsed.is_valid is True + + def test_parse_id_invalid_format(self, provider): + id_str = "invalid/id" + parsed = provider._parse_id(id_str) + + assert parsed.is_valid is False + + def test_parse_id_invalid_number(self, provider): + id_str = "abc/pathhash/123" + parsed = provider._parse_id(id_str) + + assert parsed.is_valid is False + + +class 
TestCacheDiscard: + """Tests for ThumbnailCache.discard().""" + + def test_discard_existing_key(self): + cache = ThumbnailCache(max_bytes=1024, max_items=10) + cache.put("a", b"hello") + assert cache.size == 1 + assert cache.bytes_used == 5 + + assert cache.discard("a") is True + assert cache.size == 0 + assert cache.bytes_used == 0 + assert cache.get("a") is None + + def test_discard_missing_key_returns_false(self): + cache = ThumbnailCache(max_bytes=1024, max_items=10) + assert cache.discard("nonexistent") is False + assert cache.size == 0 + assert cache.bytes_used == 0 + + def test_discard_updates_bytes_correctly(self): + cache = ThumbnailCache(max_bytes=1024, max_items=10) + cache.put("a", b"12345") # 5 bytes + cache.put("b", b"67890ab") # 7 bytes + assert cache.bytes_used == 12 + + cache.discard("a") + assert cache.bytes_used == 7 + assert cache.size == 1 + + +class TestDecodeFailureRecovery: + """Cache-hit decode failure should behave like a cache miss.""" + + @pytest.fixture + def wired_provider(self): + """Build a ThumbnailProvider with mock cache, prefetcher, and path_resolver.""" + cache = ThumbnailCache(max_bytes=1024, max_items=10) + prefetcher = MagicMock() + prefetcher.PRIO_HIGH = 0 + prefetcher.submit = MagicMock(return_value=True) + + resolver = MagicMock(return_value=Path("/fake/photo.jpg")) + + provider = ThumbnailProvider( + cache=cache, + prefetcher=prefetcher, + path_resolver=resolver, + default_size=200, + ) + return provider, cache, prefetcher + + def test_bad_cached_bytes_returns_placeholder_not_error(self, wired_provider): + provider, cache, _prefetcher = wired_provider + + # Inject invalid JPEG bytes into the cache + cache_key = "200/abc123/999" + cache.put(cache_key, b"NOT-VALID-JPEG-DATA") + + out_size = QSize() + result = provider.requestImage(f"{cache_key}?r=1", out_size, QSize()) + + # Should return the loading placeholder (neutral gray), NOT error (dark red). + # Compare RGB components to avoid QColor format/alpha surprises. 
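+        # (pixelColor() returns a QColor; comparing the (r, g, b) triple
+        # sidesteps alpha and pixel-format differences between QImage
+        # formats that a full QColor equality check could trip over.)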
+ assert not result.isNull() + pixel = result.pixelColor(0, 0) + assert (pixel.red(), pixel.green(), pixel.blue()) == ( + PLACEHOLDER_COLOR.red(), + PLACEHOLDER_COLOR.green(), + PLACEHOLDER_COLOR.blue(), + ) + assert (pixel.red(), pixel.green(), pixel.blue()) != ( + ERROR_COLOR.red(), + ERROR_COLOR.green(), + ERROR_COLOR.blue(), + ) + + def test_bad_cached_bytes_evicts_and_submits(self, wired_provider): + provider, cache, prefetcher = wired_provider + + cache_key = "200/abc123/999" + cache.put(cache_key, b"NOT-VALID-JPEG-DATA") + + provider.requestImage(f"{cache_key}?r=1", QSize(), QSize()) + + # The bad entry should have been evicted + assert cache.get(cache_key) is None + + # Prefetcher should have been asked to re-decode with correct args + prefetcher.submit.assert_called_once() + args, kwargs = prefetcher.submit.call_args + assert args[0] == Path("/fake/photo.jpg") + assert args[1] == 999 # mtime_ns + assert args[2] == 200 # thumb_size + assert kwargs["priority"] == prefetcher.PRIO_HIGH diff --git a/faststack/tests/thumbnail_view/test_reason_tracking.py b/faststack/tests/thumbnail_view/test_reason_tracking.py new file mode 100644 index 0000000..7f6edce --- /dev/null +++ b/faststack/tests/thumbnail_view/test_reason_tracking.py @@ -0,0 +1,112 @@ +"""Tests for thumbnail source reason tracking in ThumbnailModel.""" + +from pathlib import Path +import pytest +from PySide6.QtCore import Qt, QModelIndex +from faststack.thumbnail_view.model import ThumbnailModel, ThumbnailEntry + + +@pytest.fixture +def model(): + """Create a ThumbnailModel with fake entries.""" + model = ThumbnailModel( + base_directory=Path("/fake/dir"), + current_directory=Path("/fake/dir"), + thumbnail_size=200, + ) + + # Manually add entries + entries = [ + ThumbnailEntry( + path=Path("/fake/dir/img1.jpg"), + name="img1.jpg", + is_folder=False, + mtime_ns=1000, + ), + ThumbnailEntry( + path=Path("/fake/dir/img2.jpg"), + name="img2.jpg", + is_folder=False, + mtime_ns=2000, + ), + ThumbnailEntry( + path=Path("/fake/dir/img3.jpg"), + name="img3.jpg", + is_folder=False, + mtime_ns=3000, + ), + ] + model._entries = entries + model._rebuild_id_mapping() + return model + + +def test_reason_persistence_across_all_items(model): + """All visible items in a batch should carry the same reason.""" + model._next_source_reason = "filter" + + src1 = model.data(model.index(0), model.ThumbnailSourceRole) + assert "reason=filter" in src1 + + src2 = model.data(model.index(1), model.ThumbnailSourceRole) + assert "reason=filter" in src2 + + src3 = model.data(model.index(2), model.ThumbnailSourceRole) + assert "reason=filter" in src3 + + +def test_reason_defaults_to_scroll_when_none(model): + """When no reason is set, data() returns reason=scroll.""" + model._next_source_reason = None + + src = model.data(model.index(0), model.ThumbnailSourceRole) + assert "reason=scroll" in src + + +def test_reason_survives_endResetModel(model): + """Reason must still be readable after endResetModel, before deferred clear.""" + model._next_source_reason = "jump" + + # Simulate what refresh() does: beginResetModel + endResetModel + model.beginResetModel() + model.endResetModel() + + # Reason should still be alive — QML reads data() after modelReset + src = model.data(model.index(0), model.ThumbnailSourceRole) + assert "reason=jump" in src + + +def test_clear_next_source_reason_method(model): + """_clear_next_source_reason() resets to None so data() returns scroll.""" + model._next_source_reason = "refresh" + + # Reason is alive + src = 
model.data(model.index(0), model.ThumbnailSourceRole) + assert "reason=refresh" in src + + # Simulate deferred clear (what QTimer.singleShot(0, ...) will call) + model._clear_next_source_reason() + + assert model._next_source_reason is None + src = model.data(model.index(0), model.ThumbnailSourceRole) + assert "reason=scroll" in src + + +def test_refresh_sets_deferred_clear(model): + """refresh() should NOT synchronously clear reason.""" + from unittest.mock import patch + from faststack.models import ImageFile + + with patch("faststack.thumbnail_view.model.find_images") as mock_find: + # Note: This only verifies model._next_source_reason is preserved synchronously + # after refresh() returns; the deferred clearing via QTimer is not tested here. + mock_find.return_value = [ + ImageFile(path=Path("/fake/dir/img1.jpg")), + ImageFile(path=Path("/fake/dir/img2.jpg")), + ] + + model.set_filter("img") # sets reason="filter", calls refresh() + + # Reason should still be alive right after refresh() returns + # (deferred clear hasn't fired yet — no event loop iteration) + assert model._next_source_reason == "filter" diff --git a/faststack/thumbnail_view/model.py b/faststack/thumbnail_view/model.py index ba35960..f663bde 100644 --- a/faststack/thumbnail_view/model.py +++ b/faststack/thumbnail_view/model.py @@ -13,6 +13,7 @@ QAbstractListModel, QModelIndex, QThread, + QTimer, Qt, Signal, Slot, @@ -132,6 +133,7 @@ def __init__( parent=None, ): super().__init__(parent) + self._next_source_reason: Optional[str] = None self._base_directory = base_directory.resolve() self._current_directory = current_directory.resolve() self._get_metadata = get_metadata_callback @@ -143,17 +145,13 @@ def __init__( self._selected_indices: Set[int] = set() self._last_selected_index: Optional[int] = None self._active_filter: str = "" # current filename filter (set by AppController) - self._active_filter_flags: list = [] # current flag filters (e.g. ["uploaded", "stacked"]) - - # One-shot reason for logging - self._next_source_reason: Optional[str] = None + self._active_filter_flags: list = ( + [] + ) # current flag filters (e.g. ["uploaded", "stacked"]) # Mapping from thumbnail_id (without query params) to row index # id format: "{size}/{path_hash}/{mtime_ns}" self._id_to_row: Dict[str, int] = {} - - # One-shot reason for logging - self._next_source_reason: Optional[str] = None # Connect our own signal to handle thumbnail ready events self.thumbnailReady.connect(self._on_thumbnail_ready) @@ -204,8 +202,6 @@ def data(self, index: QModelIndex, role: int = Qt.ItemDataRole.DisplayRole): return entry.is_edited elif role == self.ThumbnailSourceRole: reason = self._next_source_reason or "scroll" - # Consume one-shot reason - self._next_source_reason = None return self._get_thumbnail_source(entry, reason=reason) elif role == self.FolderStatsRole: if entry.folder_stats: @@ -291,7 +287,9 @@ def roleNames(self) -> Dict[int, bytes]: self.HasDevelopedRole: b"hasDeveloped", } - def _get_thumbnail_source(self, entry: ThumbnailEntry, reason: str = "scroll") -> str: + def _get_thumbnail_source( + self, entry: ThumbnailEntry, reason: str = "scroll" + ) -> str: """Build thumbnail URL for QML Image source. 
Format: image://thumbnail/{size}/{path_hash}/{mtime_ns}?r={rev}&reason={reason} @@ -369,9 +367,9 @@ def _add_folders_to_entries(self): def refresh(self): """Refresh the model by rescanning the current directory.""" cur, own = QThread.currentThread(), self.thread() - assert cur == own, ( - f"ThumbnailModel.refresh() thread mismatch: current={cur}, owner={own}" - ) + assert ( + cur == own + ), f"ThumbnailModel.refresh() thread mismatch: current={cur}, owner={own}" self.beginResetModel() t0 = time.perf_counter() try: @@ -400,9 +398,11 @@ def refresh(self): meta = self._get_metadata(img.path.stem) if not isinstance(meta, dict): # Ensure it's a dict before .get() - log.debug("Metadata for %s is not a dict: %r", img.path.stem, meta) + log.debug( + "Metadata for %s is not a dict: %r", img.path.stem, meta + ) continue - + if all(meta.get(flag, False) for flag in flags): filtered.append(img) except Exception as e: @@ -418,6 +418,8 @@ def refresh(self): finally: self.endResetModel() + # Defer clear so QML's next render frame still sees the reason + QTimer.singleShot(0, self._clear_next_source_reason) self._folder_count = sum(1 for e in self._entries if e.is_folder) self.selectionChanged.emit() log.info( @@ -428,7 +430,11 @@ def refresh(self): ) log.info( "refresh timings: folders=%.3f images=%.3f idmap=%.3f total=%.3f n=%d", - t1-t0, t2-t1, t3-t2, t3-t0, len(images) + t1 - t0, + t2 - t1, + t3 - t2, + t3 - t0, + len(images), ) def remove_rows_by_path(self, paths: List[Path]) -> None: @@ -447,7 +453,9 @@ def remove_rows_by_path(self, paths: List[Path]) -> None: return # Count folders being removed (for cached counter) - folders_removed = sum(1 for i in indices_to_remove if self._entries[i].is_folder) + folders_removed = sum( + 1 for i in indices_to_remove if self._entries[i].is_folder + ) # 2. Sort in reverse to maintain index validity during removal indices_to_remove.sort(reverse=True) @@ -455,7 +463,10 @@ def remove_rows_by_path(self, paths: List[Path]) -> None: # 3. Group consecutive indices for batch removal calls ranges = [] if indices_to_remove: - current_range = [indices_to_remove[0], indices_to_remove[0]] # [last, first] + current_range = [ + indices_to_remove[0], + indices_to_remove[0], + ] # [last, first] for idx in indices_to_remove[1:]: if idx == current_range[1] - 1: current_range[1] = idx @@ -487,18 +498,23 @@ def remove_rows_by_path(self, paths: List[Path]) -> None: # 7. Rebuild mapping self._rebuild_id_mapping() self.selectionChanged.emit() - log.info("ThumbnailModel removed %d rows via targeted removal", len(indices_to_remove)) + log.info( + "ThumbnailModel removed %d rows via targeted removal", + len(indices_to_remove), + ) - def refresh_from_controller(self, images: List, metadata_map: Optional[Dict[str, dict]] = None): + def refresh_from_controller( + self, images: List, metadata_map: Optional[Dict[str, dict]] = None + ): """Refresh images from a pre-loaded list without scanning disk. - + Folders are still scanned, but image entries are built from the provided objects. 
""" self._next_source_reason = "refresh" cur, own = QThread.currentThread(), self.thread() - assert cur == own, f"ThumbnailModel refresh thread mismatch" - + assert cur == own, "ThumbnailModel refresh thread mismatch" + self.beginResetModel() try: self._entries.clear() @@ -509,7 +525,7 @@ def refresh_from_controller(self, images: List, metadata_map: Optional[Dict[str, t0 = time.perf_counter() self._add_folders_to_entries() t1 = time.perf_counter() - + # Apply active filename filter if set if self._active_filter: needle = self._active_filter.lower() @@ -527,9 +543,11 @@ def refresh_from_controller(self, images: List, metadata_map: Optional[Dict[str, meta = self._get_metadata(img.path.stem) else: continue - + if not isinstance(meta, dict): - log.debug("Metadata for %s is not a dict: %r", img.path.stem, meta) + log.debug( + "Metadata for %s is not a dict: %r", img.path.stem, meta + ) continue if all(meta.get(flag, False) for flag in flags): @@ -546,19 +564,27 @@ def refresh_from_controller(self, images: List, metadata_map: Optional[Dict[str, finally: self.endResetModel() + QTimer.singleShot(0, self._clear_next_source_reason) self._folder_count = sum(1 for e in self._entries if e.is_folder) self.selectionChanged.emit() log.info( "refresh_from_controller timings: folders=%.3f images=%.3f idmap=%.3f total=%.3f n=%d (bulk_meta=%s)", - t1-t0, t2-t1, t3-t2, t3-t0, len(images), metadata_map is not None + t1 - t0, + t2 - t1, + t3 - t2, + t3 - t0, + len(images), + metadata_map is not None, ) - def _add_images_to_entries(self, images: List, metadata_map: Optional[Dict[str, dict]] = None): + def _add_images_to_entries( + self, images: List, metadata_map: Optional[Dict[str, dict]] = None + ): """Convert list of objects (ImageFile or similar) to ThumbnailEntry.""" for img in images: try: # Use mtime from object if available to avoid stat() - if hasattr(img, 'timestamp') and img.timestamp: + if hasattr(img, "timestamp") and img.timestamp: mtime_ns = int(img.timestamp * 1e9) else: mtime_ns = img.path.stat().st_mtime_ns @@ -589,12 +615,14 @@ def _add_images_to_entries(self, images: List, metadata_map: Optional[Dict[str, is_restacked = meta.get("restacked", False) is_favorite = meta.get("favorite", False) else: - log.debug("Metadata for %s is not a dict: %r", img.path.stem, meta) + log.debug( + "Metadata for %s is not a dict: %r", img.path.stem, meta + ) except Exception as e: log.debug("Error getting metadata for %s: %s", img.path, e) - has_backups = getattr(img, 'has_backups', False) - has_developed = getattr(img, 'has_developed', False) + has_backups = getattr(img, "has_backups", False) + has_developed = getattr(img, "has_developed", False) self._entries.append( ThumbnailEntry( @@ -636,10 +664,14 @@ def _on_thumbnail_ready(self, thumbnail_id: str): """Handle thumbnail ready signal - bump revision and emit dataChanged.""" if thumbnail_id not in self._id_to_row: if log.isEnabledFor(logging.DEBUG): - actual_size = thumbnail_id.split("/", 1)[0] if "/" in thumbnail_id else "?" + actual_size = ( + thumbnail_id.split("/", 1)[0] if "/" in thumbnail_id else "?" 
+ ) log.debug( "thumbnail ready id not in model map: id=%s expected_size=%s actual_size=%s", - thumbnail_id, self._thumbnail_size, actual_size, + thumbnail_id, + self._thumbnail_size, + actual_size, ) return @@ -707,6 +739,10 @@ def navigate_to(self, path: Path, update_base_if_above: bool = False): self._next_source_reason = "jump" self.refresh() + def _clear_next_source_reason(self): + """Deferred clear of _next_source_reason (called via QTimer.singleShot).""" + self._next_source_reason = None + # Selection methods def select_index(self, idx: int, shift: bool = False, ctrl: bool = False): @@ -793,7 +829,7 @@ def get_entry(self, row: int) -> Optional[ThumbnailEntry]: def _compute_path_hash(self, path: Path) -> str: """Computes a stable hash for the given path. - + Now uses centralized helper which is purely string-based (no .resolve() calls). """ return compute_path_hash(path) diff --git a/faststack/thumbnail_view/prefetcher.py b/faststack/thumbnail_view/prefetcher.py index dfc66c3..e0c1b38 100644 --- a/faststack/thumbnail_view/prefetcher.py +++ b/faststack/thumbnail_view/prefetcher.py @@ -3,16 +3,20 @@ import logging import os import time +from collections import OrderedDict from concurrent.futures import Future from pathlib import Path from threading import Lock import threading from typing import Dict, Optional, Set, Tuple, Callable, TYPE_CHECKING + if TYPE_CHECKING: from faststack.imaging.cache import ByteLRUCache import numpy as np +import io from PIL import Image +from contextlib import nullcontext from faststack.util.executors import create_priority_executor from faststack.imaging.orientation import get_exif_orientation, apply_orientation_to_np @@ -46,12 +50,6 @@ class _ReadyEmitter(QObject): log.debug("TurboJPEG not available, using PIL for thumbnail decoding") - - - - - - class ThumbnailPrefetcher: """Background thumbnail decoder with ThreadPoolExecutor. @@ -64,7 +62,7 @@ class ThumbnailPrefetcher: # Priority levels PRIO_HIGH = 0 # Visible items - PRIO_MED = 1 # Prefetch items + PRIO_MED = 1 # Prefetch items def __init__( self, @@ -99,7 +97,9 @@ def __init__( # Track in-flight jobs to avoid duplicates # Key: (size, path_hash, mtime_ns) # Value: (rid, ThumbTimer) - self._inflight: Dict[Tuple[int, str, int], Tuple[int, "thumb_debug.ThumbTimer"]] = {} + self._inflight: Dict[ + Tuple[int, str, int], Tuple[int, Optional["thumb_debug.ThumbTimer"]] + ] = {} self._inflight_lock = Lock() self._debug_trace = debug_trace @@ -112,8 +112,12 @@ def __init__( if _HAS_QT and self._on_ready: try: if QCoreApplication.instance() is not None: - self._ready_emitter = _ReadyEmitter() # created on constructing thread (should be Qt thread) - self._ready_emitter.ready.connect(self._on_ready, Qt.QueuedConnection) + self._ready_emitter = ( + _ReadyEmitter() + ) # created on constructing thread (should be Qt thread) + self._ready_emitter.ready.connect( + self._on_ready, Qt.QueuedConnection + ) except Exception: self._ready_emitter = None @@ -128,9 +132,12 @@ def __init__( ) def submit( - self, path: Path, mtime_ns: int, size: int = None, + self, + path: Path, + mtime_ns: int, + size: Optional[int] = None, priority: int = PRIO_MED, - timer: Optional["thumb_debug.ThumbTimer"] = None + timer: Optional["thumb_debug.ThumbTimer"] = None, ) -> bool: """Submit a thumbnail decode job. 
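The model.py changes above replace the old consume-on-read reason tag with a deferred clear: `data()` now reads `_next_source_reason` without consuming it, and `refresh()` schedules the clear via `QTimer.singleShot(0, ...)` so every delegate rebuilt in QML's next render frame still sees the same reason. A minimal sketch of the pattern, with hypothetical names and none of the real model's bookkeeping:

```python
from PySide6.QtCore import QObject, QTimer


class ReasonTaggedSource(QObject):
    """Sketch: a one-shot logging tag that survives one event-loop turn."""

    def __init__(self):
        super().__init__()
        self._next_source_reason = None  # one-shot tag, e.g. "filter", "jump"

    def refresh(self, reason):
        self._next_source_reason = reason
        # ... rebuild entries; QML re-requests sources on the next frame ...
        # Clear only after the event loop spins once, so every request
        # triggered by this refresh is attributed to the same reason.
        QTimer.singleShot(0, self._clear_next_source_reason)

    def source_for(self, entry_id):
        # data() analogue: read without consuming.
        reason = self._next_source_reason or "scroll"
        return f"image://thumbnail/{entry_id}?reason={reason}"

    def _clear_next_source_reason(self):
        self._next_source_reason = None
```

The test at the top of this diff relies on exactly this timing: without an event-loop iteration, the reason set by `set_filter()` is still visible after `refresh()` returns.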
@@ -169,40 +176,55 @@ def submit( existing_rid, existing_timer = self._inflight[job_key] if existing_timer: # Capture where this request originated - existing_timer.coalesced_from = timer.reason + if timer: + existing_timer.coalesced_from = timer.reason # Update effective priority if this requested one is higher - if existing_timer.prio_effective is not None and priority < existing_timer.prio_effective: + if ( + existing_timer.prio_effective is not None + and priority < existing_timer.prio_effective + ): existing_timer.prio_effective = priority if timer: - thumb_debug.log_trace("prio_bump", rid=existing_timer.rid, new_prio=priority, triggered_by_rid=timer.rid) - + thumb_debug.log_trace( + "prio_bump", + rid=existing_timer.rid, + new_prio=priority, + triggered_by_rid=timer.rid, + ) + if timer: thumb_debug.inc("decode_coalesced") - thumb_debug.log_trace("coalesced", rid=timer.rid, existing_rid=existing_rid) + thumb_debug.log_trace( + "coalesced", rid=timer.rid, existing_rid=existing_rid + ) return False - + if timer: timer.t_queued = time.perf_counter() timer.prio_submitted = priority timer.prio_effective = priority thumb_debug.inc("decode_submitted") - thumb_debug.log_trace("queued", rid=timer.rid, prio=priority, qdepth=len(self._inflight)+1) + thumb_debug.log_trace( + "queued", + rid=timer.rid, + prio=priority, + qdepth=len(self._inflight) + 1, + ) self._inflight[job_key] = (timer.rid if timer else 0, timer) thumb_debug.gauge("inflight", len(self._inflight)) - thumb_debug.gauge("qdepth", len(self._inflight)) # Submit decode job try: self._submit_count += 1 - + future = self._executor.submit( self._decode_worker, path, path_hash, mtime_ns, size, - timer=timer, + timer, priority=priority, ) @@ -240,7 +262,7 @@ def _decode_worker( path_hash: str, mtime_ns: int, size: int, - timer: "thumb_debug.ThumbTimer", + timer: Optional["thumb_debug.ThumbTimer"] = None, ) -> Optional[bytes]: """Worker function to decode a thumbnail. 
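The `submit()` hunk above deduplicates decode work: jobs are keyed by `(size, path_hash, mtime_ns)`, and a request for a key that is already in flight is coalesced, at most bumping the recorded effective priority (lower value means more urgent). A reduced sketch of that bookkeeping, assuming an executor that accepts a `priority` keyword like the diff's `PriorityExecutor`:

```python
from threading import Lock


class InflightCoalescer:
    """Sketch of ThumbnailPrefetcher.submit's duplicate suppression."""

    def __init__(self, executor):
        self._executor = executor
        self._inflight = {}  # job_key -> effective priority
        self._lock = Lock()

    def submit(self, job_key, priority, work):
        with self._lock:
            if job_key in self._inflight:
                # Already decoding: record the hotter priority, queue nothing.
                self._inflight[job_key] = min(self._inflight[job_key], priority)
                return False
            self._inflight[job_key] = priority
        future = self._executor.submit(work, priority=priority)
        # Always drop the bookkeeping entry, even on error or cancellation.
        future.add_done_callback(lambda _f: self._forget(job_key))
        return True

    def _forget(self, job_key):
        with self._lock:
            self._inflight.pop(job_key, None)
```

As in the real code, the bump only updates bookkeeping; a job already handed to the queue keeps its original position.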
@@ -250,52 +272,64 @@ def _decode_worker( timer.t_worker_start = time.perf_counter() timer.started = True thumb_debug.inc("decode_started") - thumb_debug.log_trace("worker_start", rid=timer.rid) - + thumb_debug.log_trace( + "worker_start", + rid=timer.rid, + prio=getattr(timer, "prio_effective", None), + ) + try: # Read and decode rgb_array = self._decode_image(path, size, timer=timer) - + if rgb_array is None: return None # Check cancellation early if possible - if timer.cancelled or self._stop_event.is_set(): - thumb_debug.log_trace("cancel_honored", rid=timer.rid, stage="after_decode") + if (timer and timer.cancelled) or self._stop_event.is_set(): + if timer: + thumb_debug.log_trace( + "cancel_honored", rid=timer.rid, stage="after_decode" + ) return None # Get EXIF orientation and apply (single point of orientation) - with timer.stage("orientation"): + with timer.stage("orientation") if timer else nullcontext(): orientation = get_exif_orientation(path) rgb_array = apply_orientation_to_np(rgb_array, orientation) # Check cancellation again - if timer.cancelled or self._stop_event.is_set(): - thumb_debug.log_trace("cancel_honored", rid=timer.rid, stage="after_orientation") + if (timer and timer.cancelled) or self._stop_event.is_set(): + if timer: + thumb_debug.log_trace( + "cancel_honored", rid=timer.rid, stage="after_orientation" + ) return None # Encode to JPEG bytes for storage - with timer.stage("encode"): + with timer.stage("encode") if timer else nullcontext(): pil_image = Image.fromarray(rgb_array, mode="RGB") - - # Use BytesIO to encode to JPEG - import io - buf = io.BytesIO() pil_image.save(buf, format="JPEG", quality=85) result = buf.getvalue() - - if timer.cancelled: + + if timer and timer.cancelled: thumb_debug.log_trace("cancel_too_late", rid=timer.rid) - + return result except Exception as e: - thumb_debug.log_trace("worker_error", rid=timer.rid, error=str(e)) + if timer: + thumb_debug.log_trace("worker_error", rid=timer.rid, error=str(e)) log.debug("Failed to decode thumbnail for %s: %s", path, e) return None - def _decode_image(self, path: Path, target_size: int, timer: "thumb_debug.ThumbTimer") -> Optional[np.ndarray]: + def _decode_image( + self, + path: Path, + target_size: int, + timer: Optional["thumb_debug.ThumbTimer"] = None, + ) -> Optional[np.ndarray]: """Decode image to numpy array at target size. Uses TurboJPEG if available for faster decoding. 
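Making `timer` optional forces every timing block in the workers into the `with timer.stage(...) if timer else nullcontext():` shape. `contextlib.nullcontext()` is a stdlib no-op context manager, so the untimed path costs almost nothing and the block body is written only once. A self-contained sketch of the idiom, with a toy timer standing in for the real one:

```python
import time
from contextlib import contextmanager, nullcontext


class StageTimer:
    """Toy stand-in for thumb_debug.ThumbTimer (hypothetical)."""

    def __init__(self):
        self.stages = {}  # stage name -> elapsed ms

    @contextmanager
    def stage(self, name):
        t0 = time.perf_counter()
        try:
            yield
        finally:
            self.stages[name] = (time.perf_counter() - t0) * 1000


def read_file(path, timer=None):
    # Same shape as the decode worker: instrumentation is optional.
    with timer.stage("io") if timer else nullcontext():
        with open(path, "rb") as f:
            return f.read()
```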
@@ -306,11 +340,11 @@ def _decode_image(self, path: Path, target_size: int, timer: "thumb_debug.ThumbT # Try TurboJPEG for JPEG files if HAS_TURBOJPEG and suffix in (".jpg", ".jpeg"): try: - with timer.stage("io"): + with timer.stage("io") if timer else nullcontext(): with open(path, "rb") as f: jpeg_data = f.read() - with timer.stage("decode"): + with timer.stage("decode") if timer else nullcontext(): # Get dimensions first width, height, _, _ = _tj.decode_header(jpeg_data) @@ -332,7 +366,7 @@ def _decode_image(self, path: Path, target_size: int, timer: "thumb_debug.ThumbT # Further resize with PIL if needed h, w = rgb.shape[:2] if w > target_size or h > target_size: - with timer.stage("decode"): + with timer.stage("resize") if timer else nullcontext(): pil_img = Image.fromarray(rgb) pil_img.thumbnail( (target_size, target_size), Image.Resampling.LANCZOS @@ -348,29 +382,34 @@ def _decode_image(self, path: Path, target_size: int, timer: "thumb_debug.ThumbT # Fallback to PIL try: - with timer.stage("decode"): - pil_img = Image.open(path) - # Convert to RGB if needed - if pil_img.mode != "RGB": - pil_img = pil_img.convert("RGB") - - # Resize - pil_img.thumbnail((target_size, target_size), Image.Resampling.LANCZOS) - return np.array(pil_img) + with timer.stage("decode") if timer else nullcontext(): + with Image.open(path) as pil_img: + # Convert to RGB if needed + if pil_img.mode != "RGB": + pil_img = pil_img.convert("RGB") + + # Resize + pil_img.thumbnail( + (target_size, target_size), Image.Resampling.LANCZOS + ) + return np.array(pil_img.copy()) except Exception as e: log.debug("PIL decode failed for %s: %s", path, e) return None def _on_decode_done( - self, future: Future, job_key: Tuple[int, str, int], cache_key: str, timer: "thumb_debug.ThumbTimer" + self, + future: Future, + job_key: Tuple[int, str, int], + cache_key: str, + timer: Optional["thumb_debug.ThumbTimer"], ): """Callback when decode completes.""" # Always remove bookkeeping first to avoid stranding entries with self._inflight_lock: self._inflight.pop(job_key, None) thumb_debug.gauge("inflight", len(self._inflight)) - thumb_debug.gauge("qdepth", len(self._inflight)) if self._futures.get(job_key) is future: del self._futures[job_key] @@ -389,7 +428,11 @@ def _on_decode_done( if future.cancelled() or (timer and timer.cancelled): if timer: thumb_debug.inc("decode_cancelled") - event = "cancelled_midflight" if timer.started else "cancelled_before_start" + event = ( + "cancelled_midflight" + if timer.started + else "cancelled_before_start" + ) thumb_debug.log_trace(event, rid=timer.rid) return @@ -397,7 +440,7 @@ def _on_decode_done( if jpeg_bytes: # Store in cache self._cache.put(cache_key, jpeg_bytes) - + if timer: thumb_debug.inc("decode_done_ok") thumb_debug.log_trace("completed", rid=timer.rid) @@ -427,8 +470,9 @@ def cancel_all(self): thumb_debug.gauge("inflight", 0) for timer in inflight_timers: - timer.cancelled = True - thumb_debug.log_trace("cancel_requested", rid=timer.rid) + if timer is not None: + timer.cancelled = True + thumb_debug.log_trace("cancel_requested", rid=timer.rid) for f in futures: try: @@ -456,8 +500,7 @@ class ThumbnailCache: def __init__(self, max_bytes: int = 256 * 1024 * 1024, max_items: int = 5000): self._max_bytes = max_bytes self._max_items = max_items - self._cache: Dict[str, bytes] = {} - self._order: list = [] # LRU order (oldest first) + self._cache: Dict[str, bytes] = OrderedDict() self._current_bytes = 0 self._lock = Lock() @@ -467,39 +510,47 @@ def get(self, key: str) -> Optional[bytes]: 
if key not in self._cache: return None # Move to end (most recently used) - self._order.remove(key) - self._order.append(key) + self._cache.move_to_end(key, last=True) return self._cache[key] def put(self, key: str, value: bytes): """Put item in cache, evicting if necessary.""" with self._lock: - # If already present, remove old entry first + # If already present, subtract the old value's size first; + # OrderedDict replaces the key in place, so no explicit removal is needed. if key in self._cache: - old_value = self._cache[key] - self._current_bytes -= len(old_value) - self._order.remove(key) - - # Add new entry + self._current_bytes -= len(self._cache[key]) + self._cache.move_to_end(key, last=True) + self._cache[key] = value - self._order.append(key) self._current_bytes += len(value) # Evict if over limits while ( self._current_bytes > self._max_bytes or len(self._cache) > self._max_items - ) and self._order: - oldest = self._order.pop(0) - if oldest in self._cache: - self._current_bytes -= len(self._cache[oldest]) - del self._cache[oldest] + ): + # Pop oldest (first item) + _, oldest_val = self._cache.popitem(last=False) + self._current_bytes -= len(oldest_val) + + def discard(self, key: str) -> bool: + """Remove a single entry if present. No-op if missing. + + Returns True if the key was present and removed, False otherwise. + """ + with self._lock: + try: + val = self._cache.pop(key) + except KeyError: + return False + self._current_bytes -= len(val) + return True def clear(self): """Clear the cache.""" with self._lock: self._cache.clear() - self._order.clear() self._current_bytes = 0 @property diff --git a/faststack/thumbnail_view/provider.py b/faststack/thumbnail_view/provider.py index 8e9b148..bdbd83b 100644 --- a/faststack/thumbnail_view/provider.py +++ b/faststack/thumbnail_view/provider.py @@ -2,17 +2,17 @@ import logging import time +from urllib.parse import unquote from pathlib import Path -from typing import TYPE_CHECKING, Optional +from typing import TYPE_CHECKING, Optional, NamedTuple import faststack.util.thumb_debug as thumb_debug from PySide6.QtCore import QSize -from PySide6.QtGui import QImage, QPixmap, QColor +from PySide6.QtGui import QImage, QColor from PySide6.QtQuick import QQuickImageProvider from faststack.io.utils import compute_path_hash -from faststack.models import DecodedImage if TYPE_CHECKING: from faststack.thumbnail_view.model import ThumbnailModel @@ -26,11 +26,24 @@ ERROR_COLOR = QColor(80, 40, 40) # Dark red for errors +class ParsedId(NamedTuple): + """Container for parsed thumbnail ID fields.""" + + id_clean: str + parts: list[str] + thumb_size: Optional[int] + path_hash: Optional[str] + mtime_ns: Optional[int] + reason: str + is_folder: bool + is_valid: bool + + class ThumbnailProvider(QQuickImageProvider): """QML Image Provider for thumbnails.
Non-blocking O(1) implementation: - - Returns cached pixmap if available + - Returns cached QImage if available - Returns placeholder immediately if not cached - Schedules decode via prefetcher (does NOT decode inline) @@ -58,7 +71,7 @@ def __init__( debug_timing: Enable [THUMB-TIMING] log lines debug_trace: Enable verbose trace logs """ - super().__init__(QQuickImageProvider.ImageType.Pixmap) + super().__init__(QQuickImageProvider.ImageType.Image) self._cache = cache self._prefetcher = prefetcher self._path_resolver = path_resolver @@ -66,12 +79,12 @@ def __init__( self._debug_timing = debug_timing self._debug_trace = debug_trace - # Pre-create placeholder pixmaps + # Pre-create placeholder images self._placeholder = self._create_placeholder(default_size, PLACEHOLDER_COLOR) self._folder_placeholder = self._create_folder_placeholder(default_size) self._error_placeholder = self._create_placeholder(default_size, ERROR_COLOR) - # Timing stats for requestPixmap + # Timing stats for requestImage self._first_request_time: Optional[float] = None self._request_count = 0 self._first_second_logged = False @@ -79,20 +92,20 @@ def __init__( log.debug("ThumbnailProvider initialized with default size %d", default_size) - def _create_placeholder(self, size: int, color: QColor) -> QPixmap: - """Create a solid color placeholder pixmap.""" - pixmap = QPixmap(size, size) - pixmap.fill(color) - return pixmap + def _create_placeholder(self, size: int, color: QColor) -> QImage: + """Create a solid color placeholder image.""" + image = QImage(size, size, QImage.Format.Format_RGB888) + image.fill(color) + return image - def _create_folder_placeholder(self, size: int) -> QPixmap: + def _create_folder_placeholder(self, size: int) -> QImage: """Create a folder icon placeholder.""" from PySide6.QtGui import QPainter, QPen, QBrush - pixmap = QPixmap(size, size) - pixmap.fill(FOLDER_COLOR) + image = QImage(size, size, QImage.Format.Format_RGB888) + image.fill(FOLDER_COLOR) - painter = QPainter(pixmap) + painter = QPainter(image) painter.setRenderHint(QPainter.RenderHint.Antialiasing) # Draw a simple folder shape @@ -120,147 +133,171 @@ def _create_folder_placeholder(self, size: int) -> QPixmap: ) painter.end() - return pixmap + return image + + def _parse_id(self, id_str: str) -> ParsedId: + """Parse the thumbnail ID string. + + Format: {size}/{path_hash}/{mtime_ns}?r={rev} + Or: folder/{path_hash}/{mtime_ns}?r={rev} + + Returns: + ParsedId named tuple with extracted fields. + """ + # Split query params + parts_query = id_str.split("?") + id_clean = parts_query[0] + reason = "unknown" + + if len(parts_query) > 1: + query = parts_query[1] + for param in query.split("&"): + if param.startswith("reason="): + # Robust parsing with split(..., 1) and URL decoding + reason = unquote(param.split("=", 1)[1]) + + parts = id_clean.split("/") + if len(parts) < 3: + return ParsedId(id_clean, parts, None, None, None, reason, False, False) + + is_folder = parts[0] == "folder" + try: + # If folder, we don't have a thumb_size in the first part, + # but we need path_hash and mtime_ns. 
+ if is_folder: + thumb_size = self._default_size + path_hash = parts[1] + mtime_ns = int(parts[2]) + else: + thumb_size = int(parts[0]) + path_hash = parts[1] + mtime_ns = int(parts[2]) + return ParsedId( + id_clean, parts, thumb_size, path_hash, mtime_ns, reason, is_folder, True + ) + except (ValueError, IndexError): + return ParsedId( + id_clean, parts, None, None, None, reason, is_folder, False + ) - def requestPixmap(self, id_str: str, size: QSize, requestedSize: QSize) -> QPixmap: - """Request a pixmap for the given ID. + def requestImage(self, id_str: str, size: QSize, _requestedSize: QSize) -> QImage: + """Request an image for the given ID. This method is O(1) - returns immediately with cached data or placeholder. Args: id_str: URL path after "image://thumbnail/" size: Output size reference (set by us) - requestedSize: Requested size from QML + _requestedSize: Requested size from QML (unused) Returns: - QPixmap of the thumbnail or placeholder + QImage of the thumbnail or placeholder """ # Parse the ID - # Format: {size}/{path_hash}/{mtime_ns}?r={rev} - # Or: folder/{path_hash}/{mtime_ns}?r={rev} + parsed = self._parse_id(id_str) + + if not parsed.is_valid: + log.debug("Invalid thumbnail ID: %s", id_str) + size.setWidth(self._error_placeholder.width()) + size.setHeight(self._error_placeholder.height()) + return self._error_placeholder + + if parsed.is_folder: + size.setWidth(self._folder_placeholder.width()) + size.setHeight(self._folder_placeholder.height()) + return self._folder_placeholder - # Strip query params - id_clean = id_str.split("?")[0] - # Track total requests if logging enabled if thumb_debug.timing_enabled or thumb_debug.trace_enabled: thumb_debug.inc_request_count() # Deferred logging setup timer = None + cache_key = parsed.id_clean + + # Resolve path - we already have path_hash and mtime_ns + path = self._path_resolver(parsed.path_hash) if self._path_resolver else None + if thumb_debug.timing_enabled or thumb_debug.trace_enabled: - # Parse reason - reason = "unknown" - parts_query = id_str.split("?") - if len(parts_query) > 1: - query = parts_query[1] - for param in query.split("&"): - if param.startswith("reason="): - reason = param.split("=")[1] - - # Key/ID parts - # Key format: {size}/{path_hash}/{mtime_ns} - parts = id_clean.split("/") - if len(parts) < 3: - log.debug("Invalid thumbnail ID format: %s", id_str) - return self._error_placeholder - - # Determine if folder (early exit for folders) - if parts[0] == "folder": - return self._folder_placeholder - - try: - thumb_size = int(parts[0]) - path_hash = parts[1] - mtime_ns = int(parts[2]) - except (ValueError, IndexError): - log.debug("Invalid thumbnail ID: %s", id_str) - return self._error_placeholder - - cache_key = f"{thumb_size}/{path_hash}/{mtime_ns}" - # Resolve path only if needed for trace - path = self._path_resolver(path_hash) if self._path_resolver else None - timer = thumb_debug.ThumbTimer(key=cache_key, path=path, reason=reason) - thumb_debug.log_trace("requested", rid=timer.rid, key=timer.key, src=timer.src, reason=reason) - else: - # Normal fast path — already have id_clean - cache_key = id_clean + timer = thumb_debug.ThumbTimer( + key=cache_key, path=path, reason=parsed.reason + ) + thumb_debug.log_trace( + "requested", + rid=timer.rid, + key=timer.key, + src=timer.src, + reason=parsed.reason, + ) # Check cache (O(1) lookup) - t_cache_get_start = time.perf_counter() + t_cache_get_start = time.perf_counter() if timer else 0 cached_bytes = self._cache.get(cache_key) - dt_cache_get = 
(time.perf_counter() - t_cache_get_start) * 1000 - + dt_cache_get = (time.perf_counter() - t_cache_get_start) * 1000 if timer else 0 + if cached_bytes: if timer: thumb_debug.inc("req_cache_hit") - thumb_debug.log_trace("cache_hit", rid=timer.rid, ms=f"{dt_cache_get:.3f}") - - # Decode JPEG bytes to pixmap - t_pixmap_start = time.perf_counter() - pixmap = self._bytes_to_pixmap(cached_bytes) - dt_pixmap = (time.perf_counter() - t_pixmap_start) * 1000 - - if pixmap and not pixmap.isNull(): + thumb_debug.log_trace( + "cache_hit", rid=timer.rid, ms=f"{dt_cache_get:.3f}" + ) + + # Decode JPEG bytes to QImage + t_decode_start = time.perf_counter() if timer else 0 + image = self._bytes_to_image(cached_bytes) + dt_decode = (time.perf_counter() - t_decode_start) * 1000 if timer else 0 + + if image is not None and not image.isNull(): if timer: - thumb_debug.log_trace("delivered", rid=timer.rid, pixmap_ms=f"{dt_pixmap:.3f}") + thumb_debug.log_trace( + "delivered", rid=timer.rid, decode_ms=f"{dt_decode:.3f}" + ) timer.log_timing( - cache="hit", + cache="hit", cache_get_ms=f"{dt_cache_get:.3f}", - pixmap_ms=f"{dt_pixmap:.3f}" + pixmap_ms=f"{dt_decode:.3f}", # keep tag for consistency in logs ) - return pixmap - + size.setWidth(image.width()) + size.setHeight(image.height()) + return image + else: + # Decode of cached bytes failed — evict the bad entry so + # the prefetcher can re-decode on the next request. + if self._cache.discard(cache_key): + log.warning("Evicted bad cache entry: %s", cache_key) + if timer: thumb_debug.inc("req_cache_miss") thumb_debug.log_trace("cache_miss", rid=timer.rid, ms=f"{dt_cache_get:.3f}") - # Not in cache - parse parts if we haven't already - # If timer was created, parts, thumb_size, path_hash, mtime_ns are already set. - # If not, we need to parse them now. - if not timer: - parts = id_clean.split("/") - if len(parts) < 3: - log.debug("Invalid thumbnail ID format: %s", id_str) - return self._error_placeholder - - # Determine if folder (early exit for folders) - if parts[0] == "folder": - path_hash = parts[1] - try: - mtime_ns = int(parts[2]) - except ValueError: - return self._error_placeholder - return self._folder_placeholder - - try: - thumb_size = int(parts[0]) - path_hash = parts[1] - mtime_ns = int(parts[2]) - except (ValueError, IndexError): - log.debug("Invalid thumbnail ID: %s", id_str) - return self._error_placeholder - - # Resolve path - path = self._path_resolver(path_hash) if self._path_resolver else None if path: self._prefetcher.submit( - path, mtime_ns, thumb_size, + path, + parsed.mtime_ns, + parsed.thumb_size, priority=self._prefetcher.PRIO_HIGH, - timer=timer + timer=timer, ) # Return placeholder immediately (non-blocking) + size.setWidth(self._placeholder.width()) + size.setHeight(self._placeholder.height()) return self._placeholder - def _bytes_to_pixmap(self, jpeg_bytes: bytes) -> Optional[QPixmap]: - """Convert JPEG bytes to QPixmap.""" + def _bytes_to_image(self, jpeg_bytes: bytes) -> Optional[QImage]: + """Convert JPEG bytes to QImage. + + Returns: + QImage of the decoded bytes, or None on failure. 
+ """ try: - qimage = QImage() - if qimage.loadFromData(jpeg_bytes, "JPEG"): - return QPixmap.fromImage(qimage) + image = QImage() + if image.loadFromData(jpeg_bytes, "JPEG"): + return image + log.warning("JPEG decode failed for cached bytes") except Exception as e: - log.debug("Failed to convert bytes to pixmap: %s", e) + # Guard against Qt/C++ interop runtime errors or other failures during decode + log.warning("Exception during JPEG decode from cache: %s", e, exc_info=True) return None @@ -300,4 +337,8 @@ def update_from_model(self, model: "ThumbnailModel"): self._hash_to_path[path_hash] = entry.path dt = time.perf_counter() - t0 - log.debug(f"PathResolver update took {dt*1000:.2f}ms for {model.rowCount()} items") + log.debug( + "PathResolver update took %.2fms for %d items", + dt * 1000, + model.rowCount(), + ) diff --git a/faststack/ui/provider.py b/faststack/ui/provider.py index 00c048b..762f6a2 100644 --- a/faststack/ui/provider.py +++ b/faststack/ui/provider.py @@ -39,7 +39,8 @@ def __init__(self, app_controller): def requestImage(self, id: str, size: object, requestedSize: object) -> QImage: """Handles image requests from QML.""" import time - _debug = getattr(self.app_controller, 'debug_cache', False) + + _debug = getattr(self.app_controller, "debug_cache", False) if _debug: _t_start = time.perf_counter() print(f"[DBGCACHE] {_t_start*1000:.3f} requestImage: START id={id}") @@ -83,7 +84,9 @@ def requestImage(self, id: str, size: object, requestedSize: object) -> QImage: if _debug: _t_got = time.perf_counter() - print(f"[DBGCACHE] {_t_got*1000:.3f} requestImage: got image_data in {(_t_got - _t_get)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_got*1000:.3f} requestImage: got image_data in {(_t_got - _t_get)*1000:.2f}ms" + ) if image_data: # Handle format being None (from prefetcher) or missing @@ -133,7 +136,9 @@ def requestImage(self, id: str, size: object, requestedSize: object) -> QImage: if _debug: _t_end = time.perf_counter() - print(f"[DBGCACHE] {_t_end*1000:.3f} requestImage: DONE id={id} total={(_t_end - _t_start)*1000:.2f}ms") + print( + f"[DBGCACHE] {_t_end*1000:.3f} requestImage: DONE id={id} total={(_t_end - _t_start)*1000:.2f}ms" + ) # Buffer is now safe to release (handled by copy), but original_buffer ref in Python object stays # We don't need to manually attach original_buffer to qimg anymore since we copied. 
@@ -320,9 +325,11 @@ def __init__(self, app_controller, clock_func=None): self.app_controller.batchAutoLevelsFinished.connect( self._on_batch_al_finished ) - + # Ensure image source updates when switching grid/loupe - self.isGridViewActiveChanged.connect(lambda _: self.currentImageSourceChanged.emit()) + self.isGridViewActiveChanged.connect( + lambda _: self.currentImageSourceChanged.emit() + ) def _on_batch_al_progress(self, current: int, total: int): self._batch_al_current = current @@ -412,6 +419,12 @@ def currentFilename(self): return "" return self.app_controller.get_current_metadata().get("filename", "") + @Property(str, notify=metadataChanged) + def exifBrief(self): + if not self.app_controller.image_files: + return "" + return self.app_controller.get_current_metadata().get("exif_brief", "") + @Property(bool, notify=metadataChanged) def isStacked(self): if not self.app_controller.image_files: @@ -559,7 +572,9 @@ def statusMessage(self, value: str): @Property(str, notify=variantSaveHintChanged) def variantSaveHint(self): """Returns a hint message when saving from a variant.""" - return self.app_controller.get_variant_save_hint() + if hasattr(self.app_controller, "get_variant_save_hint"): + return self.app_controller.get_variant_save_hint() + return "" @Property(str, notify=filterStringChanged) def filterString(self): @@ -696,7 +711,6 @@ def addUploadedToBatch(self): def jumpToLastUploaded(self): self.app_controller.jump_to_last_uploaded() - @Slot(result=str) def get_helicon_path(self): return self.app_controller.get_helicon_path() @@ -1513,7 +1527,10 @@ def gridPrefetchRange(self, startIndex: int, endIndex: int, maxCount: int = 800) # 2. Duplicate Suppression now = self._clock() current_req = (startIndex, endIndex, maxCount) - if current_req == self._last_prefetch_data and (now - self._last_prefetch_time) < 0.030: + if ( + current_req == self._last_prefetch_data + and (now - self._last_prefetch_time) < 0.030 + ): return self._last_prefetch_data = current_req @@ -1522,14 +1539,16 @@ def gridPrefetchRange(self, startIndex: int, endIndex: int, maxCount: int = 800) # 3. Budgeting / Hard Cap HARD_LIMIT = 800 budget = max(1, min(maxCount, HARD_LIMIT)) - + # Trim endIndex if the requested range exceeds the budget if (endIndex - startIndex + 1) > budget: endIndex = startIndex + budget - 1 # Submit prefetch jobs for visible range # Defensive fallback if thumbnail_size is refactored away - size = getattr(model, "thumbnail_size", None) or getattr(prefetcher, "_target_size", None) + size = getattr(model, "thumbnail_size", None) or getattr( + prefetcher, "_target_size", None + ) for i in range(startIndex, endIndex + 1): entry = model.get_entry(i) if entry and not entry.is_folder: diff --git a/faststack/util/executors.py b/faststack/util/executors.py index 1cac583..880ce81 100644 --- a/faststack/util/executors.py +++ b/faststack/util/executors.py @@ -14,6 +14,7 @@ import weakref from concurrent.futures.thread import _worker, _threads_queues + class DaemonThreadPoolExecutor(ThreadPoolExecutor): """ThreadPoolExecutor whose worker threads are daemon threads. 
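`gridPrefetchRange` above gates QML-driven prefetch twice: identical requests inside a 30 ms window are dropped, and the requested range is clamped to a hard budget before any jobs are submitted. The guard logic in isolation (a sketch; the 30 ms window and 800-item cap mirror this diff):

```python
import time

HARD_LIMIT = 800  # same cap as gridPrefetchRange


class PrefetchGate:
    """Sketch of the duplicate suppression + budgeting in gridPrefetchRange."""

    def __init__(self, clock=time.monotonic):
        self._clock = clock
        self._last_req = None
        self._last_t = 0.0

    def plan(self, start, end, max_count):
        now = self._clock()
        req = (start, end, max_count)
        # Drop an identical request arriving within 30 ms of the last one.
        if req == self._last_req and (now - self._last_t) < 0.030:
            return None
        self._last_req, self._last_t = req, now
        # Clamp the range to the budget (at least 1, at most HARD_LIMIT).
        budget = max(1, min(max_count, HARD_LIMIT))
        if (end - start + 1) > budget:
            end = start + budget - 1
        return start, end
```

Injecting the clock, as the real `UIState` does with `clock_func`, keeps the 30 ms window testable without sleeping.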
@@ -42,13 +43,17 @@ def weakref_cb(_, q=self._work_queue): num_threads = len(self._threads) if num_threads < self._max_workers: - thread_name = '%s_%d' % (self._thread_name_prefix or self, - num_threads) - t = threading.Thread(name=thread_name, target=_worker, - args=(weakref.ref(self, weakref_cb), - self._work_queue, - self._initializer, - self._initargs)) + thread_name = "%s_%d" % (self._thread_name_prefix or self, num_threads) + t = threading.Thread( + name=thread_name, + target=_worker, + args=( + weakref.ref(self, weakref_cb), + self._work_queue, + self._initializer, + self._initargs, + ), + ) t.daemon = True t.start() self._threads.add(t) @@ -59,7 +64,7 @@ def weakref_cb(_, q=self._work_queue): def create_daemon_threadpool_executor( max_workers: int, thread_name_prefix: str = "" -) -> ThreadPoolExecutor: +) -> DaemonThreadPoolExecutor: """ Create a ThreadPoolExecutor whose worker threads are daemon threads. Returns a DaemonThreadPoolExecutor instance which is a subclass of ThreadPoolExecutor. @@ -97,7 +102,9 @@ class PriorityExecutor: Workers are daemon threads. """ - def __init__(self, max_workers: int, thread_name_prefix: str = "", maxsize: int = 0): + def __init__( + self, max_workers: int, thread_name_prefix: str = "", maxsize: int = 0 + ): if max_workers < 1: raise ValueError("max_workers must be >= 1") @@ -156,7 +163,9 @@ def _worker_loop(self) -> None: except Exception: pass - def submit(self, fn: Callable[..., Any], *args: Any, priority: int = 1, **kwargs: Any) -> Future: + def submit( + self, fn: Callable[..., Any], *args: Any, priority: int = 1, **kwargs: Any + ) -> Future: """Submit a task to the priority queue. Args: @@ -194,7 +203,9 @@ def shutdown(self, wait: bool = True, cancel_futures: bool = False) -> None: # Cancel queued work so workers can exit once queue empties. 
while True: try: - _priority, _neg_seq, _fn, _args, _kwargs, fut = self._queue.get_nowait() + _priority, _neg_seq, _fn, _args, _kwargs, fut = ( + self._queue.get_nowait() + ) except queue.Empty: break try: diff --git a/faststack/util/thumb_debug.py b/faststack/util/thumb_debug.py index a8628f3..7a8dfc7 100644 --- a/faststack/util/thumb_debug.py +++ b/faststack/util/thumb_debug.py @@ -24,7 +24,6 @@ "req_total": 0, "req_cache_hit": 0, "req_cache_miss": 0, - # Decode stats "decode_submitted": 0, "decode_coalesced": 0, @@ -32,7 +31,6 @@ "decode_done_ok": 0, "decode_done_fail": 0, "decode_cancelled": 0, - # Summary helpers "total_ms": 0.0, "max_ms": 0.0, @@ -60,11 +58,11 @@ def log_trace(event: str, **kwargs): """Log a verbose trace event if enabled.""" if not trace_enabled: return - + parts = [f"thumbtrace event={event}"] for k, v in kwargs.items(): parts.append(f"{k}={v}") - + log.info(" ".join(parts)) @@ -87,7 +85,9 @@ def gauge(name: str, value: Any): if name in _interval_stats: _interval_stats[name] = value if name == "qdepth": - _interval_stats["qdepth_max"] = max(_interval_stats["qdepth_max"], value) + _interval_stats["qdepth_max"] = max( + _interval_stats["qdepth_max"], value + ) def inc_request_count(): @@ -117,7 +117,7 @@ def record_stat(name: str, value: Any): class ThumbTimer: """Timer and tracer for a single thumbnail request.""" - + def __init__(self, key: str, path: Optional[Path] = None, reason: str = "unknown"): self.rid = next(_rid_counter) self.key = key @@ -128,18 +128,18 @@ def __init__(self, key: str, path: Optional[Path] = None, reason: str = "unknown self._stage_start: float = 0.0 self.cancelled = False self.started = False - + # Timestamps (perf_counter) self.t_requested = time.perf_counter() self.t_queued: Optional[float] = None self.t_worker_start: Optional[float] = None self.t_done: Optional[float] = None - + # Priority info self.prio_submitted: Optional[int] = None self.prio_effective: Optional[int] = None self.coalesced_from: Optional[str] = None - + # Short path for logging self.src = str(path.name) if path else "none" @@ -163,11 +163,11 @@ def log_timing(self, **kwargs): """Log the final timing summary for this request.""" if not timing_enabled: return - + now = time.perf_counter() total_ms = (now - self.t_requested) * 1000 inc("total_ms", total_ms) - + # Base parts parts = [ "thumbtiming", @@ -176,39 +176,42 @@ def log_timing(self, **kwargs): f"src={self.src}", f"reason={self.reason}", ] - + # Priority info if self.prio_submitted is not None: parts.append(f"prio={self.prio_submitted}") - if self.prio_effective is not None and self.prio_effective != self.prio_submitted: + if ( + self.prio_effective is not None + and self.prio_effective != self.prio_submitted + ): parts.append(f"prio_eff={self.prio_effective}") if self.coalesced_from: parts.append(f"coalesced_from={self.coalesced_from}") - + # Overall timing parts.append(f"total_ms={total_ms:.1f}") - + # Phase timings - if self.t_queued: + if self.t_queued is not None: sched_ms = (self.t_queued - self.t_requested) * 1000 parts.append(f"sched_ms={sched_ms:.1f}") - - if self.t_worker_start: + + if self.t_worker_start is not None: wait_ms = (self.t_worker_start - self.t_queued) * 1000 parts.append(f"wait_ms={wait_ms:.1f}") - - if self.t_done: + + if self.t_done is not None: worker_ms = (self.t_done - self.t_worker_start) * 1000 parts.append(f"worker_ms={worker_ms:.1f}") # Stage breakdown for stage, ms in self.stages.items(): parts.append(f"{stage}_ms={ms:.1f}") - + # Extra tags for k, v in kwargs.items(): 
parts.append(f"{k}={v}") - + log.info(" ".join(parts)) @@ -216,22 +219,30 @@ def check_periodic_summary(): """Print the periodic summary if the interval has passed.""" if not (timing_enabled or trace_enabled): return - + global _last_summary_time now = time.monotonic() if now - _last_summary_time < _summary_interval: return - + with _stats_lock: stats = _interval_stats.copy() # Reset interval stats # NOTE: inflight and qdepth are persistent states, do NOT reset them to 0. # qdepth_max is reset so we see the max for the NEW interval. for k in [ - "req_total", "req_cache_hit", "req_cache_miss", - "decode_submitted", "decode_coalesced", "decode_started", - "decode_done_ok", "decode_done_fail", "decode_cancelled", - "total_ms", "max_ms", "qdepth_max" + "req_total", + "req_cache_hit", + "req_cache_miss", + "decode_submitted", + "decode_coalesced", + "decode_started", + "decode_done_ok", + "decode_done_fail", + "decode_cancelled", + "total_ms", + "max_ms", + "qdepth_max", ]: if k in _interval_stats: _interval_stats[k] = 0 @@ -239,15 +250,21 @@ def check_periodic_summary(): # Summary is useful if ANYTHING happened or if there is work in flight activity = ( - stats["req_total"] + stats["decode_submitted"] + - stats["decode_done_ok"] + stats["decode_cancelled"] + - stats["inflight"] + stats["req_total"] + + stats["decode_submitted"] + + stats["decode_done_ok"] + + stats["decode_cancelled"] + + stats["inflight"] ) if activity == 0: return - avg_ms = stats["total_ms"] / stats["decode_done_ok"] if stats["decode_done_ok"] > 0 else 0 - + avg_ms = ( + stats["total_ms"] / stats["decode_done_ok"] + if stats["decode_done_ok"] > 0 + else 0 + ) + log.info( f"thumbtiming-summary " f"REQ[tot={stats['req_total']} hit={stats['req_cache_hit']} miss={stats['req_cache_miss']}] " diff --git a/faststack/verify_cache_fix.py b/faststack/verify_cache_fix.py index 5135428..dafb102 100644 --- a/faststack/verify_cache_fix.py +++ b/faststack/verify_cache_fix.py @@ -9,21 +9,35 @@ from models import DecodedImage import numpy as np + def test_cache(): evicted = [] + def on_evict(k, v): evicted.append((k, v)) print(f"Evicted: {k}") cache = ByteLRUCache(max_bytes=100, size_of=sys.getsizeof, on_evict=on_evict) - img1 = DecodedImage(buffer=memoryview(np.zeros(60, dtype=np.uint8)), width=60, height=1, bytes_per_line=60, format="dummy_format") - img2 = DecodedImage(buffer=memoryview(np.zeros(60, dtype=np.uint8)), width=60, height=1, bytes_per_line=60, format="dummy_format") + img1 = DecodedImage( + buffer=memoryview(np.zeros(60, dtype=np.uint8)), + width=60, + height=1, + bytes_per_line=60, + format="dummy_format", + ) + img2 = DecodedImage( + buffer=memoryview(np.zeros(60, dtype=np.uint8)), + width=60, + height=1, + bytes_per_line=60, + format="dummy_format", + ) cache["k1"] = img1 print("Added k1") cache["k2"] = img2 print("Added k2") - + assert len(evicted) == 1 assert evicted[0][0] == "k1" print("Eviction verified!") @@ -33,6 +47,7 @@ def on_evict(k, v): assert evicted[1][0] == "k2" print("Popitem verification passed!") + if __name__ == "__main__": try: test_cache() @@ -40,5 +55,6 @@ def on_evict(k, v): except Exception as e: print(f"TEST FAILED: {e}") import traceback + traceback.print_exc() sys.exit(1) diff --git a/implementation_summary.md b/implementation_summary.md deleted file mode 100644 index 5ccff6c..0000000 --- a/implementation_summary.md +++ /dev/null @@ -1,20 +0,0 @@ -The "no full-res decode in grid view" gating cleanup has been completed with the requested 5 fixes and additional safety improvements. 
- -**Summary of Changes:** - -1. **`faststack/app.py`**: - * **Fix 1**: Moved the `_loupe_decode_allowed()` check to the very top of `_do_prefetch`. This ensures no resize handling or other side effects occur when prefetch is blocked (e.g. in grid view). - * **Fix 2**: In `_set_grid_view_active`, explicitly cleared `self.pending_prefetch_index = None` when entering grid view to prevent stale deferred prefetches from firing upon return to loupe. - * **Fix 3**: Simplified `_loupe_decode_allowed` to access `self._folder_loaded` directly, removing `getattr`. - -2. **`faststack/ui/provider.py`**: - * **Fix 4**: Connected `isGridViewActiveChanged` to `currentImageSourceChanged.emit` in `UIState.__init__`. This fixes a bug where the signal was being re-connected repeatedly in `_on_dialog_state_changed`, causing potential leaks and performance issues. - -3. **`faststack/qml/ThumbnailGridView.qml`** & **`faststack/qml/Main.qml`**: - * **Fix 5**: Centralized prefetch gating ownership in `Main.qml` and improved robustness. - * **Main.qml**: Updated `onIsGridViewActiveChanged` and `gridViewLoader.onLoaded` to safely check for item existence and validity inside `Qt.callLater` callbacks, preventing crashes if the loader state changes quickly. - * **ThumbnailGridView.qml**: Removed the auto-enable logic from `Component.onCompleted`. Gated `onWidthChanged` and `onHeightChanged` timer restarts to only run when `prefetchEnabled` is true, avoiding unnecessary wakeups. - -**Verification:** -* Re-ran `pytest faststack/tests/test_startup_optimization.py` to ensure no regressions in existing tests. -* Confirmed logical consistency: grid view operations now strictly avoid full-res decode paths, and view switching handles state cleanup and QML property updates deterministically and safely.
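One more note on faststack/util/executors.py earlier in this diff: `PriorityExecutor.shutdown()` drains queue items shaped `(priority, _neg_seq, fn, args, kwargs, fut)`. The negated sequence number suggests that, among jobs of equal priority, the newest submission sorts first, which favors whatever the user is looking at right now over stale prefetches. A sketch of that ordering discipline with a plain heap (names hypothetical; inferred from the field name, not confirmed by the source):

```python
import heapq
import itertools

_seq = itertools.count()
_heap = []


def push(priority, job):
    # Lower priority value = more urgent; -seq breaks ties newest-first.
    heapq.heappush(_heap, (priority, -next(_seq), job))


push(1, "prefetch-old")
push(0, "visible")
push(1, "prefetch-new")

order = [heapq.heappop(_heap)[2] for _ in range(len(_heap))]
assert order == ["visible", "prefetch-new", "prefetch-old"]
```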