diff --git a/ChangeLog.md b/ChangeLog.md
index c77e639..a30cf56 100644
--- a/ChangeLog.md
+++ b/ChangeLog.md
@@ -11,6 +11,7 @@ Todo: More testing Linux / Mac. Create Windows .exe. Write better document
- Enhanced metadata display with camera-style shutter speed formatting.
- Added new thumbnail badges for Backups (Bk) and Developed (D) variants.
- Improved cache eviction handling and thread-safety for concurrent operations.
+- Fixed a bug where deleting an image could corrupt the batch selection ranges if the delete was cancelled, failed, or undone.
## 1.5.8 (2026-02-13)
diff --git a/faststack/all_verification_results.txt b/faststack/all_verification_results.txt
deleted file mode 100644
index 0948837..0000000
Binary files a/faststack/all_verification_results.txt and /dev/null differ
diff --git a/faststack/all_verification_results_utf8.txt b/faststack/all_verification_results_utf8.txt
deleted file mode 100644
index 2f22428..0000000
--- a/faststack/all_verification_results_utf8.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-============================= test session starts =============================
-platform win32 -- Python 3.12.10, pytest-9.0.2, pluggy-1.6.0 -- C:\code\faststack\faststack\verify_venv\Scripts\python.exe
-rootdir: C:\code\faststack
-configfile: pyproject.toml
-collecting ... collected 14 items
-
-tests\test_editor_rotation.py::test_rotated_rect_edge_cases PASSED [ 7%]
-tests\test_editor_rotation.py::test_rotated_rect_calculation_branches[100-100-0] PASSED [ 14%]
-tests\test_editor_rotation.py::test_rotated_rect_calculation_branches[200-100-45] PASSED [ 21%]
-tests\test_editor_rotation.py::test_rotated_rect_calculation_branches[1000-500-15] PASSED [ 28%]
-tests\test_editor_rotation.py::test_rotated_rect_calculation_branches[500-1000-15] PASSED [ 35%]
-tests\test_editor_rotation.py::test_rotate_autocrop_rgb_behavior PASSED [ 42%]
-tests\test_editor_rotation.py::test_boundary_clamping PASSED [ 50%]
-tests\test_editor_rotation.py::test_integration_straighten_modes FAILED [ 57%]
-tests\test_editor_rotation.py::test_rotate_cw PASSED [ 64%]
-tests\test_editor_rotation.py::test_rotate_ccw PASSED [ 71%]
-tests\test_rotation_unittest.py::TestEditorRotation::test_rotate_cw PASSED [ 78%]
-tests\test_rotation_unittest.py::TestEditorRotation::test_straighten_angle PASSED [ 85%]
-tests\test_editor_integration.py::TestEditorIntegration::test_missing_methods
diff --git a/faststack/app.py b/faststack/app.py
index d94daff..5436eb6 100644
--- a/faststack/app.py
+++ b/faststack/app.py
@@ -16,6 +16,7 @@
import uuid
import bisect
import functools
+from collections import deque
# Must set before importing PySide6
os.environ["QT_LOGGING_RULES"] = "qt.qpa.mime.warning=false"
@@ -268,7 +269,7 @@ def __init__(
# Cache Warning State
self._last_cache_warning_time = 0
self._eviction_lock = threading.Lock()
- self._eviction_timestamps = [] # List of eviction timestamps for rate detection
+ self._eviction_timestamps: deque[float] = deque() # Rolling window for rate detection
self.display_ready = False # Track if display size has been reported
self.pending_prefetch_index: Optional[int] = None # Deferred prefetch index
@@ -2076,6 +2077,7 @@ def _get_metadata_dict(self, stem: str) -> dict:
"edited": getattr(meta, "edited", False),
"restacked": getattr(meta, "restacked", False),
"favorite": getattr(meta, "favorite", False),
+ "todo": getattr(meta, "todo", False),
}
except Exception as e: # Broad catch for UI plumbing - don't crash grid view
log.debug("Failed to get metadata for %s: %s", stem, e)
@@ -2085,6 +2087,7 @@ def _get_metadata_dict(self, stem: str) -> dict:
"edited": False,
"restacked": False,
"favorite": False,
+ "todo": False,
}
def _get_bulk_metadata_map(self) -> Dict[str, dict]:
@@ -2099,6 +2102,7 @@ def _get_bulk_metadata_map(self) -> Dict[str, dict]:
"edited": getattr(meta, "edited", False),
"restacked": getattr(meta, "restacked", False),
"favorite": getattr(meta, "favorite", False),
+ "todo": getattr(meta, "todo", False),
}
except Exception as e:
log.warning("Failed to build bulk metadata map: %s", e)
@@ -2162,6 +2166,31 @@ def toggle_uploaded(self):
self.update_status_message(f"Marked as {status}")
log.info("Toggled uploaded flag to %s for %s", meta.uploaded, stem)
+ def toggle_todo(self):
+ """Toggle todo flag for current image."""
+ if not self.image_files or self.current_index >= len(self.image_files):
+ return
+
+ from datetime import datetime
+
+ today = datetime.now().strftime("%Y-%m-%d")
+ stem = self.image_files[self.current_index].path.stem
+ meta = self.sidecar.get_metadata(stem)
+
+ meta.todo = not getattr(meta, "todo", False)
+ if meta.todo:
+ meta.todo_date = today
+ else:
+ meta.todo_date = None
+
+ self.sidecar.save()
+ self._metadata_cache_index = (-1, -1)
+ self.dataChanged.emit()
+ self.sync_ui_state()
+ status = "todo" if meta.todo else "not todo"
+ self.update_status_message(f"Marked as {status}")
+ log.info("Toggled todo flag to %s for %s", meta.todo, stem)
+
def toggle_edited(self):
"""Toggle edited flag for current image."""
if not self.image_files or self.current_index >= len(self.image_files):
@@ -2308,6 +2337,8 @@ def get_current_metadata(self) -> Dict:
"restacked": meta.restacked,
"restacked_date": meta.restacked_date or "",
"favorite": meta.favorite,
+ "todo": getattr(meta, "todo", False),
+ "todo_date": getattr(meta, "todo_date", None) or "",
"stack_info_text": stack_info,
"batch_info_text": batch_info,
}
@@ -4164,6 +4195,41 @@ def _on_perm_done(future):
self._rebuild_path_to_index()
self.sync_ui_state()
+ @staticmethod
+ def _recompute_batches_after_deletions(
+ saved_batches: List[List[int]], still_deleted: List[int]
+ ) -> List[List[int]]:
+ """Return a copy of saved_batches with index spans adjusted for still_deleted.
+
+ Used during partial rollbacks: start from the pre-delete snapshot and
+ re-apply only the deletions that were not reversed.
+ """
+ if not still_deleted:
+ return [b[:] for b in saved_batches]
+
+ deleted_set = set(still_deleted)
+
+ def _shift(orig_idx: int) -> int:
+ return orig_idx - sum(1 for d in still_deleted if d < orig_idx)
+
+ new_batches = []
+ for b_start, b_end in saved_batches:
+ first_ok = next(
+ (i for i in range(b_start, b_end + 1) if i not in deleted_set),
+ None,
+ )
+ if first_ok is None:
+ continue # whole batch still deleted — drop it
+ last_ok = next(
+ (i for i in range(b_end, b_start - 1, -1) if i not in deleted_set),
+ None,
+ )
+ ns = _shift(first_ok)
+ ne = _shift(last_ok)
+ if ns <= ne:
+ new_batches.append([ns, ne])
+ return new_batches
+
def _rollback_ui_items(self, items: List[Tuple[int, Any]], job: DeleteJob) -> None:
"""Restore items to the UI list in correct order."""
# Sort reverse by index to insert correctly
@@ -4193,8 +4259,18 @@ def _rollback_ui_items(self, items: List[Tuple[int, Any]], job: DeleteJob) -> No
# Restore saved batch state if present
if job.saved_batches and items:
- self.batches = job.saved_batches
- self.batch_start_index = job.saved_batch_start_index
+ original = {idx for idx, _ in job.removed_items}
+ restored = {idx for idx, _ in items}
+ if restored == original:
+ # Full rollback: restore pre-delete snapshot directly
+ self.batches = [b[:] for b in job.saved_batches]
+ self.batch_start_index = job.saved_batch_start_index
+ else:
+ # Partial rollback: re-apply the deletions that were not reversed
+ still_deleted = sorted(original - restored)
+ self.batches = self._recompute_batches_after_deletions(
+ job.saved_batches, still_deleted
+ )
self._invalidate_batch_cache()
def _schedule_delete_refresh(self) -> None:
@@ -4286,6 +4362,7 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict:
for idx in sorted(sorted_indices)
if 0 <= idx < len(self.image_files)
]
+ original_count = len(self.image_files)
previous_index = self.current_index
# Remove from in-memory list immediately for instant visual feedback
@@ -4294,10 +4371,65 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict:
del self.image_files[idx]
# Reposition current_index immediately (fast, in-memory only)
+ validated_sorted = sorted(i for i in sorted_indices if 0 <= i < original_count)
+ deleted_set = set(validated_sorted)
if not self.image_files:
self.current_index = 0
- else:
+ elif previous_index in deleted_set:
+ # Current image was deleted → stay at same position (shows next image) or clamp
self.current_index = min(previous_index, len(self.image_files) - 1)
+ else:
+ # Current image survived → shift index down for each deletion before it
+ shift = sum(1 for d in validated_sorted if d < previous_index)
+ self.current_index = max(0, min(previous_index - shift, len(self.image_files) - 1))
+
+ # Save batch state before mutation so _rollback_ui_items can restore it
+ # for any delete type (loupe, grid, batch). batch delete_batch_images()
+ # will overwrite saved_batches with the same pre-mutation value anyway.
+ pre_batch_snapshot = [b[:] for b in self.batches] if self.batches else None
+ pre_batch_start_snapshot = self.batch_start_index if self.batches else None
+
+ # Adjust batch index ranges to account for removed entries.
+ # Deleting index d shifts every index > d down by one. Without this,
+ # batches that sit above any deleted image reference the wrong files.
+ if self.batches:
+ deleted_ascending = sorted(validated_sorted)
+
+ def _shift(orig_idx: int) -> int:
+ return orig_idx - sum(1 for d in deleted_ascending if d < orig_idx)
+
+ new_batches = []
+ for b_start, b_end in self.batches:
+ # Anchor on the first and last *surviving* indices in the range.
+ # If every image in the batch was deleted, discard it entirely so
+ # it cannot migrate onto unrelated images.
+ first_ok = next(
+ (i for i in range(b_start, b_end + 1) if i not in deleted_set),
+ None,
+ )
+ if first_ok is None:
+ continue # whole batch deleted — drop it
+ last_ok = next(
+ (i for i in range(b_end, b_start - 1, -1) if i not in deleted_set),
+ None,
+ )
+ ns = _shift(first_ok)
+ ne = _shift(last_ok)
+ if ns <= ne:
+ new_batches.append([ns, ne])
+ if new_batches != self.batches:
+ self.batches = new_batches
+ self._invalidate_batch_cache()
+
+ # Adjust batch_start_index for removed entries
+ if pre_batch_start_snapshot is not None:
+ if pre_batch_start_snapshot in deleted_set:
+ self.batch_start_index = None
+ else:
+ shifted = _shift(pre_batch_start_snapshot)
+ if shifted != self.batch_start_index:
+ self.batch_start_index = shifted
+ self._invalidate_batch_cache()
# Update UI immediately - this is fast since it just reads from memory
# Check for existence, not truthiness (empty cache is falsy)
@@ -4375,6 +4507,8 @@ def _delete_indices(self, indices: List[int], action_type: str) -> dict:
cancel_event=cancel_event,
previous_index=previous_index,
images_to_delete=images_to_delete,
+ saved_batches=pre_batch_snapshot,
+ saved_batch_start_index=pre_batch_start_snapshot,
)
# Add single placeholder undo entry per job
@@ -4463,7 +4597,7 @@ def delete_batch_images(self):
indices_to_delete.add(i)
# 2. Save batch state for rollback, then clear optimistically
- saved_batches = list(self.batches)
+ saved_batches = [b[:] for b in self.batches]
saved_batch_start = self.batch_start_index
# 3. Call unified engine
@@ -4669,6 +4803,11 @@ def undo_delete(self):
if self.image_files:
self.prefetcher.update_prefetch(self.current_index)
self._rebuild_path_to_index()
+ # Restore batch state that was shifted during _delete_indices
+ if job.saved_batches and removed_items:
+ self.batches = job.saved_batches
+ self.batch_start_index = job.saved_batch_start_index
+ self._invalidate_batch_cache()
self.sync_ui_state()
count = len(removed_items)
@@ -4870,6 +5009,16 @@ def shutdown_qt(self):
log.info("Detaching QML engine.")
self.engine = None
+ @staticmethod
+ def _safe_shutdown_executor(executor, name, *, wait=False, cancel_futures=True):
+ """Shut down a single executor, logging and swallowing any error."""
+ if executor is None:
+ return
+ try:
+ executor.shutdown(wait=wait, cancel_futures=cancel_futures)
+ except Exception as e:
+ log.warning("Error shutting down %s executor: %s", name, e, exc_info=True)
+
def shutdown_nonqt(self):
"""Shutdown non-Qt resources - safe to run in background thread."""
log.info("Shutting down background resources.")
@@ -4892,21 +5041,17 @@ def shutdown_nonqt(self):
if not (entry[0] == "pending_delete" and entry[1] in pending_ids)
]
- # Shutdown thread pool executors
- try:
- log.info("Shutting down background executors...")
- self._hist_executor.shutdown(wait=False, cancel_futures=True)
- self._preview_executor.shutdown(wait=False, cancel_futures=True)
-
- exif_exec = getattr(self, "_exif_executor", None)
- if exif_exec:
- exif_exec.shutdown(wait=False, cancel_futures=True)
-
- # wait=True ensures pending saves/deletes complete to avoid data loss/corruption
- self._save_executor.shutdown(wait=True, cancel_futures=False)
- self._delete_executor.shutdown(wait=True, cancel_futures=False)
- except Exception as e:
- log.warning("Error shutting down executors: %s", e)
+ # Shutdown thread pool executors — each isolated so one failure can't
+ # prevent the others (especially save/delete) from shutting down.
+ log.info("Shutting down background executors...")
+ self._safe_shutdown_executor(self._hist_executor, "histogram", wait=False)
+ self._safe_shutdown_executor(self._preview_executor, "preview", wait=False)
+ self._safe_shutdown_executor(
+ getattr(self, "_exif_executor", None), "exif", wait=False,
+ )
+ # wait=True ensures pending saves/deletes complete to avoid data loss/corruption
+ self._safe_shutdown_executor(self._save_executor, "save", wait=True, cancel_futures=False)
+ self._safe_shutdown_executor(self._delete_executor, "delete", wait=True, cancel_futures=False)
# Shutdown prefetcher
try:
@@ -4972,17 +5117,55 @@ def empty_recycle_bin(self):
clear_raw_count_cache()
log.info("Emptied recycle bins and cleared delete history")
- def _on_cache_evict(self, key, value):
- """Callback for when the image cache evicts an item."""
+ def _on_cache_evict(self, key, value, info):
+ """Callback for when the image cache evicts an item.
+
+ Args:
+ key: Cache key that was evicted.
+ value: Cached value that was evicted.
+ info: Dict with eviction context captured at eviction time:
+ reason ("pressure"|"replace"|"manual"), usage_bytes, max_bytes,
+ entry_count, thread_id.
+ """
+ reason = info.get("reason", "unknown")
+
+ # Only count capacity-pressure evictions toward thrashing detection.
+ # Replacements and manual removals (pop_path, popitem resize) are not
+ # indicators of cache size being too small.
+ if reason != "pressure":
+ if self.debug_cache:
+ log.debug(
+ "Cache evict (skipped for thrash): reason=%s key=%s",
+ reason,
+ key,
+ )
+ return
+
+ # Use usage captured at eviction time (inside the lock), not current
+ # currsize which may be stale if clear()/evict_paths() ran between
+ # the eviction and this callback executing outside the lock.
+ eviction_usage = info.get("usage_bytes", 0)
+ eviction_max = info.get("max_bytes", 1)
+
+ if self.debug_cache:
+ log.debug(
+ "Cache evict (pressure): key=%s usage=%.2fMB/%.2fMB "
+ "entries=%d thread=%s",
+ key,
+ eviction_usage / (1024**2),
+ eviction_max / (1024**2),
+ info.get("entry_count", -1),
+ info.get("thread_id", "?"),
+ )
+
now = time.time()
with self._eviction_lock:
- # 1. Record eviction timestamp / prune
+ # 1. Record eviction timestamp / prune oldest outside window
self._eviction_timestamps.append(now)
cutoff = now - CACHE_THRASH_WINDOW_SECS
- self._eviction_timestamps = [
- t for t in self._eviction_timestamps if t > cutoff
- ]
+ while self._eviction_timestamps and self._eviction_timestamps[0] <= cutoff:
+ self._eviction_timestamps.popleft()
# 2. Check for thrashing (e.g., > threshold evictions in window)
if len(self._eviction_timestamps) > CACHE_THRASH_THRESHOLD:
@@ -4991,11 +5174,11 @@ def _on_cache_evict(self, key, value):
self._last_cache_warning_time = now
self._has_warned_cache_full = True
- # UI update logic
- used_gb = self.image_cache.currsize / (1024**3)
- max_gb = self.image_cache.max_bytes / (1024**3)
+ # Use captured usage from eviction time for accurate reporting
+ used_gb = eviction_usage / (1024**3)
+ max_gb = eviction_max / (1024**3)
- # Include key/value summary to fix lint error and provide context
+ # Include key/value summary for context
val_summary = ""
if hasattr(value, "width") and hasattr(value, "height"):
val_summary = f" ({value.width}x{value.height})"
@@ -6304,7 +6487,7 @@ def execute_crop(self):
@Slot()
def auto_levels(self):
"""Calculates and applies auto levels (preview only). Returns False if skipped."""
- if not self.image_files:
+ if not self.image_files or self.current_index >= len(self.image_files):
self.update_status_message("No image to adjust")
return False
diff --git a/faststack/check_daemon.py b/faststack/check_daemon.py
deleted file mode 100644
index e8a3de3..0000000
--- a/faststack/check_daemon.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import threading
-from concurrent.futures import ThreadPoolExecutor
-
-
-def set_daemon():
- try:
- threading.current_thread().daemon = True
- print(f"Set daemon for {threading.current_thread().name}")
- except Exception as e:
- print(f"Failed to set daemon for {threading.current_thread().name}: {e}")
-
-
-def check_daemon():
- return threading.current_thread().daemon
-
-
-if __name__ == "__main__":
- with ThreadPoolExecutor(max_workers=1, initializer=set_daemon) as executor:
- print(f"Result: {executor.submit(check_daemon).result()}")
-
diff --git a/faststack/check_scipy.py b/faststack/check_scipy.py
deleted file mode 100644
index f1df113..0000000
--- a/faststack/check_scipy.py
+++ /dev/null
@@ -1,6 +0,0 @@
-try:
- import scipy.ndimage
-
- print("scipy available")
-except ImportError:
- print("scipy NOT available")
diff --git a/faststack/imaging/cache.py b/faststack/imaging/cache.py
index 1e190e0..7692c43 100644
--- a/faststack/imaging/cache.py
+++ b/faststack/imaging/cache.py
@@ -1,6 +1,8 @@
"""Byte-aware LRU cache for storing decoded image data (CPU and GPU)."""
+import inspect
import logging
+from collections import deque
from pathlib import Path
from typing import Any, Callable, Optional, Union
import time
@@ -51,9 +53,10 @@ def __init__(
self,
max_bytes: int,
size_of: Callable[[Any], int] = get_decoded_image_size,
- on_evict: Optional[Callable[[Any, Any], None]] = None,
+ on_evict: Optional[Callable[..., None]] = None,
):
super().__init__(maxsize=max_bytes, getsizeof=size_of)
+ self._on_evict_arity = self._detect_arity(on_evict)
self.on_evict = on_evict
# RLock is required: __setitem__ holds _lock and calls super().__setitem__(),
# which may call our overridden popitem() for LRU eviction. A non-reentrant
@@ -66,6 +69,10 @@ def __init__(
self._tombstone_expiry: dict[str, float] = {}
self._pending_callbacks: Optional[list[Callable[[], None]]] = None
self._pending_callbacks_owner: Optional[int] = None
+ # Flag: True when __delitem__ is being called from __setitem__'s capacity
+ # eviction path (popitem), as opposed to targeted removal (pop_path, evict_paths).
+ self._pressure_eviction_active = False
+ self._pressure_eviction_owner: Optional[int] = None
log.info(
f"Initialized byte-aware LRU cache with {max_bytes / 1024**2:.2f} MB capacity."
)
@@ -82,6 +89,47 @@ def max_bytes(self, value: int) -> None:
self.maxsize = v
log.debug(f"Cache max_bytes updated to {v / 1024**2:.2f} MB")
+ @staticmethod
+ def _detect_arity(callback: Optional[Callable]) -> int:
+ """Detect whether callback accepts 2 args (key, value) or 3 (key, value, info)."""
+ if callback is None:
+ return 2
+ try:
+ sig = inspect.signature(callback)
+ # Count parameters that can accept positional args
+ positional = sum(
+ 1
+ for p in sig.parameters.values()
+ if p.kind
+ in (
+ inspect.Parameter.POSITIONAL_ONLY,
+ inspect.Parameter.POSITIONAL_OR_KEYWORD,
+ )
+ and p.default is inspect.Parameter.empty
+ )
+ return 3 if positional >= 3 else 2
+ except (ValueError, TypeError):
+ return 2
+
+ def _fire_evict(self, key: Any, value: Any, info: dict) -> None:
+ """Invoke on_evict, dispatching by detected arity."""
+ if not self.on_evict:
+ return
+ if self._on_evict_arity >= 3:
+ self.on_evict(key, value, info)
+ else:
+ self.on_evict(key, value)
+
+ def _build_eviction_info(self, reason: str, pre_usage: int) -> dict:
+ """Build eviction context dict captured at eviction time (inside lock)."""
+ return {
+ "reason": reason,
+ "usage_bytes": pre_usage,
+ "max_bytes": self.maxsize,
+ "entry_count": len(self),
+ "thread_id": threading.get_ident(),
+ }
+
def __setitem__(self, key, value):
pending_callbacks = []
with self._lock:
@@ -122,6 +170,9 @@ def __setitem__(self, key, value):
# callbacks triggered by popitem() -> __delitem__().
self._pending_callbacks = pending_callbacks
self._pending_callbacks_owner = threading.get_ident()
+ # Mark that any __delitem__ calls from here are capacity-pressure evictions
+ self._pressure_eviction_active = True
+ self._pressure_eviction_owner = threading.get_ident()
try:
super().__setitem__(key, value)
@@ -129,13 +180,16 @@ def __setitem__(self, key, value):
# for the old value, because cachetools.__setitem__ for replacements
# does not call __delitem__ (it just overwrites the dict entry).
if old_value is not _MISSING and self.on_evict:
+ info = self._build_eviction_info("replace", self.currsize)
+ info["inserting_key"] = str(key)
- def _replace_cb(k=key, v=old_value):
- if self.on_evict:
- self.on_evict(k, v)
+ def _replace_cb(k=key, v=old_value, _info=info):
+ self._fire_evict(k, v, _info)
pending_callbacks.append(_replace_cb)
finally:
+ self._pressure_eviction_active = False
+ self._pressure_eviction_owner = None
self._pending_callbacks = None
self._pending_callbacks_owner = None
@@ -173,16 +227,34 @@ def __delitem__(self, key):
except KeyError:
raise KeyError(key) from None
+ # Capture usage BEFORE deletion for accurate thrashing detection.
+ # After super().__delitem__, currsize will already be decremented.
+ pre_usage = self.currsize
+
+ # Determine eviction reason based on calling context.
+ # This is a heuristic: _pressure_eviction_active is only True when
+ # __setitem__ is executing super().__setitem__(), which calls
+ # popitem() when currsize + new_size > maxsize (cachetools LRU).
+ # Any other path into __delitem__ — pop_path(), direct del,
+ # popitem() from manual cache resize — is classified as "manual"
+ # by design, since those are intentional removals, not capacity
+ # pressure indicating the cache is too small.
+ is_pressure = (
+ self._pressure_eviction_active
+ and threading.get_ident() == self._pressure_eviction_owner
+ )
+ reason = "pressure" if is_pressure else "manual"
+
super().__delitem__(key)
log.debug(
f"Removed item '{key}'. Cache size: {self.currsize / 1024**2:.2f} MB"
)
if self.on_evict:
+ info = self._build_eviction_info(reason, pre_usage)
- def _callback_func(k=key, v=value):
- if self.on_evict:
- self.on_evict(k, v)
+ def _callback_func(k=key, v=value, _info=info):
+ self._fire_evict(k, v, _info)
# If we are inside a call that defers callbacks (like __setitem__ or evict_paths),
# append to the shared list.
@@ -206,15 +278,21 @@ def get(self, key, default=None):
return super().get(key, default)
def clear(self):
- """Clear cache without triggering eviction callbacks."""
- # Temporarily disable callback to prevent "thrashing" warnings during mass clear
+ """Clear cache without triggering eviction callbacks.
+
+ Uses _pending_callbacks discard pattern (same as evict_paths) rather
+ than setting on_evict=None, which would race with closures that read
+ on_evict outside the lock on other threads.
+ """
with self._lock:
- saved_callback = self.on_evict
- self.on_evict = None
+ _discard: list[Callable[[], None]] = []
+ self._pending_callbacks = _discard
+ self._pending_callbacks_owner = threading.get_ident()
try:
super().clear()
finally:
- self.on_evict = saved_callback
+ self._pending_callbacks = None
+ self._pending_callbacks_owner = None
def pop_path(self, path: Union[Path, str]):
"""Targeted invalidation of all generations for a given path.
@@ -309,15 +387,17 @@ def evict_paths(self, paths: list[Union[Path, str]]):
if str(key).startswith(prefix_tuple):
keys_to_remove.append(key)
- # 4. Remove keys
+ # 4. Remove keys — capture eviction callbacks but discard them,
+ # since these are intentional removals, not LRU pressure.
+ # We use _pending_callbacks to collect (and then drop) rather than
+ # setting on_evict=None, which would race with closures that read
+ # on_evict outside the lock.
removed_bytes = 0
- pending_callbacks = []
- self._pending_callbacks = pending_callbacks
+ _discard = []
+ self._pending_callbacks = _discard
self._pending_callbacks_owner = threading.get_ident()
try:
for k in keys_to_remove:
- # Use self.pop (which calls __delitem__) to trigger eviction callbacks.
- # It will re-acquire our RLock safely.
val = self.pop(k, None)
if val is not None:
try:
@@ -328,13 +408,7 @@ def evict_paths(self, paths: list[Union[Path, str]]):
finally:
self._pending_callbacks = None
self._pending_callbacks_owner = None
-
- # Execute all captured eviction callbacks OUTSIDE the lock
- for callback in pending_callbacks:
- try:
- callback()
- except Exception:
- log.exception("Error in eviction callback")
+ # _discard is intentionally not executed
if keys_to_remove:
log.info(
diff --git a/faststack/integration_results.txt b/faststack/integration_results.txt
deleted file mode 100644
index 53f7d04..0000000
Binary files a/faststack/integration_results.txt and /dev/null differ
diff --git a/faststack/integration_traceback.txt b/faststack/integration_traceback.txt
deleted file mode 100644
index efb9cdb..0000000
Binary files a/faststack/integration_traceback.txt and /dev/null differ
diff --git a/faststack/models.py b/faststack/models.py
index 776e619..1207245 100644
--- a/faststack/models.py
+++ b/faststack/models.py
@@ -72,6 +72,8 @@ class EntryMetadata:
restacked: bool = False
restacked_date: Optional[str] = None
favorite: bool = False
+ todo: bool = False
+ todo_date: Optional[str] = None
@dataclasses.dataclass
diff --git a/faststack/path_check.txt b/faststack/path_check.txt
deleted file mode 100644
index e253897..0000000
Binary files a/faststack/path_check.txt and /dev/null differ
diff --git a/faststack/qml/FilterDialog.qml b/faststack/qml/FilterDialog.qml
index 8246650..a27e813 100644
--- a/faststack/qml/FilterDialog.qml
+++ b/faststack/qml/FilterDialog.qml
@@ -110,6 +110,14 @@ Dialog {
Material.accent: "#ce93d8"
onCheckedChanged: _collectFlags()
}
+ CheckBox {
+ id: cbTodo
+ text: "Todo"
+ checked: false
+ Material.foreground: filterDialog.textColor
+ Material.accent: "#64B5F6"
+ onCheckedChanged: _collectFlags()
+ }
CheckBox {
id: cbFavorite
text: "Favorite"
@@ -136,6 +144,7 @@ Dialog {
if (cbStacked.checked) flags.push("stacked")
if (cbEdited.checked) flags.push("edited")
if (cbRestacked.checked) flags.push("restacked")
+ if (cbTodo.checked) flags.push("todo")
if (cbFavorite.checked) flags.push("favorite")
filterDialog.filterFlags = flags
}
@@ -156,6 +165,7 @@ Dialog {
cbStacked.checked = currentFlags.indexOf("stacked") >= 0
cbEdited.checked = currentFlags.indexOf("edited") >= 0
cbRestacked.checked = currentFlags.indexOf("restacked") >= 0
+ cbTodo.checked = currentFlags.indexOf("todo") >= 0
cbFavorite.checked = currentFlags.indexOf("favorite") >= 0
filterField.forceActiveFocus()
diff --git a/faststack/qml/Main.qml b/faststack/qml/Main.qml
index ab535ca..a088fc0 100644
--- a/faststack/qml/Main.qml
+++ b/faststack/qml/Main.qml
@@ -68,6 +68,7 @@ ApplicationWindow {
}
if (uiState && uiState.hasRecycleBinItems) {
close.accepted = false
+ uiState.refreshRecycleBinStats()
recycleBinCleanupDialog.open()
} else {
close.accepted = true
@@ -1015,6 +1016,11 @@ ApplicationWindow {
color: "lightgreen"
visible: uiState ? (uiState.imageCount > 0 && uiState.isUploaded) : false
}
+ Label {
+ text: uiState ? (uiState.todoDate ? ` Todo since ${uiState.todoDate}` : " Todo") : ""
+ color: "#64B5F6"
+ visible: uiState ? (uiState.imageCount > 0 && uiState.isTodo) : false
+ }
Label {
text: uiState ? ` Edited on ${uiState.editedDate}` : ""
color: "lightgreen"
@@ -1374,6 +1380,7 @@ ApplicationWindow {
" }: End current batch
" +
" \\: Clear all batches
" +
"Flag Toggles:
" +
+ " D: Toggle todo flag
" +
" F: Toggle favorite flag
" +
" U: Toggle uploaded flag
" +
" Ctrl+E: Toggle edited flag
" +
@@ -1620,13 +1627,14 @@ ApplicationWindow {
Behavior on height { NumberAnimation { duration: 250; easing.type: Easing.OutCubic } }
ScrollView {
+ id: detailsScrollView
anchors.fill: parent
anchors.margins: 8
-
+
TextArea {
id: detailsText
- width: parent.width
+ width: detailsScrollView.availableWidth
text: uiState ? uiState.recycleBinDetailedText : ""
color: root.isDarkTheme ? "#efefef" : "#333333"
font.family: "Consolas, 'Courier New', monospace"
@@ -1635,7 +1643,9 @@ ApplicationWindow {
wrapMode: Text.WrapAnywhere
readOnly: true
selectByMouse: true
- background: null
+ background: Rectangle {
+ color: "transparent"
+ }
}
}
}
diff --git a/faststack/qml/ThumbnailGridView.qml b/faststack/qml/ThumbnailGridView.qml
index bff21ce..90df553 100644
--- a/faststack/qml/ThumbnailGridView.qml
+++ b/faststack/qml/ThumbnailGridView.qml
@@ -59,6 +59,7 @@ Item {
tileIsEdited: isEdited || false
tileIsRestacked: isRestacked || false
tileIsFavorite: isFavorite || false
+ tileIsTodo: isTodo || false
tileIsInBatch: isInBatch || false
tileIsCurrent: isCurrent || false
tileThumbnailSource: thumbnailSource || ""
diff --git a/faststack/qml/ThumbnailTile.qml b/faststack/qml/ThumbnailTile.qml
index be2de0c..8b31fc4 100644
--- a/faststack/qml/ThumbnailTile.qml
+++ b/faststack/qml/ThumbnailTile.qml
@@ -16,6 +16,7 @@ Item {
property bool tileIsEdited: false
property bool tileIsRestacked: false
property bool tileIsFavorite: false
+ property bool tileIsTodo: false
property bool tileIsInBatch: false
property bool tileIsCurrent: false
property string tileThumbnailSource: ""
@@ -45,6 +46,7 @@ Item {
// Flag colors for badges
property color stackedColor: "#FF9800" // Orange for stacked (S)
property color uploadedColor: "#4CAF50" // Green for uploaded (U)
+ property color todoColor: "#2196F3" // Blue for todo (D)
property color editedColor: "#FFEB3B" // Yellow for edited (E)
property color restackedColor: "#FF9800" // Orange for restacked (R)
property color favoriteColor: "#FFD700" // Gold for favorite (F)
@@ -203,6 +205,22 @@ Item {
}
}
+ // Todo badge (D) - Blue
+ Rectangle {
+ visible: tile.tileIsTodo
+ width: 18
+ height: 18
+ radius: 3
+ color: todoColor
+ Text {
+ anchors.centerIn: parent
+ text: "D"
+ font.pixelSize: 11
+ font.bold: true
+ color: "white"
+ }
+ }
+
// Favorite badge (F) - Gold
Rectangle {
visible: tile.tileIsFavorite
@@ -429,7 +447,7 @@ Item {
property string numFont: "Consolas, Monaco, monospace"
property int numSize: 11
- // Coverage sparkline (dual-channel: upload green, stack orange)
+ // Coverage sparkline (triple-channel: upload green, stack orange, todo red)
Row {
id: sparklineRow
anchors.horizontalCenter: parent.horizontalCenter
@@ -451,7 +469,7 @@ Item {
color: tile.counterUploadedCol
opacity: modelData[0] * 0.9 + 0.1 // 0.1 base opacity, up to 1.0
}
- // Stack bar (orange) - bottom
+ // Stack bar (orange) - middle
Rectangle {
width: 3
height: 2
@@ -459,6 +477,14 @@ Item {
color: tile.counterStackedCol
opacity: modelData[1] * 0.9 + 0.1 // 0.1 base opacity, up to 1.0
}
+ // Todo bar (red) - bottom
+ Rectangle {
+ width: 3
+ height: 2
+ radius: 0.5
+ color: "#F44336"
+ opacity: modelData[2] * 0.9 + 0.1 // 0.1 base opacity, up to 1.0
+ }
}
}
}
diff --git a/faststack/repro_cache_lock.py b/faststack/repro_cache_lock.py
deleted file mode 100644
index 0361007..0000000
--- a/faststack/repro_cache_lock.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import threading
-from faststack.imaging.cache import ByteLRUCache
-
-
-def repro_lock_contention():
- lock_held_during_callback = False
-
- def on_evict_callback(key, value):
- nonlocal lock_held_during_callback
- # Try to acquire the same lock. If it's held by the current thread (RLock),
- # we can check if it would block others or if we can detect it's held.
- # Since it's an RLock, current thread can re-acquire it.
- # But we can check if the lock is "locked" by looking at internal state
- # or just by the fact that we know we are in the callback.
-
- # A better way to check if the lock is held:
- # Since it's an RLock, it doesn't expose a simple "is_locked" that works across threads easily
- # but we can try to acquire it in a DIFFERENT thread.
-
- def check_lock():
- nonlocal lock_held_during_callback
- if not cache._lock.acquire(blocking=False):
- lock_held_during_callback = True
- else:
- cache._lock.release()
-
- t = threading.Thread(target=check_lock)
- t.start()
- t.join()
-
- cache = ByteLRUCache(max_bytes=100, size_of=lambda x: x, on_evict=on_evict_callback)
-
- print("Adding item 'a' (50 bytes)")
- cache["a"] = 50
- print("Adding item 'b' (50 bytes)")
- cache["b"] = 50
-
- print("Adding item 'c' (50 bytes) -> should trigger eviction of 'a'")
- cache["c"] = 50
-
- if lock_held_during_callback:
- print("FAILED: Lock was HELD during on_evict callback!")
- else:
- print("SUCCESS: Lock was NOT held during on_evict callback.")
-
-
-if __name__ == "__main__":
- repro_lock_contention()
diff --git a/faststack/repro_daemon_bug.py b/faststack/repro_daemon_bug.py
deleted file mode 100644
index 1a8ec92..0000000
--- a/faststack/repro_daemon_bug.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import concurrent.futures
-import threading
-import time
-
-
-def check_daemon():
- print(
- f"Thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}"
- )
-
-
-def test_failure_mimic():
- print("Main thread daemon:", threading.current_thread().daemon)
- executor_container = {}
-
- def creator():
- executor_container["executor"] = concurrent.futures.ThreadPoolExecutor(
- max_workers=1
- )
-
- t = threading.Thread(target=creator, name="CreatorThread")
- t.daemon = True
- t.start()
- t.join() # Creator thread dies
-
- executor = executor_container["executor"]
- # If the executor spawns worker threads when submit is called,
- # it might inherit from the CURRENT thread (main) instead of the creator thread.
- executor.submit(check_daemon).result()
-
-
-if __name__ == "__main__":
- test_failure_mimic()
diff --git a/faststack/repro_imports.py b/faststack/repro_imports.py
deleted file mode 100644
index 94b1f4f..0000000
--- a/faststack/repro_imports.py
+++ /dev/null
@@ -1,15 +0,0 @@
-try:
- from unittest.mock import MagicMock
-
- print("Success: from unittest.mock import MagicMock")
-except ImportError as e:
- print(f"Failed: {e}")
-
-try:
- import faststack.app
-
- print("Success: import faststack.app")
-except ImportError as e:
- print(f"Failed: import faststack.app: {e}")
-except Exception as e:
- print(f"Failed: import faststack.app error: {e}")
diff --git a/faststack/repro_type_error.py b/faststack/repro_type_error.py
deleted file mode 100644
index a536809..0000000
--- a/faststack/repro_type_error.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import sys
-from pathlib import Path
-
-# Ensure we can import faststack
-repo_root = str(Path(__file__).resolve().parent.parent)
-sys.path.insert(0, repo_root)
-
-from faststack.imaging.editor import ImageEditor
-from PIL import Image
-import numpy as np
-
-editor = ImageEditor()
-img = Image.new("RGB", (100, 100), (255, 0, 0))
-editor.original_image = img
-
-print("Calling _apply_edits...")
-try:
- res = editor._apply_edits(img)
- print(f"Result type: {type(res)}")
- if res is not None:
- print(
- f"Result shape/size: {getattr(res, 'shape', 'N/A')} / {getattr(res, 'size', 'N/A')}"
- )
- else:
- print("Result is None!")
-except Exception as e: # noqa: BLE001
- print(f"Caught exception: {type(e).__name__}: {e}")
- import traceback
-
- traceback.print_exc()
diff --git a/faststack/rotation_error.txt b/faststack/rotation_error.txt
deleted file mode 100644
index ab0537e..0000000
Binary files a/faststack/rotation_error.txt and /dev/null differ
diff --git a/faststack/test_prespawn_strategy.py b/faststack/test_prespawn_strategy.py
deleted file mode 100644
index 2f0e25a..0000000
--- a/faststack/test_prespawn_strategy.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import concurrent.futures
-import threading
-import time
-
-
-def check_daemon():
- print(
- f"Thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}"
- )
-
-
-def test_prespawn():
- print("Main thread daemon:", threading.current_thread().daemon)
- executor_container = {}
- max_workers = 4
-
- def creator():
- print(
- f"Creator thread {threading.current_thread().name} daemon: {threading.current_thread().daemon}"
- )
- executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
- executor_container["executor"] = executor
- # Force spawn all workers while we are in this daemon thread
- # We need to submit at least 'max_workers' tasks and wait for them to be
- # picked up by separate threads.
- futures = [executor.submit(time.sleep, 0.05) for _ in range(max_workers)]
- concurrent.futures.wait(futures)
- print("All workers spawned from daemon thread.")
-
- t = threading.Thread(target=creator, name="CreatorThread")
- t.daemon = True
- t.start()
- t.join()
-
- executor = executor_container["executor"]
- print("Main thread calling submit (which should reuse a daemon worker)...")
- executor.submit(check_daemon).result()
-
-
-if __name__ == "__main__":
- test_prespawn()
diff --git a/faststack/tests/test_cache_replacement_callback.py b/faststack/tests/test_cache_replacement_callback.py
index faacb7e..f269afb 100644
--- a/faststack/tests/test_cache_replacement_callback.py
+++ b/faststack/tests/test_cache_replacement_callback.py
@@ -158,8 +158,8 @@ def test_on_evict_fires_for_both_overflow_and_replacement():
# ── evict_paths + tombstones ────────────────────────────────────────
-def test_evict_paths_fires_callbacks():
- """evict_paths() should trigger on_evict for each removed key."""
+def test_evict_paths_suppresses_callbacks():
+ """evict_paths() should NOT trigger on_evict (intentional removal, not LRU)."""
evicted = []
cache = _make_cache(10_000, lambda k, v: evicted.append((k, v)))
@@ -171,10 +171,13 @@ def test_evict_paths_fires_callbacks():
cache.evict_paths([Path("photo.jpg")])
- evicted_keys = {k for k, _ in evicted}
- assert "photo.jpg::0" in evicted_keys
- assert "photo.jpg::1" in evicted_keys
- assert "other.jpg::0" not in evicted_keys
+ # Keys should be removed from cache
+ assert "photo.jpg::0" not in cache
+ assert "photo.jpg::1" not in cache
+ assert "other.jpg::0" in cache
+
+ # But on_evict should NOT have been called (intentional removal)
+ assert len(evicted) == 0
def test_evict_paths_tombstone_blocks_reinsert():
diff --git a/faststack/tests/thumbnail_view/test_folder_stats.py b/faststack/tests/thumbnail_view/test_folder_stats.py
index 088ee90..90866e9 100644
--- a/faststack/tests/thumbnail_view/test_folder_stats.py
+++ b/faststack/tests/thumbnail_view/test_folder_stats.py
@@ -270,7 +270,7 @@ def test_single_file_uploaded(self):
buckets = _compute_coverage_buckets(jpg_files, entries, num_buckets=1)
assert len(buckets) == 1
- assert buckets[0] == (1.0, 0.0) # uploaded, not stacked
+ assert buckets[0] == (1.0, 0.0, 0.0) # uploaded, not stacked, not todo
def test_single_file_stacked(self):
"""Test with single stacked file."""
@@ -280,7 +280,7 @@ def test_single_file_stacked(self):
buckets = _compute_coverage_buckets(jpg_files, entries, num_buckets=1)
assert len(buckets) == 1
- assert buckets[0] == (0.0, 1.0) # not uploaded, stacked
+ assert buckets[0] == (0.0, 1.0, 0.0) # not uploaded, stacked, not todo
def test_even_distribution(self):
"""Test even distribution across buckets."""
diff --git a/faststack/thumbnail_view/folder_stats.py b/faststack/thumbnail_view/folder_stats.py
index 33f1c99..8711f06 100644
--- a/faststack/thumbnail_view/folder_stats.py
+++ b/faststack/thumbnail_view/folder_stats.py
@@ -25,10 +25,10 @@ class FolderStats:
# Named 'jpg_count' for historical reasons; displayed as "IMG" in UI
jpg_count: int = 0
raw_count: int = 0
- # Coverage sparkline data: list of (upload_ratio, stack_ratio) tuples per bucket
+ # Coverage sparkline data: list of (upload_ratio, stack_ratio, todo_ratio) tuples per bucket
# Each ratio is 0.0-1.0, representing the fraction of JPGs in that bucket
# that have the flag set. Empty list if no faststack.json or no JPGs.
- coverage_buckets: list[tuple[float, float]] = field(default_factory=list)
+ coverage_buckets: list[tuple[float, float, float]] = field(default_factory=list)
# Cache by (folder_path, json_mtime_ns, folder_mtime_ns) to avoid re-parsing during scroll
@@ -216,9 +216,9 @@ def _parse_faststack_json(json_path: Path) -> Optional[FolderStats]:
def _compute_coverage_buckets(
jpg_files: list, entries: Dict[str, dict], num_buckets: int = 40
) -> list:
- """Compute coverage sparkline buckets for uploads and stacks.
+ """Compute coverage sparkline buckets for uploads, stacks, and todos.
- Returns a list of (upload_ratio, stack_ratio) tuples, one per bucket.
+ Returns a list of (upload_ratio, stack_ratio, todo_ratio) tuples, one per bucket.
Each ratio is 0.0-1.0, representing the fraction of JPGs in that bucket
with the respective flag set.
@@ -228,7 +228,7 @@ def _compute_coverage_buckets(
num_buckets: Number of buckets to divide files into (default 40)
Returns:
- List of (upload_ratio, stack_ratio) tuples, or empty list if no JPGs.
+ List of (upload_ratio, stack_ratio, todo_ratio) tuples, or empty list if no JPGs.
"""
if not jpg_files:
return []
@@ -238,8 +238,8 @@ def _compute_coverage_buckets(
num_buckets = total_files
# Single-pass accumulation into buckets to avoid redundant list processing
- # Each entry is [uploaded_count, stacked_count, total_in_bucket]
- accumulators = [[0, 0, 0] for _ in range(num_buckets)]
+ # Each entry is [uploaded_count, stacked_count, todo_count, total_in_bucket]
+ accumulators = [[0, 0, 0, 0] for _ in range(num_buckets)]
for i, filename in enumerate(jpg_files):
# Map file index to bucket index using floor division
@@ -254,16 +254,18 @@ def _compute_coverage_buckets(
accumulators[bucket_idx][0] += 1
if meta.get("stacked", False):
accumulators[bucket_idx][1] += 1
+ if meta.get("todo", False):
+ accumulators[bucket_idx][2] += 1
- accumulators[bucket_idx][2] += 1
+ accumulators[bucket_idx][3] += 1
# Convert counts to ratios
buckets = []
- for uploaded, stacked, count in accumulators:
+ for uploaded, stacked, todo, count in accumulators:
if count == 0:
- buckets.append((0.0, 0.0))
+ buckets.append((0.0, 0.0, 0.0))
else:
- buckets.append((uploaded / count, stacked / count))
+ buckets.append((uploaded / count, stacked / count, todo / count))
return buckets
diff --git a/faststack/thumbnail_view/model.py b/faststack/thumbnail_view/model.py
index f663bde..3ce0fa9 100644
--- a/faststack/thumbnail_view/model.py
+++ b/faststack/thumbnail_view/model.py
@@ -80,6 +80,7 @@ class ThumbnailEntry:
is_edited: bool = False
is_restacked: bool = False
is_favorite: bool = False
+ is_todo: bool = False
folder_stats: Optional[FolderStats] = None
has_backups: bool = False
has_developed: bool = False
@@ -116,6 +117,7 @@ class ThumbnailModel(QAbstractListModel):
IsFavoriteRole = Qt.ItemDataRole.UserRole + 17
HasBackupsRole = Qt.ItemDataRole.UserRole + 18
HasDevelopedRole = Qt.ItemDataRole.UserRole + 19
+ IsTodoRole = Qt.ItemDataRole.UserRole + 20
# Signal emitted when a thumbnail is ready (id = "{size}/{path_hash}/{mtime_ns}")
thumbnailReady = Signal(str)
@@ -232,6 +234,8 @@ def data(self, index: QModelIndex, role: int = Qt.ItemDataRole.DisplayRole):
return entry.is_restacked
elif role == self.IsFavoriteRole:
return entry.is_favorite
+ elif role == self.IsTodoRole:
+ return entry.is_todo
elif role == self.IsInBatchRole:
# Check if this row's corresponding loupe index is in any batch
if self._get_batch_indices and not entry.is_folder:
@@ -260,7 +264,10 @@ def _get_loupe_index_for_entry(self, entry: ThumbnailEntry) -> Optional[int]:
# We'll use the parent (AppController) to look this up
parent = self.parent()
if parent and hasattr(parent, "_path_to_index"):
- return parent._path_to_index.get(entry.path.resolve())
+ # Must use the same key format as _rebuild_path_to_index (abspath,
+ # not realpath/resolve) so the lookup hits on Windows and Linux.
+ key = os.path.normcase(os.path.abspath(str(entry.path)))
+ return parent._path_to_index.get(key)
return None
def roleNames(self) -> Dict[int, bytes]:
@@ -285,6 +292,7 @@ def roleNames(self) -> Dict[int, bytes]:
self.IsFavoriteRole: b"isFavorite",
self.HasBackupsRole: b"hasBackups",
self.HasDevelopedRole: b"hasDeveloped",
+ self.IsTodoRole: b"isTodo",
}
def _get_thumbnail_source(
@@ -597,6 +605,7 @@ def _add_images_to_entries(
is_edited = False
is_restacked = False
is_favorite = False
+ is_todo = False
if metadata_map:
meta = metadata_map.get(img.path.stem, {})
@@ -605,6 +614,7 @@ def _add_images_to_entries(
is_edited = meta.get("edited", False)
is_restacked = meta.get("restacked", False)
is_favorite = meta.get("favorite", False)
+ is_todo = meta.get("todo", False)
elif self._get_metadata:
try:
meta = self._get_metadata(img.path.stem)
@@ -614,6 +624,7 @@ def _add_images_to_entries(
is_edited = meta.get("edited", False)
is_restacked = meta.get("restacked", False)
is_favorite = meta.get("favorite", False)
+ is_todo = meta.get("todo", False)
else:
log.debug(
"Metadata for %s is not a dict: %r", img.path.stem, meta
@@ -634,6 +645,7 @@ def _add_images_to_entries(
is_edited=is_edited,
is_restacked=is_restacked,
is_favorite=is_favorite,
+ is_todo=is_todo,
has_backups=has_backups,
has_developed=has_developed,
mtime_ns=mtime_ns,
diff --git a/faststack/traceback.txt b/faststack/traceback.txt
deleted file mode 100644
index b12d998..0000000
Binary files a/faststack/traceback.txt and /dev/null differ
diff --git a/faststack/ui/keystrokes.py b/faststack/ui/keystrokes.py
index d65f4d1..7807528 100644
--- a/faststack/ui/keystrokes.py
+++ b/faststack/ui/keystrokes.py
@@ -39,6 +39,7 @@ def __init__(self, controller):
# Toggle flags
Qt.Key_U: "toggle_uploaded",
Qt.Key_F: "toggle_favorite",
+ Qt.Key_D: "toggle_todo",
Qt.Key_I: "show_exif_dialog",
# Actions
Qt.Key_Enter: "launch_helicon",
diff --git a/faststack/ui/provider.py b/faststack/ui/provider.py
index 762f6a2..b5e6ffc 100644
--- a/faststack/ui/provider.py
+++ b/faststack/ui/provider.py
@@ -455,6 +455,18 @@ def uploadedDate(self):
return ""
return self.app_controller.get_current_metadata().get("uploaded_date", "")
+ @Property(bool, notify=metadataChanged)
+ def isTodo(self):
+ if not self.app_controller.image_files:
+ return False
+ return self.app_controller.get_current_metadata().get("todo", False)
+
+ @Property(str, notify=metadataChanged)
+ def todoDate(self):
+ if not self.app_controller.image_files:
+ return ""
+ return self.app_controller.get_current_metadata().get("todo_date", "")
+
@Property(str, notify=metadataChanged)
def batchInfoText(self):
if not self.app_controller.image_files:
@@ -1605,10 +1617,14 @@ def hasRecycleBinItems(self):
return len(stats) > 0
@Slot()
- def cleanupRecycleBins(self):
- """Deletes all tracked recycle bins."""
- self.app_controller.cleanup_recycle_bins()
-
+ def refreshRecycleBinStats(self):
+ """Notify QML that recycle-bin properties should be re-read."""
self.recycleBinStatsTextChanged.emit()
self.recycleBinDetailedTextChanged.emit()
self.hasRecycleBinItemsChanged.emit()
+
+ @Slot()
+ def cleanupRecycleBins(self):
+ """Deletes all tracked recycle bins."""
+ self.app_controller.cleanup_recycle_bins()
+ self.refreshRecycleBinStats()
diff --git a/faststack/verify_cache_fix.py b/faststack/verify_cache_fix.py
deleted file mode 100644
index dafb102..0000000
--- a/faststack/verify_cache_fix.py
+++ /dev/null
@@ -1,60 +0,0 @@
-import sys
-import os
-from pathlib import Path
-
-# Add current dir to path
-sys.path.append(os.getcwd())
-
-from imaging.cache import ByteLRUCache
-from models import DecodedImage
-import numpy as np
-
-
-def test_cache():
- evicted = []
-
- def on_evict(k, v):
- evicted.append((k, v))
- print(f"Evicted: {k}")
-
- cache = ByteLRUCache(max_bytes=100, size_of=sys.getsizeof, on_evict=on_evict)
- img1 = DecodedImage(
- buffer=memoryview(np.zeros(60, dtype=np.uint8)),
- width=60,
- height=1,
- bytes_per_line=60,
- format="dummy_format",
- )
- img2 = DecodedImage(
- buffer=memoryview(np.zeros(60, dtype=np.uint8)),
- width=60,
- height=1,
- bytes_per_line=60,
- format="dummy_format",
- )
-
- cache["k1"] = img1
- print("Added k1")
- cache["k2"] = img2
- print("Added k2")
-
- assert len(evicted) == 1
- assert evicted[0][0] == "k1"
- print("Eviction verified!")
-
- cache.popitem()
- assert len(evicted) == 2
- assert evicted[1][0] == "k2"
- print("Popitem verification passed!")
-
-
-if __name__ == "__main__":
- try:
- test_cache()
- print("STANDALONE TEST PASSED")
- except Exception as e:
- print(f"TEST FAILED: {e}")
- import traceback
-
- traceback.print_exc()
- sys.exit(1)
diff --git a/faststack/verify_wb.py b/faststack/verify_wb.py
deleted file mode 100644
index ffd0573..0000000
--- a/faststack/verify_wb.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import numpy as np
-from PIL import Image
-from faststack.imaging.editor import ImageEditor
-import os
-
-
-def test_white_balance():
- editor = ImageEditor()
-
- # 1. Test Black Preservation
- # Create a purely black image
- black_img = Image.new("RGB", (100, 100), (0, 0, 0))
- black_path = "test_black.jpg"
- black_img.save(black_path)
-
- editor.load_image(black_path)
-
- # Apply strong temperature and tint
- editor.set_edit_param("white_balance_by", 1.0) # Max Warm
- editor.set_edit_param("white_balance_mg", 1.0) # Max Magenta
-
- # Get processed image
- # We need to access the internal method or use save, but let's use _apply_edits directly for testing
- # editor.original_image is loaded.
- processed_img = editor._apply_edits(editor.original_image.copy())
- arr = np.array(processed_img)
-
- # Check max value - should still be 0 or very close to it
- max_val = arr.max()
- print(f"Black Image Max Value after WB: {max_val}")
-
- if max_val > 0:
- print("FAIL: Black level not preserved!")
- else:
- print("PASS: Black level preserved.")
-
- # 2. Test Grey Shift
- # Create a mid-grey image
- grey_img = Image.new("RGB", (100, 100), (128, 128, 128))
- grey_path = "test_grey.jpg"
- grey_img.save(grey_path)
-
- editor.load_image(grey_path)
- editor.set_edit_param("white_balance_by", 0.5) # Warm
- # r_gain = 1 + 0.25 = 1.25 -> 128 * 1.25 = 160
- # b_gain = 1 - 0.25 = 0.75 -> 128 * 0.75 = 96
-
- processed_img = editor._apply_edits(editor.original_image.copy())
- arr = np.array(processed_img)
- r, g, b = arr[0, 0]
- print(f"Grey Image RGB after Warm shift: R={r}, G={g}, B={b}")
-
- if r > 128 and b < 128:
- print("PASS: Grey shifted warm correctly.")
- else:
- print("FAIL: Grey did not shift as expected.")
-
- # Cleanup
- for path in [black_path, grey_path]:
- try:
- os.remove(path)
- except OSError:
- pass # File may not exist or be locked
-
-
-if __name__ == "__main__":
- test_white_balance()
diff --git a/scripts/smoke_verify.py b/scripts/smoke_verify.py
deleted file mode 100644
index bf06886..0000000
--- a/scripts/smoke_verify.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import sys
-import importlib.resources
-
-
-def check_imports():
- print("Checking imports...")
- try:
- import faststack
- import faststack.ui
- import faststack.io
- import faststack.imaging
- import faststack.app
-
- print(" [OK] Imports successful")
- except ImportError as e:
- print(f" [FAIL] Import failed: {e}")
- return False
- return True
-
-
-def check_cli():
- print("Checking CLI entry point...")
- try:
- from faststack.app import cli
-
- if not callable(cli):
- print(" [FAIL] faststack.app.cli is not callable")
- return False
- print(" [OK] faststack.app.cli found")
- except ImportError:
- print(" [FAIL] Could not import faststack.app.cli")
- return False
- except Exception as e:
- print(f" [FAIL] Error checking CLI: {e}")
- return False
- return True
-
-
-def check_assets():
- print("Checking assets (QML files)...")
- try:
- # For Python 3.9+ standard library importlib.resources
- # We look for any .qml file in faststack package
- qml_files = list(importlib.resources.files("faststack").rglob("*.qml"))
- count = len(qml_files)
- if count > 0:
- print(f" [OK] Found {count} QML files")
- for p in qml_files[:3]:
- print(f" - {p.name}")
- else:
- print(" [FAIL] No QML files found in package resources!")
- print(
- " (Did you include package_data in pyproject.toml / MANIFEST.in?)"
- )
- return False
- except Exception as e:
- print(f" [FAIL] Asset check failed: {e}")
- return False
- return True
-
-
-def main():
- print("=== FastStack Smoke Verification ===")
- print(f"Python: {sys.version}")
-
- if not check_imports():
- sys.exit(1)
-
- if not check_cli():
- sys.exit(1)
-
- if not check_assets():
- sys.exit(1)
-
- print("\n[SUCCESS] faststack package seems healthy.")
-
-
-if __name__ == "__main__":
- main()