diff --git a/backend/auth.py b/backend/auth.py
index 3e4a6bf..511f234 100644
--- a/backend/auth.py
+++ b/backend/auth.py
@@ -10,29 +10,29 @@ from fastapi import Depends, HTTPException, status
 from fastapi.security import OAuth2PasswordBearer

+logger = logging.getLogger(__name__)
+

 def _resolve_secret_key() -> tuple[str, bool]:
     configured = str(os.getenv("SECRET_KEY") or "").strip()
     if configured:
         return configured, False

-    fallback_path = Path(
-        os.getenv(
-            "SECRET_KEY_FILE",
-            str(Path(os.getenv("TEMP") or "/tmp") / "codeai_jwt_secret.key"),
-        )
+    configured_file = str(os.getenv("SECRET_KEY_FILE") or "").strip()
+    if configured_file:
+        fallback_path = Path(configured_file).expanduser()
+        try:
+            if fallback_path.exists() and fallback_path.is_file():
+                cached_secret = fallback_path.read_text(encoding="utf-8").strip()
+                if cached_secret:
+                    return cached_secret, True
+        except Exception:
+            pass
+
+    logger.error(
+        "SECRET_KEY/SECRET_KEY_FILE is not configured; generating ephemeral runtime secret that invalidates tokens on restart."
     )
-    try:
-        fallback_path.parent.mkdir(parents=True, exist_ok=True)
-        if fallback_path.exists():
-            cached_secret = fallback_path.read_text(encoding="utf-8").strip()
-            if cached_secret:
-                return cached_secret, True
-        generated_secret = secrets.token_urlsafe(48)
-        fallback_path.write_text(generated_secret, encoding="utf-8")
-        return generated_secret, True
-    except Exception:
-        return secrets.token_urlsafe(48), True
+    return secrets.token_urlsafe(48), True


 SECRET_KEY, SECRET_KEY_IS_RUNTIME_FALLBACK = _resolve_secret_key()

@@ -42,7 +42,6 @@ def _resolve_secret_key() -> tuple[str, bool]:
 )

 oauth2_scheme = OAuth2PasswordBearer(tokenUrl="/api/auth/login")
-logger = logging.getLogger(__name__)


 def get_password_hash(password: str) -> str:
diff --git a/backend/llm/orchestrator.py b/backend/llm/orchestrator.py
index 0bf7c48..55d5a0f 100644
--- a/backend/llm/orchestrator.py
+++ b/backend/llm/orchestrator.py
@@ -2684,6 +2684,7 @@ class OrchestrationAcceptedResponse(BaseModel):


 _ORCHESTRATION_PROGRESS_STORE: Dict[str, Dict[str, Any]] = {}
+_ORCHESTRATION_PROGRESS_FILE_LOCK = threading.Lock()


 def _runtime_progress_root() -> Path:
@@ -2694,9 +2695,8 @@ def _runtime_progress_root() -> Path:
     return progress_root


-def _orchestration_progress_path(run_id: str) -> Path:
-    safe_run_id = re.sub(r"[^a-zA-Z0-9_.-]+", "-", str(run_id or "unknown")).strip("-") or "unknown"
-    return _runtime_progress_root() / f"{safe_run_id}.json"
+def _orchestration_progress_store_path() -> Path:
+    return _runtime_progress_root() / "progress_store.json"


 def _build_progress_poll_url(run_id: str) -> str:
@@ -2712,8 +2712,23 @@ def _save_orchestration_progress(run_id: str, payload: Dict[str, Any]) -> Dict[str, Any]:
     normalized["run_id"] = str(run_id or normalized.get("run_id") or "")
     normalized.setdefault("updated_at", datetime.utcnow().isoformat() + "Z")
     _ORCHESTRATION_PROGRESS_STORE[normalized["run_id"]] = normalized
-    progress_path = _orchestration_progress_path(normalized["run_id"])
-    progress_path.write_text(json.dumps(normalized, ensure_ascii=False, indent=2), encoding="utf-8")
+    progress_path = _orchestration_progress_store_path()
+    with _ORCHESTRATION_PROGRESS_FILE_LOCK:
+        persisted_payload: Dict[str, Any] = {}
+        try:
+            if progress_path.exists() and progress_path.is_file():
+                existing_payload = json.loads(progress_path.read_text(encoding="utf-8"))
+                if isinstance(existing_payload, dict):
+                    persisted_payload = dict(existing_payload)
+        except Exception:
+            logger.warning(
+                "Failed to read orchestration progress store from %s before write",
+                str(progress_path),
+                exc_info=True,
+            )
+            persisted_payload = {}
+        persisted_payload[normalized["run_id"]] = normalized
+        progress_path.write_text(json.dumps(persisted_payload, ensure_ascii=False, indent=2), encoding="utf-8")
     return normalized


@@ -2721,14 +2736,18 @@ def _load_orchestration_progress(run_id: str) -> Dict[str, Any]:
     cached = _ORCHESTRATION_PROGRESS_STORE.get(str(run_id or ""))
     if isinstance(cached, dict) and cached:
         return dict(cached)
-    progress_path = _orchestration_progress_path(run_id)
+    progress_path = _orchestration_progress_store_path()
     try:
-        if progress_path.exists() and progress_path.is_file():
-            payload = json.loads(progress_path.read_text(encoding="utf-8"))
-            if isinstance(payload, dict):
-                _ORCHESTRATION_PROGRESS_STORE[str(run_id or "")] = dict(payload)
-                return dict(payload)
+        with _ORCHESTRATION_PROGRESS_FILE_LOCK:
+            if progress_path.exists() and progress_path.is_file():
+                payload = json.loads(progress_path.read_text(encoding="utf-8"))
+                if isinstance(payload, dict):
+                    stored = payload.get(str(run_id or ""))
+                    if isinstance(stored, dict):
+                        _ORCHESTRATION_PROGRESS_STORE[str(run_id or "")] = dict(stored)
+                        return dict(stored)
     except Exception:
+        logger.error("Failed to load orchestration progress for run_id=%s", str(run_id or ""), exc_info=True)
         return {}
     return {}

diff --git a/backend/main.py b/backend/main.py
index 65c89cc..f520ea4 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -1031,6 +1031,8 @@ def _runtime_health_payload() -> Dict[str, Any]:
         from backend.marketplace.router import get_ad_queue_runtime_status
         queue_runtime = get_ad_queue_runtime_status()
     except Exception as exc:
+        logger.exception("Failed to load ad queue runtime status")
+        safe_queue_error = "queue_runtime_unavailable"
         queue_runtime = {
             "redis_queue": {
                 "available": False,
@@ -1038,7 +1040,7 @@
                 "note": "Redis queue 진단을 로드하지 못했습니다.",
                 "connection_id": "redis:video_render_queue",
                 "queue_name": "video_render_queue",
-                "error": str(exc),
+                "error": safe_queue_error,
             },
             "ad_worker": {
                 "available": False,
@@ -1047,7 +1049,7 @@
                 "connection_id": "redis:video_render_queue",
                 "queue_name": "video_render_queue",
                 "worker_id": "ad-render-worker-001",
-                "error": str(exc),
+                "error": safe_queue_error,
             },
         }
     redis_queue = queue_runtime.get("redis_queue", {})
diff --git a/backend/marketplace/router.py b/backend/marketplace/router.py
index fc322c7..6d8e5b4 100644
--- a/backend/marketplace/router.py
+++ b/backend/marketplace/router.py
@@ -595,15 +595,20 @@ def _persist_progress(*, percent: int, step: str, state: str, message: str) -> None:
             ),
         )
     except Exception as exc:
+        logger.exception(
+            "Marketplace feature orchestrate stream failed run_id=%s",
+            request.run_id,
+        )
+        public_error_message = "라이브뷰 실행 중 오류가 발생했습니다. 잠시 후 다시 시도해주세요."
         local_metadata["popup_state"] = "failed"
         local_metadata["last_event"] = "failed"
-        local_metadata["error"] = str(exc)
+        local_metadata["error"] = public_error_message
         local_metadata["updated_at"] = _utc_now_iso()
-        _persist_progress(percent=100, step="failed", state="failed", message=str(exc))
+        _persist_progress(percent=100, step="failed", state="failed", message=public_error_message)
         local_stage_run = _set_feature_metadata(local_stage_run, local_metadata)
-        local_stage_run = _apply_feature_popup_state(local_stage_run, "failed", str(exc))
+        local_stage_run = _apply_feature_popup_state(local_stage_run, "failed", public_error_message)
         save_stage_run(local_stage_run)
-        yield _build_feature_sse_event("failed", {"run_id": request.run_id, "state": "failed", "message": str(exc)})
+        yield _build_feature_sse_event("failed", {"run_id": request.run_id, "state": "failed", "message": public_error_message})
         yield _build_feature_sse_event(
             "progress",
             _build_feature_progress_payload(
@@ -611,7 +616,7 @@ def _persist_progress(*, percent: int, step: str, state: str, message: str) -> None:
                 percent=100,
                 step="failed",
                 state="failed",
-                message=str(exc),
+                message=public_error_message,
             ),
         )

diff --git a/frontend/frontend/hooks/use-feature-orchestrator.ts b/frontend/frontend/hooks/use-feature-orchestrator.ts
index ad39af5..7cb8b72 100644
--- a/frontend/frontend/hooks/use-feature-orchestrator.ts
+++ b/frontend/frontend/hooks/use-feature-orchestrator.ts
@@ -358,7 +358,7 @@ function buildDefaultCatalogItem(featureId: string): FeatureCatalogItem {
   const meta = FEATURE_EXPERIENCE_META[featureId] || FEATURE_EXPERIENCE_META['ai-sheet'];
   return {
     feature_id: featureId,
-    title: meta.popupKicker.replace('AI ', 'AI '),
+    title: meta.popupKicker,
     summary: meta.launcherSummary,
     popup_mode: preset.contextTags[1] || meta.outputKind,
     status: 'enabled',
@@ -958,4 +958,4 @@ export function useFeatureOrchestrator() {
     progressSnapshot,
     progressHistory,
   };
-}
\ No newline at end of file
+}
diff --git a/gpu-llm-server/custom-server/server.py b/gpu-llm-server/custom-server/server.py
index 606bccf..1cb3cf0 100644
--- a/gpu-llm-server/custom-server/server.py
+++ b/gpu-llm-server/custom-server/server.py
@@ -176,7 +176,7 @@ def load_model():
         logger.error(f"Failed to load model: {e}")
         model = None
         tokenizer = None
-        model_load_error = str(e)
+        model_load_error = "model_load_failed"
         logger.warning("Server will stay up without a loaded model.")