From 1270b5f59e2b2cf12d9316fbc4d0b4cab8dee4a5 Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Thu, 13 Nov 2025 15:57:07 +0000 Subject: [PATCH 1/8] backup WIP qwen-npu-decoding changes --- .../qwen_npu_decoding_git_workflow.md | 686 +++++++++ .../qwen_npu_decoding_requirements.md | 510 +++++++ docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md | 453 ++++++ docs/qnn_fix_bug/adb_output.md | 1343 +++++++++++++++++ examples/qwen_npu/CODE_EXPLANATION.md | 951 ++++++++++++ examples/qwen_npu/main.cpp | 135 +- mllm/backends/cpu/ops/KVCacheOp.cpp | 2 + mllm/backends/cpu/ops/KVCacheOp.hpp | 2 + mllm/backends/qnn/QNNAllocator.cpp | 361 ++++- mllm/backends/qnn/QNNAllocator.hpp | 31 +- mllm/backends/qnn/QNNBackend.cpp | 2 + mllm/backends/qnn/QNNUtils.cpp | 48 +- mllm/backends/qnn/QNNUtils.hpp | 5 +- mllm/core/aops/KVCacheOp.hpp | 6 +- mllm/models/qwen_npu/modeling_qwen_npu.hpp | 7 + mllm/nn/layers/KVCache.cpp | 4 + mllm/nn/layers/KVCache.hpp | 3 + tests/qnn/CMakeLists.txt | 9 + tests/qnn/QNNOutputOrderTest.cpp | 97 ++ 19 files changed, 4609 insertions(+), 46 deletions(-) create mode 100644 docs/qnn_backend/qwen_npu_decoding_git_workflow.md create mode 100644 docs/qnn_backend/qwen_npu_decoding_requirements.md create mode 100644 docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md create mode 100644 docs/qnn_fix_bug/adb_output.md create mode 100644 examples/qwen_npu/CODE_EXPLANATION.md create mode 100644 tests/qnn/CMakeLists.txt create mode 100644 tests/qnn/QNNOutputOrderTest.cpp diff --git a/docs/qnn_backend/qwen_npu_decoding_git_workflow.md b/docs/qnn_backend/qwen_npu_decoding_git_workflow.md new file mode 100644 index 000000000..e15481b9e --- /dev/null +++ b/docs/qnn_backend/qwen_npu_decoding_git_workflow.md @@ -0,0 +1,686 @@ +# QNN Decoding 功能开发 - Git 工作流指南 + +本文档提供 QNN Decoding 功能开发的完整 Git 工作流,从创建功能分支到提交 PR 的每一步都有详细说明。 + +## 📋 目录 + +1. [前置准备](#前置准备) +2. [阶段 1: 创建功能分支](#阶段-1-创建功能分支) +3. [阶段 2: 日常开发流程](#阶段-2-日常开发流程) +4. [阶段 3: 提交和推送](#阶段-3-提交和推送) +5. 
[阶段 4: 创建 Pull Request](#阶段-4-创建-pull-request) +6. [常见问题](#常见问题) +7. [快速参考命令](#快速参考命令) + +--- + +## 前置准备 + +### 1. 检查 Git 配置 + +确保 Git 已配置用户信息: + +```bash +# 检查当前配置 +git config user.name +git config user.email + +# 如果未配置,设置全局配置 +git config --global user.name "你的名字" +git config --global user.email "你的邮箱" +``` + +### 2. 检查远程仓库配置 + +```bash +# 查看远程仓库 +git remote -v +``` + +**期望输出:** +``` +origin git@github.com:jialilve/mllm.git (fetch) +origin git@github.com:jialilve/mllm.git (push) +upstream https://github.com/UbiquitousLearning/mllm.git (fetch) +upstream https://github.com/UbiquitousLearning/mllm.git (push) +``` + +**如果没有 upstream,添加它:** +```bash +git remote add upstream https://github.com/UbiquitousLearning/mllm.git +``` + +### 3. 检查当前状态 + +```bash +# 查看当前分支 +git branch + +# 查看当前状态 +git status + +# 查看最近的提交历史 +git log --oneline -5 +``` + +--- + +## 阶段 1: 创建功能分支 + +### 步骤 1.1: 同步 upstream 最新代码 + +在创建功能分支之前,确保基于最新的 upstream/v2 代码: + +```bash +# 1. 获取 upstream 的最新更改 +git fetch upstream + +# 2. 查看 upstream/v2 和本地 v2 的差异(可选) +git log v2..upstream/v2 --oneline + +# 3. 如果 upstream 有更新,同步到本地 v2(可选,用于保持本地 v2 最新) +git checkout v2 +git merge upstream/v2 +# 或者使用 rebase(更推荐,保持提交历史整洁) +# git rebase upstream/v2 +``` + +### 步骤 1.2: 创建功能分支 + +**重要:** 功能分支应该基于 `upstream/v2` 创建,而不是 `origin/v2` 或本地 `v2`。 + +```bash +# 创建并切换到新功能分支 +git checkout -b feature/qwen-npu-decoding upstream/v2 +``` + +**分支命名规范:** +- `feature/` - 新功能 +- `fix/` - 修复 bug +- `refactor/` - 重构 +- `docs/` - 文档更新 + +**示例:** +- ✅ `feature/qwen-npu-decoding` - 新功能 +- ✅ `fix/qnn-kv-cache-sync` - 修复 +- ❌ `my-branch` - 不推荐,不够描述性 + +### 步骤 1.3: 验证分支状态 + +```bash +# 确认当前在功能分支上 +git branch + +# 应该显示 * feature/qwen-npu-decoding + +# 查看分支基于哪个提交 +git log --oneline -1 + +# 查看与 upstream/v2 的关系 +git log --oneline --graph --decorate -5 +``` + +--- + +## 阶段 2: 日常开发流程 + +### 2.1 开始开发 + +在功能分支上进行开发: + +```bash +# 确认在功能分支上 +git branch + +# 开始编辑文件、添加代码等 +# ... 
+``` + +### 2.2 查看修改状态 + +定期检查你的修改: + +```bash +# 查看哪些文件被修改 +git status + +# 查看具体的修改内容 +git diff + +# 查看某个文件的修改 +git diff <文件路径> + +# 查看已暂存和未暂存的修改 +git diff --staged # 已暂存 +git diff # 未暂存 +``` + +### 2.3 暂存修改(准备提交) + +```bash +# 暂存所有修改 +git add . + +# 或者暂存特定文件 +git add <文件路径1> <文件路径2> + +# 或者暂存特定目录 +git add <目录路径>/ + +# 查看暂存的文件 +git status +``` + +**最佳实践:** +- 相关修改一起提交(例如:接口定义和实现一起提交) +- 不相关的修改分开提交 +- 每次提交应该是一个逻辑完整的改动 + +### 2.4 提交修改 + +```bash +# 提交暂存的修改 +git commit -m "提交信息" +``` + +**提交信息规范:** + +格式:`<类型>: <简短描述>` + +**类型:** +- `feat`: 新功能 +- `fix`: 修复 bug +- `docs`: 文档更新 +- `refactor`: 重构 +- `test`: 测试相关 +- `chore`: 构建/工具相关 + +**示例:** + +```bash +# 好的提交信息 +git commit -m "feat: add KV cache interface extension for Qwen NPU decoding" +git commit -m "fix: correct position_ids handling in decode loop" +git commit -m "docs: update decoding requirements document" + +# 多行提交信息(推荐用于复杂改动) +git commit -m "feat: implement decoding loop for Qwen NPU + +- Add KV cache sequence count management +- Implement decode loop with position_ids handling +- Add EOS token termination check +- Update forward method to support decode phase" +``` + +**不好的提交信息:** +```bash +# ❌ 太简单 +git commit -m "update" + +# ❌ 不够描述性 +git commit -m "fix bug" + +# ❌ 使用中文(除非项目要求) +git commit -m "修复问题" +``` + +--- + +## 阶段 3: 提交和推送 + +### 3.1 提交到本地仓库 + +```bash +# 提交修改 +git add . +git commit -m "feat: your commit message" +``` + +### 3.2 推送到 Fork(origin) + +**第一次推送:** + +```bash +# 推送功能分支到 origin(你的 Fork) +git push -u origin feature/qwen-npu-decoding +``` + +`-u` 参数设置上游分支,之后可以直接使用 `git push`。 + +**后续推送:** + +```bash +# 如果已设置上游分支 +git push + +# 或者明确指定 +git push origin feature/qwen-npu-decoding +``` + +### 3.3 处理推送冲突 + +如果 upstream 有更新,你的分支可能落后: + +```bash +# 1. 获取最新代码 +git fetch upstream + +# 2. 在功能分支上 rebase upstream/v2 +git rebase upstream/v2 + +# 3. 如果有冲突,解决冲突后继续 +# 解决冲突后: +git add <冲突文件> +git rebase --continue + +# 4. 如果 rebase 过程中想取消 +git rebase --abort + +# 5. 
强制推送(因为 rebase 改变了历史) +git push --force-with-lease origin feature/qwen-npu-decoding +``` + +**注意:** 使用 `--force-with-lease` 比 `--force` 更安全,它会检查远程分支是否有其他人的提交。 + +--- + +## 阶段 4: 创建 Pull Request + +### 4.1 推送功能分支 + +确保所有修改都已提交并推送: + +```bash +# 检查状态 +git status + +# 如果有未提交的修改,先提交 +git add . +git commit -m "feat: final changes" + +# 推送到 Fork +git push origin feature/qwen-npu-decoding +``` + +### 4.2 在 GitHub 上创建 PR + +#### 方法 1: 通过 GitHub Web 界面 + +1. **访问你的 Fork 仓库:** + ``` + https://github.com/jialilve/mllm + ``` + +2. **你会看到提示创建 PR:** + - GitHub 通常会在你推送新分支后显示提示 + - 点击 "Compare & pull request" 按钮 + +3. **或者手动创建:** + - 点击 "Pull requests" 标签 + - 点击 "New pull request" + - 选择: + - **base repository:** `UbiquitousLearning/mllm` + - **base branch:** `v2` + - **compare repository:** `jialilve/mllm` + - **compare branch:** `feature/qwen-npu-decoding` + +#### 方法 2: 使用 GitHub CLI(如果已安装) + +```bash +# 创建 PR +gh pr create --base v2 --head jialilve:feature/qwen-npu-decoding --title "feat: Qwen NPU Decoding Support" --body "PR描述内容" +``` + +### 4.3 编写 PR 描述 + +**PR 标题格式:** +``` +feat: Qwen NPU Decoding Support +``` + +**PR 描述模板:** + +```markdown +## 功能描述 +实现 Qwen NPU 自回归解码功能,支持连续 token 生成。 + +## 主要改动 +- 扩展 KV Cache 接口,支持序列长度管理 +- 实现解码循环,支持 position_ids 自动递增 +- 添加 EOS token 终止检查 +- 更新 forward 方法以支持 decode 阶段 + +## 实现细节 +- 在 `QwenForCausalLM` 中添加 `setKVCacheSeqCnt` 方法 +- 实现基于 128 长度 KV cache 的解码循环 +- 正确处理 position_ids 的传递和递增 + +## 测试 +- [x] 编译通过 +- [x] 单次 prefill 测试通过 +- [x] 解码循环测试通过 +- [x] EOS token 终止测试通过 + +## 相关文档 +- [需求文档](../docs/qnn_backend/qwen_npu_decoding_requirements.md) + +## 相关 Issue +# (如果有) +``` + +### 4.4 PR 提交清单 + +在创建 PR 之前,确认: + +- [ ] 代码已编译通过,无编译错误 +- [ ] 已运行相关测试,测试通过 +- [ ] 代码已格式化(如果有格式化工具) +- [ ] 提交信息清晰,符合规范 +- [ ] 所有修改都已提交并推送 +- [ ] PR 描述清晰,说明了功能和改动 +- [ ] 已同步 upstream/v2 最新代码(避免冲突) + +--- + +## 常见问题 + +### Q1: 如何查看功能分支和 upstream/v2 的差异? 
+ +```bash +# 查看所有差异 +git diff upstream/v2..feature/qwen-npu-decoding + +# 查看提交历史差异 +git log upstream/v2..feature/qwen-npu-decoding --oneline + +# 查看文件列表差异 +git diff --name-only upstream/v2..feature/qwen-npu-decoding +``` + +### Q2: 如何修改已提交的 commit? + +**修改最后一次提交:** + +```bash +# 修改提交信息 +git commit --amend -m "新的提交信息" + +# 添加遗漏的文件到上次提交 +git add <遗漏的文件> +git commit --amend --no-edit + +# 修改后需要强制推送 +git push --force-with-lease origin feature/qwen-npu-decoding +``` + +**修改更早的提交:** + +```bash +# 使用交互式 rebase +git rebase -i HEAD~3 # 修改最近 3 个提交 + +# 在编辑器中,将需要修改的提交标记为 'edit' +# 然后修改文件,执行: +git add . +git commit --amend +git rebase --continue +``` + +### Q3: 如何撤销未提交的修改? + +```bash +# 撤销工作区的修改(未暂存) +git checkout -- <文件路径> +# 或者 +git restore <文件路径> + +# 撤销所有未暂存的修改 +git checkout -- . +# 或者 +git restore . + +# 撤销暂存的修改(但保留工作区修改) +git reset HEAD <文件路径> +# 或者 +git restore --staged <文件路径> +``` + +### Q4: 如何查看分支的提交历史? + +```bash +# 简洁模式 +git log --oneline + +# 图形化显示 +git log --oneline --graph --decorate + +# 显示最近 10 个提交 +git log --oneline -10 + +# 显示某个文件的提交历史 +git log --oneline <文件路径> +``` + +### Q5: 如何切换分支? + +```bash +# 切换到其他分支 +git checkout <分支名> + +# 或者使用新的命令(Git 2.23+) +git switch <分支名> + +# 创建并切换新分支 +git checkout -b <新分支名> +# 或者 +git switch -c <新分支名> +``` + +### Q6: 如何删除分支? + +```bash +# 删除本地分支 +git branch -d feature/qwen-npu-decoding + +# 强制删除本地分支(即使未合并) +git branch -D feature/qwen-npu-decoding + +# 删除远程分支 +git push origin --delete feature/qwen-npu-decoding +``` + +### Q7: PR 被要求修改后怎么办? + +```bash +# 1. 在功能分支上继续修改 +git checkout feature/qwen-npu-decoding + +# 2. 进行修改 +# ... 编辑文件 ... + +# 3. 提交修改 +git add . +git commit -m "fix: address review comments" + +# 4. 推送到 Fork +git push origin feature/qwen-npu-decoding + +# PR 会自动更新,不需要重新创建 +``` + +### Q8: 如何同步 upstream 的最新代码到功能分支? 
+ +```bash +# 方法 1: 使用 rebase(推荐,保持提交历史整洁) +git fetch upstream +git rebase upstream/v2 + +# 如果有冲突,解决后: +git add <冲突文件> +git rebase --continue + +# 方法 2: 使用 merge +git fetch upstream +git merge upstream/v2 +``` + +--- + +## 快速参考命令 + +### 日常开发流程 + +```bash +# 1. 切换到功能分支 +git checkout feature/qwen-npu-decoding + +# 2. 查看状态 +git status + +# 3. 暂存修改 +git add . + +# 4. 提交 +git commit -m "feat: your message" + +# 5. 推送 +git push +``` + +### 创建功能分支(一次性) + +```bash +# 1. 同步 upstream +git fetch upstream + +# 2. 创建功能分支 +git checkout -b feature/qwen-npu-decoding upstream/v2 + +# 3. 推送并设置上游 +git push -u origin feature/qwen-npu-decoding +``` + +### 同步 upstream 代码 + +```bash +# 1. 获取最新代码 +git fetch upstream + +# 2. 在功能分支上 rebase +git checkout feature/qwen-npu-decoding +git rebase upstream/v2 + +# 3. 如果有冲突,解决后继续 +git add <冲突文件> +git rebase --continue + +# 4. 强制推送 +git push --force-with-lease +``` + +### 查看差异和状态 + +```bash +# 查看工作区修改 +git diff + +# 查看与 upstream/v2 的差异 +git diff upstream/v2..feature/qwen-npu-decoding + +# 查看提交历史 +git log --oneline --graph --decorate -10 +``` + +--- + +## 完整工作流示例 + +假设你要实现 QNN Decoding 功能,完整流程如下: + +```bash +# ========== 阶段 1: 创建功能分支 ========== + +# 1. 同步 upstream +git fetch upstream + +# 2. 创建功能分支 +git checkout -b feature/qwen-npu-decoding upstream/v2 + +# 3. 推送并设置上游 +git push -u origin feature/qwen-npu-decoding + + +# ========== 阶段 2: 开发 ========== + +# 1. 开始开发(编辑文件) +vim mllm/models/qwen_npu/modeling_qwen_npu.hpp +# ... 添加代码 ... + +# 2. 查看修改 +git status +git diff + +# 3. 暂存并提交 +git add mllm/models/qwen_npu/modeling_qwen_npu.hpp +git commit -m "feat: add KV cache interface extension" + +# 4. 继续开发 +vim mllm/models/qwen_npu/modeling_qwen_npu.cpp +# ... 添加代码 ... + +# 5. 再次提交 +git add mllm/models/qwen_npu/modeling_qwen_npu.cpp +git commit -m "feat: implement setKVCacheSeqCnt method" + +# 6. 定期推送 +git push + + +# ========== 阶段 3: 准备 PR ========== + +# 1. 确保所有修改已提交 +git status + +# 2. 同步 upstream(避免冲突) +git fetch upstream +git rebase upstream/v2 + +# 3. 
如果有冲突,解决后继续 +# git add <冲突文件> +# git rebase --continue + +# 4. 强制推送(如果 rebase 了) +git push --force-with-lease + +# 5. 在 GitHub 上创建 PR +# 访问: https://github.com/jialilve/mllm +# 点击 "Compare & pull request" +``` + +--- + +## 总结 + +**标准工作流:** + +1. ✅ **创建功能分支** - 基于 `upstream/v2` +2. ✅ **开发** - 在功能分支上编辑、提交 +3. ✅ **推送** - 定期推送到 Fork +4. ✅ **同步** - 必要时同步 upstream 代码 +5. ✅ **PR** - 在 GitHub 上创建 Pull Request + +**关键原则:** + +- 🎯 每个功能使用独立分支 +- 🎯 功能分支基于 `upstream/v2` +- 🎯 提交信息清晰、规范 +- 🎯 定期推送,避免丢失工作 +- 🎯 PR 前同步 upstream,避免冲突 + +--- + +**需要帮助?** 如果遇到问题,可以: +- 查看本文档的"常见问题"部分 +- 使用 `git help <命令>` 查看帮助 +- 参考项目的其他 PR 示例 + diff --git a/docs/qnn_backend/qwen_npu_decoding_requirements.md b/docs/qnn_backend/qwen_npu_decoding_requirements.md new file mode 100644 index 000000000..43e880795 --- /dev/null +++ b/docs/qnn_backend/qwen_npu_decoding_requirements.md @@ -0,0 +1,510 @@ +# QNN Backend Qwen NPU Decoding 功能需求分析文档 + +## 1. 项目背景 + +### 1.1 目标 +在 mllm_v2 框架上实现基于 QNN 加速的 Qwen3 4B 长文本推理功能。当前 QNN 已迁移到 v2 版本,但仅支持单个固定长度输入的 prefill 推理。需要实现自回归解码(decoding)功能,使模型能够连续生成文本。 + +### 1.2 当前状态 +- **功能限制**:QNN 后端仅支持单 chunk 128 长度的 prefill 推理 +- **问题现象**:`mllm-qwen-npu` 示例程序只输出单个 token 后即结束,无法进行连续生成 +- **技术约束**: + - QNN 端只允许单 chunk 128 长度 + - Decode 阶段的新 token 处理需要在 CPU 侧完成 + - QNN 负责输出 logits,CPU 负责采样和 token 管理 + +## 2. 功能需求 + +### 2.1 核心功能 +在输入长度 < chunk_size(chunk_size = 128)的场景下,实现基于 QNN 后端的自回归解码: + +1. **KV Cache 管理** + - KV cache 默认长度为 1K(1024) + - Prefill 阶段:real_seq 以内的真实输入 + (128 - real_seq) 的 padding + - Decode 阶段:利用 padding 区域存放新生成的 token + +2. **解码循环** + - 循环调用 `forward` 生成下一个 token + - 将新 token 写入 padding 区域(在 CPU buffer 中维护输入序列) + - 累积 seq_len,直至满足终止条件 + +3. **终止条件** + - 总长度达到 128(chunk_size) + - 生成 EOS token(token ID: 151645) + +### 2.2 预期效果 +示例程序能够输出完整句子,而非仅单个 token。推理流程能够连续生成文本,直到达到最大长度或遇到 EOS token。 + +## 3. 
技术实现方案 + +### 3.1 KV Cache 接口扩展 + +#### 3.1.1 接口设计原则 +- **避免全局接口**:v1 中使用了大量全局接口,耦合性过大 +- **在 modeling 中体现接口**:方便后续功能扩展 +- **保持向后兼容**:确保新增接口不会破坏已有 trace/prefill 流程 + +#### 3.1.2 需要实现的接口层次 + +**层次 1:基类接口(aops::KVCacheOp)** +```cpp +// mllm/core/aops/KVCacheOp.hpp +class KVCacheOp : public BaseOp { +public: + // 现有接口 + void setLayerIndex(int32_t layer_idx); + virtual void clearCache(); + + // 新增接口 + virtual void setCurrentSeqCnt(int32_t seq); +}; +``` + +**层次 2:CPU 实现(CPUKVCacheOp)** +```cpp +// mllm/backends/cpu/ops/KVCacheOp.hpp +class CPUKVCacheOp final : public aops::KVCacheOp { +public: + void setCurrentSeqCnt(int32_t seq) override; + +private: + nn::StaticCache cache_; // 内部使用 StaticCache +}; +``` + +**层次 3:Layer 接口(nn::KVCache)** +```cpp +// mllm/nn/layers/KVCache.hpp +class KVCache : public Layer { +public: + void setCurrentSeqCnt(int32_t seq); + // 现有接口:clearCache(), setLayerIndex() +}; +``` + +**层次 4:Model 接口(QwenText/QwenForCausalLM)** +```cpp +// mllm/models/qwen_npu/modeling_qwen_npu.hpp +class QwenText : public nn::Module { +public: + void setKVCacheSeqCnt(int32_t seq); // 设置所有层的 KV cache 序列长度 + void clearKVCache(); // 现有接口 +}; + +class QwenForCausalLM : public nn::Module, public ARGeneration { +public: + void setKVCacheSeqCnt(int32_t seq); // 委托给 model.setKVCacheSeqCnt() +}; +``` + +#### 3.1.3 实现细节 + +**StaticCache::setCurrentSeqCnt 行为** +- 参考 `nn::StaticCache::setCurrentSeqCnt(int32_t seq)` +- 设置所有层的 `current_seq_cnt_[layer_idx] = seq` +- **关键**:不会覆盖已有 KV cache 数据,只是更新长度计数器 + +**CPUKVCacheOp::setCurrentSeqCnt 实现** +```cpp +void CPUKVCacheOp::setCurrentSeqCnt(int32_t seq) { + cache_.setCurrentSeqCnt(seq); +} +``` + +**nn::KVCache::setCurrentSeqCnt 实现** +```cpp +void KVCache::setCurrentSeqCnt(int32_t seq) { + std::static_pointer_cast(impl()->getInstancedOp())->setCurrentSeqCnt(seq); +} +``` + +### 3.2 解码循环实现 + +#### 3.2.1 在 main.cpp 中添加解码循环 + +**当前代码结构**(examples/qwen_npu/main.cpp): +```cpp +// Prefill 阶段 +auto out = model.forward(inputs, {{"seq_len", 
mllm::AnyValue((int)raw_input_tokens.shape()[1])}})["sequence"]; +auto sampled = model.sampleGreedy(out); +std::wcout << "token: " << sampled << " " << qwen_tokenizer.detokenize(sampled) << "\n"; +``` + +**需要添加的解码循环**(包含调试日志): +```cpp +const int chunk_size = 128; +const int real_seq = raw_input_tokens.shape()[1]; // 实际输入长度 +const int eos_token_id = 151645; + +// Prefill 阶段(已有代码) +MLLM_INFO("=== Prefill Phase ==="); +MLLM_INFO("Input sequence length: {}", real_seq); +auto prefill_output = model.forward(inputs, {{"seq_len", mllm::AnyValue(real_seq)}}); +auto sampled = model.sampleGreedy(prefill_output["sequence"]); +MLLM_INFO("Prefill generated token: {} ({})", sampled, qwen_tokenizer.detokenize(sampled)); +std::wcout << qwen_tokenizer.detokenize(sampled); + +// 解码循环 +int current_seq_len = real_seq; +auto& sequence_tensor = inputs["sequence"]; +auto sequence_ptr = sequence_tensor.ptr(); + +// 将第一个生成的 token 写入 padding 区域 +sequence_ptr[current_seq_len] = sampled; +current_seq_len++; + +// 保存 prefill 返回的 position_ids,用于第一次 decode +ARGenerationOutputPast past = prefill_output; + +MLLM_INFO("=== Decode Phase ==="); +MLLM_INFO("Starting decode loop, initial seq_len: {}", current_seq_len); + +// 循环生成直到达到 chunk_size 或遇到 EOS +int decode_step = 0; +while (current_seq_len < chunk_size) { + decode_step++; + MLLM_INFO("--- Decode Step {} ---", decode_step); + MLLM_INFO("Current sequence length: {}", current_seq_len); + + // 更新 KV cache 序列长度 + model.setKVCacheSeqCnt(current_seq_len); + + // 验证 KV cache 状态(调试用) + // 注意:需要通过 model.model 访问内部 KV cache + // 这里假设可以通过某种方式访问,实际实现时可能需要添加辅助方法 + // MLLM_INFO("KV cache seq_cnt after update: {}", model.model.getKVCacheSeqCnt(0)); + + // 准备输入:只包含当前要处理的 token(decode 阶段每次只处理 1 个 token) + // 注意:需要传入上一次返回的 position_ids,forward 方法会自动递增 + auto decode_input = ARGenerationOutputPast{ + {"sequence", Tensor::empty({1, 1}, kInt64, kCPU).alloc()}, + {"position_ids", past["position_ids"]} // 使用上一次返回的 position_ids + }; + decode_input["sequence"].ptr()[0] = 
sequence_ptr[current_seq_len - 1]; + + MLLM_INFO("Decode input token: {}", sequence_ptr[current_seq_len - 1]); + + // 调用 forward,传入当前序列长度 + // forward 方法会检测到 position_ids 存在且 seq_len == 1,自动递增位置 + auto decode_output = model.forward(decode_input, {{"seq_len", mllm::AnyValue(current_seq_len)}}); + + // 采样下一个 token + auto next_token = model.sampleGreedy(decode_output["sequence"]); + MLLM_INFO("Generated token: {} ({})", next_token, qwen_tokenizer.detokenize(next_token)); + std::wcout << qwen_tokenizer.detokenize(next_token); + + // 检查终止条件 + if (next_token == eos_token_id) { + MLLM_INFO("EOS token detected, stopping decode"); + break; + } + + // 将新 token 写入序列 + sequence_ptr[current_seq_len] = next_token; + current_seq_len++; + MLLM_INFO("Updated sequence length: {}", current_seq_len); + + // 保存本次输出,用于下次循环(包含更新后的 position_ids) + past = decode_output; +} + +MLLM_INFO("=== Decode Complete ==="); +MLLM_INFO("Total decode steps: {}", decode_step); +MLLM_INFO("Final sequence length: {}", current_seq_len); +MLLM_INFO("Remaining capacity: {}", chunk_size - current_seq_len); +std::wcout << "\n"; +``` + +#### 3.2.2 关键实现细节 + +**输入序列管理** +- Prefill 阶段:使用完整的 128 长度 tensor,real_seq 之前是真实 token,之后是 padding(-1) +- Decode 阶段:每次 forward 只传入单个 token(形状 [1, 1]),但需要正确设置 seq_len 参数 + +**KV Cache 同步** +- 每次解码循环前调用 `model.setKVCacheSeqCnt(current_seq_len)` +- 确保 KV cache 知道当前已处理的序列长度 +- 新 token 的 KV 会被追加到现有 cache 的末尾 + +**Position IDs 处理** +- Decode 阶段需要正确传递 position_ids +- 参考 `QwenForCausalLM::forward` 中的 position_ids 生成逻辑: + - Prefill 阶段:自动生成 `[0, 1, 2, ..., seq_len-1]` + - Decode 阶段:如果 input 中包含 position_ids,会自动递增最后一个位置 +- 实现要点: + - Prefill 返回的 output 中包含 position_ids + - 第一次 decode 时,使用 prefill 返回的 position_ids + - 后续 decode 时,使用上一次 forward 返回的 position_ids + - forward 方法会自动检测 `seq_len == 1` 且存在 position_ids,然后递增位置 + +### 3.3 量化信息处理 + +#### 3.3.1 量化 Scale 的作用 +- **GraphBuild 阶段**:量化 scale 用于构建 QNN 计算图 +- **执行阶段**:量化 scale 仍然有效,但不会被使用(QNN 内部已固化) + +#### 3.3.2 实现注意事项 +- 量化 scale 只在 quantize 
前需要显式 attach 到 input tensor +- 参考 `QNNCastTypeOp.cpp::QNNQuantizePattern` 中从输入 tensor 获取 quant scale 的操作 +- 对于 scale 维持不变的算子(view, transpose),使用 `propagateQuantScale` 进行传递 +- 对于 Linear 算子,scale 通过模型加载而来 + +**结论**:在 decode 循环中,不需要重新设置量化 scale,因为: +1. 量化参数已在 GraphBuild 时附加到 QNN tensor 中 +2. 执行时 QNN 内部使用已固化的量化参数 + +## 4. 约束与关注点 + +### 4.1 技术约束 +1. **QNN 限制**:QNN 端只允许单 chunk 128 长度;decode 只能在 CPU 侧处理新增 token +2. **KV Cache 管理**:需要确保新增的接口不会破坏已有 trace/prefill 流程 +3. **内存管理**:避免在 decode 循环中创建新的 128 长度 KV cache,应复用现有 cache + +### 4.2 实现注意事项 +1. **Position IDs**:decode 阶段需要正确生成 position_ids,确保位置编码正确 +2. **序列长度参数**:每次 forward 需要传入正确的 seq_len,告知模型当前实际序列长度 +3. **Tensor 设备**:注意 QNN/CPU 之间的 tensor 转换,确保数据正确传递 +4. **错误处理**: + - 验证 `current_seq_len` 不超过 `chunk_size`(128) + - 验证输入序列长度 `real_seq` 小于 `chunk_size` + - 处理 `forward` 调用可能出现的异常 + - 验证 `setKVCacheSeqCnt` 的参数范围(0 <= seq <= chunk_size) +5. **边界情况**: + - 输入长度为 0 或负数(应在调用前验证) + - 输入长度等于或超过 chunk_size(应在调用前验证或拒绝) + - KV cache 已满的情况(理论上不应发生,因为限制在 chunk_size 内) + +### 4.3 调试与验证 +1. **调试环境**:Android 设备、ADB +2. **验证方法**: + - 检查输出是否连续生成多个 token + - 验证是否在 EOS 或达到 128 长度时正确停止 + - 确认没有内存泄漏或崩溃 + +## 5. 待解决问题 + +### 5.1 Context 析构问题 +**问题描述**:当前存在 SIGSEGV 崩溃,推测与析构顺序相关。 + +**解决方案**:需要在 context 析构中手动管理 backend 销毁顺序。 + +**实现位置**:待确认具体实现位置(可能在 QNNBackend 或 Context 相关代码中) + +### 5.2 Position IDs 生成逻辑(已解决) +**解决方案**:decode 循环中需要显式传递 position_ids。 + +**实现方式**: +- Prefill 阶段返回的 output 中包含 position_ids +- 第一次 decode 时,使用 prefill 返回的 position_ids +- 后续 decode 时,使用上一次 forward 返回的 position_ids +- `QwenForCausalLM::forward` 方法会自动检测 `seq_len == 1` 且存在 position_ids,然后递增位置 + +**参考实现**:见 3.2.1 节解码循环代码示例。 + +## 6. 调试日志与测试验证 + +### 6.1 调试日志需求 + +为了验证 KV 缓存长度控制的正确性以及解码流程的正确性,需要在关键位置添加调试日志。 + +#### 6.1.1 日志位置与内容 + +**1. Prefill 阶段日志** +- 输入序列长度(real_seq) +- 生成的第一个 token ID 和文本 + +**2. 
Decode 循环日志(每次迭代)** +- 当前解码步骤编号 +- 当前序列长度(current_seq_len) +- 输入 token ID(用于验证输入序列管理) +- 生成的 token ID 和文本 +- KV cache 序列长度(验证 `setKVCacheSeqCnt` 是否正确设置) +- 终止原因(EOS 或达到最大长度) + +**3. 解码完成日志** +- 总解码步数 +- 最终序列长度 +- 剩余容量(chunk_size - current_seq_len) + +#### 6.1.2 日志实现方式 + +使用项目现有的日志宏 `MLLM_INFO`(定义在 `mllm/utils/Log.hpp`): + +```cpp +#include "mllm/utils/Log.hpp" + +// 示例 +MLLM_INFO("Current sequence length: {}", current_seq_len); +MLLM_INFO("Generated token: {} ({})", token_id, token_text); +``` + +**日志级别控制**: +- 默认日志级别为 `LogLevel::kInfo`,会显示所有 `MLLM_INFO` 日志 +- 可以通过 `Logger::level()` 调整日志级别(如果需要减少日志输出) + +#### 6.1.3 KV Cache 状态验证 + +为了验证 KV cache 状态,需要添加辅助方法获取当前序列长度: + +**可选实现:在 Model 接口中添加查询方法** +```cpp +// mllm/models/qwen_npu/modeling_qwen_npu.hpp +class QwenText : public nn::Module { +public: + void setKVCacheSeqCnt(int32_t seq); + int32_t getKVCacheSeqCnt(int32_t layer_idx = 0) const; // 新增:获取指定层的序列长度 + void clearKVCache(); +}; +``` + +**实现方式**: +```cpp +int32_t QwenText::getKVCacheSeqCnt(int32_t layer_idx) const { + // 通过内部 KV cache 层获取序列长度 + // 需要访问 model 内部的 kv_cache_ 成员 + // 具体实现取决于内部结构 +} +``` + +**注意**:如果添加查询方法比较复杂,也可以暂时在调试时通过其他方式验证(如直接访问内部 cache),或使用条件编译宏控制调试代码。 + +### 6.2 测试验证要点 + +#### 6.2.1 功能验证 + +1. **序列长度递增验证** + - 验证 `current_seq_len` 从 `real_seq` 开始,每次循环递增 1 + - 验证最终长度不超过 `chunk_size`(128) + +2. **KV Cache 同步验证** + - 验证每次调用 `setKVCacheSeqCnt` 后,KV cache 的序列长度正确更新 + - 验证所有层的序列长度保持一致 + - 验证新 token 的 KV 被正确追加到 cache 末尾 + +3. **输入序列管理验证** + - 验证新生成的 token 被正确写入 `sequence_tensor` 的 padding 区域 + - 验证每次 decode 时,输入 token 来自序列的正确位置(`sequence_ptr[current_seq_len - 1]`) + +4. **终止条件验证** + - 验证遇到 EOS token(151645)时正确停止 + - 验证达到 chunk_size(128)时正确停止 + - 验证终止后不再继续生成 + +5. **Position IDs 验证** + - 验证 position_ids 在每次 decode 后正确递增 + - 验证 position_ids 与序列长度一致 + +#### 6.2.2 边界情况测试 + +1. **最小输入长度** + - 测试 `real_seq = 1` 的情况 + - 验证能够正常进行 decode + +2. **接近最大长度** + - 测试 `real_seq = 127` 的情况(只能生成 1 个 token) + - 验证在达到 128 时正确停止 + +3. 
**EOS 提前终止** + - 测试在生成过程中遇到 EOS token + - 验证提前终止后不再继续生成 + +4. **空输入处理** + - 测试边界情况下的输入验证 + +#### 6.2.3 性能与稳定性验证 + +1. **内存泄漏检查** + - 使用内存检测工具(如 Valgrind、AddressSanitizer)检查 + - 验证 decode 循环中不会创建不必要的临时对象 + +2. **崩溃检查** + - 验证不会出现 SIGSEGV 或其他崩溃 + - 特别关注 Context 析构相关的崩溃(见 5.1 节) + +3. **长时间运行稳定性** + - 测试多次 decode 循环的稳定性 + - 验证 KV cache 不会溢出或损坏 + +### 6.3 调试日志示例输出 + +期望的日志输出格式: + +``` +[INFO] examples/qwen_npu/main.cpp:140 === Prefill Phase === +[INFO] examples/qwen_npu/main.cpp:141 Input sequence length: 5 +[INFO] examples/qwen_npu/main.cpp:144 Prefill generated token: 12345 (你好) +[INFO] examples/qwen_npu/main.cpp:156 === Decode Phase === +[INFO] examples/qwen_npu/main.cpp:157 Starting decode loop, initial seq_len: 6 +[INFO] examples/qwen_npu/main.cpp:162 --- Decode Step 1 --- +[INFO] examples/qwen_npu/main.cpp:163 Current sequence length: 6 +[INFO] examples/qwen_npu/main.cpp:177 Decode input token: 12345 +[INFO] examples/qwen_npu/main.cpp:186 Generated token: 67890 (世界) +[INFO] examples/qwen_npu/main.cpp:195 Updated sequence length: 7 +[INFO] examples/qwen_npu/main.cpp:162 --- Decode Step 2 --- +[INFO] examples/qwen_npu/main.cpp:163 Current sequence length: 7 +... +[INFO] examples/qwen_npu/main.cpp:200 === Decode Complete === +[INFO] examples/qwen_npu/main.cpp:201 Total decode steps: 10 +[INFO] examples/qwen_npu/main.cpp:202 Final sequence length: 15 +[INFO] examples/qwen_npu/main.cpp:203 Remaining capacity: 113 +``` + +### 6.4 调试日志的后续处理 + +**开发阶段**: +- 保留所有调试日志,便于问题定位和验证 + +**PR 提交阶段**: +- 根据项目规范,可以选择: + - **方案 A**:保留日志,通过日志级别控制(推荐) + - **方案 B**:注释掉调试日志,保留代码以便将来使用 + - **方案 C**:使用条件编译宏控制(如 `#ifdef MLLM_DEBUG_DECODING`) + +**建议**:使用方案 A,通过日志级别控制。如果需要减少日志输出,可以在发布版本中设置更高的日志级别。 + +## 7. 实现步骤 + +### 阶段 1:KV Cache 接口扩展 +1. 在 `aops::KVCacheOp` 中添加 `setCurrentSeqCnt` 虚方法 +2. 在 `CPUKVCacheOp` 中实现该方法,调用 `cache_.setCurrentSeqCnt()` +3. 在 `nn::KVCache` 中添加 `setCurrentSeqCnt` 方法 +4. 在 `QwenText` 和 `QwenForCausalLM` 中添加 `setKVCacheSeqCnt` 方法 + +### 阶段 2:解码循环实现与调试日志 +1. 
在 `main.cpp` 中添加解码循环代码(包含调试日志,见 3.2.1 节) +2. 实现输入序列管理(将新 token 写入 padding 区域) +3. 实现 KV cache 序列长度同步 +4. 实现终止条件检查(EOS 或达到 128 长度) +5. 添加调试日志输出(见 6.1 节) + +### 阶段 3:测试与验证 +1. 编译并运行示例程序 +2. 检查调试日志输出,验证 KV cache 状态和序列长度 +3. 验证是否能够连续生成多个 token +4. 验证终止条件是否正确工作(EOS 和最大长度) +5. 验证边界情况(最小输入、接近最大长度等) +6. 检查是否有内存泄漏或崩溃 +7. 验证长时间运行的稳定性 + +### 阶段 4:Context 析构修复(可选) +1. 定位 SIGSEGV 崩溃原因 +2. 实现 backend 销毁顺序管理 +3. 验证修复效果 + +## 8. 参考文档 + +- QNN Backend Design: `docs/qnn_backend/core_design.rst` +- QNN 量化文档: https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-10/quantization.html +- 模型量化基本概念: https://zhuanlan.zhihu.com/p/505570612 + +## 9. 相关代码文件 + +- `examples/qwen_npu/main.cpp` - 示例程序入口 +- `mllm/backends/cpu/ops/KVCacheOp.{hpp,cpp}` - CPU KV Cache 操作实现 +- `mllm/nn/layers/KVCache.{hpp,cpp}` - KV Cache Layer 接口 +- `mllm/nn/lmcache/StaticCache.{hpp,cpp}` - 静态缓存实现(包含 `getCurrentSeqCnt` 方法) +- `mllm/models/qwen_npu/modeling_qwen_npu.hpp` - Qwen NPU 模型实现 +- `mllm/core/aops/KVCacheOp.{hpp,cpp}` - KV Cache 操作基类 +- `mllm/backends/qnn/op/QNNCastTypeOp.cpp` - QNN 量化实现参考 +- `mllm/utils/Log.hpp` - 日志宏定义 + diff --git a/docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md b/docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md new file mode 100644 index 000000000..c4219fb16 --- /dev/null +++ b/docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md @@ -0,0 +1,453 @@ +# QNN Execute Return Order Fix - PR 准备指南 + +## 问题 1: 调试日志信息处理 + +### 当前情况 +代码中添加了很多调试用的 `MLLM_INFO` 打印信息,用于验证 QNN Execute Return Order 修复是否正确。 + +### 解决方案 + +#### 方案 A: 使用条件编译宏(推荐) +创建一个调试宏,可以通过编译选项控制是否启用调试日志: + +**优点:** +- 代码保持整洁,不需要注释/取消注释 +- 可以通过编译选项控制(如 `-DMLLM_QNN_DEBUG_OUTPUT_ORDER=ON`) +- 保留所有调试代码,方便将来使用 + +**实现步骤:** +1. 在 `QNNBackend.hpp` 或相关头文件中定义宏 +2. 将所有调试日志用宏包裹 +3. 
在 CMakeLists.txt 中添加编译选项 + +#### 方案 B: 注释掉调试日志(简单快速) +直接注释掉所有调试用的 `MLLM_INFO`,但保留代码以便将来使用。 + +**优点:** +- 实现简单快速 +- 代码清晰,明确标注为调试代码 +- 需要时可以快速取消注释 + +**缺点:** +- 代码中会有很多注释,可能不够美观 +- 需要手动注释/取消注释 + +#### 方案 C: 使用日志级别控制 +利用现有的 `LogLevel` 机制,将调试日志改为 `MLLM_DEBUG`(如果存在)或通过设置日志级别控制。 + +**注意:** 当前代码库中似乎没有 `MLLM_DEBUG`,只有 `MLLM_INFO/WARN/ERROR`。 + +### 推荐方案 +**建议使用方案 B(注释)**,因为: +1. 实现最简单,不需要修改构建系统 +2. 代码意图清晰,明确标注为调试代码 +3. 需要时可以快速恢复 +4. 对于 PR 来说,注释掉的调试代码是可以接受的 + +### 需要处理的文件 +- `mllm/backends/qnn/QNNBackend.cpp` - 包含大部分调试日志 +- `mllm/models/qwen_npu/modeling_qwen_npu.hpp` - 已注释(保持现状) + +--- + +## 问题 2: Git 工作流同步 + +### 当前情况 +- Fork 的项目与原项目(upstream)没有同步 +- 本地仓库与 Fork 的项目也没有同步 +- **重要:** 当前在 fork 的 `v2` 分支上工作,需要 PR 到主项目的 `v2` 分支 +- **重要:** fork 的 `v2` 分支上有多个 commits,但只有最新的这次修改需要 PR + +### 正确的 Git 工作流策略 + +#### 为什么需要功能分支? + +**最佳实践:** 每个功能/修复应该创建独立的功能分支,而不是直接在主分支(如 `v2`)上工作。 + +**优点:** +- ✅ 可以独立 PR 每个功能,不需要一次性 PR 所有改动 +- ✅ 保持主分支干净,只包含已合并的功能 +- ✅ 方便代码审查,每个 PR 只关注一个功能 +- ✅ 如果某个功能有问题,不影响其他功能 + +#### 当前情况的解决方案 + +如果你已经在 `v2` 分支上做了多个 commits,但只想 PR 其中一个,有以下几种方案: + +**方案 A: 创建新功能分支并 cherry-pick(推荐)** + +这是最干净的方法,创建一个新的功能分支,只包含你需要的修改: + +```bash +# 1. 确保 upstream 已配置并同步 +git fetch upstream +git checkout v2 +git merge upstream/v2 # 或 git rebase upstream/v2 + +# 2. 创建一个新的功能分支,基于 upstream/v2 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 3. 找到你需要 PR 的 commit(假设是最新的 commit) +# 查看最近的 commits +git log --oneline -10 + +# 4. Cherry-pick 你需要的 commit(s) +# 如果是最新的 commit +git cherry-pick HEAD@{1} # 或者使用 commit hash +# 或者如果是多个相关的 commits +git cherry-pick ... + +# 5. 如果有未提交的更改,先提交 +git add . +git commit -m "fix: QNN Execute Return Order - handle output reordering" + +# 6. 推送到你的 Fork +git push origin fix/qnn-execute-return-order + +# 7. 在 GitHub 上创建 PR:从 fix/qnn-execute-return-order 到 upstream/v2 +``` + +**方案 B: 使用交互式 rebase 整理 commits** + +如果你想保留在 `v2` 分支上工作,但只 PR 部分 commits: + +```bash +# 1. 创建一个新分支用于 PR +git checkout -b fix/qnn-execute-return-order + +# 2. 
使用交互式 rebase 整理 commits +git rebase -i upstream/v2 + +# 在编辑器中,只保留需要 PR 的 commits,其他标记为 drop +# 或者使用 squash 合并多个相关 commits + +# 3. 推送到 Fork +git push origin fix/qnn-execute-return-order +``` + +**方案 C: 创建补丁并应用到新分支** + +```bash +# 1. 在 v2 分支上,创建补丁文件 +git format-patch -1 HEAD # 为最新的 commit 创建补丁 + +# 2. 创建新功能分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 3. 应用补丁 +git am + +# 4. 推送到 Fork +git push origin fix/qnn-execute-return-order +``` + +### 正确的 Git 工作流(未来参考) + +#### 步骤 1: 配置远程仓库 +```bash +# 查看当前远程仓库 +git remote -v + +# 如果没有 upstream,添加原项目为 upstream +git remote add upstream <原项目URL> + +# 如果已有 upstream,确认 URL 正确 +git remote set-url upstream <原项目URL> +``` + +#### 步骤 2: 同步 upstream 到本地(v2 分支) +```bash +# 获取 upstream 的最新更改 +git fetch upstream + +# 切换到 v2 分支 +git checkout v2 + +# 合并 upstream 的更改到本地 v2 分支 +git merge upstream/v2 + +# 或者使用 rebase(更推荐,保持提交历史整洁) +git rebase upstream/v2 +``` + +#### 步骤 3: 同步本地 v2 分支到 Fork(可选) +```bash +# 推送本地 v2 分支到你的 Fork(用于同步,不是 PR) +git push origin v2 +``` + +#### 步骤 4: 创建功能分支(从当前 v2 分支提取需要的修改) +```bash +# 方法 1: 如果修改还未提交,直接创建新分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 +# 然后手动应用你的修改,或使用 git cherry-pick + +# 方法 2: 如果修改已提交,使用 cherry-pick +# 先找到你的 commit hash +git log --oneline -10 + +# 创建新分支基于 upstream/v2 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# Cherry-pick 你需要的 commit(s) +git cherry-pick + +# 方法 3: 如果修改还未提交,先暂存 +git stash +git checkout -b fix/qnn-execute-return-order upstream/v2 +git stash pop +# 然后提交 +``` + +#### 步骤 5: 处理调试日志并提交 +```bash +# 1. 注释掉所有调试日志(使用方案 B) +# 2. 确保代码编译通过 +# 3. 运行测试确保功能正常 + +# 提交更改 +git add . +git commit -m "fix: QNN Execute Return Order - handle output reordering + +- Fix QNN graphExecute output order mismatch +- Add output reordering logic based on expected order +- Remove debug logs for production (commented for future use)" +``` + +#### 步骤 6: 重新编译和测试 +```bash +# 清理之前的构建 +rm -rf build/ + +# 重新编译 +# 根据你的构建系统执行编译命令 +# 例如:cmake .. 
&& make + +# 运行测试 +# 确保所有测试通过 +``` + +#### 步骤 7: 推送到 Fork 并创建 PR +```bash +# 推送功能分支到你的 Fork +git push origin fix/qnn-execute-return-order + +# 在 GitHub 上创建 Pull Request +# 从你的 Fork: fix/qnn-execute-return-order +# 到原项目: v2 分支 +``` + +### 针对当前情况的快速操作指南 + +如果你现在在 `v2` 分支上,有未提交的修改或已提交的修改,按以下步骤操作: + +#### 情况 1: 修改还未提交 +```bash +# 1. 暂存当前修改 +git stash + +# 2. 同步 upstream/v2 +git fetch upstream +git checkout v2 +git rebase upstream/v2 + +# 3. 创建功能分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 4. 应用你的修改 +git stash pop + +# 5. 提交修改 +git add . +git commit -m "fix: QNN Execute Return Order - handle output reordering + +- Fix QNN graphExecute output order mismatch +- Add output reordering logic based on expected order +- Remove debug logs for production (commented for future use)" + +# 6. 推送到 Fork +git push origin fix/qnn-execute-return-order +``` + +#### 情况 2: 修改已提交(在 v2 分支上) +```bash +# 1. 查看最近的 commits,找到你的 commit hash +git log --oneline -10 + +# 2. 同步 upstream/v2 +git fetch upstream +git checkout v2 +git rebase upstream/v2 + +# 3. 创建功能分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 4. Cherry-pick 你的 commit(s) +# 假设你的 commit hash 是 abc1234 +git cherry-pick abc1234 + +# 如果有多个相关 commits,可以一起 cherry-pick +# git cherry-pick abc1234 def5678 + +# 5. 推送到 Fork +git push origin fix/qnn-execute-return-order +``` + +#### 情况 3: 有多个 commits,但只想 PR 最新的 +```bash +# 1. 查看 commits,确认哪些需要 PR +git log --oneline -10 + +# 2. 同步 upstream/v2 +git fetch upstream +git checkout v2 +git rebase upstream/v2 + +# 3. 创建功能分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 4. Cherry-pick 最新的 commit(或相关的几个 commits) +git cherry-pick HEAD@{1} # 或者使用具体的 commit hash + +# 5. 
推送到 Fork +git push origin fix/qnn-execute-return-order +``` + +### 关于功能分支的常见问题 + +**Q: 必须全部一起 PR 到主项目吗?** +A: **不是的!** 这正是为什么需要功能分支的原因。每个功能分支可以独立 PR,不需要一次性 PR 所有改动。 + +**Q: 我的开发流程有问题吗?** +A: 在 `v2` 分支上直接开发是可以的(特别是如果你在 fork 上工作),但更好的做法是: +- 为每个功能创建独立的功能分支 +- 功能分支基于 `upstream/v2` 创建 +- 只将需要的功能分支 PR 到主项目 +- 其他不需要 PR 的改动保留在你的 fork 分支上 + +**Q: 我没创建过功能分支,怎么办?** +A: 不用担心!创建功能分支很简单: +```bash +# 创建新分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 或者从当前分支创建 +git checkout -b fix/qnn-execute-return-order +``` +功能分支就是普通的 Git 分支,可以随时创建、删除、合并。 + +**Q: 如果我在 v2 分支上有很多 commits,只想 PR 其中一个怎么办?** +A: 使用 `cherry-pick`: +```bash +# 1. 创建新功能分支 +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 2. Cherry-pick 你需要的 commit +git cherry-pick <commit-hash> + +# 3. 推送到 Fork 并创建 PR +git push origin fix/qnn-execute-return-order +``` + +--- + +## 前置条件:Git 配置 + +**重要:** 在开始之前,确保 Git 已配置 `user.name` 和 `user.email`。 + +如果遇到 "Committer identity unknown" 错误,请先配置 Git: + +```bash +# 全局配置(推荐) +git config --global user.name "你的名字" +git config --global user.email "你的邮箱" +``` + +详细说明请参考:[Git 配置说明](./GIT_SETUP.md) + +--- + +## 快速参考:当前情况的操作步骤 + +### 场景:在 fork 的 v2 分支上有多个 commits,只想 PR 最新的修改 + +**最简单的方法(推荐):** + +```bash +# 1. 确保 upstream 已配置 +git remote add upstream <原项目URL> # 如果还没有 + +# 2. 同步 upstream/v2 +git fetch upstream + +# 3. 查看你的 commits,找到需要 PR 的 commit hash +git log --oneline -10 + +# 4. 创建功能分支(基于 upstream/v2) +git checkout -b fix/qnn-execute-return-order upstream/v2 + +# 5. Cherry-pick 你需要的 commit(假设是 abc1234) +git cherry-pick abc1234 + +# 6. 确保调试日志已注释(已完成) +# 7. 编译和测试 +# 8. 推送到 Fork +git push origin fix/qnn-execute-return-order + +# 9. 
在 GitHub 上创建 PR:从 fix/qnn-execute-return-order 到 upstream/v2 +``` + +**或者使用脚本:** + +```bash +# 运行自动化脚本 +./docs/qnn_fix_bug/sync_and_prepare_pr.sh + +# 脚本会引导你完成所有步骤 +``` + +--- + +## PR 提交清单 + +在提交 PR 之前,请确认: + +- [ ] 已同步 upstream 和本地仓库 +- [ ] 已注释掉所有调试日志(或使用条件编译) +- [ ] 代码已重新编译,无编译错误 +- [ ] 已运行测试,所有测试通过 +- [ ] 提交信息清晰,描述了修复的问题 +- [ ] 代码已推送到 Fork 的功能分支 +- [ ] PR 描述清晰,说明了问题和解决方案 + +--- + +## PR 描述模板 + +```markdown +## 问题描述 +修复 QNN Execute Return Order 问题:QNN graphExecute 返回的输出顺序与 MLLM 期望的顺序不一致。 + +## 解决方案 +- 在 `QNNBackend::graphExecute` 中添加输出重排序逻辑 +- 根据 `expectedOrder` 将 QNN 返回的输出重新排序到 MLLM 期望的顺序 +- 添加输出索引映射机制,确保正确匹配 tensor 名称 +- 注释掉调试日志,保留代码以便将来调试使用 + +## 修改的文件 +- `mllm/backends/qnn/QNNBackend.cpp` - 添加输出重排序逻辑 +- `mllm/backends/qnn/QNNModel.cpp` - 添加输出索引映射方法 +- `mllm/backends/qnn/QNNModel.hpp` - 添加输出索引映射方法声明 +- `mllm/backends/qnn/passes/QNNGraphBuildPass.cpp` - 设置期望输出顺序 + +## 测试 +- [x] 编译通过 +- [x] 运行测试通过 +- [x] 验证输出顺序正确 + +## 相关 Issue +# (如果有) +``` + +**注意:** PR 的目标分支应该是 `v2`,不是 `main` 或 `master`。 + diff --git a/docs/qnn_fix_bug/adb_output.md b/docs/qnn_fix_bug/adb_output.md new file mode 100644 index 000000000..1cbbd97f4 --- /dev/null +++ b/docs/qnn_fix_bug/adb_output.md @@ -0,0 +1,1343 @@ +modeling_qwen_npu.hpp中只在QwenAttentionProjNPU中的forward函数中最后一行return {query_states, key_states, value_states} +query states在view前的tensor放到return列表的最后 /data/local/tmp/zl/mllm-v2/bin_test目录下的QNNOutputOrderTest输出 + +```bash +root@zhulei:~/mllm_v2/build-android-qnn-dbg/bin# adb shell +manet:/ $ cd /data/local/tmp/zl/mllm-v2/bin_test +manet:/data/local/tmp/zl/mllm-v2/bin_test $ LD_LIBRARY_PATH=. 
./mllm-qwen-npu +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNUtils.cpp:22 QNN Backend Lib: libQnnHtp.so +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_CPU.so and interface provider: LLaMAPackageInterfaceProvider +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_HTP.so and interface provider: LLaMAPackageInterfaceProvider +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:46 QNN Backend Build Id: v2.36.0.250627101419_123260 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:48 QNN backend supports tensor sparsity +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:51 QNN backend supports dynamic dimensions +[INFO] /root/mllm_v2/mllm/backends/base/PluginSystem.cpp:89 Register customized op: DequantizeAdd:4097 -> QNN +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order 
for graph 'model.layers.3_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.3_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.6_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.6_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_2' with 1 outputs +[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.14_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM 
expected output order for graph 'model.layers.14_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.17_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.17_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_1' with 3 outputs +[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_1' with 3 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_2' with 1 outputs +tensor( +[[151644, 8948, 198, 2610, 525, 264, ..., 30, 151645, 198, 151644, 77091, 198]], dtype=Int64, device=CPU) +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1377 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1378 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1379 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1377 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1378 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1379 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1377) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1378) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1379) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1431 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1431 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1431) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1451 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1452 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1451 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1452 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1453 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1451) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1452) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1453) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1504 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1504 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1504) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1524 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1525 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1524 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1525 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM 
expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1524) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1525) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1526) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1577 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1577 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1577) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1597 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1598 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1597 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1598 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1597) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1598) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1599) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1650 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1650 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1650) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1670 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1671 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1670 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1671 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1670) 
[SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1671) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1672) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1723 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1723 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1723) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1743 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1744 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1743 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1744 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1743) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] 
(tensor: 1744) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1745) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1796 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1796 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1796) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1816 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1817 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1816 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1817 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1816) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1817) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: 
MLLM[2] = QNN[2] (tensor: 1818) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1869 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1869 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1869) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1889 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1890 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1889 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1890 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1889) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1890) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1891) [SAME] +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1942 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1942 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1942) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.8_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1962 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1963 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1962 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1963 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1962) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1963) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1964) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output 
order for graph 'model.layers.8_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2015 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2015 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2015) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2035 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2036 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2035 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2036 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2035) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2036) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2037) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 
MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2088 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2088 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2088) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2108 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2109 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2108 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2109 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2108) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2109) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2110) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2161 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2161 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2161) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2181 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2182 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2183 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2181 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2182 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2183 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2181) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2182) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2183) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2234 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2234 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2234) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2254 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2255 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2256 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2254 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2255 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2256 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2254) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2255) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2256) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2307 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2307 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2307) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2327 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2328 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2329 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2327 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2328 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2329 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2327) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2328) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2329) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2380 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2380 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2380) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2400 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2401 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2402 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2400 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2401 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2402 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2400) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2401) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2402) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES 
MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2453) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2473 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2474 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2475 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2473 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2474 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2475 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2473) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2474) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2475) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2526) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2546 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2547 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2548 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2546 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2547 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2548 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2546) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2547) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2548) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2599) 
[SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2619 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2620 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2621 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2619 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2620 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2621 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2619) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2620) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2621) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2672) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: 
Checking output order for graph 'model.layers.18_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2692 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2693 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2694 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2692 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2693 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2694 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2692) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2693) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2694) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.18_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2745) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_1' +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2765 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2766 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2767 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2765 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2766 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2767 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2765) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2766) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2767) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2818) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): 
+[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2838 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2839 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2840 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2838 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2839 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2840 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2838) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2839) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2840) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2891) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2911 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2912 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2913 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2911 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2912 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2913 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2911) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2912) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2913) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2964) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2984 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2985 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2986 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2984 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2985 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2986 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2984) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2985) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2986) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3037) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3057 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 3058 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 3059 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3057 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 3058 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 3059 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3057) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 3058) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 3059) [SAME] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3110) [SAME] +token: 2121 As +Error: Received signal11 - SIGSEGV (Segmentation violation) +Stack trace: +#0 0x5c8fd2b12c +#1 0x5c8fd2af4c +#2 0x5c8fd2ac38 +#3 0x7645b91860 __kernel_rt_sigreturn +#4 0x739fdcbff4 +#5 0x739fdad750 +#6 0x739fd89748 +#7 0x739fb70a74 +#8 0x739fb7042c +#9 0x739fafbe14 +#10 0x739fafcc68 +#11 0x739fafead4 +#12 0x764075c3f0 __cxa_finalize +#13 0x764076155c exit +#14 0x7640755158 +Possible causes: invalid memory access, dangling pointer, stack overflow. +Shutting down... 
+``` +在 modeling_qwen_npu.hpp 的 QwenAttentionProjNPU::forward 函数的最后一行,尝试把 query_states 在 view 之前的 tensor 追加到返回列表末尾,即 return {query_states, key_states, value_states, query_states_raw};。这样修改代码后,/data/local/tmp/zl/mllm-v2/bin_test 目录下 QNNOutputOrderTest 的输出如下 +```bash +manet:/data/local/tmp/zl/mllm-v2/bin_test $ LD_LIBRARY_PATH=. ./mllm-qwen-npu +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNUtils.cpp:22 QNN Backend Lib: libQnnHtp.so +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_CPU.so and interface provider: LLaMAPackageInterfaceProvider +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_HTP.so and interface provider: LLaMAPackageInterfaceProvider +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:46 QNN Backend Build Id: v2.36.0.250627101419_123260 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:48 QNN backend supports tensor sparsity +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:51 QNN backend supports dynamic dimensions +[INFO] /root/mllm_v2/mllm/backends/base/PluginSystem.cpp:89 Register customized op: DequantizeAdd:4097 -> QNN +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 
query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_1' with 4 outputs +[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.3_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.3_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected 
output order for graph 'model.layers.6_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.6_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_2' with 1 outputs +[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.14_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.14_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.17_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded 
MLLM expected output order for graph 'model.layers.17_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_1' with 4 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_2' with 1 outputs +[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_1' with 4 outputs +[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_2' with 1 outputs +tensor( +[[151644, 8948, 198, 2610, 525, 264, ..., 30, 151645, 198, 151644, 77091, 198]], dtype=Int64, device=CPU) +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1377 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1378 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1379 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1362 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1362 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1377 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1378 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1379 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1377' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1378' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1379' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1362' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1377) [REORDERED] +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1378) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1379) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1362) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1431 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1431 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1431) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1451 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1452 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1436 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1436 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1451 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1452 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1451' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1452' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1453' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1436' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1451) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1452) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1453) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1436) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1504 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1504 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1504) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 
QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1524 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1525 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1509 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1509 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1524 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1525 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1524' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1525' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1526' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1509' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1524) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1525) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1526) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1509) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_2' +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1577 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1577 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1577) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1597 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1598 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1582 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1582 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1597 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1598 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1597' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1598' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects 
'1599' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1582' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1597) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1598) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1599) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1582) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1650 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1650 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1650) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1670 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1671 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1655 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): 
+[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1655 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1670 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1671 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1670' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1671' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1672' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1655' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1670) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1671) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1672) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1655) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1723 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1723 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] 
(tensor: 1723) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1743 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1744 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1728 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1728 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1743 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1744 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1743' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1744' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1745' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1728' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1743) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1744) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1745) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: 
MLLM[3] = QNN[0] (tensor: 1728) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1796 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1796 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1796) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1816 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1817 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1801 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1801 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1816 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1817 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1816' but it's at QNN[1] +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1817' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1818' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1801' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1816) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1817) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1818) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1801) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1869 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1869 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1869) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1889 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1890 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1874 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1874 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1889 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1890 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1889' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1890' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1891' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1874' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1889) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1890) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1891) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1874) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1942 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1942 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1942) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.8_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1962 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1963 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1947 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1947 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1962 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1963 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1962' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1963' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1964' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1947' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1962) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = 
QNN[2] (tensor: 1963) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1964) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1947) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.8_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2015 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2015 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2015) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2035 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2036 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2020 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2020 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2035 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2036 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2037 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2035' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2036' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2037' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2020' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2035) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2036) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2037) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2020) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2088 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2088 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2088) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_1' +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2108 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2109 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2093 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2093 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2108 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2109 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2108' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2109' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2110' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2093' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2108) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2109) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2110) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2093) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): 
+[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2161 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2161 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2161) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2181 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2182 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2183 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2166 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2166 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2181 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2182 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2183 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2181' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2182' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2183' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 
Mismatch: MLLM[3] expects '2166' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2181) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2182) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2183) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2166) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2234 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2234 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2234) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2254 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2255 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2256 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2239 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2239 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2254 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2255 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2256 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2254' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2255' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2256' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2239' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2254) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2255) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2256) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2239) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2307 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2307 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2307) [SAME] +[INFO] 
/root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2327 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2328 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2329 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2312 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2312 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2327 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2328 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2329 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2327' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2328' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2329' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2312' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2327) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2328) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2329) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2312) 
[REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2380 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2380 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2380) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2400 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2401 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2402 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2385 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2385 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2400 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2401 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2402 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2400' but it's at QNN[1] +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2401' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2402' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2385' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2400) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2401) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2402) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2385) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2453 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2453) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2473 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2474 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2475 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2458 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2458 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2473 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2474 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2475 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2473' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2474' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2475' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2458' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2473) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2474) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2475) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2458) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2526 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2526 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2526) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2546 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2547 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2548 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2531 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2531 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2546 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2547 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2548 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2546' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2547' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2548' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2531' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2546) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = 
QNN[2] (tensor: 2547) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2548) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2531) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2599 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2599) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2619 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2620 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2621 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2604 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2604 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2619 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2620 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2621 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2619' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2620' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2621' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2604' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2619) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2620) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2621) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2604) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2672 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2672) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.18_1' +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2692 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2693 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2694 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2677 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2677 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2692 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2693 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2694 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2692' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2693' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2694' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2677' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2692) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2693) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2694) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2677) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.18_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): 
+[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2745 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2745) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2765 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2766 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2767 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2750 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2750 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2765 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2766 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2767 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2765' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2766' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2767' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 
Mismatch: MLLM[3] expects '2750' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2765) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2766) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2767) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2750) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2818 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2818) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2838 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2839 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2840 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2823 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2823 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2838 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2839 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2840 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2838' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2839' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2840' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2823' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2838) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2839) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2840) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2823) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2891 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2891) [SAME] +[INFO] 
/root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2911 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2912 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2913 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2896 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2896 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2911 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2912 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2913 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2911' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2912' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2913' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2896' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2911) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2912) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2913) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2896) 
[REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2964 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2964) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2984 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2985 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2986 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2969 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2969 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2984 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2985 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2986 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2984' but it's at QNN[1] +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2985' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2986' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2969' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2984) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2985) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2986) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2969) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3037 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3037) [SAME] +[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_1' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3057 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 3058 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 3059 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 3042 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3042 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 3057 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 3058 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 3059 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '3057' but it's at QNN[1] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '3058' but it's at QNN[2] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '3059' but it's at QNN[3] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '3042' but it's at QNN[0] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 3057) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 3058) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 3059) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 3042) [REORDERED] +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_2' +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3110 +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3110 +[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed +[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3110) [SAME] +token: 2121 As +Error: Received signal11 - SIGSEGV (Segmentation violation) +Stack trace: +#0 0x5d5f57545c +#1 0x5d5f57527c +#2 0x5d5f574f68 +#3 0x74c41cf860 __kernel_rt_sigreturn +#4 0x7222ccdff4 +#5 0x7222caf750 +#6 0x7222c8b748 +#7 0x7222e6ea74 +#8 0x7222e6e42c +#9 0x7222df9e14 +#10 0x7222dfac68 +#11 0x7222dfcad4 +#12 0x74c23633f0 __cxa_finalize +#13 0x74c236855c exit +#14 0x74c235c158 +Possible causes: invalid memory access, dangling pointer, stack overflow. +Shutting down... +``` \ No newline at end of file diff --git a/examples/qwen_npu/CODE_EXPLANATION.md b/examples/qwen_npu/CODE_EXPLANATION.md new file mode 100644 index 000000000..aa2183b5a --- /dev/null +++ b/examples/qwen_npu/CODE_EXPLANATION.md @@ -0,0 +1,951 @@ +# Qwen NPU 示例代码详细解释 + +本文档逐行解释 `main.cpp` 中每行代码的具体逻辑,以及它们如何与其他文件结合。 + +## 1. 头文件包含(1-11行) + +```cpp +#include +#include +#include +#include + +#include "mllm/backends/qnn/passes/QNNGraphBuildPass.hpp" +#include "mllm/backends/qnn/passes/QNNGraphBuildPipeline.hpp" +#include "mllm/compile/PassManager.hpp" +#include "mllm/core/DataTypes.hpp" +#include "mllm/models/qwen_npu/tokenization_qwen.hpp" +#include "mllm/models/qwen_npu/modeling_qwen_npu.hpp" +``` + +### 详细说明: + +- **``**: 格式化库,用于打印输出 +- **``**: 标准整数类型(int64_t等) +- **``**: MLLM核心库,包含: + - `MLLM_MAIN` 宏定义(在 `mllm/mllm.hpp:401-412`) + - `Tensor` 类 + - `Context` 管理 + + - 信号处理等基础设施 +- **``**: 类型擦除的值容器,用于传递任意类型的参数 +- **QNN相关头文件**: QNN(Qualcomm Neural Network)后端相关的Pass +- **模型相关头文件**: Qwen NPU模型的实现和分词器 + +## 2. MLLM_MAIN 宏(15行) + +```cpp +MLLM_MAIN({ +``` + +### 详细说明: + +`MLLM_MAIN` 宏定义在 `mllm/mllm.hpp:401-412`: + +```cpp +#define MLLM_MAIN(...) 
\ + int main(int argc, char** argv) { \ + ::mllm::__setup_signal_handler(); \ + ::mllm::initializeContext(); \ + auto user_main = [&]() -> int { \ + __VA_ARGS__; \ + return 0; \ + }; \ + int result = ::mllm::__mllm_exception_main(user_main); \ + ::mllm::shutdownContext(); \ + return result; \ + } +``` + +**展开后的逻辑:** +1. 设置信号处理器(SIGINT, SIGTERM等) +2. 初始化MLLM上下文(内存管理器、设备等) +3. 将用户代码包装在lambda中 +4. 在异常处理中执行用户代码 +5. 程序结束时清理上下文 + +## 3. 初始化QNN后端(16行) + +```cpp + mllm::initQnnBackend(); +``` + +### 详细说明: + +- **位置**: `mllm/backends/qnn/Register.cpp:18` +- **作用**: + - 注册QNN后端操作(ops) + - 初始化QNN运行时环境 + - 使QNN相关的操作可以在MLLM中使用 + +## 4. 配置路径定义(18-19行) + +```cpp + const std::string config_path = "./config_1.8B_w8a16_qnn.json"; + const std::string model_path = "./qwen1.5-1.8b-chat-rot-qnn.mllm"; +``` + +### 详细说明: + +- **config_path**: 模型配置文件,包含: + - 模型架构参数(hidden_size, num_layers等) + - 量化配置(w8a16表示8bit权重,16bit激活) + - QNN特定配置 +- **model_path**: 模型权重文件(.mllm格式) + +## 5. 创建分词器(21行) + +```cpp + auto qwen_tokenizer = mllm::models::qwen_npu::QwenTokenizer("./tokenizer.json", "./qwen_merges.txt"); +``` + +### 详细说明: + +- **位置**: `mllm/models/qwen_npu/tokenization_qwen.hpp` +- **构造函数参数**: + - `tokenizer.json`: BPE(Byte Pair Encoding)词汇表 + - `qwen_merges.txt`: BPE合并规则 +- **作用**: 将文本转换为token IDs,或将token IDs转换回文本 + +### 内部实现(tokenization_qwen.hpp:285-309): + +```cpp +ARGenerationOutputPast convertMessage(const QwenMessage& message) { + // 1. 应用消息模板 + auto applied_string = QwenMessage::message_template; + size_t pos = applied_string.find("{{{prompt}}}"); + applied_string.replace(pos, 12, message.prompt); + + // 2. 分词 + auto sequence_str = tokenize(applied_string); + + // 3. 查找词汇表,转换为ID + std::vector ids; + for (const auto& str : sequence_str) { + ids.emplace_back(bpe_._lookup_vocab(str)); + } + + // 4. 
创建Tensor + Tensor sequence = Tensor::empty({1, (int32_t)ids.size()}, kInt64, kCPU) + .alloc(); + auto ptr = sequence.ptr(); + for (size_t i = 0; i < ids.size(); ++i) { ptr[i] = ids[i]; } + + return {{"sequence", sequence}}; +} +``` + +## 6. 模型文件版本(23行) + +```cpp + mllm::ModelFileVersion file_version = mllm::ModelFileVersion::kV1; +``` + +### 详细说明: + +- 指定模型文件格式版本 +- 不同版本可能有不同的序列化格式 +- 用于 `mllm::load()` 函数正确解析模型文件 + +## 7. 创建模型配置和实例(25-26行) + +```cpp + auto cfg = mllm::models::qwen_npu::QwenNPUConfig(config_path); + auto model = mllm::models::qwen_npu::QwenForCausalLM("", cfg); +``` + +### 详细说明: + +#### 7.1 QwenNPUConfig(25行) + +- **位置**: `mllm/models/qwen_npu/configuration_qwen_npu.hpp` +- **作用**: 从JSON配置文件加载模型参数 +- **包含的参数**: + - `vocab_size`: 词汇表大小 + - `hidden_size`: 隐藏层维度 + - `num_attention_heads`: 注意力头数 + - `num_key_value_heads`: KV缓存头数(GQA) + - `num_hidden_layers`: Transformer层数 + - `intermediate_size`: MLP中间层维度 + - `max_position_embeddings`: 最大位置编码 + - `rope_theta`: RoPE旋转角度 + - `linear_impl_type`: 线性层实现类型(QNN特定) + +#### 7.2 QwenForCausalLM(26行) + +- **位置**: `mllm/models/qwen_npu/modeling_qwen_npu.hpp:445-454` +- **继承关系**: + - `nn::Module`: 神经网络模块基类 + - `ARGeneration`: 自回归生成接口 +- **构造函数逻辑**: + +```cpp +explicit QwenForCausalLM(const std::string& name, const QwenNPUConfig& cfg) + : cfg(cfg), nn::Module(name) { + // 注册主模型(Transformer堆叠) + model = reg("model", cfg); + + // 注册语言模型头(如果未共享权重) + if (!cfg.tie_word_embeddings) { + lm_head_ = reg("lm_head", cfg.hidden_size, cfg.vocab_size, + false, cfg.linear_impl_type); + } + tie_word_embeddings_ = cfg.tie_word_embeddings; +} +``` + +**`reg<>()` 函数**: +- 注册子模块到当前模块 +- 返回子模块的引用 +- 子模块会被添加到模块树中,用于参数加载和计算图构建 + +## 8. 加载模型参数(28-29行) + +```cpp + auto param = mllm::load(model_path, file_version); + model.load(param); +``` + +### 详细说明: + +#### 8.1 mllm::load()(28行) + +- **作用**: 从.mllm文件加载参数 +- **返回**: `ParameterFile::ptr_t`,包含所有模型权重 +- **内部流程**: + 1. 打开模型文件 + 2. 根据file_version解析文件格式 + 3. 读取所有张量数据(权重、偏置等) + 4. 
返回参数容器 + +#### 8.2 model.load()(29行) + +- **位置**: `nn::Module::load()`(继承自Module基类) +- **作用**: 将参数加载到模型结构中 +- **匹配逻辑**: + - 根据模块名称匹配参数 + - 递归加载子模块参数 + - 将权重张量复制到对应的模块中 + +## 9. 创建Trace输入占位符(31行) + +```cpp + mllm::models::ARGenerationOutputPast inputs{{"sequence", mllm::Tensor::empty({1, 32}, mllm::kInt64, mllm::kCPU).alloc()}}; +``` + +### 详细说明: + +#### 9.1 ARGenerationOutputPast + +- **定义**: `mllm/models/ARGeneration.hpp:17` +```cpp +using ARGenerationOutputPast = std::unordered_map; +``` +- **作用**: 模型输入/输出的键值对容器 +- **常用键**: + - `"sequence"`: 输入token序列 + - `"position_ids"`: 位置编码(可选) + - 其他模型特定的输入 + +#### 9.2 Tensor::empty() + +- **位置**: `mllm/core/Tensor.cpp:70-74` +```cpp +static Tensor empty(const std::vector& shape, DataTypes dtype, DeviceTypes device) { + auto storage = TensorStorage::create(shape, dtype, device); + auto impl = TensorViewImpl::create(shape, storage); + return Tensor(impl); +} +``` +- **参数**: + - `{1, 32}`: shape,batch_size=1, seq_len=32 + - `kInt64`: 数据类型,64位整数 + - `kCPU`: 设备类型,CPU内存 +- **注意**: `empty()` **不分配内存**,只创建Tensor对象 + +#### 9.3 .alloc() + +- **位置**: `mllm/core/Tensor.cpp:63-66` +```cpp +Tensor& alloc() { + Context::instance().memoryManager()->alloc(impl_->storage()); + return *this; +} +``` +- **作用**: + - 通过内存管理器分配实际内存 + - 返回Tensor引用(支持链式调用) +- **内存布局**: 分配 `1 * 32 * sizeof(int64_t) = 256` 字节 + +## 10. Trace构建计算图(33行) + +```cpp + auto irs = model.trace(inputs, {}); +``` + +### 详细说明: + +#### 10.1 trace()方法 + +- **位置**: `mllm/models/qwen_npu/modeling_qwen_npu.hpp:514-563` +- **作用**: 构建计算图的中间表示(IR) +- **输入**: + - `inputs`: 占位符输入(用于确定形状) + - `args`: 额外参数(这里为空) + +#### 10.2 trace()内部流程(实际实现): + +```cpp +IROutput trace(const ARGenerationOutputPast& input, const ARGenerationArgs& args) override { + ir::IRContext::ptr_t llm_ir = nullptr; + + // 1. 开始trace:启用操作记录模式 + ir::lowlevel::traceStart(); + + // 2. 获取输入序列 + auto sequence = input.at("sequence"); + + // 3. 
Trace embedding层 + // 在trace模式下,embedding操作会被记录到IR中 + auto input_embeddings = model.embedding_(sequence); + + // 4. 暂停trace:停止记录操作 + // 接下来的操作(如创建position_ids)不需要被trace + ir::lowlevel::traceYield(); + + // 5. 准备RoPE嵌入(不在trace中) + auto batch_size = sequence.shape()[0]; + auto seq_len = sequence.shape()[1]; + auto position_ids = Tensor::empty({batch_size, seq_len}, kInt64, kCPU); + auto llm_embedding_sin = Tensor::empty({...}, kFloat32, kCPU); + auto llm_embedding_cos = Tensor::empty({...}, kFloat32, kCPU); + + // 6. 继续trace:恢复操作记录 + ir::lowlevel::traceContinue(); + + // 7. Trace模型主体(Transformer层) + // traceModule会记录整个模块的计算图 + auto hidden_states = ir::lowlevel::traceModule( + model, input_embeddings, llm_embedding_sin, llm_embedding_cos)[0]; + + // 8. 截取最后一个位置 + auto S = hidden_states.shape()[1]; + hidden_states = hidden_states[{kAll, {S - 1}, kAll}]; + + // 9. Trace语言模型头 + Tensor logits; + if (!tie_word_embeddings_) { + logits = lm_head_(hidden_states); + } + + // 10. 停止trace并获取IR + llm_ir = ir::lowlevel::traceStop(); + + return {{"model", llm_ir}}; +} +``` + +#### 10.2.1 trace函数说明: + +- **`traceStart()`**: + - 启用trace模式 + - 后续的操作会被记录到IR中 + - 操作不会实际执行,只记录计算图结构 + +- **`traceYield()`**: + - 暂停trace + - 接下来的操作不会被记录 + - 用于执行一些辅助操作(如创建辅助张量) + +- **`traceContinue()`**: + - 恢复trace + - 继续记录操作 + +- **`traceModule()`**: + - 专门用于trace模块(Module) + - 会递归trace模块的所有子操作 + - 返回模块的输出 + +- **`traceStop()`**: + - 停止trace + - 构建最终的IR + - 返回IRContext指针 + +#### 10.3 IR(Intermediate Representation) + +- **结构**: 计算图的节点和边 +- **节点**: 操作(Op),如MatMul、Add、Softmax等 +- **边**: 张量(Tensor)的流动 +- **用途**: + - 图优化 + - 后端代码生成 + - 静态分析 + +#### 10.4 返回值 + +- **类型**: `IROutput = std::unordered_map` +- **内容**: `{{"model", ir_context}}` +- **注意**: 此时KV Cache可能被更新,需要后续清理 + +## 11. 
QNN Graph Rewrite Pass(35-39行) + +```cpp + // QNN Graph Rewrite Pass + mllm::ir::PassManager rewritePM(irs["model"]); + rewritePM.reg(mllm::qnn::createQNNGraphIOTensorPass()); + rewritePM.reg(mllm::qnn::createQNNOpNamingPass()); + rewritePM.run(); +``` + +### 详细说明: + +#### 11.1 PassManager + +- **位置**: `mllm/compile/PassManager.hpp` +- **作用**: 管理IR转换Pass的执行 +- **构造函数**: 接收IR上下文 + +#### 11.2 Pass注册和执行 + +- **`reg()`**: 注册Pass到执行队列 +- **`run()`**: 按顺序执行所有Pass + +#### 11.3 QNNGraphIOTensorPass + +- **作用**: + - 识别输入/输出张量 + - 为QNN图准备IO张量 + - 处理形状信息 + +#### 11.4 QNNOpNamingPass + +- **作用**: + - 为QNN操作生成唯一名称 + - 确保操作名称符合QNN要求 + - 便于调试和日志记录 + +## 12. 输出IR到文件(42行) + +```cpp + mllm::redirect("qwen_npu.mir", [&]() { mllm::print(irs["model"]); }); +``` + +### 详细说明: + +- **`mllm::redirect()`**: 重定向输出到文件 +- **`mllm::print()`**: 打印IR的文本表示 +- **用途**: 调试,查看优化后的计算图结构 +- **文件内容**: MIR(MLLM IR)格式的计算图 + +## 13. QNN Graph Build Pass(44-47行) + +```cpp + // QNN Graph Build Pass + mllm::ir::PassManager graphBuildPM(irs["model"]); + graphBuildPM.reg(mllm::qnn::createQNNGraphBuildPass()); + graphBuildPM.run(); +``` + +### 详细说明: + +#### 13.1 QNNGraphBuildPass + +- **位置**: `mllm/backends/qnn/passes/QNNGraphBuildPass.hpp` +- **作用**: 将MLLM IR转换为QNN图 +- **转换过程**: + 1. **遍历IR节点**: 访问计算图中的每个操作节点 + 2. **操作映射**: 将MLLM操作映射到QNN操作 + - MatMul → QNN MatMul + - Add → QNN ElementWiseAdd + - Softmax → QNN Softmax + - 等等 + 3. **创建QNN图**: 使用QNN API创建图结构 + 4. **图优化**: + - 操作融合(如MatMul+Add → FullyConnected) + - 量化处理(w8a16量化) + - 内存优化 + 5. **编译图**: 编译为QNN可执行图 +- **结果**: + - 模型可以在QNN运行时执行 + - 图被编译并优化,准备在NPU上运行 + - 后续forward()调用会使用这个编译好的图 + +## 14. 清空KV Cache(50行) + +```cpp + // cache has been updated due to trace, clear cache + model.model.clearKVCache(); +``` + +### 详细说明: + +- **原因**: trace过程中可能执行了前向传播,更新了KV Cache +- **作用**: 清空所有层的KV Cache,准备新的推理 +- **位置**: `QwenText::clearKVCache()`,递归清空所有注意力层的缓存 + +## 15. 
分词输入文本(52-53行) + +```cpp + auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "How are you?"})["sequence"]; + print(raw_input_tokens); +``` + +### 详细说明: + +#### 15.1 convertMessage() + +- **输入**: `QwenMessage` 结构,包含 `prompt` 字段 +- **处理流程**: + 1. 应用消息模板(添加系统提示等) + 2. BPE分词 + 3. 词汇表查找,转换为token IDs + 4. 创建Tensor并返回 + +#### 15.2 返回值 + +- **类型**: `ARGenerationOutputPast` +- **内容**: `{{"sequence", Tensor}}` +- **Tensor形状**: `[1, token_count]`,例如 `[1, 15]` + +#### 15.3 print() + +- **作用**: 打印Tensor内容(用于调试) +- **输出**: token IDs数组 + +## 16. 手动填充输入(55-59行) + +```cpp + // manually set input data as fill op is not supported in QNN + auto ptr = inputs["sequence"].ptr(); + auto input_data = raw_input_tokens.ptr(); + for (int i = 0; i < raw_input_tokens.shape()[1]; ++i) { ptr[i] = input_data[i]; } + for (int i = raw_input_tokens.shape()[1]; i < 32; ++i) { ptr[i] = -1; } +``` + +### 详细说明: + +#### 16.1 为什么手动填充? + +- **原因**: QNN后端不支持Fill操作 +- **解决方案**: 在CPU上手动填充,然后传递给QNN + +#### 16.2 ptr() + +- **位置**: `mllm/core/Tensor.hpp` +- **作用**: 获取张量的原始指针 +- **类型**: `int64_t*` +- **注意**: 必须确保张量已分配内存(已调用alloc()) + +#### 16.3 填充逻辑 + +```cpp +// 1. 复制有效token +for (int i = 0; i < raw_input_tokens.shape()[1]; ++i) { + ptr[i] = input_data[i]; +} + +// 2. 填充padding(-1表示无效位置) +for (int i = raw_input_tokens.shape()[1]; i < 32; ++i) { + ptr[i] = -1; +} +``` + +**结果**: +- 前15个位置:有效token IDs +- 后17个位置:-1(padding) + +## 17. 前向推理(61行) + +```cpp + auto out = model.forward(inputs, {{"seq_len", mllm::AnyValue((int)raw_input_tokens.shape()[1])}})["sequence"]; +``` + +### 详细说明: + +#### 17.1 forward()方法 + +- **位置**: `mllm/models/qwen_npu/modeling_qwen_npu.hpp:456-512` +- **签名**: +```cpp +ARGenerationOutputPast forward( + const ARGenerationOutputPast& input, + const ARGenerationArgs& args +) override +``` + +#### 17.2 forward()内部流程: + +```cpp +ARGenerationOutputPast forward(...) { + // 1. 
获取输入序列 + auto sequence = input.at("sequence"); + auto batch_size = sequence.shape()[0]; // 1 + auto seq_len = sequence.shape()[1]; // 32 + + // 2. 获取真实序列长度 + auto real_seq = args.count("seq_len") + ? args.at("seq_len").get() // 15 + : seq_len; // 32(fallback) + + // 3. 生成位置编码 + Tensor position_ids = Tensor::empty({batch_size, seq_len}, kInt64, kCPU).alloc(); + // 填充 [0, 1, 2, ..., 31] + + // 4. 生成RoPE嵌入 + auto [llm_embedding_sin, llm_embedding_cos] = + makeRotaryPosEmbedding(position_ids, model.getBuffer("inv_freq"), 1.0f); + + // 5. 文本嵌入 + auto input_embeddings = model.embedding_(sequence); + // shape: [1, 32, hidden_size] + // + // 注意:QwenText中的embedding使用了QNN版本 + // (modeling_qwen_npu.hpp:415: embedding_.to(kQNN)) + // QNN版本的embedding会特殊处理padding token(-1): + // - 可能映射到特殊的embedding向量 + // - 或返回零向量 + // - 确保padding位置不影响计算 + + // 6. Transformer前向传播 + auto hidden_states = model(input_embeddings, llm_embedding_sin, llm_embedding_cos)[0]; + // shape: [1, 32, hidden_size] + + // 7. 截取有效部分 + hidden_states = hidden_states[{kAll, {real_seq - 1}, kAll}]; + // shape: [1, 1, hidden_size](只取最后一个有效位置) + + // 8. 语言模型头 + Tensor logits; + if (!tie_word_embeddings_) { + logits = lm_head_(hidden_states); + } else { + // 共享权重:使用embedding权重 + auto emb_w = model.embedding_.weight(); + logits = nn::functional::matmul(hidden_states, emb_w, false, true); + } + // shape: [1, 1, vocab_size] + + return { + {"sequence", logits}, + {"position_ids", position_ids} + }; +} +``` + +#### 17.3 关键点: + +1. **real_seq参数**: 告知模型真实序列长度,用于: + - 截取输出(只取最后一个有效位置) + - 可能影响attention mask(虽然当前实现可能未完全处理) + +2. **位置编码**: 生成 `[0, 1, 2, ..., 31]`,即使有padding + +3. **Embedding处理**: + - token ID `-1` 可能被映射为特殊embedding + - 或返回零向量 + +4. 
**输出截取**: + ```cpp + hidden_states[{kAll, {real_seq - 1}, kAll}] + ``` + - `kAll`: 保留batch维度 + - `{real_seq - 1}`: 只取第real_seq-1个位置(最后一个有效位置) + - `kAll`: 保留hidden维度 + +#### 17.4 返回值 + +- **类型**: `ARGenerationOutputPast` +- **内容**: + - `"sequence"`: logits,形状 `[1, 1, vocab_size]` + - `"position_ids"`: 位置编码,形状 `[1, 32]` + +## 18. 采样(63行) + +```cpp + auto sampled = model.sampleGreedy(out); +``` + +### 详细说明: + +#### 18.1 sampleGreedy() + +- **位置**: `mllm/models/ARGeneration.cpp` +- **作用**: 贪心采样,选择概率最高的token +- **实现**: + +```cpp +int64_t ARGeneration::sampleGreedy(Tensor& logits) { + // 1. 获取最后一个位置的logits + auto last_logits = getLastLogits(logits); + // shape: [vocab_size] + + // 2. 找到最大值索引 + int64_t max_idx = 0; + float max_val = last_logits.ptr()[0]; + for (int i = 1; i < vocab_size; ++i) { + if (last_logits.ptr()[i] > max_val) { + max_val = last_logits.ptr()[i]; + max_idx = i; + } + } + + return max_idx; +} +``` + +- **输入**: logits,形状 `[1, 1, vocab_size]` +- **输出**: token ID(int64_t) + +## 19. 输出结果(64行) + +```cpp + std::wcout << "token: " << sampled << " " << qwen_tokenizer.detokenize(sampled) << "\n"; +``` + +### 详细说明: + +#### 19.1 detokenize() + +- **位置**: `mllm/models/qwen_npu/tokenization_qwen.hpp` +- **作用**: 将token ID转换回文本 +- **流程**: + 1. 查找词汇表,获取token字符串 + 2. 合并BPE tokens + 3. 解码为UTF-8文本 + +#### 19.2 输出 + +- **格式**: `token: ` +- **示例**: `token: 1234 I'm` + +## 20. 返回(66行) + +```cpp + return 0; +``` + +### 详细说明: + +- 返回0表示程序成功执行 +- `MLLM_MAIN`宏会捕获返回值并传递给系统 + +## 数据流总结 + +### 完整执行流程 + +``` +┌─────────────────────────────────────────────────────────────┐ +│ 1. 初始化阶段 │ +├─────────────────────────────────────────────────────────────┤ +│ initQnnBackend() │ +│ └─> 注册QNN后端操作 │ +│ └─> 初始化QNN运行时环境 │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 2. 
模型加载阶段 │ +├─────────────────────────────────────────────────────────────┤ +│ QwenNPUConfig(config_path) │ +│ └─> 从JSON读取模型配置 │ +│ └─> 解析架构参数(hidden_size, num_layers等) │ +│ └─> 解析量化配置(w8a16) │ +│ │ +│ QwenForCausalLM("", cfg) │ +│ └─> 创建模型结构 │ +│ └─> 注册子模块(QwenText, lm_head等) │ +│ │ +│ mllm::load(model_path, file_version) │ +│ └─> 打开.mllm文件 │ +│ └─> 读取所有权重张量 │ +│ └─> 返回ParameterFile │ +│ │ +│ model.load(param) │ +│ └─> 递归加载子模块参数 │ +│ └─> 将权重复制到对应模块 │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 3. 图构建阶段(Trace) │ +├─────────────────────────────────────────────────────────────┤ +│ inputs = {{"sequence", Tensor::empty({1, 32}, ...)}} │ +│ └─> 创建占位符输入(用于确定形状) │ +│ │ +│ model.trace(inputs, {}) │ +│ ├─> traceStart() │ +│ ├─> model.embedding_(sequence) [记录到IR] │ +│ ├─> traceYield() │ +│ ├─> 创建position_ids, RoPE嵌入 [不记录] │ +│ ├─> traceContinue() │ +│ ├─> traceModule(model, ...) [记录整个模型] │ +│ ├─> lm_head_(hidden_states) [记录到IR] │ +│ └─> traceStop() → 返回IR │ +│ │ +│ PassManager: QNNGraphIOTensorPass │ +│ └─> 识别输入/输出张量 │ +│ └─> 为QNN图准备IO张量 │ +│ │ +│ PassManager: QNNOpNamingPass │ +│ └─> 为QNN操作生成唯一名称 │ +│ │ +│ PassManager: QNNGraphBuildPass │ +│ └─> 将MLLM IR转换为QNN图 │ +│ └─> 操作映射(MatMul → QNN MatMul) │ +│ └─> 图优化(融合、量化) │ +│ └─> 编译为QNN可执行图 │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 4. 推理准备阶段 │ +├─────────────────────────────────────────────────────────────┤ +│ model.model.clearKVCache() │ +│ └─> 清空所有注意力层的KV Cache │ +│ │ +│ qwen_tokenizer.convertMessage({.prompt = "How are you?"}) │ +│ ├─> 应用消息模板 │ +│ ├─> BPE分词 │ +│ ├─> 词汇表查找 → token IDs │ +│ └─> 创建Tensor: [1, 15] │ +│ │ +│ 手动填充输入 │ +│ ├─> 复制有效token (0-14) │ +│ └─> 填充padding (-1) (15-31) │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 5. 
前向推理阶段 │ +├─────────────────────────────────────────────────────────────┤ +│ model.forward(inputs, {{"seq_len", 15}}) │ +│ ├─> 获取输入序列 [1, 32] │ +│ ├─> 生成position_ids [0, 1, 2, ..., 31] │ +│ ├─> 生成RoPE嵌入 (sin, cos) │ +│ ├─> model.embedding_(sequence) │ +│ │ └─> QNN版本处理padding token (-1) │ +│ │ └─> 输出: [1, 32, hidden_size] │ +│ ├─> model(input_embeddings, sin, cos) │ +│ │ ├─> 遍历所有Transformer层 │ +│ │ ├─> 每层: Attention + MLP │ +│ │ └─> 输出: [1, 32, hidden_size] │ +│ ├─> 截取最后一个有效位置 │ +│ │ └─> hidden_states[{kAll, {14}, kAll}] │ +│ │ └─> 输出: [1, 1, hidden_size] │ +│ ├─> lm_head_(hidden_states) │ +│ │ └─> 输出: [1, 1, vocab_size] │ +│ └─> 返回: {{"sequence", logits}, {"position_ids", ...}} │ +└─────────────────────────────────────────────────────────────┘ + ↓ +┌─────────────────────────────────────────────────────────────┐ +│ 6. 采样和输出阶段 │ +├─────────────────────────────────────────────────────────────┤ +│ model.sampleGreedy(out) │ +│ ├─> 获取最后一个位置的logits │ +│ ├─> 找到最大值索引 │ +│ └─> 返回token ID (int64_t) │ +│ │ +│ qwen_tokenizer.detokenize(sampled) │ +│ ├─> 查找词汇表 │ +│ ├─> 合并BPE tokens │ +│ └─> 解码为UTF-8文本 │ +└─────────────────────────────────────────────────────────────┘ +``` + +### 关键数据流 + +``` +文本输入: "How are you?" + ↓ +分词器: convertMessage() + ↓ +Token IDs: [1234, 5678, 9012, ...] (15个tokens) + ↓ +填充: [1234, 5678, ..., -1, -1, ...] (32个位置) + ↓ +Embedding: [1, 32, hidden_size] + ↓ +Transformer层 × N: [1, 32, hidden_size] + ↓ +截取: [1, 1, hidden_size] (只取最后一个有效位置) + ↓ +LM Head: [1, 1, vocab_size] + ↓ +采样: token ID (int64_t) + ↓ +Detokenize: "I'm" +``` + +## 关键数据结构 + +### Tensor +- **位置**: `mllm/core/Tensor.hpp` +- **组成**: + - `TensorViewImpl`: 视图实现(形状、步长) + - `TensorStorage`: 存储(实际数据) +- **生命周期**: + 1. `Tensor::empty()`: 创建对象(无内存) + 2. `.alloc()`: 分配内存 + 3. 使用指针操作数据 + 4. 
自动析构释放内存 +- **设备管理**: + - 默认在CPU上创建 + - 可以通过`.to(device)`转换设备 + - QNN后端会自动处理设备转换 + +### 设备类型 +- **`kCPU`**: CPU内存,用于: + - 输入/输出张量 + - 辅助计算(如position_ids) + - 不支持QNN的操作 +- **`kQNN`**: QNN设备,用于: + - 模型权重 + - 主要计算(在NPU上执行) + - 需要QNN支持的操作 +- **设备转换**: + - 自动转换:操作会自动将输入转换到正确设备 + - 手动转换:`.to(device)`显式转换 + - 注意:QNN操作要求输入在QNN设备上 + +### ARGenerationOutputPast +- **类型**: `std::unordered_map` +- **用途**: 模型输入/输出的统一接口 +- **键**: 模型特定的字符串标识符 + +### IRContext +- **位置**: `mllm/compile/ir/` +- **组成**: 计算图的节点和边 +- **用途**: 图优化和代码生成 + +## 与其他文件的连接 + +1. **模型定义**: `modeling_qwen_npu.hpp` + - `QwenForCausalLM`: 主模型类(继承ARGeneration) + - `forward()`: 前向传播实现 + - `trace()`: 图构建实现 + - `QwenText`: Transformer堆叠(modeling_qwen_npu.hpp:403) + - `decode_blocks_`: 多层QwenDecoder + - `norm_`: RMSNorm层 + - `embedding_`: 词嵌入层(使用QNN版本处理padding) + - `forward()`: 执行所有Transformer层 + - `QwenDecoder`: 单个Transformer层 + - 包含注意力层和MLP层 + - `QwenAttentionMatmul`: 注意力层 + - 包含QKV投影、RoPE、CausalMask、Softmax等 + - `QwenMLP`: MLP层 + - Gate、Up、Down投影,SiLU激活 + +2. **分词器**: `tokenization_qwen.hpp` + - `QwenTokenizer`: 分词器类 + - BPE实现 + +3. **后端**: `mllm/backends/qnn/` + - QNN操作实现 + - Pass实现 + - 运行时集成 + +4. **核心**: `mllm/core/` + - `Tensor`: 张量实现 + - `Context`: 全局上下文 + - `MemoryManager`: 内存管理 + +5. 
**编译**: `mllm/compile/` + - `PassManager`: Pass管理 + - `ir/`: IR定义和操作 + diff --git a/examples/qwen_npu/main.cpp b/examples/qwen_npu/main.cpp index 97c54b4ce..9cd4a2d85 100644 --- a/examples/qwen_npu/main.cpp +++ b/examples/qwen_npu/main.cpp @@ -4,11 +4,16 @@ #include #include "mllm/backends/qnn/passes/QNNGraphBuildPass.hpp" -#include "mllm/backends/qnn/passes/QNNGraphBuildPipeline.hpp" +#include "mllm/backends/qnn/passes/QNNGraphIOTensorPass.hpp" +#include "mllm/backends/qnn/passes/QNNOpNamingPass.hpp" +#include "mllm/backends/qnn/QNNAllocator.hpp" #include "mllm/compile/PassManager.hpp" #include "mllm/core/DataTypes.hpp" +#include "mllm/engine/Context.hpp" #include "mllm/models/qwen_npu/tokenization_qwen.hpp" #include "mllm/models/qwen_npu/modeling_qwen_npu.hpp" +#include "mllm/utils/Common.hpp" +#include "mllm/utils/Log.hpp" using mllm::Argparse; @@ -28,7 +33,7 @@ MLLM_MAIN({ auto param = mllm::load(model_path, file_version); model.load(param); - mllm::models::ARGenerationOutputPast inputs{{"sequence", mllm::Tensor::empty({1, 32}, mllm::kInt64, mllm::kCPU).alloc()}}; + mllm::models::ARGenerationOutputPast inputs{{"sequence", mllm::Tensor::empty({1, 128}, mllm::kInt64, mllm::kCPU).alloc()}}; auto irs = model.trace(inputs, {}); @@ -46,22 +51,132 @@ MLLM_MAIN({ graphBuildPM.reg(mllm::qnn::createQNNGraphBuildPass()); graphBuildPM.run(); + // Debug: Check registered buffer count after graph build + { + auto qnn_backend = mllm::Context::instance().getBackend(mllm::kQNN); + if (qnn_backend) { + auto allocator = std::static_pointer_cast(qnn_backend->allocator()); + if (allocator) { + auto stats = allocator->getRegisteredBufferStats(); + MLLM_INFO("After graph build: {} buffers registered, {} MB", stats.count, stats.total_bytes / (1024 * 1024)); + } + } + } + // cache has been updated due to trace, clear cache model.model.clearKVCache(); auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "How are you?"})["sequence"]; print(raw_input_tokens); + 
MLLM_INFO("raw_input_tokens shape: {} {}", raw_input_tokens.shape()[0], raw_input_tokens.shape()[1]); + + const int chunk_size = 128; + int real_seq = static_cast(raw_input_tokens.shape()[1]); + const int eos_token_id = 151645; + if (real_seq <= 0 || real_seq >= chunk_size) { + MLLM_ERROR_EXIT(mllm::ExitCode::kShapeError, "Invalid input length {} for chunk size {}", real_seq, chunk_size); + } // manually set input data as fill op is not supported in QNN auto ptr = inputs["sequence"].ptr(); auto input_data = raw_input_tokens.ptr(); - for (int i = 0; i < raw_input_tokens.shape()[1]; ++i) { ptr[i] = input_data[i]; } - for (int i = raw_input_tokens.shape()[1]; i < 32; ++i) { ptr[i] = -1; } - - auto out = model.forward(inputs, {{"seq_len", mllm::AnyValue((int)raw_input_tokens.shape()[1])}})["sequence"]; - - auto sampled = model.sampleGreedy(out); - std::wcout << "token: " << sampled << " " << qwen_tokenizer.detokenize(sampled) << "\n"; + for (int i = 0; i < real_seq; ++i) { ptr[i] = input_data[i]; } + for (int i = real_seq; i < chunk_size; ++i) { ptr[i] = -1; } + + // Prefill + MLLM_INFO("=== Prefill Phase ==="); + MLLM_INFO("Input sequence length: {}", real_seq); + auto prefill_output = model.forward(inputs, {{"seq_len", mllm::AnyValue(real_seq)}}); + auto& prefill_logits = prefill_output["sequence"]; + auto sampled = model.sampleGreedy(prefill_logits); + prefill_logits.delete_(); + prefill_output.erase("sequence"); + MLLM_INFO("Prefill generated token id: {}", sampled); + std::wcout << qwen_tokenizer.detokenize(sampled); + + // Decode loop + int current_seq_len = real_seq; + auto& sequence_tensor = inputs["sequence"]; + auto sequence_ptr = sequence_tensor.ptr(); + + // write first token into padding + sequence_ptr[current_seq_len] = sampled; + current_seq_len++; + + // carry past (position_ids) from prefill + mllm::models::ARGenerationOutputPast past{{"position_ids", prefill_output["position_ids"]}}; + prefill_output.clear(); + + // Debug: Check registered buffer 
count after prefill + { + auto qnn_backend = mllm::Context::instance().getBackend(mllm::kQNN); + if (qnn_backend) { + auto allocator = std::static_pointer_cast(qnn_backend->allocator()); + if (allocator) { + auto stats = allocator->getRegisteredBufferStats(); + MLLM_INFO("After prefill: {} buffers registered, {} MB", stats.count, stats.total_bytes / (1024 * 1024)); + } + } + } + + MLLM_INFO("=== Decode Phase ==="); + MLLM_INFO("Starting decode loop, initial seq_len: {}", current_seq_len); + + int decode_step = 0; + mllm::Tensor decode_token_tensor = mllm::Tensor::empty({1, 1}, mllm::kInt64, mllm::kCPU).alloc(); + mllm::models::ARGenerationOutputPast decode_input{ + {"sequence", decode_token_tensor}, + {"position_ids", past["position_ids"]}, + }; + + while (current_seq_len < chunk_size) { + decode_step++; + MLLM_INFO("--- Decode Step {} ---", decode_step); + MLLM_INFO("Current sequence length: {}", current_seq_len); + + // update KV cache sequence length across layers + model.setKVCacheSeqCnt(current_seq_len); + + // reuse a single CPU tensor for decode token to avoid repeated QNN allocations + decode_token_tensor.ptr()[0] = sequence_ptr[current_seq_len - 1]; + + // pass through latest position ids returned from previous forward call + decode_input["position_ids"] = past["position_ids"]; + MLLM_INFO("Decode input token: {}", sequence_ptr[current_seq_len - 1]); + + // forward for next token logits + auto decode_output = model.forward(decode_input, {{"seq_len", mllm::AnyValue(current_seq_len)}}); + + // sample next token + auto& decode_logits = decode_output["sequence"]; + auto next_token = model.sampleGreedy(decode_logits); + MLLM_INFO("Generated token id: {}", next_token); + std::wcout << qwen_tokenizer.detokenize(next_token); + + if (next_token == eos_token_id) { + MLLM_INFO("EOS token detected, stopping decode"); + break; + } + + // write token into sequence buffer + sequence_ptr[current_seq_len] = next_token; + current_seq_len++; + MLLM_INFO("Updated sequence 
length: {}", current_seq_len); + + // carry past (only keep position_ids to avoid leaking QNN buffers) + decode_logits.delete_(); + decode_output.erase("sequence"); + auto position_ids = decode_output["position_ids"]; + decode_output.erase("position_ids"); + past = {{"position_ids", position_ids}}; + decode_input["position_ids"] = past["position_ids"]; + } + + MLLM_INFO("=== Decode Complete ==="); + MLLM_INFO("Total decode steps: {}", decode_step); + MLLM_INFO("Final sequence length: {}", current_seq_len); + MLLM_INFO("Remaining capacity: {}", chunk_size - current_seq_len); + std::wcout << L"\n"; return 0; -}) \ No newline at end of file +}) diff --git a/mllm/backends/cpu/ops/KVCacheOp.cpp b/mllm/backends/cpu/ops/KVCacheOp.cpp index 7847a1fb7..04d8f254e 100644 --- a/mllm/backends/cpu/ops/KVCacheOp.cpp +++ b/mllm/backends/cpu/ops/KVCacheOp.cpp @@ -44,4 +44,6 @@ void CPUKVCacheOp::forward(const std::vector& inputs, std::vector +#include #include namespace mllm::qnn { +namespace { +constexpr bool kVerboseQnnAllocatorLogs = false; +} // namespace + +#define QNN_ALLOCATOR_VERBOSE(...) 
\ + do { \ + if constexpr (kVerboseQnnAllocatorLogs) { MLLM_INFO(__VA_ARGS__); } \ + } while (0) + // specified in QNN doc #define RPCMEM_HEAP_ID_SYSTEM 25 #define RPCMEM_DEFAULT_FLAGS 1 @@ -34,8 +46,26 @@ QNNAllocator::QNNAllocator(QNN_INTERFACE_VER_TYPE qnnInterface, void* context) rpcmem_to_fd = (RpcMemToFdFn_t)dlsym(libCdspHandle, "rpcmem_to_fd"); } +QNNAllocator::~QNNAllocator() { + for (auto iter = ptrToFdAndMemHandleMap_.begin(); iter != ptrToFdAndMemHandleMap_.end();) { + Qnn_ErrorHandle_t deregisterRet = qnnInterface_.memDeRegister(&iter->second.second, 1); + if (QNN_SUCCESS != deregisterRet) { + MLLM_WARN("~QNNAllocator: memDeRegister failed during shutdown, status=0x{:x}", deregisterRet); + } + qnnMemPtrSet_.erase(iter->first); + rpcmem_free(iter->first); + iter = ptrToFdAndMemHandleMap_.erase(iter); + } + + for (void* ptr : qnnMemPtrSet_) { + rpcmem_free(ptr); + } + qnnMemPtrSet_.clear(); +} + bool QNNAllocator::alloc(Storage* storage) { - uint8_t* ptr = (uint8_t*)rpcmem_alloc(RPCMEM_HEAP_ID_SYSTEM, RPCMEM_DEFAULT_FLAGS, allocSize(storage)); + size_t request_bytes = allocSize(storage); + uint8_t* ptr = (uint8_t*)rpcmem_alloc(RPCMEM_HEAP_ID_SYSTEM, RPCMEM_DEFAULT_FLAGS, request_bytes); MLLM_RT_ASSERT(ptr != nullptr); @@ -46,24 +76,234 @@ bool QNNAllocator::alloc(Storage* storage) { } void QNNAllocator::free(Storage* storage) { - if (ptrToFdAndMemHandleMap_.count(storage->ptr_)) { - MLLM_RT_ASSERT_EQ(QNN_SUCCESS, - qnnInterface_.memDeRegister(&(ptrToFdAndMemHandleMap_.find(storage->ptr_)->second.second), 1)); + auto ptr = storage->ptr_; + + // Early return if ptr is nullptr or not in qnnMemPtrSet_ (already freed or never allocated) + if (ptr == nullptr) { + // too noisy during decode; silently ignore nullptr frees + return; + } + + if (qnnMemPtrSet_.count(ptr) == 0) { + QNN_ALLOCATOR_VERBOSE("QNNAllocator::free called for ptr={} that is not in qnnMemPtrSet_, ignoring", ptr); + return; } - rpcmem_free(storage->ptr_); + void* alternative_ptr = nullptr; // 
Another ptr using the same mem_handle, if any + + if (ptrToFdAndMemHandleMap_.count(ptr)) { + auto iter = ptrToFdAndMemHandleMap_.find(ptr); + auto mem_handle = iter->second.second; + + // Check if any other ptr is using the same mem_handle + for (const auto& [other_ptr, fd_and_handle] : ptrToFdAndMemHandleMap_) { + if (other_ptr != ptr && fd_and_handle.second == mem_handle) { + alternative_ptr = other_ptr; + break; + } + } + + // Only deRegister if this is the last ptr using this mem_handle + if (alternative_ptr == nullptr) { + auto status = qnnInterface_.memDeRegister(&mem_handle, 1); + if (status != QNN_SUCCESS) { + MLLM_WARN("QNNAllocator::free memDeRegister failed, status=0x{:x}, ptr={}, fd={}", status, ptr, iter->second.first); + } + // Remove from ptrToFdAndMemHandleMap_ and ptrToSizeMap_ + // The actual buffer will be freed later in the function + ptrToFdAndMemHandleMap_.erase(iter); + ptrToSizeMap_.erase(ptr); + } else { + QNN_ALLOCATOR_VERBOSE("QNNAllocator::free skipping deRegister for ptr={} because other ptrs use the mem_handle", ptr); + ptrToFdAndMemHandleMap_.erase(iter); + ptrToSizeMap_.erase(ptr); + } + } else { + // ptr is in qnnMemPtrSet_ but not in ptrToFdAndMemHandleMap_ + // This means it was allocated but never registered (e.g., memRegister failed) + // Just free the buffer without deRegister + QNN_ALLOCATOR_VERBOSE("QNNAllocator::free freeing unregistered buffer ptr={}", ptr); + qnnMemPtrSet_.erase(ptr); + rpcmem_free(ptr); + + // Clear mappings that point to this ptr + for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { + if (it->second == ptr) { + it = tensorIdToPtrMap_.erase(it); + } else { + ++it; + } + } + for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { + if (it->second == ptr) { + it = tensorNameToPtrMap_.erase(it); + } else { + ++it; + } + } + return; + } + + // Update or keep tensor ID and name mappings + // If mem_handle is still in use (alternative_ptr exists), update mappings 
to point to alternative_ptr + // Otherwise, free the buffer and clear mappings + if (alternative_ptr != nullptr) { + // Update mappings to point to alternative_ptr instead of deleting them + for (auto& entry : tensorIdToPtrMap_) { + if (entry.second == ptr) { entry.second = alternative_ptr; } + } + for (auto& entry : tensorNameToPtrMap_) { + if (entry.second == ptr) { entry.second = alternative_ptr; } + } + // Don't free the buffer here since alternative_ptr is still using it + qnnMemPtrSet_.erase(ptr); + } else { + // Since QNN doesn't support re-registering a deRegistered buffer (fd may be invalidated), + // we should free the buffer immediately even if there are mappings. + // The decode phase will allocate a new buffer when needed. + qnnMemPtrSet_.erase(ptr); + rpcmem_free(ptr); + + // Clear mappings that point to this ptr + for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { + if (it->second == ptr) { + it = tensorIdToPtrMap_.erase(it); + } else { + ++it; + } + } + for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { + if (it->second == ptr) { + it = tensorNameToPtrMap_.erase(it); + } else { + ++it; + } + } + } + storage->ptr_ = nullptr; } -void QNNAllocator::registerQnnTensorToSharedBuffer(void* ptr, Qnn_Tensor_t& qnn_tensor) { +bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_t& qnn_tensor) { + MLLM_RT_ASSERT(storage != nullptr); + void* ptr = storage->ptr_; + // Make sure there has a memory that we can register to. + MLLM_RT_ASSERT(ptr != nullptr); MLLM_RT_ASSERT(qnnMemPtrSet_.count(ptr)); + auto original_mem_type = QNN_TENSOR_GET_MEM_TYPE(qnn_tensor); + Qnn_MemHandle_t original_mem_handle = QNN_TENSOR_GET_MEM_HANDLE(qnn_tensor); + + uint32_t tensor_id = QNN_TENSOR_GET_ID(qnn_tensor); + const char* tensor_name_cstr = QNN_TENSOR_GET_NAME(qnn_tensor); + std::string tensor_name = tensor_name_cstr ? 
tensor_name_cstr : "unknown"; + + uint32_t rank = QNN_TENSOR_GET_RANK(qnn_tensor); + uint32_t* dims_ptr = QNN_TENSOR_GET_DIMENSIONS(qnn_tensor); + Qnn_DataType_t data_type = QNN_TENSOR_GET_DATA_TYPE(qnn_tensor); + + size_t element_bytes = 0; + if (auto it = QNNDataTypeToSize.find(data_type); it != QNNDataTypeToSize.end()) { element_bytes = it->second; } + + size_t element_cnt = 1; + std::vector dims; + dims.reserve(rank); + for (uint32_t i = 0; i < rank; ++i) { + uint32_t dim = dims_ptr ? dims_ptr[i] : 0; + dims.push_back(dim); + element_cnt *= (dim == 0 ? 1 : dim); + } + size_t total_bytes = element_cnt * element_bytes; + + std::string shape_str = "[]"; + if (!dims.empty()) { + shape_str = "["; + for (size_t i = 0; i < dims.size(); ++i) { + shape_str += std::to_string(dims[i]); + if (i + 1 < dims.size()) { shape_str += ", "; } + } + shape_str += "]"; + } + + QNN_ALLOCATOR_VERBOSE( + "registerQnnTensorToSharedBuffer: ptr={}, tensor_id={}, tensor_name={}, tensorIdToPtrMap_.size()={}", ptr, tensor_id, + tensor_name, tensorIdToPtrMap_.size()); + + auto updateMappings = [&](void* mapped_ptr) { + tensorIdToPtrMap_[tensor_id] = mapped_ptr; + if (tensor_name != "unknown") { tensorNameToPtrMap_[tensor_name] = mapped_ptr; } + ptrToSizeMap_[mapped_ptr] = total_bytes; + }; + + auto reuseExistingBuffer = [&](void* existing_ptr) -> bool { + auto fd_handle_iter = ptrToFdAndMemHandleMap_.find(existing_ptr); + if (fd_handle_iter == ptrToFdAndMemHandleMap_.end()) { return false; } + + Qnn_MemHandle_t existing_mem_handle = fd_handle_iter->second.second; + size_t existing_size = ptrToSizeMap_.count(existing_ptr) > 0 ? 
ptrToSizeMap_[existing_ptr] : 0; + + if (existing_ptr != ptr) { + size_t bytes_to_copy = total_bytes; + if (existing_size > 0) { bytes_to_copy = std::min(bytes_to_copy, existing_size); } + if (bytes_to_copy > 0) { std::memcpy(existing_ptr, ptr, bytes_to_copy); } + + if (qnnMemPtrSet_.count(ptr) > 0) { + qnnMemPtrSet_.erase(ptr); + rpcmem_free(ptr); + } + storage->ptr_ = existing_ptr; + } + + QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, QNN_TENSORMEMTYPE_MEMHANDLE); + QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, existing_mem_handle); + updateMappings(existing_ptr); + return true; + }; + // if already registered, just set the mem handle if (ptrToFdAndMemHandleMap_.count(ptr) > 0) { Qnn_MemHandle_t mem_handle = ptrToFdAndMemHandleMap_[ptr].second; QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, QNN_TENSORMEMTYPE_MEMHANDLE); QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, mem_handle); - return; + updateMappings(ptr); + return true; + } + + // Check if we can reuse an existing buffer for the same tensor ID + if (tensorIdToPtrMap_.count(tensor_id) > 0) { + void* existing_ptr = tensorIdToPtrMap_[tensor_id]; + QNN_ALLOCATOR_VERBOSE("Found existing mapping for tensor_id={}: existing_ptr={}", tensor_id, existing_ptr); + + if (existing_ptr == nullptr) { + QNN_ALLOCATOR_VERBOSE( + "Existing mapping for tensor_id={} has nullptr ptr (buffer was freed), will register new buffer", tensor_id); + tensorIdToPtrMap_.erase(tensor_id); + } else if (reuseExistingBuffer(existing_ptr)) { + return true; + } else { + MLLM_WARN("Existing ptr {} for tensor_id={} is no longer registered, removing from map", existing_ptr, tensor_id); + tensorIdToPtrMap_.erase(tensor_id); + } + } else { + QNN_ALLOCATOR_VERBOSE("No existing mapping found for tensor_id={}", tensor_id); + } + + // Also check by tensor name as fallback (in case ID changed) + if (tensor_name != "unknown" && tensorNameToPtrMap_.count(tensor_name) > 0) { + void* existing_ptr = tensorNameToPtrMap_[tensor_name]; + QNN_ALLOCATOR_VERBOSE("Found existing mapping for 
tensor_name={}: existing_ptr={}", tensor_name, existing_ptr); + + if (existing_ptr == nullptr) { + QNN_ALLOCATOR_VERBOSE( + "Existing mapping for tensor_name={} has nullptr ptr (mem_handle was deRegistered), will register new buffer", + tensor_name); + tensorNameToPtrMap_.erase(tensor_name); + } else if (reuseExistingBuffer(existing_ptr)) { + return true; + } else { + MLLM_WARN("Existing ptr {} for tensor_name={} is no longer registered", existing_ptr, tensor_name); + tensorNameToPtrMap_.erase(tensor_name); + } } // Get the file id of this memory space. @@ -73,30 +313,123 @@ void QNNAllocator::registerQnnTensorToSharedBuffer(void* ptr, Qnn_Tensor_t& qnn_ // Make qnn memory descriptor. Set ION. Qnn_MemDescriptor_t mem_descriptor = QNN_MEM_DESCRIPTOR_INIT; mem_descriptor.memShape = { - .numDim = QNN_TENSOR_GET_RANK(qnn_tensor), - .dimSize = QNN_TENSOR_GET_DIMENSIONS(qnn_tensor), + .numDim = rank, + .dimSize = dims_ptr, .shapeConfig = nullptr, }; - mem_descriptor.dataType = QNN_TENSOR_GET_DATA_TYPE(qnn_tensor); + mem_descriptor.dataType = data_type; mem_descriptor.memType = QNN_MEM_TYPE_ION; mem_descriptor.ionInfo.fd = mem_fd; QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, QNN_TENSORMEMTYPE_MEMHANDLE); // Register to QNN memory Qnn_MemHandle_t mem_handle = QNN_TENSOR_GET_MEM_HANDLE(qnn_tensor); - MLLM_RT_ASSERT_EQ(QNN_SUCCESS, qnnInterface_.memRegister(context_, &mem_descriptor, 1u, &mem_handle)); + auto status = qnnInterface_.memRegister(context_, &mem_descriptor, 1u, &mem_handle); + + if (status != QNN_SUCCESS) { + auto stats = getRegisteredBufferStats(); + MLLM_ERROR("QNNAllocator::registerQnnTensorToSharedBuffer memRegister failed, status=0x{:x}, ptr={}, fd={}, bytes={}, " + "shape={}, dtype={}, tensor_id={}, tensor_name={}", + status, ptr, mem_fd, total_bytes, shape_str, static_cast(mem_descriptor.dataType), tensor_id, tensor_name); + MLLM_ERROR("Current registered buffers: {} buffers, {} MB", stats.count, stats.total_bytes / (1024 * 1024)); + + // Try to reuse existing 
buffer for the same tensor ID or name as fallback + bool fallback_success = false; + if (tensorIdToPtrMap_.count(tensor_id) > 0) { + void* existing_ptr = tensorIdToPtrMap_[tensor_id]; + if (existing_ptr != nullptr) { + MLLM_WARN("Fallback: Reusing existing buffer by ID for tensor_id={}, tensor_name={}, old_ptr={}, new_ptr={}", + tensor_id, tensor_name, existing_ptr, ptr); + fallback_success = reuseExistingBuffer(existing_ptr); + } + } + if (!fallback_success && tensor_name != "unknown" && tensorNameToPtrMap_.count(tensor_name) > 0) { + void* existing_ptr = tensorNameToPtrMap_[tensor_name]; + if (existing_ptr != nullptr) { + MLLM_WARN("Fallback: Reusing existing buffer by name for tensor_id={}, tensor_name={}, old_ptr={}, new_ptr={}", + tensor_id, tensor_name, existing_ptr, ptr); + fallback_success = reuseExistingBuffer(existing_ptr); + } + } + + if (!fallback_success) { + MLLM_ERROR("QNNAllocator::registerQnnTensorToSharedBuffer: memRegister failed and fallback also failed. " + "Buffer ptr={} will be freed, tensor registration cannot proceed.", ptr); + + if (qnnMemPtrSet_.count(ptr) > 0) { + qnnMemPtrSet_.erase(ptr); + rpcmem_free(ptr); + storage->ptr_ = nullptr; + QNN_ALLOCATOR_VERBOSE("QNNAllocator::registerQnnTensorToSharedBuffer: Freed ptr={} ({} bytes) after failure", ptr, + total_bytes); + } + + QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, original_mem_handle); + QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, original_mem_type); + return false; + } + return true; + } else { + QNN_ALLOCATOR_VERBOSE("Register shared buffer ptr={}, fd={}, bytes={}, shape={}, dtype={}, tensor_id={}, tensor_name={}", + ptr, mem_fd, total_bytes, shape_str, static_cast(mem_descriptor.dataType), tensor_id, + tensor_name); + } QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, mem_handle); ptrToFdAndMemHandleMap_.insert({ptr, {mem_fd, mem_handle}}); + updateMappings(ptr); + return true; } void QNNAllocator::deRegisterQnnTensorFromSharedBuffer(void* ptr) { - MLLM_RT_ASSERT_EQ(ptrToFdAndMemHandleMap_.count(ptr), 1); 
- MLLM_RT_ASSERT_EQ(QNN_SUCCESS, qnnInterface_.memDeRegister(&(ptrToFdAndMemHandleMap_[ptr].second), 1)); - ptrToFdAndMemHandleMap_.erase(ptr); + auto iter = ptrToFdAndMemHandleMap_.find(ptr); + if (iter == ptrToFdAndMemHandleMap_.end()) { return; } + + Qnn_ErrorHandle_t status = qnnInterface_.memDeRegister(&(iter->second.second), 1); + if (status != QNN_SUCCESS) { + MLLM_WARN("QNNAllocator::deRegisterQnnTensorFromSharedBuffer memDeRegister failed, status=0x{:x}, ptr={}, fd={}", status, + ptr, iter->second.first); + } + + ptrToFdAndMemHandleMap_.erase(iter); + ptrToSizeMap_.erase(ptr); + + // Remove from tensor ID and name mappings if they exist + for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { + if (it->second == ptr) { + it = tensorIdToPtrMap_.erase(it); + } else { + ++it; + } + } + for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { + if (it->second == ptr) { + it = tensorNameToPtrMap_.erase(it); + } else { + ++it; + } + } +} + +QNNAllocator::BufferStats QNNAllocator::getRegisteredBufferStats() const { + BufferStats stats{}; + stats.count = ptrToFdAndMemHandleMap_.size(); + stats.total_bytes = 0; + + for (const auto& [ptr, size] : ptrToSizeMap_) { + stats.total_bytes += size; + } + + return stats; } +bool QNNAllocator::isRegistered(void* ptr) const { + return ptrToFdAndMemHandleMap_.count(ptr) > 0; +} + +#undef QNN_ALLOCATOR_VERBOSE + std::shared_ptr createQNNAllocator() { return std::make_shared(); } } // namespace mllm::qnn diff --git a/mllm/backends/qnn/QNNAllocator.hpp b/mllm/backends/qnn/QNNAllocator.hpp index 7fc7335d4..79db7e305 100644 --- a/mllm/backends/qnn/QNNAllocator.hpp +++ b/mllm/backends/qnn/QNNAllocator.hpp @@ -3,8 +3,9 @@ #pragma once -#include #include +#include +#include #include "QnnCommon.h" #include "QnnInterface.h" #include "mllm/backends/base/Allocator.hpp" @@ -30,14 +31,7 @@ class QNNAllocator final : public Allocator { QNNAllocator(); // need to setQNNPointer afterward 
QNNAllocator(QNN_INTERFACE_VER_TYPE qnnInterface, void* context); - ~QNNAllocator() { - for (auto iter = ptrToFdAndMemHandleMap_.begin(); iter != ptrToFdAndMemHandleMap_.end();) { - Qnn_ErrorHandle_t deregisterRet = qnnInterface_.memDeRegister(&iter->second.second, 1); - if (QNN_SUCCESS != deregisterRet) { MLLM_ERROR("~QNNAllocator: qnnInterface_.memDeRegister failed"); } - rpcmem_free(iter->first); - iter = ptrToFdAndMemHandleMap_.erase(iter); - } - } + ~QNNAllocator(); void setQNNPointer(QNN_INTERFACE_VER_TYPE qnnInterface, void* context) { this->qnnInterface_ = qnnInterface; @@ -75,10 +69,20 @@ class QNNAllocator final : public Allocator { // Sharing access in between processing domains in QNN HTP backend. Using shared buffers can // eliminate data copy in between client code on the host CPU and HTP accelerator. - void registerQnnTensorToSharedBuffer(void* ptr, Qnn_Tensor_t& qnn_tensor); + bool registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_t& qnn_tensor); void deRegisterQnnTensorFromSharedBuffer(void* ptr); + // Debug: Get statistics about registered buffers + struct BufferStats { + size_t count; + size_t total_bytes; + }; + [[nodiscard]] BufferStats getRegisteredBufferStats() const; + + // Debug: Check if a ptr is already registered + bool isRegistered(void* ptr) const; + private: QNN_INTERFACE_VER_TYPE qnnInterface_; Qnn_ContextHandle_t context_ = nullptr; @@ -90,6 +94,13 @@ class QNNAllocator final : public Allocator { // to check if the ptr is allocted by rpcmem_alloc std::set qnnMemPtrSet_; std::map> ptrToFdAndMemHandleMap_; + // Track buffer sizes for statistics + std::map ptrToSizeMap_; + // Map tensor name to registered buffer ptr for reuse + std::map tensorNameToPtrMap_; + // Map tensor ID to registered buffer ptr for reuse (more reliable than name) + std::map tensorIdToPtrMap_; + }; std::shared_ptr createQNNAllocator(); diff --git a/mllm/backends/qnn/QNNBackend.cpp b/mllm/backends/qnn/QNNBackend.cpp index efcb89a36..6a218d64e 100644 --- 
a/mllm/backends/qnn/QNNBackend.cpp +++ b/mllm/backends/qnn/QNNBackend.cpp @@ -539,6 +539,8 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& for (int i = 0; i < model->getGraphInputTensorWrappers().size(); i++) { // alloc and register qnn tensor model->getGraphInputTensorWrappers()[i]->getDataContainer() = inputs[i]; // update data container + // Reset allocation flag when dataContainer is updated to ensure proper registration + model->getGraphInputTensorWrappers()[i]->resetAlloc(); model->getGraphInputTensorWrappers()[i]->alloc(); // QNNAllocator will handle registered memory descriptor qnn_inputs.push_back(*(model->getGraphInputTensorWrappers()[i]->getNativeTensor())); } diff --git a/mllm/backends/qnn/QNNUtils.cpp b/mllm/backends/qnn/QNNUtils.cpp index 03d752b08..1c93e4558 100644 --- a/mllm/backends/qnn/QNNUtils.cpp +++ b/mllm/backends/qnn/QNNUtils.cpp @@ -11,6 +11,7 @@ #include #include #include +#include namespace mllm::qnn { @@ -360,6 +361,7 @@ std::shared_ptr QNNTensorWrapper::createStaticTensor(const std std::shared_ptr tensorWrapper = QNNTensorWrapper::create(name, QNN_TENSOR_TYPE_STATIC, tensor, quantize); tensorWrapper->isAlloc_ = true; + tensorWrapper->registeredPtr_ = tensor.ptr(); uint32_t numElement = tensor.bytes(); Qnn_ClientBuffer_t clientBuffer = {.data = tensor.ptr(), .dataSize = numElement}; @@ -369,22 +371,48 @@ std::shared_ptr QNNTensorWrapper::createStaticTensor(const std } void QNNTensorWrapper::alloc() { - if (isAlloc_) { - MLLM_WARN("Tensor {} has already been allocated.", name_); - return; - } MLLM_RT_ASSERT(dataContainer_.device() == kQNN); - // if storage is not allocated, allocate it - // or, register the existing storage to QNN(passing allocated input to QNN) - if (!dataContainer_.impl()->ptr()) { dataContainer_.alloc(); } + void* currentPtr = dataContainer_.impl()->ptr(); + if (!currentPtr) { + dataContainer_.alloc(); + currentPtr = dataContainer_.ptr(); + } + + auto allocator = 
std::static_pointer_cast(Context::instance().getBackend(kQNN)->allocator()); + + auto storage = dataContainer_.impl()->storage(); + MLLM_RT_ASSERT(storage != nullptr); + + if (registeredPtr_ && registeredPtr_ != storage->ptr_) { + if (!allocator->isRegistered(registeredPtr_)) { + registeredPtr_ = nullptr; + } else { + void* freshPtr = storage->ptr_; + size_t bytesToCopy = dataContainer_.bytes(); + if (freshPtr && bytesToCopy > 0) { std::memcpy(registeredPtr_, freshPtr, bytesToCopy); } + if (freshPtr) { allocator->free(storage.get()); } + storage->ptr_ = registeredPtr_; + currentPtr = registeredPtr_; + } + } + + if (isAlloc_ && registeredPtr_ == currentPtr) { return; } - std::static_pointer_cast(Context::instance().getBackend(kQNN)->allocator()) - ->registerQnnTensorToSharedBuffer(dataContainer_.ptr(), qnnTensor_); + if (!allocator->registerQnnTensorToSharedBuffer(storage.get(), qnnTensor_)) { + MLLM_ERROR("QNNTensorWrapper::alloc failed to register shared buffer for tensor {}", name_); + // Fail fast: prevent executing graph with invalid mem handle + MLLM_RT_ASSERT(false); + } + registeredPtr_ = storage->ptr_; isAlloc_ = true; } +void QNNTensorWrapper::resetAlloc() { + isAlloc_ = false; +} + void QNNTensorWrapper::initFromQnnTensor(Qnn_Tensor_t* qnnTensor) { if (qnnTensor == nullptr) { MLLM_ERROR("QNNTensorWrapper::setQnnTensor() received nullptr"); @@ -503,4 +531,4 @@ void propagateQuantScale(const Tensor& input, Tensor& output) { } } -} // namespace mllm::qnn \ No newline at end of file +} // namespace mllm::qnn diff --git a/mllm/backends/qnn/QNNUtils.hpp b/mllm/backends/qnn/QNNUtils.hpp index b5d12cb10..5c0483dfb 100644 --- a/mllm/backends/qnn/QNNUtils.hpp +++ b/mllm/backends/qnn/QNNUtils.hpp @@ -203,10 +203,12 @@ class QNNTensorWrapper { [[nodiscard]] const Qnn_Tensor_t* getNativeTensor() const { return &qnnTensor_; } // Get tensor name - const std::string& getName() const { return name_; } + [[nodiscard]] const std::string& getName() const { return name_; } // 
alloc graph input/output tensor memory in QNN shared buffer void alloc(); + // reset allocation flag when dataContainer is updated + void resetAlloc(); Tensor& getDataContainer() { return dataContainer_; } const std::vector* getDimension() { return &dimensions_; } @@ -216,6 +218,7 @@ class QNNTensorWrapper { Tensor dataContainer_; Qnn_Tensor_t qnnTensor_; bool isAlloc_ = false; + void* registeredPtr_ = nullptr; }; class QNNParamTensorWrapper { diff --git a/mllm/core/aops/KVCacheOp.hpp b/mllm/core/aops/KVCacheOp.hpp index 695b4142f..65c1905c8 100644 --- a/mllm/core/aops/KVCacheOp.hpp +++ b/mllm/core/aops/KVCacheOp.hpp @@ -34,10 +34,14 @@ class KVCacheOp : public BaseOp { virtual void clearCache(); + // Set current valid sequence length for KV cache logic + // Default no-op; backends that maintain cache should override. + virtual void setCurrentSeqCnt(int32_t /*seq*/) {} + inline const KVCacheOpOptions& options() const { return options_; } protected: KVCacheOpOptions options_; }; -} // namespace mllm::aops \ No newline at end of file +} // namespace mllm::aops diff --git a/mllm/models/qwen_npu/modeling_qwen_npu.hpp b/mllm/models/qwen_npu/modeling_qwen_npu.hpp index 355137a34..85d2a2cca 100644 --- a/mllm/models/qwen_npu/modeling_qwen_npu.hpp +++ b/mllm/models/qwen_npu/modeling_qwen_npu.hpp @@ -440,6 +440,10 @@ class QwenText final : public nn::Module { void clearKVCache() { for (auto& block : decode_blocks_.list()) { block.getKVCache().clearCache(); } } + + void setKVCacheSeqCnt(int32_t seq) { + for (auto& block : decode_blocks_.list()) { block.getKVCache().setCurrentSeqCnt(seq); } + } }; class QwenForCausalLM : public nn::Module, public ARGeneration { @@ -453,6 +457,9 @@ class QwenForCausalLM : public nn::Module, public ARGeneration { tie_word_embeddings_ = cfg.tie_word_embeddings; } + // Set current valid sequence length for KV cache across all layers + void setKVCacheSeqCnt(int32_t seq) { model.setKVCacheSeqCnt(seq); } + ARGenerationOutputPast forward(const 
ARGenerationOutputPast& input, const ARGenerationArgs& args) override { auto sequence = input.at("sequence"); diff --git a/mllm/nn/layers/KVCache.cpp b/mllm/nn/layers/KVCache.cpp index 6ab0c2504..a33a35ab0 100644 --- a/mllm/nn/layers/KVCache.cpp +++ b/mllm/nn/layers/KVCache.cpp @@ -23,4 +23,8 @@ void KVCache::setLayerIndex(int32_t layer_idx) { void KVCache::clearCache() { std::static_pointer_cast(impl()->getInstancedOp())->clearCache(); } +void KVCache::setCurrentSeqCnt(int32_t seq) { + std::static_pointer_cast(impl()->getInstancedOp())->setCurrentSeqCnt(seq); +} + } // namespace mllm::nn diff --git a/mllm/nn/layers/KVCache.hpp b/mllm/nn/layers/KVCache.hpp index 1cea392bd..abde039fb 100644 --- a/mllm/nn/layers/KVCache.hpp +++ b/mllm/nn/layers/KVCache.hpp @@ -20,6 +20,9 @@ class KVCache : public Layer { void clearCache(); + // Update current valid sequence length in underlying KV cache op + void setCurrentSeqCnt(int32_t seq); + MLLM_LAYER_ANY_INPUTS_2_OUTPUTS_FORWARD }; diff --git a/tests/qnn/CMakeLists.txt b/tests/qnn/CMakeLists.txt new file mode 100644 index 000000000..59dc90870 --- /dev/null +++ b/tests/qnn/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright (c) MLLM Team. +# Licensed under the MIT License. + +if(MLLM_BUILD_QNN_BACKEND) + add_executable(QNNOutputOrderTest QNNOutputOrderTest.cpp) + target_link_libraries(QNNOutputOrderTest PRIVATE MllmRT MllmCPUBackend MllmQNNBackend) + target_include_directories(QNNOutputOrderTest PRIVATE ${MLLM_INCLUDE_DIR}) +endif() + diff --git a/tests/qnn/QNNOutputOrderTest.cpp b/tests/qnn/QNNOutputOrderTest.cpp new file mode 100644 index 000000000..6b893075e --- /dev/null +++ b/tests/qnn/QNNOutputOrderTest.cpp @@ -0,0 +1,97 @@ +// Copyright (c) MLLM Team. +// Licensed under the MIT License. 
+ +#include "mllm/backends/qnn/QNNModel.hpp" +#include "mllm/backends/qnn/QNNUtils.hpp" +#include "mllm/core/DataTypes.hpp" +#include "mllm/utils/Log.hpp" +#include +#include +#include + +using namespace mllm; +using namespace mllm::qnn; + +// Mock QNN interface for testing +struct MockQnnInterface { + // Minimal mock implementation +}; + +// Test QNNModel output order mapping +void testOutputOrderMapping() { + MLLM_INFO("Testing QNNModel output order mapping..."); + + // Create a mock QNN interface (in real usage, this would be from QNN SDK) + // For testing purposes, we'll create a minimal test + + // Note: This test requires actual QNN backend initialization + // In a real test environment, you would: + // 1. Initialize QNN backend + // 2. Create a QNNModel + // 3. Add tensors in a specific order + // 4. Set expected output order + // 5. Verify the mapping is correct + + MLLM_INFO("QNNModel output order mapping test structure:"); + MLLM_INFO(" 1. Create QNNModel with expected output order"); + MLLM_INFO(" 2. Add output tensors (QNN order)"); + MLLM_INFO(" 3. Verify qnnOutputNameToIndex_ mapping is correct"); + MLLM_INFO(" 4. Verify getQnnOutputIndex() returns correct indices"); + MLLM_INFO(" 5. Verify getExpectedOutputOrder() returns correct order"); + + // Example test scenario: + // Expected order (MLLM): ["output_0", "output_1", "output_2"] + // QNN order (actual): ["output_2", "output_0", "output_1"] + // Mapping should be: + // MLLM[0] = QNN[1] (output_0) + // MLLM[1] = QNN[2] (output_1) + // MLLM[2] = QNN[0] (output_2) + + MLLM_INFO("Test structure created. 
Integration test requires QNN backend."); +} + +// Test output reordering logic +void testOutputReordering() { + MLLM_INFO("Testing output reordering logic..."); + + // Simulate the reordering logic + std::vector expectedOrder = {"output_0", "output_1", "output_2"}; + std::map qnnOutputNameToIndex = { + {"output_2", 0}, // QNN returns in this order + {"output_0", 1}, + {"output_1", 2} + }; + + // Simulate output tensors (in QNN order) + std::vector qnnOutputs = {"output_2", "output_0", "output_1"}; + + // Reorder according to expected order + std::vector reorderedIndices; + for (const auto& expected_name : expectedOrder) { + auto it = qnnOutputNameToIndex.find(expected_name); + if (it != qnnOutputNameToIndex.end()) { + reorderedIndices.push_back(it->second); + MLLM_INFO(" Mapping: MLLM[{}] = QNN[{}] (tensor: {})", + reorderedIndices.size() - 1, it->second, expected_name); + } + } + + // Verify the mapping + assert(reorderedIndices.size() == expectedOrder.size()); + assert(reorderedIndices[0] == 1); // output_0 is at QNN index 1 + assert(reorderedIndices[1] == 2); // output_1 is at QNN index 2 + assert(reorderedIndices[2] == 0); // output_2 is at QNN index 0 + + MLLM_INFO("Output reordering logic test passed!"); +} + +int main() { + MLLM_INFO("=== QNN Output Order Test ==="); + + testOutputOrderMapping(); + testOutputReordering(); + + MLLM_INFO("=== All tests passed ==="); + return 0; +} + From 1d5d25376ea6fa2fe5c85023b52c9ce0faf59916 Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Mon, 17 Nov 2025 05:47:45 +0000 Subject: [PATCH 2/8] feat: implement Qwen NPU simple single chunk decoding support - Add KV cache sequence count management - Implement decode loop with position_ids handling - Add EOS token termination check - Update forward method to support decode phase --- examples/qwen_npu/main.cpp | 104 ++++++++++++------ mllm/backends/cpu/ops/KVCacheOp.cpp | 2 + mllm/backends/cpu/ops/KVCacheOp.hpp | 2 + mllm/backends/qnn/QNNAllocator.cpp | 117 
+++++++++++++-------- mllm/backends/qnn/QNNAllocator.hpp | 18 ++++ mllm/backends/qnn/QNNBackend.cpp | 63 +++++++++-- mllm/backends/qnn/QNNUtils.cpp | 16 +++ mllm/core/aops/KVCacheOp.hpp | 4 + mllm/models/qwen_npu/modeling_qwen_npu.hpp | 12 +++ mllm/nn/Module.hpp | 1 + mllm/nn/layers/KVCache.cpp | 4 + mllm/nn/layers/KVCache.hpp | 3 + 12 files changed, 265 insertions(+), 81 deletions(-) diff --git a/examples/qwen_npu/main.cpp b/examples/qwen_npu/main.cpp index 9cd4a2d85..feec3a272 100644 --- a/examples/qwen_npu/main.cpp +++ b/examples/qwen_npu/main.cpp @@ -66,10 +66,10 @@ MLLM_MAIN({ // cache has been updated due to trace, clear cache model.model.clearKVCache(); - auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "How are you?"})["sequence"]; + auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "What can you do?"})["sequence"]; print(raw_input_tokens); MLLM_INFO("raw_input_tokens shape: {} {}", raw_input_tokens.shape()[0], raw_input_tokens.shape()[1]); - + const int chunk_size = 128; int real_seq = static_cast(raw_input_tokens.shape()[1]); const int eos_token_id = 151645; @@ -78,15 +78,42 @@ MLLM_MAIN({ } // manually set input data as fill op is not supported in QNN - auto ptr = inputs["sequence"].ptr(); + // IMPORTANT: inputs["sequence"] was created before trace and may have been processed by QNN backend + // Recreate inputs from raw_input_tokens to ensure we're using fresh CPU memory + mllm::models::ARGenerationOutputPast prefill_inputs{{"sequence", mllm::Tensor::empty({1, chunk_size}, mllm::kInt64, mllm::kCPU).alloc()}}; + auto ptr = prefill_inputs["sequence"].ptr(); auto input_data = raw_input_tokens.ptr(); + + // Copy tokenized input data for (int i = 0; i < real_seq; ++i) { ptr[i] = input_data[i]; } for (int i = real_seq; i < chunk_size; ++i) { ptr[i] = -1; } + + bool data_matches = true; + for (int i = 0; i < real_seq; ++i) { + if (ptr[i] != input_data[i]) { + MLLM_ERROR("Data mismatch at index {}: expected {}, got {}", i, 
input_data[i], ptr[i]); + data_matches = false; + } + } + if (!data_matches) { + MLLM_ERROR_EXIT(mllm::ExitCode::kShapeError, "Failed to copy input data correctly"); + } // Prefill MLLM_INFO("=== Prefill Phase ==="); MLLM_INFO("Input sequence length: {}", real_seq); - auto prefill_output = model.forward(inputs, {{"seq_len", mllm::AnyValue(real_seq)}}); + + // Debug: Verify prefill_inputs data right before forward + { + auto verify_ptr = prefill_inputs["sequence"].ptr(); + MLLM_INFO("prefill_inputs[\"sequence\"] right before forward (first 10): {} {} {} {} {} {} {} {} {} {}", + verify_ptr[0], verify_ptr[1], verify_ptr[2], verify_ptr[3], verify_ptr[4], + verify_ptr[5], verify_ptr[6], verify_ptr[7], verify_ptr[8], verify_ptr[9]); + MLLM_INFO("prefill_inputs[\"sequence\"] device: {}, bytes: {}", + (int)prefill_inputs["sequence"].device(), prefill_inputs["sequence"].bytes()); + } + + auto prefill_output = model.forward(prefill_inputs, {{"seq_len", mllm::AnyValue(real_seq)}}); auto& prefill_logits = prefill_output["sequence"]; auto sampled = model.sampleGreedy(prefill_logits); prefill_logits.delete_(); @@ -94,17 +121,16 @@ MLLM_MAIN({ MLLM_INFO("Prefill generated token id: {}", sampled); std::wcout << qwen_tokenizer.detokenize(sampled); - // Decode loop + // Decode loop - 新方案:每次完整 prefill int current_seq_len = real_seq; - auto& sequence_tensor = inputs["sequence"]; + auto& sequence_tensor = prefill_inputs["sequence"]; auto sequence_ptr = sequence_tensor.ptr(); // write first token into padding sequence_ptr[current_seq_len] = sampled; current_seq_len++; - // carry past (position_ids) from prefill - mllm::models::ARGenerationOutputPast past{{"position_ids", prefill_output["position_ids"]}}; + // Clean up prefill output prefill_output.clear(); // Debug: Check registered buffer count after prefill @@ -123,53 +149,71 @@ MLLM_MAIN({ MLLM_INFO("Starting decode loop, initial seq_len: {}", current_seq_len); int decode_step = 0; - mllm::Tensor decode_token_tensor = 
mllm::Tensor::empty({1, 1}, mllm::kInt64, mllm::kCPU).alloc(); - mllm::models::ARGenerationOutputPast decode_input{ - {"sequence", decode_token_tensor}, - {"position_ids", past["position_ids"]}, - }; - while (current_seq_len < chunk_size) { decode_step++; MLLM_INFO("--- Decode Step {} ---", decode_step); MLLM_INFO("Current sequence length: {}", current_seq_len); - // update KV cache sequence length across layers - model.setKVCacheSeqCnt(current_seq_len); + // Reset KV cache to 0 for full prefill (重新计算所有 KV cache) + model.setKVCacheSeqCnt(0); - // reuse a single CPU tensor for decode token to avoid repeated QNN allocations - decode_token_tensor.ptr()[0] = sequence_ptr[current_seq_len - 1]; + // IMPORTANT: Use full [1, 128] tensor, not a slice + // QNN backend expects fixed-size input tensor [1, 128] as defined during graph build + // We only use the first current_seq_len tokens, the rest are padding (-1) + // Ensure padding area is properly set to -1 + for (int i = current_seq_len; i < chunk_size; ++i) { + sequence_ptr[i] = -1; + } - // pass through latest position ids returned from previous forward call - decode_input["position_ids"] = past["position_ids"]; - MLLM_INFO("Decode input token: {}", sequence_ptr[current_seq_len - 1]); + // Use full sequence tensor - QNN backend will handle the size correctly + // The seq_len parameter tells the model how many tokens are actually valid + mllm::models::ARGenerationOutputPast decode_input{ + {"sequence", sequence_tensor}, // Use full [1, 128] tensor, not a slice + }; - // forward for next token logits + MLLM_INFO("Decode input sequence length: {} (using full [1, {}] tensor)", current_seq_len, chunk_size); + + // Forward with full sequence - this is a full prefill, not incremental decode + // seq_len parameter tells the model to only process first current_seq_len tokens auto decode_output = model.forward(decode_input, {{"seq_len", mllm::AnyValue(current_seq_len)}}); - // sample next token + // Print KV cache length after 
decode + auto kv_cache_len = model.getKVCacheSeqCnt(0); // Get KV cache length from layer 0 + MLLM_INFO("KV cache length after decode step {}: {}", decode_step, kv_cache_len); + + // Sample next token auto& decode_logits = decode_output["sequence"]; auto next_token = model.sampleGreedy(decode_logits); MLLM_INFO("Generated token id: {}", next_token); std::wcout << qwen_tokenizer.detokenize(next_token); + // Check termination if (next_token == eos_token_id) { MLLM_INFO("EOS token detected, stopping decode"); break; } - // write token into sequence buffer + // Write new token into sequence buffer sequence_ptr[current_seq_len] = next_token; current_seq_len++; MLLM_INFO("Updated sequence length: {}", current_seq_len); - // carry past (only keep position_ids to avoid leaking QNN buffers) + // Clean up - no need to keep position_ids since we're doing full prefill each time decode_logits.delete_(); - decode_output.erase("sequence"); - auto position_ids = decode_output["position_ids"]; - decode_output.erase("position_ids"); - past = {{"position_ids", position_ids}}; - decode_input["position_ids"] = past["position_ids"]; + decode_output.clear(); + + // Debug: Check registered buffer count after each decode step + { + auto qnn_backend = mllm::Context::instance().getBackend(mllm::kQNN); + if (qnn_backend) { + auto allocator = std::static_pointer_cast(qnn_backend->allocator()); + if (allocator) { + auto stats = allocator->getRegisteredBufferStats(); + MLLM_INFO("After decode step {}: {} buffers registered, {} MB", + decode_step, stats.count, stats.total_bytes / (1024 * 1024)); + } + } + } } MLLM_INFO("=== Decode Complete ==="); diff --git a/mllm/backends/cpu/ops/KVCacheOp.cpp b/mllm/backends/cpu/ops/KVCacheOp.cpp index 04d8f254e..ed6977687 100644 --- a/mllm/backends/cpu/ops/KVCacheOp.cpp +++ b/mllm/backends/cpu/ops/KVCacheOp.cpp @@ -46,4 +46,6 @@ void CPUKVCacheOp::clearCache() { cache_.clearCache(); } void CPUKVCacheOp::setCurrentSeqCnt(int32_t seq) { 
cache_.setCurrentSeqCnt(seq); } +int32_t CPUKVCacheOp::getCurrentSeqCnt() const { return cache_.getCurrentSeqCnt(options_.layer_idx); } + } // namespace mllm::cpu diff --git a/mllm/backends/cpu/ops/KVCacheOp.hpp b/mllm/backends/cpu/ops/KVCacheOp.hpp index 09fb737a0..e8bdba86e 100644 --- a/mllm/backends/cpu/ops/KVCacheOp.hpp +++ b/mllm/backends/cpu/ops/KVCacheOp.hpp @@ -21,6 +21,8 @@ class CPUKVCacheOp final : public aops::KVCacheOp { void setCurrentSeqCnt(int32_t seq) override; + int32_t getCurrentSeqCnt() const override; + private: nn::StaticCache cache_; }; diff --git a/mllm/backends/qnn/QNNAllocator.cpp b/mllm/backends/qnn/QNNAllocator.cpp index 4901c17eb..b910d9aa1 100644 --- a/mllm/backends/qnn/QNNAllocator.cpp +++ b/mllm/backends/qnn/QNNAllocator.cpp @@ -125,22 +125,8 @@ void QNNAllocator::free(Storage* storage) { QNN_ALLOCATOR_VERBOSE("QNNAllocator::free freeing unregistered buffer ptr={}", ptr); qnnMemPtrSet_.erase(ptr); rpcmem_free(ptr); - - // Clear mappings that point to this ptr - for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { - if (it->second == ptr) { - it = tensorIdToPtrMap_.erase(it); - } else { - ++it; - } - } - for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { - if (it->second == ptr) { - it = tensorNameToPtrMap_.erase(it); - } else { - ++it; - } - } + eraseTensorMappingsForPtr(ptr, "free(unregistered buffer)"); + clearLastRegistrationIfMatches(ptr, "free(unregistered buffer)"); return; } @@ -157,28 +143,15 @@ void QNNAllocator::free(Storage* storage) { } // Don't free the buffer here since alternative_ptr is still using it qnnMemPtrSet_.erase(ptr); + clearLastRegistrationIfMatches(ptr, "free(ptr) -> redirected to alias"); } else { // Since QNN doesn't support re-registering a deRegistered buffer (fd may be invalidated), // we should free the buffer immediately even if there are mappings. // The decode phase will allocate a new buffer when needed. 
qnnMemPtrSet_.erase(ptr); rpcmem_free(ptr); - - // Clear mappings that point to this ptr - for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { - if (it->second == ptr) { - it = tensorIdToPtrMap_.erase(it); - } else { - ++it; - } - } - for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { - if (it->second == ptr) { - it = tensorNameToPtrMap_.erase(it); - } else { - ++it; - } - } + eraseTensorMappingsForPtr(ptr, "free(ptr) -> mem_handle released"); + clearLastRegistrationIfMatches(ptr, "free(ptr) -> mem_handle released"); } storage->ptr_ = nullptr; } @@ -257,6 +230,7 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, QNN_TENSORMEMTYPE_MEMHANDLE); QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, existing_mem_handle); updateMappings(existing_ptr); + rememberLastRegistration(tensor_id, tensor_name, existing_ptr, existing_mem_handle, total_bytes); return true; }; @@ -266,6 +240,7 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, QNN_TENSORMEMTYPE_MEMHANDLE); QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, mem_handle); updateMappings(ptr); + rememberLastRegistration(tensor_id, tensor_name, ptr, mem_handle, total_bytes); return true; } @@ -352,6 +327,23 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ } } + if (!fallback_success && hasLastRegistrationInfo_) { + bool same_tensor_id = tensor_id != 0 && tensor_id == lastRegistrationInfo_.tensor_id; + bool same_tensor_name = tensor_name != "unknown" && !tensor_name.empty() + && tensor_name == lastRegistrationInfo_.tensor_name; + bool ptr_still_registered = lastRegistrationInfo_.ptr != nullptr + && ptrToFdAndMemHandleMap_.count(lastRegistrationInfo_.ptr) > 0; + if ((same_tensor_id || same_tensor_name) && ptr_still_registered) { + MLLM_WARN("Fallback: Reusing last successful buffer for tensor_id={}, tensor_name={}, old_ptr={}, 
new_ptr={}", + tensor_id, tensor_name, lastRegistrationInfo_.ptr, ptr); + fallback_success = reuseExistingBuffer(lastRegistrationInfo_.ptr); + } else { + MLLM_WARN("Fallback: Last registration info unusable for tensor_id={}, tensor_name={}, " + "same_tensor_id={}, same_tensor_name={}, ptr_registered={}", + tensor_id, tensor_name, same_tensor_id, same_tensor_name, ptr_still_registered); + } + } + if (!fallback_success) { MLLM_ERROR("QNNAllocator::registerQnnTensorToSharedBuffer: memRegister failed and fallback also failed. " "Buffer ptr={} will be freed, tensor registration cannot proceed.", ptr); @@ -360,6 +352,8 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ qnnMemPtrSet_.erase(ptr); rpcmem_free(ptr); storage->ptr_ = nullptr; + eraseTensorMappingsForPtr(ptr, "register failure -> freed ptr"); + clearLastRegistrationIfMatches(ptr, "register failure -> freed ptr"); QNN_ALLOCATOR_VERBOSE("QNNAllocator::registerQnnTensorToSharedBuffer: Freed ptr={} ({} bytes) after failure", ptr, total_bytes); } @@ -379,6 +373,7 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ ptrToFdAndMemHandleMap_.insert({ptr, {mem_fd, mem_handle}}); updateMappings(ptr); + rememberLastRegistration(tensor_id, tensor_name, ptr, mem_handle, total_bytes); return true; } @@ -394,8 +389,35 @@ void QNNAllocator::deRegisterQnnTensorFromSharedBuffer(void* ptr) { ptrToFdAndMemHandleMap_.erase(iter); ptrToSizeMap_.erase(ptr); + eraseTensorMappingsForPtr(ptr, "explicit deRegister"); + clearLastRegistrationIfMatches(ptr, "explicit deRegister"); +} + +QNNAllocator::BufferStats QNNAllocator::getRegisteredBufferStats() const { + BufferStats stats{}; + stats.count = ptrToFdAndMemHandleMap_.size(); + stats.total_bytes = 0; + + for (const auto& [ptr, size] : ptrToSizeMap_) { + stats.total_bytes += size; + } - // Remove from tensor ID and name mappings if they exist + return stats; +} + +bool QNNAllocator::isRegistered(void* ptr) const { + return 
ptrToFdAndMemHandleMap_.count(ptr) > 0; +} + +size_t QNNAllocator::getRegisteredBufferSize(void* ptr) const { + auto it = ptrToSizeMap_.find(ptr); + if (it == ptrToSizeMap_.end()) { return 0; } + return it->second; +} + +void QNNAllocator::eraseTensorMappingsForPtr(void* ptr, std::string_view reason) { + if (ptr == nullptr) { return; } + for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { if (it->second == ptr) { it = tensorIdToPtrMap_.erase(it); @@ -403,6 +425,7 @@ void QNNAllocator::deRegisterQnnTensorFromSharedBuffer(void* ptr) { ++it; } } + for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { if (it->second == ptr) { it = tensorNameToPtrMap_.erase(it); @@ -412,20 +435,24 @@ void QNNAllocator::deRegisterQnnTensorFromSharedBuffer(void* ptr) { } } -QNNAllocator::BufferStats QNNAllocator::getRegisteredBufferStats() const { - BufferStats stats{}; - stats.count = ptrToFdAndMemHandleMap_.size(); - stats.total_bytes = 0; - - for (const auto& [ptr, size] : ptrToSizeMap_) { - stats.total_bytes += size; - } - - return stats; +void QNNAllocator::rememberLastRegistration(uint32_t tensor_id, const std::string& tensor_name, void* ptr, + Qnn_MemHandle_t mem_handle, size_t total_bytes) { + if (ptr == nullptr || mem_handle == nullptr) { return; } + lastRegistrationInfo_.tensor_id = tensor_id; + lastRegistrationInfo_.tensor_name = tensor_name; + lastRegistrationInfo_.ptr = ptr; + lastRegistrationInfo_.mem_handle = mem_handle; + lastRegistrationInfo_.bytes = total_bytes; + hasLastRegistrationInfo_ = true; + // Note: Remembered registration info is used as fallback mechanism, logging removed for performance } -bool QNNAllocator::isRegistered(void* ptr) const { - return ptrToFdAndMemHandleMap_.count(ptr) > 0; +void QNNAllocator::clearLastRegistrationIfMatches(void* ptr, std::string_view reason) { + if (!hasLastRegistrationInfo_ || ptr == nullptr) { return; } + if (lastRegistrationInfo_.ptr == ptr) { + lastRegistrationInfo_ = {}; + 
hasLastRegistrationInfo_ = false; + } } #undef QNN_ALLOCATOR_VERBOSE diff --git a/mllm/backends/qnn/QNNAllocator.hpp b/mllm/backends/qnn/QNNAllocator.hpp index 79db7e305..6942eb6e7 100644 --- a/mllm/backends/qnn/QNNAllocator.hpp +++ b/mllm/backends/qnn/QNNAllocator.hpp @@ -5,6 +5,7 @@ #include #include +#include #include #include "QnnCommon.h" #include "QnnInterface.h" @@ -82,6 +83,7 @@ class QNNAllocator final : public Allocator { // Debug: Check if a ptr is already registered bool isRegistered(void* ptr) const; + [[nodiscard]] size_t getRegisteredBufferSize(void* ptr) const; private: QNN_INTERFACE_VER_TYPE qnnInterface_; @@ -101,6 +103,22 @@ class QNNAllocator final : public Allocator { // Map tensor ID to registered buffer ptr for reuse (more reliable than name) std::map tensorIdToPtrMap_; + struct LastRegistrationInfo { + uint32_t tensor_id = 0; + std::string tensor_name; + void* ptr = nullptr; + Qnn_MemHandle_t mem_handle = nullptr; + size_t bytes = 0; + }; + + LastRegistrationInfo lastRegistrationInfo_{}; + bool hasLastRegistrationInfo_ = false; + + void eraseTensorMappingsForPtr(void* ptr, std::string_view reason); + void rememberLastRegistration(uint32_t tensor_id, const std::string& tensor_name, void* ptr, + Qnn_MemHandle_t mem_handle, size_t total_bytes); + void clearLastRegistrationIfMatches(void* ptr, std::string_view reason); + }; std::shared_ptr createQNNAllocator(); diff --git a/mllm/backends/qnn/QNNBackend.cpp b/mllm/backends/qnn/QNNBackend.cpp index 6a218d64e..67c483087 100644 --- a/mllm/backends/qnn/QNNBackend.cpp +++ b/mllm/backends/qnn/QNNBackend.cpp @@ -1,4 +1,5 @@ #include "QNNBackend.hpp" +#include #include #include #include @@ -537,12 +538,62 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& std::vector qnn_inputs; std::vector qnn_outputs; for (int i = 0; i < model->getGraphInputTensorWrappers().size(); i++) { - // alloc and register qnn tensor - model->getGraphInputTensorWrappers()[i]->getDataContainer() = 
inputs[i]; // update data container - // Reset allocation flag when dataContainer is updated to ensure proper registration - model->getGraphInputTensorWrappers()[i]->resetAlloc(); - model->getGraphInputTensorWrappers()[i]->alloc(); // QNNAllocator will handle registered memory descriptor - qnn_inputs.push_back(*(model->getGraphInputTensorWrappers()[i]->getNativeTensor())); + auto wrapper = model->getGraphInputTensorWrappers()[i]; + auto& wrapper_tensor = wrapper->getDataContainer(); + const auto& runtime_input = inputs[i]; + + if (runtime_input.isNil()) { + MLLM_ERROR("Input tensor {} is nil for graph '{}'", i, graphName); + return; + } + + if (wrapper_tensor.isNil()) { + MLLM_ERROR("Graph input wrapper {} for graph '{}' has no backing tensor", i, graphName); + return; + } + + size_t dst_bytes = wrapper_tensor.bytes(); + size_t src_bytes = runtime_input.bytes(); + if (dst_bytes != src_bytes) { + MLLM_WARN("Graph '{}' input tensor {} byte-size mismatch: wrapper={} bytes, runtime input={} bytes. 
Copying " + "min(dst, src), but this may truncate data.", + graphName, i, dst_bytes, src_bytes); + } + + if (dst_bytes > 0) { + void* dst_ptr = wrapper_tensor.ptr(); + if (!dst_ptr) { + wrapper_tensor.alloc(); + dst_ptr = wrapper_tensor.ptr(); + } + + const void* src_ptr = runtime_input.ptr(); + size_t bytes_to_copy = std::min(dst_bytes, src_bytes); + if (!src_ptr) { + MLLM_ERROR("Runtime input tensor {} for graph '{}' has null data pointer", i, graphName); + return; + } + if (dst_ptr && src_ptr && dst_ptr != src_ptr) { + // Copy source data + if (bytes_to_copy > 0) { + std::memcpy(dst_ptr, src_ptr, bytes_to_copy); + } + + // If source is smaller than destination, zero out the remaining bytes + // This is important for decode phase where input tensors may be smaller than prefill + // Note: In current implementation with full [1, 128] tensor, this should not trigger + if (src_bytes < dst_bytes) { + size_t remaining_bytes = dst_bytes - src_bytes; + std::memset(static_cast(dst_ptr) + bytes_to_copy, 0, remaining_bytes); + // Only log if zero-padding actually occurs (unexpected case) + MLLM_WARN("[QNN graphExecute] Graph '{}' input tensor {}: zero-padded {} bytes (src={} bytes, dst={} bytes)", + graphName, i, remaining_bytes, src_bytes, dst_bytes); + } + } + } + + wrapper->alloc(); // QNNAllocator will handle registered memory descriptor when needed + qnn_inputs.push_back(*(wrapper->getNativeTensor())); } // Prepare QNN outputs in QNN order diff --git a/mllm/backends/qnn/QNNUtils.cpp b/mllm/backends/qnn/QNNUtils.cpp index 1c93e4558..2d4b8f734 100644 --- a/mllm/backends/qnn/QNNUtils.cpp +++ b/mllm/backends/qnn/QNNUtils.cpp @@ -384,6 +384,22 @@ void QNNTensorWrapper::alloc() { auto storage = dataContainer_.impl()->storage(); MLLM_RT_ASSERT(storage != nullptr); + size_t requiredBytes = dataContainer_.bytes(); + + if (registeredPtr_) { + if (!allocator->isRegistered(registeredPtr_)) { + registeredPtr_ = nullptr; + isAlloc_ = false; + } else { + size_t registeredBytes = 
allocator->getRegisteredBufferSize(registeredPtr_); + if (registeredBytes > 0 && registeredBytes < requiredBytes) { + allocator->deRegisterQnnTensorFromSharedBuffer(registeredPtr_); + registeredPtr_ = nullptr; + isAlloc_ = false; + } + } + } + if (registeredPtr_ && registeredPtr_ != storage->ptr_) { if (!allocator->isRegistered(registeredPtr_)) { registeredPtr_ = nullptr; diff --git a/mllm/core/aops/KVCacheOp.hpp b/mllm/core/aops/KVCacheOp.hpp index 65c1905c8..c4d172b1d 100644 --- a/mllm/core/aops/KVCacheOp.hpp +++ b/mllm/core/aops/KVCacheOp.hpp @@ -38,6 +38,10 @@ class KVCacheOp : public BaseOp { // Default no-op; backends that maintain cache should override. virtual void setCurrentSeqCnt(int32_t /*seq*/) {} + // Get current valid sequence length for KV cache logic + // Default returns -1; backends that maintain cache should override. + virtual int32_t getCurrentSeqCnt() const { return -1; } + inline const KVCacheOpOptions& options() const { return options_; } protected: diff --git a/mllm/models/qwen_npu/modeling_qwen_npu.hpp b/mllm/models/qwen_npu/modeling_qwen_npu.hpp index 85d2a2cca..84b7d3627 100644 --- a/mllm/models/qwen_npu/modeling_qwen_npu.hpp +++ b/mllm/models/qwen_npu/modeling_qwen_npu.hpp @@ -268,6 +268,7 @@ class QwenAttentionMatmul final : public nn::Module { } nn::KVCache& getKVCache() { return kv_cache_; } + [[nodiscard]] const nn::KVCache& getKVCache() const { return kv_cache_; } }; class QwenOutProjAndMLP final : public nn::Module { @@ -398,6 +399,7 @@ class QwenDecoder final : public nn::Module { } nn::KVCache& getKVCache() { return self_attn_matmul_.getKVCache(); } + [[nodiscard]] const nn::KVCache& getKVCache() const { return self_attn_matmul_.getKVCache(); } }; class QwenText final : public nn::Module { @@ -444,6 +446,13 @@ class QwenText final : public nn::Module { void setKVCacheSeqCnt(int32_t seq) { for (auto& block : decode_blocks_.list()) { block.getKVCache().setCurrentSeqCnt(seq); } } + + [[nodiscard]] int32_t getKVCacheSeqCnt(int32_t 
layer_idx = 0) const { + if (layer_idx < 0 || layer_idx >= static_cast(decode_blocks_.list().size())) { + return -1; + } + return decode_blocks_.list()[layer_idx].getKVCache().getCurrentSeqCnt(); + } }; class QwenForCausalLM : public nn::Module, public ARGeneration { @@ -460,6 +469,9 @@ class QwenForCausalLM : public nn::Module, public ARGeneration { // Set current valid sequence length for KV cache across all layers void setKVCacheSeqCnt(int32_t seq) { model.setKVCacheSeqCnt(seq); } + // Get current valid sequence length for KV cache from specified layer + [[nodiscard]] int32_t getKVCacheSeqCnt(int32_t layer_idx = 0) const { return model.getKVCacheSeqCnt(layer_idx); } + ARGenerationOutputPast forward(const ARGenerationOutputPast& input, const ARGenerationArgs& args) override { auto sequence = input.at("sequence"); diff --git a/mllm/nn/Module.hpp b/mllm/nn/Module.hpp index 3337aff52..88eb4105d 100644 --- a/mllm/nn/Module.hpp +++ b/mllm/nn/Module.hpp @@ -204,6 +204,7 @@ class ModuleList final : public Module { } std::vector& list() { return layers_; } + const std::vector& list() const { return layers_; } }; template diff --git a/mllm/nn/layers/KVCache.cpp b/mllm/nn/layers/KVCache.cpp index a33a35ab0..4b6bc70f9 100644 --- a/mllm/nn/layers/KVCache.cpp +++ b/mllm/nn/layers/KVCache.cpp @@ -27,4 +27,8 @@ void KVCache::setCurrentSeqCnt(int32_t seq) { std::static_pointer_cast(impl()->getInstancedOp())->setCurrentSeqCnt(seq); } +int32_t KVCache::getCurrentSeqCnt() const { + return std::static_pointer_cast(impl()->getInstancedOp())->getCurrentSeqCnt(); +} + } // namespace mllm::nn diff --git a/mllm/nn/layers/KVCache.hpp b/mllm/nn/layers/KVCache.hpp index abde039fb..55f194544 100644 --- a/mllm/nn/layers/KVCache.hpp +++ b/mllm/nn/layers/KVCache.hpp @@ -23,6 +23,9 @@ class KVCache : public Layer { // Update current valid sequence length in underlying KV cache op void setCurrentSeqCnt(int32_t seq); + // Get current valid sequence length from underlying KV cache op + int32_t 
getCurrentSeqCnt() const; + MLLM_LAYER_ANY_INPUTS_2_OUTPUTS_FORWARD }; From b438b3d5b03ab124c05f296cea289407b5d33733 Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Tue, 18 Nov 2025 14:12:43 +0000 Subject: [PATCH 3/8] implement Qwen NPU simple multi-chunk decoding support --- examples/qwen_npu/main.cpp | 253 ++++++++++++++++++------------------- 1 file changed, 122 insertions(+), 131 deletions(-) diff --git a/examples/qwen_npu/main.cpp b/examples/qwen_npu/main.cpp index feec3a272..3cdc09862 100644 --- a/examples/qwen_npu/main.cpp +++ b/examples/qwen_npu/main.cpp @@ -66,160 +66,151 @@ MLLM_MAIN({ // cache has been updated due to trace, clear cache model.model.clearKVCache(); - auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "What can you do?"})["sequence"]; + auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "提示:海洋世界里,鲸鱼是地球上体型最为庞大的哺乳动物,它们拥有流线型的身躯,主要通过头顶的喷水孔进行呼吸。与终生生活在水下并利用鱼鳃从水中提取溶解氧的鱼类有着本质区别。鲸鱼无法在水下直接呼吸氧气,因此它们需要耗费大量的体力,定时浮出水面完成一次快速而彻底的换气过程。令人惊奇的是,当它们处于睡眠状态时,为了确保不会因为忘记呼吸而发生危险,它们只会关闭大脑的一半来进行休息,另一半大脑则始终保持清醒和警觉,以便及时引导身体浮上水面。这种独特的生存机制是它们在深海中延续生命的关键。问题:鲸鱼与鱼类在呼吸方式上的根本区别是什么?它们在睡觉时会采取什么特殊的措施来保证安全和生存?"})["sequence"]; print(raw_input_tokens); MLLM_INFO("raw_input_tokens shape: {} {}", raw_input_tokens.shape()[0], raw_input_tokens.shape()[1]); - + const int chunk_size = 128; - int real_seq = static_cast(raw_input_tokens.shape()[1]); const int eos_token_id = 151645; - if (real_seq <= 0 || real_seq >= chunk_size) { - MLLM_ERROR_EXIT(mllm::ExitCode::kShapeError, "Invalid input length {} for chunk size {}", real_seq, chunk_size); + int prompt_tokens = static_cast(raw_input_tokens.shape()[1]); + if (prompt_tokens <= 0) { + MLLM_ERROR_EXIT(mllm::ExitCode::kShapeError, "Prompt sequence length must be positive"); } - // manually set input data as fill op is not supported in QNN - // IMPORTANT: inputs["sequence"] was created before trace and may have been processed by QNN backend - // Recreate inputs from raw_input_tokens to ensure we're
using fresh CPU memory - mllm::models::ARGenerationOutputPast prefill_inputs{{"sequence", mllm::Tensor::empty({1, chunk_size}, mllm::kInt64, mllm::kCPU).alloc()}}; - auto ptr = prefill_inputs["sequence"].ptr(); + // Prepare reusable [1, chunk_size] CPU buffer for chunked prefill/decode + mllm::models::ARGenerationOutputPast chunk_inputs{ + {"sequence", mllm::Tensor::empty({1, chunk_size}, mllm::kInt64, mllm::kCPU).alloc()}}; + auto sequence_tensor = chunk_inputs["sequence"]; + auto sequence_ptr = sequence_tensor.ptr(); auto input_data = raw_input_tokens.ptr(); - - // Copy tokenized input data - for (int i = 0; i < real_seq; ++i) { ptr[i] = input_data[i]; } - for (int i = real_seq; i < chunk_size; ++i) { ptr[i] = -1; } - - bool data_matches = true; - for (int i = 0; i < real_seq; ++i) { - if (ptr[i] != input_data[i]) { - MLLM_ERROR("Data mismatch at index {}: expected {}, got {}", i, input_data[i], ptr[i]); - data_matches = false; - } - } - if (!data_matches) { - MLLM_ERROR_EXIT(mllm::ExitCode::kShapeError, "Failed to copy input data correctly"); - } - // Prefill - MLLM_INFO("=== Prefill Phase ==="); - MLLM_INFO("Input sequence length: {}", real_seq); - - // Debug: Verify prefill_inputs data right before forward - { - auto verify_ptr = prefill_inputs["sequence"].ptr(); - MLLM_INFO("prefill_inputs[\"sequence\"] right before forward (first 10): {} {} {} {} {} {} {} {} {} {}", - verify_ptr[0], verify_ptr[1], verify_ptr[2], verify_ptr[3], verify_ptr[4], - verify_ptr[5], verify_ptr[6], verify_ptr[7], verify_ptr[8], verify_ptr[9]); - MLLM_INFO("prefill_inputs[\"sequence\"] device: {}, bytes: {}", - (int)prefill_inputs["sequence"].device(), prefill_inputs["sequence"].bytes()); - } - - auto prefill_output = model.forward(prefill_inputs, {{"seq_len", mllm::AnyValue(real_seq)}}); - auto& prefill_logits = prefill_output["sequence"]; - auto sampled = model.sampleGreedy(prefill_logits); - prefill_logits.delete_(); - prefill_output.erase("sequence"); - MLLM_INFO("Prefill 
generated token id: {}", sampled); - std::wcout << qwen_tokenizer.detokenize(sampled); - - // Decode loop - 新方案:每次完整 prefill - int current_seq_len = real_seq; - auto& sequence_tensor = prefill_inputs["sequence"]; - auto sequence_ptr = sequence_tensor.ptr(); + const int prompt_chunks = (prompt_tokens + chunk_size - 1) / chunk_size; + bool reached_eos = false; + int total_decode_steps = 0; - // write first token into padding - sequence_ptr[current_seq_len] = sampled; - current_seq_len++; + for (int chunk_index = 0; chunk_index < prompt_chunks && !reached_eos; ++chunk_index) { + const int chunk_start = chunk_index * chunk_size; + const int chunk_prompt_len = std::min(chunk_size, prompt_tokens - chunk_start); + const bool is_last_prompt_chunk = (chunk_index == prompt_chunks - 1); - // Clean up prefill output - prefill_output.clear(); + // Copy current chunk prompt tokens and pad remaining positions with -1 + for (int i = 0; i < chunk_prompt_len; ++i) { sequence_ptr[i] = input_data[chunk_start + i]; } + for (int i = chunk_prompt_len; i < chunk_size; ++i) { sequence_ptr[i] = -1; } - // Debug: Check registered buffer count after prefill - { - auto qnn_backend = mllm::Context::instance().getBackend(mllm::kQNN); - if (qnn_backend) { - auto allocator = std::static_pointer_cast(qnn_backend->allocator()); - if (allocator) { - auto stats = allocator->getRegisteredBufferStats(); - MLLM_INFO("After prefill: {} buffers registered, {} MB", stats.count, stats.total_bytes / (1024 * 1024)); - } + MLLM_INFO("=== Prefill Chunk {} ===", chunk_index); + MLLM_INFO("Chunk start: {}, Chunk prompt length: {}", chunk_start, chunk_prompt_len); + + // Calculate absolute sequence length from the start of the entire sequence + const int absolute_seq_len = chunk_start + chunk_prompt_len; + MLLM_INFO("Absolute sequence length: {}", absolute_seq_len); + + // Align KV cache so StaticCache writes start at the chunk's absolute offset + model.setKVCacheSeqCnt(chunk_start); + MLLM_INFO("KV cache seq_cnt 
set to: {}", chunk_start); + + // Generate position_ids starting from chunk_start for multi-chunk scenarios + auto position_ids_tensor = mllm::Tensor::empty({1, chunk_size}, mllm::kInt64, mllm::kCPU).alloc(); + auto position_ids_ptr = position_ids_tensor.ptr(); + for (int i = 0; i < chunk_size; ++i) { + position_ids_ptr[i] = chunk_start + i; + } + + // Prepare input with correct position_ids + mllm::models::ARGenerationOutputPast prefill_inputs{ + {"sequence", sequence_tensor}, + {"position_ids", position_ids_tensor}}; + + // real_seq should be the effective length in the current input tensor (relative position) + // hidden_states shape is [1, chunk_size, hidden_size], we need to index it with chunk_prompt_len - 1 + auto chunk_output = + model.forward(prefill_inputs, {{"seq_len", mllm::AnyValue(mllm::any_copy_tag, chunk_prompt_len)}}); + auto& chunk_logits = chunk_output["sequence"]; + + if (!is_last_prompt_chunk) { + MLLM_INFO("Chunk {} processed as prompt only, moving to next chunk", chunk_index); + chunk_logits.delete_(); + chunk_output.clear(); + continue; } - } - MLLM_INFO("=== Decode Phase ==="); - MLLM_INFO("Starting decode loop, initial seq_len: {}", current_seq_len); + if (chunk_prompt_len >= chunk_size) { + MLLM_WARN("Last chunk is fully occupied by prompt tokens; no padding for decode"); + chunk_logits.delete_(); + chunk_output.clear(); + break; + } - int decode_step = 0; - while (current_seq_len < chunk_size) { - decode_step++; - MLLM_INFO("--- Decode Step {} ---", decode_step); - MLLM_INFO("Current sequence length: {}", current_seq_len); + MLLM_INFO("=== Decode Phase (Chunk {}) ===", chunk_index); - // Reset KV cache to 0 for full prefill (重新计算所有 KV cache) - model.setKVCacheSeqCnt(0); + // Use the prefill logits as the first decode step + auto next_token = model.sampleGreedy(chunk_logits); + chunk_logits.delete_(); + + // Keep full-length position_ids tensor aligned with chunk buffer + auto position_ids = position_ids_tensor; - // IMPORTANT: Use full 
[1, 128] tensor, not a slice - // QNN backend expects fixed-size input tensor [1, 128] as defined during graph build - // We only use the first current_seq_len tokens, the rest are padding (-1) - // Ensure padding area is properly set to -1 - for (int i = current_seq_len; i < chunk_size; ++i) { - sequence_ptr[i] = -1; - } + chunk_output.clear(); - // Use full sequence tensor - QNN backend will handle the size correctly - // The seq_len parameter tells the model how many tokens are actually valid - mllm::models::ARGenerationOutputPast decode_input{ - {"sequence", sequence_tensor}, // Use full [1, 128] tensor, not a slice + auto emit_token = [&](int64_t token_id) { + std::wcout << qwen_tokenizer.detokenize(token_id); + // if (token_id == eos_token_id) { + // MLLM_INFO("EOS token detected, stopping decode"); + // reached_eos = true; + // } }; - MLLM_INFO("Decode input sequence length: {} (using full [1, {}] tensor)", current_seq_len, chunk_size); - - // Forward with full sequence - this is a full prefill, not incremental decode - // seq_len parameter tells the model to only process first current_seq_len tokens - auto decode_output = model.forward(decode_input, {{"seq_len", mllm::AnyValue(current_seq_len)}}); - - // Print KV cache length after decode - auto kv_cache_len = model.getKVCacheSeqCnt(0); // Get KV cache length from layer 0 - MLLM_INFO("KV cache length after decode step {}: {}", decode_step, kv_cache_len); - - // Sample next token - auto& decode_logits = decode_output["sequence"]; - auto next_token = model.sampleGreedy(decode_logits); - MLLM_INFO("Generated token id: {}", next_token); - std::wcout << qwen_tokenizer.detokenize(next_token); - - // Check termination - if (next_token == eos_token_id) { - MLLM_INFO("EOS token detected, stopping decode"); - break; + int current_chunk_len = chunk_prompt_len; + emit_token(next_token); + if (reached_eos) { break; } + + sequence_ptr[current_chunk_len] = next_token; + current_chunk_len++; + + while (!reached_eos && 
current_chunk_len < chunk_size) { + total_decode_steps++; + + // Calculate absolute sequence length from the start of the entire sequence + const int absolute_seq_len = chunk_start + current_chunk_len; + + MLLM_INFO("--- Chunk {} Decode Step {} ---", chunk_index, total_decode_steps); + MLLM_INFO("Current chunk length: {} (relative), Absolute sequence length: {} (absolute)", current_chunk_len, absolute_seq_len); + + // Keep padding clean for the remaining area + for (int i = current_chunk_len; i < chunk_size; ++i) { sequence_ptr[i] = -1; } + + // Set KV cache to absolute sequence length (where the next token will be written) + // [Maybe Wrong] + model.setKVCacheSeqCnt(chunk_start); + MLLM_INFO("KV cache seq_cnt set to: {} (relative position)", chunk_start); + + // Prepare decode input with position_ids from previous step + mllm::models::ARGenerationOutputPast decode_inputs{ + {"sequence", sequence_tensor}, + {"position_ids", position_ids}}; + + // real_seq should be the effective length in the current input tensor (relative position) + // hidden_states shape is [1, chunk_size, hidden_size], we need to index it with current_chunk_len - 1 + auto decode_output = model.forward( + decode_inputs, {{"seq_len", mllm::AnyValue(mllm::any_copy_tag, current_chunk_len)}}); + + auto& decode_logits = decode_output["sequence"]; + next_token = model.sampleGreedy(decode_logits); + decode_logits.delete_(); + decode_output.erase("sequence"); + decode_output.clear(); + + emit_token(next_token); + if (reached_eos) { break; } + + sequence_ptr[current_chunk_len] = next_token; + current_chunk_len++; } - // Write new token into sequence buffer - sequence_ptr[current_seq_len] = next_token; - current_seq_len++; - MLLM_INFO("Updated sequence length: {}", current_seq_len); - - // Clean up - no need to keep position_ids since we're doing full prefill each time - decode_logits.delete_(); - decode_output.clear(); - - // Debug: Check registered buffer count after each decode step - { - auto 
qnn_backend = mllm::Context::instance().getBackend(mllm::kQNN); - if (qnn_backend) { - auto allocator = std::static_pointer_cast(qnn_backend->allocator()); - if (allocator) { - auto stats = allocator->getRegisteredBufferStats(); - MLLM_INFO("After decode step {}: {} buffers registered, {} MB", - decode_step, stats.count, stats.total_bytes / (1024 * 1024)); - } - } - } + MLLM_INFO("=== Chunk {} Decode Complete ===", chunk_index); + MLLM_INFO("Chunk final length: {}", current_chunk_len); + MLLM_INFO("Remaining capacity: {}", chunk_size - current_chunk_len); } - MLLM_INFO("=== Decode Complete ==="); - MLLM_INFO("Total decode steps: {}", decode_step); - MLLM_INFO("Final sequence length: {}", current_seq_len); - MLLM_INFO("Remaining capacity: {}", chunk_size - current_seq_len); std::wcout << L"\n"; return 0; From e26b11bbf2973ffda47accc60f4a6e560b828335 Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Wed, 19 Nov 2025 13:13:40 +0000 Subject: [PATCH 4/8] fix: stabilize QNN multi-chunk decoding - correct multi-chunk decode loop and KV cache sequencing - CausalMaskOp improvement by @oreomaker --- .../qwen_npu_decoding_git_workflow.md | 686 --------- .../qwen_npu_decoding_requirements.md | 510 ------- docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md | 453 ------ docs/qnn_fix_bug/adb_output.md | 1343 ----------------- examples/qwen_npu/CODE_EXPLANATION.md | 951 ------------ examples/qwen_npu/main.cpp | 14 +- mllm/backends/cpu/ops/CausalMaskOp.cpp | 8 +- tests/qnn/CMakeLists.txt | 9 - tests/qnn/QNNOutputOrderTest.cpp | 97 -- 9 files changed, 14 insertions(+), 4057 deletions(-) delete mode 100644 docs/qnn_backend/qwen_npu_decoding_git_workflow.md delete mode 100644 docs/qnn_backend/qwen_npu_decoding_requirements.md delete mode 100644 docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md delete mode 100644 docs/qnn_fix_bug/adb_output.md delete mode 100644 examples/qwen_npu/CODE_EXPLANATION.md delete mode 100644 tests/qnn/CMakeLists.txt delete mode 100644 
tests/qnn/QNNOutputOrderTest.cpp diff --git a/docs/qnn_backend/qwen_npu_decoding_git_workflow.md b/docs/qnn_backend/qwen_npu_decoding_git_workflow.md deleted file mode 100644 index e15481b9e..000000000 --- a/docs/qnn_backend/qwen_npu_decoding_git_workflow.md +++ /dev/null @@ -1,686 +0,0 @@ -# QNN Decoding 功能开发 - Git 工作流指南 - -本文档提供 QNN Decoding 功能开发的完整 Git 工作流,从创建功能分支到提交 PR 的每一步都有详细说明。 - -## 📋 目录 - -1. [前置准备](#前置准备) -2. [阶段 1: 创建功能分支](#阶段-1-创建功能分支) -3. [阶段 2: 日常开发流程](#阶段-2-日常开发流程) -4. [阶段 3: 提交和推送](#阶段-3-提交和推送) -5. [阶段 4: 创建 Pull Request](#阶段-4-创建-pull-request) -6. [常见问题](#常见问题) -7. [快速参考命令](#快速参考命令) - ---- - -## 前置准备 - -### 1. 检查 Git 配置 - -确保 Git 已配置用户信息: - -```bash -# 检查当前配置 -git config user.name -git config user.email - -# 如果未配置,设置全局配置 -git config --global user.name "你的名字" -git config --global user.email "你的邮箱" -``` - -### 2. 检查远程仓库配置 - -```bash -# 查看远程仓库 -git remote -v -``` - -**期望输出:** -``` -origin git@github.com:jialilve/mllm.git (fetch) -origin git@github.com:jialilve/mllm.git (push) -upstream https://github.com/UbiquitousLearning/mllm.git (fetch) -upstream https://github.com/UbiquitousLearning/mllm.git (push) -``` - -**如果没有 upstream,添加它:** -```bash -git remote add upstream https://github.com/UbiquitousLearning/mllm.git -``` - -### 3. 检查当前状态 - -```bash -# 查看当前分支 -git branch - -# 查看当前状态 -git status - -# 查看最近的提交历史 -git log --oneline -5 -``` - ---- - -## 阶段 1: 创建功能分支 - -### 步骤 1.1: 同步 upstream 最新代码 - -在创建功能分支之前,确保基于最新的 upstream/v2 代码: - -```bash -# 1. 获取 upstream 的最新更改 -git fetch upstream - -# 2. 查看 upstream/v2 和本地 v2 的差异(可选) -git log v2..upstream/v2 --oneline - -# 3. 
如果 upstream 有更新,同步到本地 v2(可选,用于保持本地 v2 最新) -git checkout v2 -git merge upstream/v2 -# 或者使用 rebase(更推荐,保持提交历史整洁) -# git rebase upstream/v2 -``` - -### 步骤 1.2: 创建功能分支 - -**重要:** 功能分支应该基于 `upstream/v2` 创建,而不是 `origin/v2` 或本地 `v2`。 - -```bash -# 创建并切换到新功能分支 -git checkout -b feature/qwen-npu-decoding upstream/v2 -``` - -**分支命名规范:** -- `feature/` - 新功能 -- `fix/` - 修复 bug -- `refactor/` - 重构 -- `docs/` - 文档更新 - -**示例:** -- ✅ `feature/qwen-npu-decoding` - 新功能 -- ✅ `fix/qnn-kv-cache-sync` - 修复 -- ❌ `my-branch` - 不推荐,不够描述性 - -### 步骤 1.3: 验证分支状态 - -```bash -# 确认当前在功能分支上 -git branch - -# 应该显示 * feature/qwen-npu-decoding - -# 查看分支基于哪个提交 -git log --oneline -1 - -# 查看与 upstream/v2 的关系 -git log --oneline --graph --decorate -5 -``` - ---- - -## 阶段 2: 日常开发流程 - -### 2.1 开始开发 - -在功能分支上进行开发: - -```bash -# 确认在功能分支上 -git branch - -# 开始编辑文件、添加代码等 -# ... -``` - -### 2.2 查看修改状态 - -定期检查你的修改: - -```bash -# 查看哪些文件被修改 -git status - -# 查看具体的修改内容 -git diff - -# 查看某个文件的修改 -git diff <文件路径> - -# 查看已暂存和未暂存的修改 -git diff --staged # 已暂存 -git diff # 未暂存 -``` - -### 2.3 暂存修改(准备提交) - -```bash -# 暂存所有修改 -git add . 
- -# 或者暂存特定文件 -git add <文件路径1> <文件路径2> - -# 或者暂存特定目录 -git add <目录路径>/ - -# 查看暂存的文件 -git status -``` - -**最佳实践:** -- 相关修改一起提交(例如:接口定义和实现一起提交) -- 不相关的修改分开提交 -- 每次提交应该是一个逻辑完整的改动 - -### 2.4 提交修改 - -```bash -# 提交暂存的修改 -git commit -m "提交信息" -``` - -**提交信息规范:** - -格式:`<类型>: <简短描述>` - -**类型:** -- `feat`: 新功能 -- `fix`: 修复 bug -- `docs`: 文档更新 -- `refactor`: 重构 -- `test`: 测试相关 -- `chore`: 构建/工具相关 - -**示例:** - -```bash -# 好的提交信息 -git commit -m "feat: add KV cache interface extension for Qwen NPU decoding" -git commit -m "fix: correct position_ids handling in decode loop" -git commit -m "docs: update decoding requirements document" - -# 多行提交信息(推荐用于复杂改动) -git commit -m "feat: implement decoding loop for Qwen NPU - -- Add KV cache sequence count management -- Implement decode loop with position_ids handling -- Add EOS token termination check -- Update forward method to support decode phase" -``` - -**不好的提交信息:** -```bash -# ❌ 太简单 -git commit -m "update" - -# ❌ 不够描述性 -git commit -m "fix bug" - -# ❌ 使用中文(除非项目要求) -git commit -m "修复问题" -``` - ---- - -## 阶段 3: 提交和推送 - -### 3.1 提交到本地仓库 - -```bash -# 提交修改 -git add . -git commit -m "feat: your commit message" -``` - -### 3.2 推送到 Fork(origin) - -**第一次推送:** - -```bash -# 推送功能分支到 origin(你的 Fork) -git push -u origin feature/qwen-npu-decoding -``` - -`-u` 参数设置上游分支,之后可以直接使用 `git push`。 - -**后续推送:** - -```bash -# 如果已设置上游分支 -git push - -# 或者明确指定 -git push origin feature/qwen-npu-decoding -``` - -### 3.3 处理推送冲突 - -如果 upstream 有更新,你的分支可能落后: - -```bash -# 1. 获取最新代码 -git fetch upstream - -# 2. 在功能分支上 rebase upstream/v2 -git rebase upstream/v2 - -# 3. 如果有冲突,解决冲突后继续 -# 解决冲突后: -git add <冲突文件> -git rebase --continue - -# 4. 如果 rebase 过程中想取消 -git rebase --abort - -# 5. 强制推送(因为 rebase 改变了历史) -git push --force-with-lease origin feature/qwen-npu-decoding -``` - -**注意:** 使用 `--force-with-lease` 比 `--force` 更安全,它会检查远程分支是否有其他人的提交。 - ---- - -## 阶段 4: 创建 Pull Request - -### 4.1 推送功能分支 - -确保所有修改都已提交并推送: - -```bash -# 检查状态 -git status - -# 如果有未提交的修改,先提交 -git add . 
-git commit -m "feat: final changes" - -# 推送到 Fork -git push origin feature/qwen-npu-decoding -``` - -### 4.2 在 GitHub 上创建 PR - -#### 方法 1: 通过 GitHub Web 界面 - -1. **访问你的 Fork 仓库:** - ``` - https://github.com/jialilve/mllm - ``` - -2. **你会看到提示创建 PR:** - - GitHub 通常会在你推送新分支后显示提示 - - 点击 "Compare & pull request" 按钮 - -3. **或者手动创建:** - - 点击 "Pull requests" 标签 - - 点击 "New pull request" - - 选择: - - **base repository:** `UbiquitousLearning/mllm` - - **base branch:** `v2` - - **compare repository:** `jialilve/mllm` - - **compare branch:** `feature/qwen-npu-decoding` - -#### 方法 2: 使用 GitHub CLI(如果已安装) - -```bash -# 创建 PR -gh pr create --base v2 --head jialilve:feature/qwen-npu-decoding --title "feat: Qwen NPU Decoding Support" --body "PR描述内容" -``` - -### 4.3 编写 PR 描述 - -**PR 标题格式:** -``` -feat: Qwen NPU Decoding Support -``` - -**PR 描述模板:** - -```markdown -## 功能描述 -实现 Qwen NPU 自回归解码功能,支持连续 token 生成。 - -## 主要改动 -- 扩展 KV Cache 接口,支持序列长度管理 -- 实现解码循环,支持 position_ids 自动递增 -- 添加 EOS token 终止检查 -- 更新 forward 方法以支持 decode 阶段 - -## 实现细节 -- 在 `QwenForCausalLM` 中添加 `setKVCacheSeqCnt` 方法 -- 实现基于 128 长度 KV cache 的解码循环 -- 正确处理 position_ids 的传递和递增 - -## 测试 -- [x] 编译通过 -- [x] 单次 prefill 测试通过 -- [x] 解码循环测试通过 -- [x] EOS token 终止测试通过 - -## 相关文档 -- [需求文档](../docs/qnn_backend/qwen_npu_decoding_requirements.md) - -## 相关 Issue -# (如果有) -``` - -### 4.4 PR 提交清单 - -在创建 PR 之前,确认: - -- [ ] 代码已编译通过,无编译错误 -- [ ] 已运行相关测试,测试通过 -- [ ] 代码已格式化(如果有格式化工具) -- [ ] 提交信息清晰,符合规范 -- [ ] 所有修改都已提交并推送 -- [ ] PR 描述清晰,说明了功能和改动 -- [ ] 已同步 upstream/v2 最新代码(避免冲突) - ---- - -## 常见问题 - -### Q1: 如何查看功能分支和 upstream/v2 的差异? - -```bash -# 查看所有差异 -git diff upstream/v2..feature/qwen-npu-decoding - -# 查看提交历史差异 -git log upstream/v2..feature/qwen-npu-decoding --oneline - -# 查看文件列表差异 -git diff --name-only upstream/v2..feature/qwen-npu-decoding -``` - -### Q2: 如何修改已提交的 commit? 
- -**修改最后一次提交:** - -```bash -# 修改提交信息 -git commit --amend -m "新的提交信息" - -# 添加遗漏的文件到上次提交 -git add <遗漏的文件> -git commit --amend --no-edit - -# 修改后需要强制推送 -git push --force-with-lease origin feature/qwen-npu-decoding -``` - -**修改更早的提交:** - -```bash -# 使用交互式 rebase -git rebase -i HEAD~3 # 修改最近 3 个提交 - -# 在编辑器中,将需要修改的提交标记为 'edit' -# 然后修改文件,执行: -git add . -git commit --amend -git rebase --continue -``` - -### Q3: 如何撤销未提交的修改? - -```bash -# 撤销工作区的修改(未暂存) -git checkout -- <文件路径> -# 或者 -git restore <文件路径> - -# 撤销所有未暂存的修改 -git checkout -- . -# 或者 -git restore . - -# 撤销暂存的修改(但保留工作区修改) -git reset HEAD <文件路径> -# 或者 -git restore --staged <文件路径> -``` - -### Q4: 如何查看分支的提交历史? - -```bash -# 简洁模式 -git log --oneline - -# 图形化显示 -git log --oneline --graph --decorate - -# 显示最近 10 个提交 -git log --oneline -10 - -# 显示某个文件的提交历史 -git log --oneline <文件路径> -``` - -### Q5: 如何切换分支? - -```bash -# 切换到其他分支 -git checkout <分支名> - -# 或者使用新的命令(Git 2.23+) -git switch <分支名> - -# 创建并切换新分支 -git checkout -b <新分支名> -# 或者 -git switch -c <新分支名> -``` - -### Q6: 如何删除分支? - -```bash -# 删除本地分支 -git branch -d feature/qwen-npu-decoding - -# 强制删除本地分支(即使未合并) -git branch -D feature/qwen-npu-decoding - -# 删除远程分支 -git push origin --delete feature/qwen-npu-decoding -``` - -### Q7: PR 被要求修改后怎么办? - -```bash -# 1. 在功能分支上继续修改 -git checkout feature/qwen-npu-decoding - -# 2. 进行修改 -# ... 编辑文件 ... - -# 3. 提交修改 -git add . -git commit -m "fix: address review comments" - -# 4. 推送到 Fork -git push origin feature/qwen-npu-decoding - -# PR 会自动更新,不需要重新创建 -``` - -### Q8: 如何同步 upstream 的最新代码到功能分支? - -```bash -# 方法 1: 使用 rebase(推荐,保持提交历史整洁) -git fetch upstream -git rebase upstream/v2 - -# 如果有冲突,解决后: -git add <冲突文件> -git rebase --continue - -# 方法 2: 使用 merge -git fetch upstream -git merge upstream/v2 -``` - ---- - -## 快速参考命令 - -### 日常开发流程 - -```bash -# 1. 切换到功能分支 -git checkout feature/qwen-npu-decoding - -# 2. 查看状态 -git status - -# 3. 暂存修改 -git add . - -# 4. 提交 -git commit -m "feat: your message" - -# 5. 
推送 -git push -``` - -### 创建功能分支(一次性) - -```bash -# 1. 同步 upstream -git fetch upstream - -# 2. 创建功能分支 -git checkout -b feature/qwen-npu-decoding upstream/v2 - -# 3. 推送并设置上游 -git push -u origin feature/qwen-npu-decoding -``` - -### 同步 upstream 代码 - -```bash -# 1. 获取最新代码 -git fetch upstream - -# 2. 在功能分支上 rebase -git checkout feature/qwen-npu-decoding -git rebase upstream/v2 - -# 3. 如果有冲突,解决后继续 -git add <冲突文件> -git rebase --continue - -# 4. 强制推送 -git push --force-with-lease -``` - -### 查看差异和状态 - -```bash -# 查看工作区修改 -git diff - -# 查看与 upstream/v2 的差异 -git diff upstream/v2..feature/qwen-npu-decoding - -# 查看提交历史 -git log --oneline --graph --decorate -10 -``` - ---- - -## 完整工作流示例 - -假设你要实现 QNN Decoding 功能,完整流程如下: - -```bash -# ========== 阶段 1: 创建功能分支 ========== - -# 1. 同步 upstream -git fetch upstream - -# 2. 创建功能分支 -git checkout -b feature/qwen-npu-decoding upstream/v2 - -# 3. 推送并设置上游 -git push -u origin feature/qwen-npu-decoding - - -# ========== 阶段 2: 开发 ========== - -# 1. 开始开发(编辑文件) -vim mllm/models/qwen_npu/modeling_qwen_npu.hpp -# ... 添加代码 ... - -# 2. 查看修改 -git status -git diff - -# 3. 暂存并提交 -git add mllm/models/qwen_npu/modeling_qwen_npu.hpp -git commit -m "feat: add KV cache interface extension" - -# 4. 继续开发 -vim mllm/models/qwen_npu/modeling_qwen_npu.cpp -# ... 添加代码 ... - -# 5. 再次提交 -git add mllm/models/qwen_npu/modeling_qwen_npu.cpp -git commit -m "feat: implement setKVCacheSeqCnt method" - -# 6. 定期推送 -git push - - -# ========== 阶段 3: 准备 PR ========== - -# 1. 确保所有修改已提交 -git status - -# 2. 同步 upstream(避免冲突) -git fetch upstream -git rebase upstream/v2 - -# 3. 如果有冲突,解决后继续 -# git add <冲突文件> -# git rebase --continue - -# 4. 强制推送(如果 rebase 了) -git push --force-with-lease - -# 5. 在 GitHub 上创建 PR -# 访问: https://github.com/jialilve/mllm -# 点击 "Compare & pull request" -``` - ---- - -## 总结 - -**标准工作流:** - -1. ✅ **创建功能分支** - 基于 `upstream/v2` -2. ✅ **开发** - 在功能分支上编辑、提交 -3. ✅ **推送** - 定期推送到 Fork -4. ✅ **同步** - 必要时同步 upstream 代码 -5. 
✅ **PR** - 在 GitHub 上创建 Pull Request - -**关键原则:** - -- 🎯 每个功能使用独立分支 -- 🎯 功能分支基于 `upstream/v2` -- 🎯 提交信息清晰、规范 -- 🎯 定期推送,避免丢失工作 -- 🎯 PR 前同步 upstream,避免冲突 - ---- - -**需要帮助?** 如果遇到问题,可以: -- 查看本文档的"常见问题"部分 -- 使用 `git help <命令>` 查看帮助 -- 参考项目的其他 PR 示例 - diff --git a/docs/qnn_backend/qwen_npu_decoding_requirements.md b/docs/qnn_backend/qwen_npu_decoding_requirements.md deleted file mode 100644 index 43e880795..000000000 --- a/docs/qnn_backend/qwen_npu_decoding_requirements.md +++ /dev/null @@ -1,510 +0,0 @@ -# QNN Backend Qwen NPU Decoding 功能需求分析文档 - -## 1. 项目背景 - -### 1.1 目标 -在 mllm_v2 框架上实现基于 QNN 加速的 Qwen3 4B 长文本推理功能。当前 QNN 已迁移到 v2 版本,但仅支持单个固定长度输入的 prefill 推理。需要实现自回归解码(decoding)功能,使模型能够连续生成文本。 - -### 1.2 当前状态 -- **功能限制**:QNN 后端仅支持单 chunk 128 长度的 prefill 推理 -- **问题现象**:`mllm-qwen-npu` 示例程序只输出单个 token 后即结束,无法进行连续生成 -- **技术约束**: - - QNN 端只允许单 chunk 128 长度 - - Decode 阶段的新 token 处理需要在 CPU 侧完成 - - QNN 负责输出 logits,CPU 负责采样和 token 管理 - -## 2. 功能需求 - -### 2.1 核心功能 -在输入长度 < chunk_size(chunk_size = 128)的场景下,实现基于 QNN 后端的自回归解码: - -1. **KV Cache 管理** - - KV cache 默认长度为 1K(1024) - - Prefill 阶段:real_seq 以内的真实输入 + (128 - real_seq) 的 padding - - Decode 阶段:利用 padding 区域存放新生成的 token - -2. **解码循环** - - 循环调用 `forward` 生成下一个 token - - 将新 token 写入 padding 区域(在 CPU buffer 中维护输入序列) - - 累积 seq_len,直至满足终止条件 - -3. **终止条件** - - 总长度达到 128(chunk_size) - - 生成 EOS token(token ID: 151645) - -### 2.2 预期效果 -示例程序能够输出完整句子,而非仅单个 token。推理流程能够连续生成文本,直到达到最大长度或遇到 EOS token。 - -## 3. 
技术实现方案 - -### 3.1 KV Cache 接口扩展 - -#### 3.1.1 接口设计原则 -- **避免全局接口**:v1 中使用了大量全局接口,耦合性过大 -- **在 modeling 中体现接口**:方便后续功能扩展 -- **保持向后兼容**:确保新增接口不会破坏已有 trace/prefill 流程 - -#### 3.1.2 需要实现的接口层次 - -**层次 1:基类接口(aops::KVCacheOp)** -```cpp -// mllm/core/aops/KVCacheOp.hpp -class KVCacheOp : public BaseOp { -public: - // 现有接口 - void setLayerIndex(int32_t layer_idx); - virtual void clearCache(); - - // 新增接口 - virtual void setCurrentSeqCnt(int32_t seq); -}; -``` - -**层次 2:CPU 实现(CPUKVCacheOp)** -```cpp -// mllm/backends/cpu/ops/KVCacheOp.hpp -class CPUKVCacheOp final : public aops::KVCacheOp { -public: - void setCurrentSeqCnt(int32_t seq) override; - -private: - nn::StaticCache cache_; // 内部使用 StaticCache -}; -``` - -**层次 3:Layer 接口(nn::KVCache)** -```cpp -// mllm/nn/layers/KVCache.hpp -class KVCache : public Layer { -public: - void setCurrentSeqCnt(int32_t seq); - // 现有接口:clearCache(), setLayerIndex() -}; -``` - -**层次 4:Model 接口(QwenText/QwenForCausalLM)** -```cpp -// mllm/models/qwen_npu/modeling_qwen_npu.hpp -class QwenText : public nn::Module { -public: - void setKVCacheSeqCnt(int32_t seq); // 设置所有层的 KV cache 序列长度 - void clearKVCache(); // 现有接口 -}; - -class QwenForCausalLM : public nn::Module, public ARGeneration { -public: - void setKVCacheSeqCnt(int32_t seq); // 委托给 model.setKVCacheSeqCnt() -}; -``` - -#### 3.1.3 实现细节 - -**StaticCache::setCurrentSeqCnt 行为** -- 参考 `nn::StaticCache::setCurrentSeqCnt(int32_t seq)` -- 设置所有层的 `current_seq_cnt_[layer_idx] = seq` -- **关键**:不会覆盖已有 KV cache 数据,只是更新长度计数器 - -**CPUKVCacheOp::setCurrentSeqCnt 实现** -```cpp -void CPUKVCacheOp::setCurrentSeqCnt(int32_t seq) { - cache_.setCurrentSeqCnt(seq); -} -``` - -**nn::KVCache::setCurrentSeqCnt 实现** -```cpp -void KVCache::setCurrentSeqCnt(int32_t seq) { - std::static_pointer_cast(impl()->getInstancedOp())->setCurrentSeqCnt(seq); -} -``` - -### 3.2 解码循环实现 - -#### 3.2.1 在 main.cpp 中添加解码循环 - -**当前代码结构**(examples/qwen_npu/main.cpp): -```cpp -// Prefill 阶段 -auto out = model.forward(inputs, {{"seq_len", 
mllm::AnyValue((int)raw_input_tokens.shape()[1])}})["sequence"]; -auto sampled = model.sampleGreedy(out); -std::wcout << "token: " << sampled << " " << qwen_tokenizer.detokenize(sampled) << "\n"; -``` - -**需要添加的解码循环**(包含调试日志): -```cpp -const int chunk_size = 128; -const int real_seq = raw_input_tokens.shape()[1]; // 实际输入长度 -const int eos_token_id = 151645; - -// Prefill 阶段(已有代码) -MLLM_INFO("=== Prefill Phase ==="); -MLLM_INFO("Input sequence length: {}", real_seq); -auto prefill_output = model.forward(inputs, {{"seq_len", mllm::AnyValue(real_seq)}}); -auto sampled = model.sampleGreedy(prefill_output["sequence"]); -MLLM_INFO("Prefill generated token: {} ({})", sampled, qwen_tokenizer.detokenize(sampled)); -std::wcout << qwen_tokenizer.detokenize(sampled); - -// 解码循环 -int current_seq_len = real_seq; -auto& sequence_tensor = inputs["sequence"]; -auto sequence_ptr = sequence_tensor.ptr(); - -// 将第一个生成的 token 写入 padding 区域 -sequence_ptr[current_seq_len] = sampled; -current_seq_len++; - -// 保存 prefill 返回的 position_ids,用于第一次 decode -ARGenerationOutputPast past = prefill_output; - -MLLM_INFO("=== Decode Phase ==="); -MLLM_INFO("Starting decode loop, initial seq_len: {}", current_seq_len); - -// 循环生成直到达到 chunk_size 或遇到 EOS -int decode_step = 0; -while (current_seq_len < chunk_size) { - decode_step++; - MLLM_INFO("--- Decode Step {} ---", decode_step); - MLLM_INFO("Current sequence length: {}", current_seq_len); - - // 更新 KV cache 序列长度 - model.setKVCacheSeqCnt(current_seq_len); - - // 验证 KV cache 状态(调试用) - // 注意:需要通过 model.model 访问内部 KV cache - // 这里假设可以通过某种方式访问,实际实现时可能需要添加辅助方法 - // MLLM_INFO("KV cache seq_cnt after update: {}", model.model.getKVCacheSeqCnt(0)); - - // 准备输入:只包含当前要处理的 token(decode 阶段每次只处理 1 个 token) - // 注意:需要传入上一次返回的 position_ids,forward 方法会自动递增 - auto decode_input = ARGenerationOutputPast{ - {"sequence", Tensor::empty({1, 1}, kInt64, kCPU).alloc()}, - {"position_ids", past["position_ids"]} // 使用上一次返回的 position_ids - }; - decode_input["sequence"].ptr()[0] = 
sequence_ptr[current_seq_len - 1]; - - MLLM_INFO("Decode input token: {}", sequence_ptr[current_seq_len - 1]); - - // 调用 forward,传入当前序列长度 - // forward 方法会检测到 position_ids 存在且 seq_len == 1,自动递增位置 - auto decode_output = model.forward(decode_input, {{"seq_len", mllm::AnyValue(current_seq_len)}}); - - // 采样下一个 token - auto next_token = model.sampleGreedy(decode_output["sequence"]); - MLLM_INFO("Generated token: {} ({})", next_token, qwen_tokenizer.detokenize(next_token)); - std::wcout << qwen_tokenizer.detokenize(next_token); - - // 检查终止条件 - if (next_token == eos_token_id) { - MLLM_INFO("EOS token detected, stopping decode"); - break; - } - - // 将新 token 写入序列 - sequence_ptr[current_seq_len] = next_token; - current_seq_len++; - MLLM_INFO("Updated sequence length: {}", current_seq_len); - - // 保存本次输出,用于下次循环(包含更新后的 position_ids) - past = decode_output; -} - -MLLM_INFO("=== Decode Complete ==="); -MLLM_INFO("Total decode steps: {}", decode_step); -MLLM_INFO("Final sequence length: {}", current_seq_len); -MLLM_INFO("Remaining capacity: {}", chunk_size - current_seq_len); -std::wcout << "\n"; -``` - -#### 3.2.2 关键实现细节 - -**输入序列管理** -- Prefill 阶段:使用完整的 128 长度 tensor,real_seq 之前是真实 token,之后是 padding(-1) -- Decode 阶段:每次 forward 只传入单个 token(形状 [1, 1]),但需要正确设置 seq_len 参数 - -**KV Cache 同步** -- 每次解码循环前调用 `model.setKVCacheSeqCnt(current_seq_len)` -- 确保 KV cache 知道当前已处理的序列长度 -- 新 token 的 KV 会被追加到现有 cache 的末尾 - -**Position IDs 处理** -- Decode 阶段需要正确传递 position_ids -- 参考 `QwenForCausalLM::forward` 中的 position_ids 生成逻辑: - - Prefill 阶段:自动生成 `[0, 1, 2, ..., seq_len-1]` - - Decode 阶段:如果 input 中包含 position_ids,会自动递增最后一个位置 -- 实现要点: - - Prefill 返回的 output 中包含 position_ids - - 第一次 decode 时,使用 prefill 返回的 position_ids - - 后续 decode 时,使用上一次 forward 返回的 position_ids - - forward 方法会自动检测 `seq_len == 1` 且存在 position_ids,然后递增位置 - -### 3.3 量化信息处理 - -#### 3.3.1 量化 Scale 的作用 -- **GraphBuild 阶段**:量化 scale 用于构建 QNN 计算图 -- **执行阶段**:量化 scale 仍然有效,但不会被使用(QNN 内部已固化) - -#### 3.3.2 实现注意事项 -- 量化 scale 只在 quantize 
前需要显式 attach 到 input tensor -- 参考 `QNNCastTypeOp.cpp::QNNQuantizePattern` 中从输入 tensor 获取 quant scale 的操作 -- 对于 scale 维持不变的算子(view, transpose),使用 `propagateQuantScale` 进行传递 -- 对于 Linear 算子,scale 通过模型加载而来 - -**结论**:在 decode 循环中,不需要重新设置量化 scale,因为: -1. 量化参数已在 GraphBuild 时附加到 QNN tensor 中 -2. 执行时 QNN 内部使用已固化的量化参数 - -## 4. 约束与关注点 - -### 4.1 技术约束 -1. **QNN 限制**:QNN 端只允许单 chunk 128 长度;decode 只能在 CPU 侧处理新增 token -2. **KV Cache 管理**:需要确保新增的接口不会破坏已有 trace/prefill 流程 -3. **内存管理**:避免在 decode 循环中创建新的 128 长度 KV cache,应复用现有 cache - -### 4.2 实现注意事项 -1. **Position IDs**:decode 阶段需要正确生成 position_ids,确保位置编码正确 -2. **序列长度参数**:每次 forward 需要传入正确的 seq_len,告知模型当前实际序列长度 -3. **Tensor 设备**:注意 QNN/CPU 之间的 tensor 转换,确保数据正确传递 -4. **错误处理**: - - 验证 `current_seq_len` 不超过 `chunk_size`(128) - - 验证输入序列长度 `real_seq` 小于 `chunk_size` - - 处理 `forward` 调用可能出现的异常 - - 验证 `setKVCacheSeqCnt` 的参数范围(0 <= seq <= chunk_size) -5. **边界情况**: - - 输入长度为 0 或负数(应在调用前验证) - - 输入长度等于或超过 chunk_size(应在调用前验证或拒绝) - - KV cache 已满的情况(理论上不应发生,因为限制在 chunk_size 内) - -### 4.3 调试与验证 -1. **调试环境**:Android 设备、ADB -2. **验证方法**: - - 检查输出是否连续生成多个 token - - 验证是否在 EOS 或达到 128 长度时正确停止 - - 确认没有内存泄漏或崩溃 - -## 5. 待解决问题 - -### 5.1 Context 析构问题 -**问题描述**:当前存在 SIGSEGV 崩溃,推测与析构顺序相关。 - -**解决方案**:需要在 context 析构中手动管理 backend 销毁顺序。 - -**实现位置**:待确认具体实现位置(可能在 QNNBackend 或 Context 相关代码中) - -### 5.2 Position IDs 生成逻辑(已解决) -**解决方案**:decode 循环中需要显式传递 position_ids。 - -**实现方式**: -- Prefill 阶段返回的 output 中包含 position_ids -- 第一次 decode 时,使用 prefill 返回的 position_ids -- 后续 decode 时,使用上一次 forward 返回的 position_ids -- `QwenForCausalLM::forward` 方法会自动检测 `seq_len == 1` 且存在 position_ids,然后递增位置 - -**参考实现**:见 3.2.1 节解码循环代码示例。 - -## 6. 调试日志与测试验证 - -### 6.1 调试日志需求 - -为了验证 KV 缓存长度控制的正确性以及解码流程的正确性,需要在关键位置添加调试日志。 - -#### 6.1.1 日志位置与内容 - -**1. Prefill 阶段日志** -- 输入序列长度(real_seq) -- 生成的第一个 token ID 和文本 - -**2. 
Decode 循环日志(每次迭代)** -- 当前解码步骤编号 -- 当前序列长度(current_seq_len) -- 输入 token ID(用于验证输入序列管理) -- 生成的 token ID 和文本 -- KV cache 序列长度(验证 `setKVCacheSeqCnt` 是否正确设置) -- 终止原因(EOS 或达到最大长度) - -**3. 解码完成日志** -- 总解码步数 -- 最终序列长度 -- 剩余容量(chunk_size - current_seq_len) - -#### 6.1.2 日志实现方式 - -使用项目现有的日志宏 `MLLM_INFO`(定义在 `mllm/utils/Log.hpp`): - -```cpp -#include "mllm/utils/Log.hpp" - -// 示例 -MLLM_INFO("Current sequence length: {}", current_seq_len); -MLLM_INFO("Generated token: {} ({})", token_id, token_text); -``` - -**日志级别控制**: -- 默认日志级别为 `LogLevel::kInfo`,会显示所有 `MLLM_INFO` 日志 -- 可以通过 `Logger::level()` 调整日志级别(如果需要减少日志输出) - -#### 6.1.3 KV Cache 状态验证 - -为了验证 KV cache 状态,需要添加辅助方法获取当前序列长度: - -**可选实现:在 Model 接口中添加查询方法** -```cpp -// mllm/models/qwen_npu/modeling_qwen_npu.hpp -class QwenText : public nn::Module { -public: - void setKVCacheSeqCnt(int32_t seq); - int32_t getKVCacheSeqCnt(int32_t layer_idx = 0) const; // 新增:获取指定层的序列长度 - void clearKVCache(); -}; -``` - -**实现方式**: -```cpp -int32_t QwenText::getKVCacheSeqCnt(int32_t layer_idx) const { - // 通过内部 KV cache 层获取序列长度 - // 需要访问 model 内部的 kv_cache_ 成员 - // 具体实现取决于内部结构 -} -``` - -**注意**:如果添加查询方法比较复杂,也可以暂时在调试时通过其他方式验证(如直接访问内部 cache),或使用条件编译宏控制调试代码。 - -### 6.2 测试验证要点 - -#### 6.2.1 功能验证 - -1. **序列长度递增验证** - - 验证 `current_seq_len` 从 `real_seq` 开始,每次循环递增 1 - - 验证最终长度不超过 `chunk_size`(128) - -2. **KV Cache 同步验证** - - 验证每次调用 `setKVCacheSeqCnt` 后,KV cache 的序列长度正确更新 - - 验证所有层的序列长度保持一致 - - 验证新 token 的 KV 被正确追加到 cache 末尾 - -3. **输入序列管理验证** - - 验证新生成的 token 被正确写入 `sequence_tensor` 的 padding 区域 - - 验证每次 decode 时,输入 token 来自序列的正确位置(`sequence_ptr[current_seq_len - 1]`) - -4. **终止条件验证** - - 验证遇到 EOS token(151645)时正确停止 - - 验证达到 chunk_size(128)时正确停止 - - 验证终止后不再继续生成 - -5. **Position IDs 验证** - - 验证 position_ids 在每次 decode 后正确递增 - - 验证 position_ids 与序列长度一致 - -#### 6.2.2 边界情况测试 - -1. **最小输入长度** - - 测试 `real_seq = 1` 的情况 - - 验证能够正常进行 decode - -2. **接近最大长度** - - 测试 `real_seq = 127` 的情况(只能生成 1 个 token) - - 验证在达到 128 时正确停止 - -3. 
**EOS 提前终止** - - 测试在生成过程中遇到 EOS token - - 验证提前终止后不再继续生成 - -4. **空输入处理** - - 测试边界情况下的输入验证 - -#### 6.2.3 性能与稳定性验证 - -1. **内存泄漏检查** - - 使用内存检测工具(如 Valgrind、AddressSanitizer)检查 - - 验证 decode 循环中不会创建不必要的临时对象 - -2. **崩溃检查** - - 验证不会出现 SIGSEGV 或其他崩溃 - - 特别关注 Context 析构相关的崩溃(见 5.1 节) - -3. **长时间运行稳定性** - - 测试多次 decode 循环的稳定性 - - 验证 KV cache 不会溢出或损坏 - -### 6.3 调试日志示例输出 - -期望的日志输出格式: - -``` -[INFO] examples/qwen_npu/main.cpp:140 === Prefill Phase === -[INFO] examples/qwen_npu/main.cpp:141 Input sequence length: 5 -[INFO] examples/qwen_npu/main.cpp:144 Prefill generated token: 12345 (你好) -[INFO] examples/qwen_npu/main.cpp:156 === Decode Phase === -[INFO] examples/qwen_npu/main.cpp:157 Starting decode loop, initial seq_len: 6 -[INFO] examples/qwen_npu/main.cpp:162 --- Decode Step 1 --- -[INFO] examples/qwen_npu/main.cpp:163 Current sequence length: 6 -[INFO] examples/qwen_npu/main.cpp:177 Decode input token: 12345 -[INFO] examples/qwen_npu/main.cpp:186 Generated token: 67890 (世界) -[INFO] examples/qwen_npu/main.cpp:195 Updated sequence length: 7 -[INFO] examples/qwen_npu/main.cpp:162 --- Decode Step 2 --- -[INFO] examples/qwen_npu/main.cpp:163 Current sequence length: 7 -... -[INFO] examples/qwen_npu/main.cpp:200 === Decode Complete === -[INFO] examples/qwen_npu/main.cpp:201 Total decode steps: 10 -[INFO] examples/qwen_npu/main.cpp:202 Final sequence length: 15 -[INFO] examples/qwen_npu/main.cpp:203 Remaining capacity: 113 -``` - -### 6.4 调试日志的后续处理 - -**开发阶段**: -- 保留所有调试日志,便于问题定位和验证 - -**PR 提交阶段**: -- 根据项目规范,可以选择: - - **方案 A**:保留日志,通过日志级别控制(推荐) - - **方案 B**:注释掉调试日志,保留代码以便将来使用 - - **方案 C**:使用条件编译宏控制(如 `#ifdef MLLM_DEBUG_DECODING`) - -**建议**:使用方案 A,通过日志级别控制。如果需要减少日志输出,可以在发布版本中设置更高的日志级别。 - -## 7. 实现步骤 - -### 阶段 1:KV Cache 接口扩展 -1. 在 `aops::KVCacheOp` 中添加 `setCurrentSeqCnt` 虚方法 -2. 在 `CPUKVCacheOp` 中实现该方法,调用 `cache_.setCurrentSeqCnt()` -3. 在 `nn::KVCache` 中添加 `setCurrentSeqCnt` 方法 -4. 在 `QwenText` 和 `QwenForCausalLM` 中添加 `setKVCacheSeqCnt` 方法 - -### 阶段 2:解码循环实现与调试日志 -1. 
在 `main.cpp` 中添加解码循环代码(包含调试日志,见 3.2.1 节) -2. 实现输入序列管理(将新 token 写入 padding 区域) -3. 实现 KV cache 序列长度同步 -4. 实现终止条件检查(EOS 或达到 128 长度) -5. 添加调试日志输出(见 6.1 节) - -### 阶段 3:测试与验证 -1. 编译并运行示例程序 -2. 检查调试日志输出,验证 KV cache 状态和序列长度 -3. 验证是否能够连续生成多个 token -4. 验证终止条件是否正确工作(EOS 和最大长度) -5. 验证边界情况(最小输入、接近最大长度等) -6. 检查是否有内存泄漏或崩溃 -7. 验证长时间运行的稳定性 - -### 阶段 4:Context 析构修复(可选) -1. 定位 SIGSEGV 崩溃原因 -2. 实现 backend 销毁顺序管理 -3. 验证修复效果 - -## 8. 参考文档 - -- QNN Backend Design: `docs/qnn_backend/core_design.rst` -- QNN 量化文档: https://docs.qualcomm.com/bundle/publicresource/topics/80-63442-10/quantization.html -- 模型量化基本概念: https://zhuanlan.zhihu.com/p/505570612 - -## 9. 相关代码文件 - -- `examples/qwen_npu/main.cpp` - 示例程序入口 -- `mllm/backends/cpu/ops/KVCacheOp.{hpp,cpp}` - CPU KV Cache 操作实现 -- `mllm/nn/layers/KVCache.{hpp,cpp}` - KV Cache Layer 接口 -- `mllm/nn/lmcache/StaticCache.{hpp,cpp}` - 静态缓存实现(包含 `getCurrentSeqCnt` 方法) -- `mllm/models/qwen_npu/modeling_qwen_npu.hpp` - Qwen NPU 模型实现 -- `mllm/core/aops/KVCacheOp.{hpp,cpp}` - KV Cache 操作基类 -- `mllm/backends/qnn/op/QNNCastTypeOp.cpp` - QNN 量化实现参考 -- `mllm/utils/Log.hpp` - 日志宏定义 - diff --git a/docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md b/docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md deleted file mode 100644 index c4219fb16..000000000 --- a/docs/qnn_fix_bug/PR_PREPARATION_GUIDE.md +++ /dev/null @@ -1,453 +0,0 @@ -# QNN Execute Return Order Fix - PR 准备指南 - -## 问题 1: 调试日志信息处理 - -### 当前情况 -代码中添加了很多调试用的 `MLLM_INFO` 打印信息,用于验证 QNN Execute Return Order 修复是否正确。 - -### 解决方案 - -#### 方案 A: 使用条件编译宏(推荐) -创建一个调试宏,可以通过编译选项控制是否启用调试日志: - -**优点:** -- 代码保持整洁,不需要注释/取消注释 -- 可以通过编译选项控制(如 `-DMLLM_QNN_DEBUG_OUTPUT_ORDER=ON`) -- 保留所有调试代码,方便将来使用 - -**实现步骤:** -1. 在 `QNNBackend.hpp` 或相关头文件中定义宏 -2. 将所有调试日志用宏包裹 -3. 
在 CMakeLists.txt 中添加编译选项 - -#### 方案 B: 注释掉调试日志(简单快速) -直接注释掉所有调试用的 `MLLM_INFO`,但保留代码以便将来使用。 - -**优点:** -- 实现简单快速 -- 代码清晰,明确标注为调试代码 -- 需要时可以快速取消注释 - -**缺点:** -- 代码中会有很多注释,可能不够美观 -- 需要手动注释/取消注释 - -#### 方案 C: 使用日志级别控制 -利用现有的 `LogLevel` 机制,将调试日志改为 `MLLM_DEBUG`(如果存在)或通过设置日志级别控制。 - -**注意:** 当前代码库中似乎没有 `MLLM_DEBUG`,只有 `MLLM_INFO/WARN/ERROR`。 - -### 推荐方案 -**建议使用方案 B(注释)**,因为: -1. 实现最简单,不需要修改构建系统 -2. 代码意图清晰,明确标注为调试代码 -3. 需要时可以快速恢复 -4. 对于 PR 来说,注释掉的调试代码是可以接受的 - -### 需要处理的文件 -- `mllm/backends/qnn/QNNBackend.cpp` - 包含大部分调试日志 -- `mllm/models/qwen_npu/modeling_qwen_npu.hpp` - 已注释(保持现状) - ---- - -## 问题 2: Git 工作流同步 - -### 当前情况 -- Fork 的项目与原项目(upstream)没有同步 -- 本地仓库与 Fork 的项目也没有同步 -- **重要:** 当前在 fork 的 `v2` 分支上工作,需要 PR 到主项目的 `v2` 分支 -- **重要:** fork 的 `v2` 分支上有多个 commits,但只有最新的这次修改需要 PR - -### 正确的 Git 工作流策略 - -#### 为什么需要功能分支? - -**最佳实践:** 每个功能/修复应该创建独立的功能分支,而不是直接在主分支(如 `v2`)上工作。 - -**优点:** -- ✅ 可以独立 PR 每个功能,不需要一次性 PR 所有改动 -- ✅ 保持主分支干净,只包含已合并的功能 -- ✅ 方便代码审查,每个 PR 只关注一个功能 -- ✅ 如果某个功能有问题,不影响其他功能 - -#### 当前情况的解决方案 - -如果你已经在 `v2` 分支上做了多个 commits,但只想 PR 其中一个,有以下几种方案: - -**方案 A: 创建新功能分支并 cherry-pick(推荐)** - -这是最干净的方法,创建一个新的功能分支,只包含你需要的修改: - -```bash -# 1. 确保 upstream 已配置并同步 -git fetch upstream -git checkout v2 -git merge upstream/v2 # 或 git rebase upstream/v2 - -# 2. 创建一个新的功能分支,基于 upstream/v2 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 3. 找到你需要 PR 的 commit(假设是最新的 commit) -# 查看最近的 commits -git log --oneline -10 - -# 4. Cherry-pick 你需要的 commit(s) -# 如果是最新的 commit -git cherry-pick HEAD@{1} # 或者使用 commit hash -# 或者如果是多个相关的 commits -git cherry-pick ... - -# 5. 如果有未提交的更改,先提交 -git add . -git commit -m "fix: QNN Execute Return Order - handle output reordering" - -# 6. 推送到你的 Fork -git push origin fix/qnn-execute-return-order - -# 7. 在 GitHub 上创建 PR:从 fix/qnn-execute-return-order 到 upstream/v2 -``` - -**方案 B: 使用交互式 rebase 整理 commits** - -如果你想保留在 `v2` 分支上工作,但只 PR 部分 commits: - -```bash -# 1. 创建一个新分支用于 PR -git checkout -b fix/qnn-execute-return-order - -# 2. 
使用交互式 rebase 整理 commits -git rebase -i upstream/v2 - -# 在编辑器中,只保留需要 PR 的 commits,其他标记为 drop -# 或者使用 squash 合并多个相关 commits - -# 3. 推送到 Fork -git push origin fix/qnn-execute-return-order -``` - -**方案 C: 创建补丁并应用到新分支** - -```bash -# 1. 在 v2 分支上,创建补丁文件 -git format-patch -1 HEAD # 为最新的 commit 创建补丁 - -# 2. 创建新功能分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 3. 应用补丁 -git am - -# 4. 推送到 Fork -git push origin fix/qnn-execute-return-order -``` - -### 正确的 Git 工作流(未来参考) - -#### 步骤 1: 配置远程仓库 -```bash -# 查看当前远程仓库 -git remote -v - -# 如果没有 upstream,添加原项目为 upstream -git remote add upstream <原项目URL> - -# 如果已有 upstream,确认 URL 正确 -git remote set-url upstream <原项目URL> -``` - -#### 步骤 2: 同步 upstream 到本地(v2 分支) -```bash -# 获取 upstream 的最新更改 -git fetch upstream - -# 切换到 v2 分支 -git checkout v2 - -# 合并 upstream 的更改到本地 v2 分支 -git merge upstream/v2 - -# 或者使用 rebase(更推荐,保持提交历史整洁) -git rebase upstream/v2 -``` - -#### 步骤 3: 同步本地 v2 分支到 Fork(可选) -```bash -# 推送本地 v2 分支到你的 Fork(用于同步,不是 PR) -git push origin v2 -``` - -#### 步骤 4: 创建功能分支(从当前 v2 分支提取需要的修改) -```bash -# 方法 1: 如果修改还未提交,直接创建新分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 -# 然后手动应用你的修改,或使用 git cherry-pick - -# 方法 2: 如果修改已提交,使用 cherry-pick -# 先找到你的 commit hash -git log --oneline -10 - -# 创建新分支基于 upstream/v2 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# Cherry-pick 你需要的 commit(s) -git cherry-pick - -# 方法 3: 如果修改还未提交,先暂存 -git stash -git checkout -b fix/qnn-execute-return-order upstream/v2 -git stash pop -# 然后提交 -``` - -#### 步骤 5: 处理调试日志并提交 -```bash -# 1. 注释掉所有调试日志(使用方案 B) -# 2. 确保代码编译通过 -# 3. 运行测试确保功能正常 - -# 提交更改 -git add . -git commit -m "fix: QNN Execute Return Order - handle output reordering - -- Fix QNN graphExecute output order mismatch -- Add output reordering logic based on expected order -- Remove debug logs for production (commented for future use)" -``` - -#### 步骤 6: 重新编译和测试 -```bash -# 清理之前的构建 -rm -rf build/ - -# 重新编译 -# 根据你的构建系统执行编译命令 -# 例如:cmake .. 
&& make - -# 运行测试 -# 确保所有测试通过 -``` - -#### 步骤 7: 推送到 Fork 并创建 PR -```bash -# 推送功能分支到你的 Fork -git push origin fix/qnn-execute-return-order - -# 在 GitHub 上创建 Pull Request -# 从你的 Fork: fix/qnn-execute-return-order -# 到原项目: v2 分支 -``` - -### 针对当前情况的快速操作指南 - -如果你现在在 `v2` 分支上,有未提交的修改或已提交的修改,按以下步骤操作: - -#### 情况 1: 修改还未提交 -```bash -# 1. 暂存当前修改 -git stash - -# 2. 同步 upstream/v2 -git fetch upstream -git checkout v2 -git rebase upstream/v2 - -# 3. 创建功能分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 4. 应用你的修改 -git stash pop - -# 5. 提交修改 -git add . -git commit -m "fix: QNN Execute Return Order - handle output reordering - -- Fix QNN graphExecute output order mismatch -- Add output reordering logic based on expected order -- Remove debug logs for production (commented for future use)" - -# 6. 推送到 Fork -git push origin fix/qnn-execute-return-order -``` - -#### 情况 2: 修改已提交(在 v2 分支上) -```bash -# 1. 查看最近的 commits,找到你的 commit hash -git log --oneline -10 - -# 2. 同步 upstream/v2 -git fetch upstream -git checkout v2 -git rebase upstream/v2 - -# 3. 创建功能分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 4. Cherry-pick 你的 commit(s) -# 假设你的 commit hash 是 abc1234 -git cherry-pick abc1234 - -# 如果有多个相关 commits,可以一起 cherry-pick -# git cherry-pick abc1234 def5678 - -# 5. 推送到 Fork -git push origin fix/qnn-execute-return-order -``` - -#### 情况 3: 有多个 commits,但只想 PR 最新的 -```bash -# 1. 查看 commits,确认哪些需要 PR -git log --oneline -10 - -# 2. 同步 upstream/v2 -git fetch upstream -git checkout v2 -git rebase upstream/v2 - -# 3. 创建功能分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 4. Cherry-pick 最新的 commit(或相关的几个 commits) -git cherry-pick HEAD@{1} # 或者使用具体的 commit hash - -# 5. 
推送到 Fork -git push origin fix/qnn-execute-return-order -``` - -### 关于功能分支的常见问题 - -**Q: 必须全部一起 PR 到主项目吗?** -A: **不是的!** 这正是为什么需要功能分支的原因。每个功能分支可以独立 PR,不需要一次性 PR 所有改动。 - -**Q: 我的开发流程有问题吗?** -A: 在 `v2` 分支上直接开发是可以的(特别是如果你在 fork 上工作),但更好的做法是: -- 为每个功能创建独立的功能分支 -- 功能分支基于 `upstream/v2` 创建 -- 只将需要的功能分支 PR 到主项目 -- 其他不需要 PR 的改动保留在你的 fork 分支上 - -**Q: 我没创建过功能分支,怎么办?** -A: 不用担心!创建功能分支很简单: -```bash -# 创建新分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 或者从当前分支创建 -git checkout -b fix/qnn-execute-return-order -``` -功能分支就是普通的 Git 分支,可以随时创建、删除、合并。 - -**Q: 如果我在 v2 分支上有很多 commits,只想 PR 其中一个怎么办?** -A: 使用 `cherry-pick`: -```bash -# 1. 创建新功能分支 -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 2. Cherry-pick 你需要的 commit -git cherry-pick - -# 3. 推送到 Fork 并创建 PR -git push origin fix/qnn-execute-return-order -``` - ---- - -## 前置条件:Git 配置 - -**重要:** 在开始之前,确保 Git 已配置 `user.name` 和 `user.email`。 - -如果遇到 "Committer identity unknown" 错误,请先配置 Git: - -```bash -# 全局配置(推荐) -git config --global user.name "你的名字" -git config --global user.email "你的邮箱" -``` - -详细说明请参考:[Git 配置说明](./GIT_SETUP.md) - ---- - -## 快速参考:当前情况的操作步骤 - -### 场景:在 fork 的 v2 分支上有多个 commits,只想 PR 最新的修改 - -**最简单的方法(推荐):** - -```bash -# 1. 确保 upstream 已配置 -git remote add upstream <原项目URL> # 如果还没有 - -# 2. 同步 upstream/v2 -git fetch upstream - -# 3. 查看你的 commits,找到需要 PR 的 commit hash -git log --oneline -10 - -# 4. 创建功能分支(基于 upstream/v2) -git checkout -b fix/qnn-execute-return-order upstream/v2 - -# 5. Cherry-pick 你需要的 commit(假设是 abc1234) -git cherry-pick abc1234 - -# 6. 确保调试日志已注释(已完成) -# 7. 编译和测试 -# 8. 推送到 Fork -git push origin fix/qnn-execute-return-order - -# 9. 
在 GitHub 上创建 PR:从 fix/qnn-execute-return-order 到 upstream/v2 -``` - -**或者使用脚本:** - -```bash -# 运行自动化脚本 -./docs/qnn_fix_bug/sync_and_prepare_pr.sh - -# 脚本会引导你完成所有步骤 -``` - ---- - -## PR 提交清单 - -在提交 PR 之前,请确认: - -- [ ] 已同步 upstream 和本地仓库 -- [ ] 已注释掉所有调试日志(或使用条件编译) -- [ ] 代码已重新编译,无编译错误 -- [ ] 已运行测试,所有测试通过 -- [ ] 提交信息清晰,描述了修复的问题 -- [ ] 代码已推送到 Fork 的功能分支 -- [ ] PR 描述清晰,说明了问题和解决方案 - ---- - -## PR 描述模板 - -```markdown -## 问题描述 -修复 QNN Execute Return Order 问题:QNN graphExecute 返回的输出顺序与 MLLM 期望的顺序不一致。 - -## 解决方案 -- 在 `QNNBackend::graphExecute` 中添加输出重排序逻辑 -- 根据 `expectedOrder` 将 QNN 返回的输出重新排序到 MLLM 期望的顺序 -- 添加输出索引映射机制,确保正确匹配 tensor 名称 -- 注释掉调试日志,保留代码以便将来调试使用 - -## 修改的文件 -- `mllm/backends/qnn/QNNBackend.cpp` - 添加输出重排序逻辑 -- `mllm/backends/qnn/QNNModel.cpp` - 添加输出索引映射方法 -- `mllm/backends/qnn/QNNModel.hpp` - 添加输出索引映射方法声明 -- `mllm/backends/qnn/passes/QNNGraphBuildPass.cpp` - 设置期望输出顺序 - -## 测试 -- [x] 编译通过 -- [x] 运行测试通过 -- [x] 验证输出顺序正确 - -## 相关 Issue -# (如果有) -``` - -**注意:** PR 的目标分支应该是 `v2`,不是 `main` 或 `master`。 - diff --git a/docs/qnn_fix_bug/adb_output.md b/docs/qnn_fix_bug/adb_output.md deleted file mode 100644 index 1cbbd97f4..000000000 --- a/docs/qnn_fix_bug/adb_output.md +++ /dev/null @@ -1,1343 +0,0 @@ -modeling_qwen_npu.hpp中只在QwenAttentionProjNPU中的forward函数中最后一行return {query_states, key_states, value_states} -query states在view前的tensor放到return列表的最后 /data/local/tmp/zl/mllm-v2/bin_test目录下的QNNOutputOrderTest输出 - -```bash -root@zhulei:~/mllm_v2/build-android-qnn-dbg/bin# adb shell -manet:/ $ cd /data/local/tmp/zl/mllm-v2/bin_test -manet:/data/local/tmp/zl/mllm-v2/bin_test $ LD_LIBRARY_PATH=. 
./mllm-qwen-npu -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNUtils.cpp:22 QNN Backend Lib: libQnnHtp.so -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_CPU.so and interface provider: LLaMAPackageInterfaceProvider -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_HTP.so and interface provider: LLaMAPackageInterfaceProvider -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:46 QNN Backend Build Id: v2.36.0.250627101419_123260 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:48 QNN backend supports tensor sparsity -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:51 QNN backend supports dynamic dimensions -[INFO] /root/mllm_v2/mllm/backends/base/PluginSystem.cpp:89 Register customized op: DequantizeAdd:4097 -> QNN -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order 
for graph 'model.layers.3_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.3_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.6_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.6_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_2' with 1 outputs -[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.14_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM 
expected output order for graph 'model.layers.14_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.17_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.17_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_1' with 3 outputs -[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_1' with 3 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_2' with 1 outputs -tensor( -[[151644, 8948, 198, 2610, 525, 264, ..., 30, 151645, 198, 151644, 77091, 198]], dtype=Int64, device=CPU) -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1377 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1378 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1379 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1377 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1378 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1379 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1377) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1378) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1379) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1431 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1431 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1431) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1451 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1452 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1451 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1452 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1453 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1451) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1452) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1453) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1504 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1504 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1504) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1524 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1525 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1524 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1525 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM 
expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1524) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1525) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1526) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1577 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1577 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1577) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1597 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1598 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1597 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1598 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1597) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1598) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1599) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1650 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1650 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1650) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1670 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1671 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1670 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1671 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1670) 
[SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1671) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1672) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1723 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1723 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1723) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1743 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1744 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1743 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1744 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1743) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] 
(tensor: 1744) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1745) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1796 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1796 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1796) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1816 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1817 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1816 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1817 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1816) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1817) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: 
MLLM[2] = QNN[2] (tensor: 1818) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1869 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1869 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1869) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1889 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1890 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1889 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1890 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1889) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1890) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1891) [SAME] -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1942 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1942 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1942) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.8_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1962 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1963 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1962 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1963 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1962) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 1963) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 1964) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output 
order for graph 'model.layers.8_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2015 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2015 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2015) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2035 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2036 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2035 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2036 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2035) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2036) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2037) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 
MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2088 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2088 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2088) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2108 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2109 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2108 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2109 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2108) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2109) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2110) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2161 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2161 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2161) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2181 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2182 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2183 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2181 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2182 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2183 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2181) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2182) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2183) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2234 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2234 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2234) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2254 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2255 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2256 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2254 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2255 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2256 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2254) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2255) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2256) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2307 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2307 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2307) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2327 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2328 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2329 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2327 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2328 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2329 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2327) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2328) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2329) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2380 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2380 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2380) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2400 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2401 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2402 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2400 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2401 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2402 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2400) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2401) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2402) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES 
MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2453) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2473 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2474 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2475 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2473 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2474 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2475 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2473) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2474) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2475) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2526) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2546 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2547 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2548 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2546 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2547 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2548 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2546) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2547) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2548) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2599) 
[SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2619 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2620 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2621 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2619 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2620 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2621 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2619) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2620) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2621) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2672) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: 
Checking output order for graph 'model.layers.18_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2692 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2693 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2694 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2692 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2693 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2694 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2692) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2693) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2694) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.18_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2745) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_1' -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2765 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2766 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2767 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2765 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2766 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2767 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2765) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2766) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2767) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2818) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): 
-[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2838 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2839 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2840 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2838 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2839 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2840 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2838) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2839) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2840) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2891) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2911 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2912 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2913 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2911 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2912 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2913 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2911) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2912) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2913) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2964) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2984 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2985 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2986 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2984 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2985 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2986 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2984) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 2985) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 2986) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3037) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3057 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 3058 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 3059 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (3 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3057 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 3058 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 3059 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3057) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[1] = QNN[1] (tensor: 3058) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[2] = QNN[2] (tensor: 3059) [SAME] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3110) [SAME] -token: 2121 As -Error: Received signal11 - SIGSEGV (Segmentation violation) -Stack trace: -#0 0x5c8fd2b12c -#1 0x5c8fd2af4c -#2 0x5c8fd2ac38 -#3 0x7645b91860 __kernel_rt_sigreturn -#4 0x739fdcbff4 -#5 0x739fdad750 -#6 0x739fd89748 -#7 0x739fb70a74 -#8 0x739fb7042c -#9 0x739fafbe14 -#10 0x739fafcc68 -#11 0x739fafead4 -#12 0x764075c3f0 __cxa_finalize -#13 0x764076155c exit -#14 0x7640755158 -Possible causes: invalid memory access, dangling pointer, stack overflow. -Shutting down... 
-``` - -在 modeling_qwen_npu.hpp 的 QwenAttentionProjNPU::forward 函数中,尝试把 query_states 在 view 之前的原始 tensor 追加到 return 列表末尾,即把最后一行改为 `return {query_states, key_states, value_states, query_states_raw};`。按此修改代码后,在 /data/local/tmp/zl/mllm-v2/bin_test 目录下运行 QNNOutputOrderTest 的输出如下 -```bash -manet:/data/local/tmp/zl/mllm-v2/bin_test $ LD_LIBRARY_PATH=. ./mllm-qwen-npu -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNUtils.cpp:22 QNN Backend Lib: libQnnHtp.so -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_CPU.so and interface provider: LLaMAPackageInterfaceProvider -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:305 Registered Op Package: libQnnLLaMAPackage_HTP.so and interface provider: LLaMAPackageInterfaceProvider -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:46 QNN Backend Build Id: v2.36.0.250627101419_123260 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:48 QNN backend supports tensor sparsity -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:51 QNN backend supports dynamic dimensions -[INFO] /root/mllm_v2/mllm/backends/base/PluginSystem.cpp:89 Register customized op: DequantizeAdd:4097 -> QNN -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 
query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_1' with 4 outputs -[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.0_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.1_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.2_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.3_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.3_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.4_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.5_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected 
output order for graph 'model.layers.6_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.6_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.7_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.8_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.9_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.10_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.11_2' with 1 outputs -[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.12_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.13_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.14_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.14_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.15_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.16_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.17_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded 
MLLM expected output order for graph 'model.layers.17_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.18_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.19_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.20_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.21_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_1' with 4 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.22_2' with 1 outputs -[INFO] /root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_1' with 4 outputs -[INFO] 
/root/mllm_v2/mllm/backends/qnn/passes/QNNGraphBuildPass.cpp:185 QNNGraphBuildPass: Recorded MLLM expected output order for graph 'model.layers.23_2' with 1 outputs -tensor( -[[151644, 8948, 198, 2610, 525, 264, ..., 30, 151645, 198, 151644, 77091, 198]], dtype=Int64, device=CPU) -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1377 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1378 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1379 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1362 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1362 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1377 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1378 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1379 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1377' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1378' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1379' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1362' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1377) [REORDERED] -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1378) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1379) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1362) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.0_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1431 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1431 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1431) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1451 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1452 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1436 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1436 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1451 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1452 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1451' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1452' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1453' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1436' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1451) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1452) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1453) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1436) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.1_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1504 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1504 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1504) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 
QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1524 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1525 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1509 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1509 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1524 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1525 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1524' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1525' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1526' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1509' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1524) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1525) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1526) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1509) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.2_2' -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1577 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1577 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1577) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1597 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1598 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1582 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1582 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1597 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1598 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1597' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1598' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects 
'1599' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1582' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1597) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1598) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1599) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1582) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.3_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1650 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1650 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1650) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1670 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1671 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1655 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): 
-[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1655 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1670 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1671 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1670' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1671' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1672' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1655' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1670) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1671) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1672) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1655) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.4_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1723 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1723 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] 
(tensor: 1723) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1743 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1744 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1728 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1728 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1743 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1744 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1743' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1744' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1745' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1728' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1743) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1744) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1745) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: 
MLLM[3] = QNN[0] (tensor: 1728) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.5_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1796 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1796 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1796) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1816 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1817 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1801 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1801 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1816 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1817 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1816' but it's at QNN[1] -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1817' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1818' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1801' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1816) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1817) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1818) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1801) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.6_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1869 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1869 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1869) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1889 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1890 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1874 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1874 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1889 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1890 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1889' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1890' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1891' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1874' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1889) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 1890) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1891) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1874) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.7_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1942 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1942 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 1942) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.8_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 1962 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 1963 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 1964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 1947 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 1947 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 1962 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 1963 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 1964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '1962' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '1963' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '1964' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '1947' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 1962) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = 
QNN[2] (tensor: 1963) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 1964) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 1947) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.8_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2015 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2015 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2015) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2035 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2036 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2020 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2020 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2035 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2036 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2037 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2035' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2036' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2037' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2020' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2035) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2036) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2037) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2020) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.9_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2088 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2088 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2088) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_1' -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2108 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2109 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2093 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2093 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2108 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2109 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2108' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2109' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2110' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2093' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2108) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2109) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2110) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2093) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.10_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): 
-[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2161 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2161 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2161) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2181 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2182 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2183 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2166 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2166 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2181 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2182 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2183 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2181' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2182' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2183' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 
Mismatch: MLLM[3] expects '2166' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2181) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2182) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2183) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2166) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.11_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2234 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2234 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2234) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2254 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2255 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2256 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2239 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2239 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2254 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2255 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2256 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2254' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2255' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2256' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2239' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2254) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2255) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2256) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2239) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.12_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2307 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2307 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2307) [SAME] -[INFO] 
/root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2327 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2328 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2329 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2312 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2312 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2327 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2328 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2329 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2327' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2328' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2329' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2312' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2327) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2328) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2329) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2312) 
[REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.13_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2380 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2380 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2380) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2400 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2401 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2402 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2385 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2385 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2400 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2401 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2402 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2400' but it's at QNN[1] -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2401' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2402' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2385' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2400) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2401) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2402) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2385) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.14_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2453 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2453) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2473 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2474 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2475 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2458 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2458 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2473 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2474 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2475 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2473' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2474' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2475' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2458' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2473) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2474) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2475) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2458) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.15_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2526 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2526 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2526) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2546 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2547 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2548 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2531 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2531 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2546 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2547 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2548 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2546' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2547' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2548' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2531' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2546) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = 
QNN[2] (tensor: 2547) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2548) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2531) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.16_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2599 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2599) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2619 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2620 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2621 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2604 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2604 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2619 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2620 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2621 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2619' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2620' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2621' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2604' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2619) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2620) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2621) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2604) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.17_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2672 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2672) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.18_1' -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2692 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2693 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2694 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2677 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2677 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2692 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2693 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2694 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2692' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2693' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2694' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2677' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2692) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2693) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2694) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2677) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.18_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): 
-[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2745 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2745) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2765 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2766 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2767 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2750 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2750 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2765 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2766 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2767 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2765' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2766' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2767' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 
Mismatch: MLLM[3] expects '2750' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2765) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2766) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2767) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2750) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.19_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2818 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2818) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2838 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2839 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2840 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2823 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2823 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2838 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2839 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2840 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2838' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2839' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2840' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2823' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2838) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2839) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2840) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2823) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.20_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2891 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2891) [SAME] -[INFO] 
/root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2911 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2912 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2913 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2896 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2896 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2911 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2912 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2913 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2911' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2912' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2913' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2896' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2911) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2912) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2913) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2896) 
[REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.21_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2964 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 2964) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 2984 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 2985 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 2986 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 2969 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 2969 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 2984 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 2985 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 2986 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '2984' but it's at QNN[1] -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '2985' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '2986' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '2969' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 2984) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 2985) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 2986) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 2969) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.22_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3037 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3037) [SAME] -[INFO] /root/mllm_v2/mllm/models/qwen_npu/modeling_qwen_npu.hpp:187 query_states_raw shape: [1, 32, 1, 2048] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_1' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3057 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [1] 3058 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [2] 3059 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [3] 3042 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (4 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3042 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [1] 3057 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [2] 3058 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [3] 3059 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:590 [VERIFICATION] QNN output order DIFFERS from MLLM expected order - REORDERING REQUIRED -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[0] expects '3057' but it's at QNN[1] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[1] expects '3058' but it's at QNN[2] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[2] expects '3059' but it's at QNN[3] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:593 Mismatch: MLLM[3] expects '3042' but it's at QNN[0] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[0] = QNN[1] (tensor: 3057) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[1] = QNN[2] (tensor: 3058) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[2] = QNN[3] (tensor: 3059) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:606 Mapping: MLLM[3] = QNN[0] (tensor: 3042) [REORDERED] -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:564 QNNBackend::graphExecute: Checking output order for graph 'model.layers.23_2' -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:565 MLLM Expected Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:567 [0] 3110 -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:569 QNN Output Order (1 outputs): -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:572 [0] 3110 -[INFO] 
/root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:596 [VERIFICATION] QNN output order MATCHES MLLM expected order - no reordering needed -[INFO] /root/mllm_v2/mllm/backends/qnn/QNNBackend.cpp:608 Mapping: MLLM[0] = QNN[0] (tensor: 3110) [SAME] -token: 2121 As -Error: Received signal11 - SIGSEGV (Segmentation violation) -Stack trace: -#0 0x5d5f57545c -#1 0x5d5f57527c -#2 0x5d5f574f68 -#3 0x74c41cf860 __kernel_rt_sigreturn -#4 0x7222ccdff4 -#5 0x7222caf750 -#6 0x7222c8b748 -#7 0x7222e6ea74 -#8 0x7222e6e42c -#9 0x7222df9e14 -#10 0x7222dfac68 -#11 0x7222dfcad4 -#12 0x74c23633f0 __cxa_finalize -#13 0x74c236855c exit -#14 0x74c235c158 -Possible causes: invalid memory access, dangling pointer, stack overflow. -Shutting down... -``` \ No newline at end of file diff --git a/examples/qwen_npu/CODE_EXPLANATION.md b/examples/qwen_npu/CODE_EXPLANATION.md deleted file mode 100644 index aa2183b5a..000000000 --- a/examples/qwen_npu/CODE_EXPLANATION.md +++ /dev/null @@ -1,951 +0,0 @@ -# Qwen NPU 示例代码详细解释 - -本文档逐行解释 `main.cpp` 中每行代码的具体逻辑,以及它们如何与其他文件结合。 - -## 1. 头文件包含(1-11行) - -```cpp -#include -#include -#include -#include - -#include "mllm/backends/qnn/passes/QNNGraphBuildPass.hpp" -#include "mllm/backends/qnn/passes/QNNGraphBuildPipeline.hpp" -#include "mllm/compile/PassManager.hpp" -#include "mllm/core/DataTypes.hpp" -#include "mllm/models/qwen_npu/tokenization_qwen.hpp" -#include "mllm/models/qwen_npu/modeling_qwen_npu.hpp" -``` - -### 详细说明: - -- **``**: 格式化库,用于打印输出 -- **``**: 标准整数类型(int64_t等) -- **``**: MLLM核心库,包含: - - `MLLM_MAIN` 宏定义(在 `mllm/mllm.hpp:401-412`) - - `Tensor` 类 - - `Context` 管理 - - - 信号处理等基础设施 -- **``**: 类型擦除的值容器,用于传递任意类型的参数 -- **QNN相关头文件**: QNN(Qualcomm Neural Network)后端相关的Pass -- **模型相关头文件**: Qwen NPU模型的实现和分词器 - -## 2. MLLM_MAIN 宏(15行) - -```cpp -MLLM_MAIN({ -``` - -### 详细说明: - -`MLLM_MAIN` 宏定义在 `mllm/mllm.hpp:401-412`: - -```cpp -#define MLLM_MAIN(...) 
\ - int main(int argc, char** argv) { \ - ::mllm::__setup_signal_handler(); \ - ::mllm::initializeContext(); \ - auto user_main = [&]() -> int { \ - __VA_ARGS__; \ - return 0; \ - }; \ - int result = ::mllm::__mllm_exception_main(user_main); \ - ::mllm::shutdownContext(); \ - return result; \ - } -``` - -**展开后的逻辑:** -1. 设置信号处理器(SIGINT, SIGTERM等) -2. 初始化MLLM上下文(内存管理器、设备等) -3. 将用户代码包装在lambda中 -4. 在异常处理中执行用户代码 -5. 程序结束时清理上下文 - -## 3. 初始化QNN后端(16行) - -```cpp - mllm::initQnnBackend(); -``` - -### 详细说明: - -- **位置**: `mllm/backends/qnn/Register.cpp:18` -- **作用**: - - 注册QNN后端操作(ops) - - 初始化QNN运行时环境 - - 使QNN相关的操作可以在MLLM中使用 - -## 4. 配置路径定义(18-19行) - -```cpp - const std::string config_path = "./config_1.8B_w8a16_qnn.json"; - const std::string model_path = "./qwen1.5-1.8b-chat-rot-qnn.mllm"; -``` - -### 详细说明: - -- **config_path**: 模型配置文件,包含: - - 模型架构参数(hidden_size, num_layers等) - - 量化配置(w8a16表示8bit权重,16bit激活) - - QNN特定配置 -- **model_path**: 模型权重文件(.mllm格式) - -## 5. 创建分词器(21行) - -```cpp - auto qwen_tokenizer = mllm::models::qwen_npu::QwenTokenizer("./tokenizer.json", "./qwen_merges.txt"); -``` - -### 详细说明: - -- **位置**: `mllm/models/qwen_npu/tokenization_qwen.hpp` -- **构造函数参数**: - - `tokenizer.json`: BPE(Byte Pair Encoding)词汇表 - - `qwen_merges.txt`: BPE合并规则 -- **作用**: 将文本转换为token IDs,或将token IDs转换回文本 - -### 内部实现(tokenization_qwen.hpp:285-309): - -```cpp -ARGenerationOutputPast convertMessage(const QwenMessage& message) { - // 1. 应用消息模板 - auto applied_string = QwenMessage::message_template; - size_t pos = applied_string.find("{{{prompt}}}"); - applied_string.replace(pos, 12, message.prompt); - - // 2. 分词 - auto sequence_str = tokenize(applied_string); - - // 3. 查找词汇表,转换为ID - std::vector ids; - for (const auto& str : sequence_str) { - ids.emplace_back(bpe_._lookup_vocab(str)); - } - - // 4. 
创建Tensor - Tensor sequence = Tensor::empty({1, (int32_t)ids.size()}, kInt64, kCPU) - .alloc(); - auto ptr = sequence.ptr(); - for (size_t i = 0; i < ids.size(); ++i) { ptr[i] = ids[i]; } - - return {{"sequence", sequence}}; -} -``` - -## 6. 模型文件版本(23行) - -```cpp - mllm::ModelFileVersion file_version = mllm::ModelFileVersion::kV1; -``` - -### 详细说明: - -- 指定模型文件格式版本 -- 不同版本可能有不同的序列化格式 -- 用于 `mllm::load()` 函数正确解析模型文件 - -## 7. 创建模型配置和实例(25-26行) - -```cpp - auto cfg = mllm::models::qwen_npu::QwenNPUConfig(config_path); - auto model = mllm::models::qwen_npu::QwenForCausalLM("", cfg); -``` - -### 详细说明: - -#### 7.1 QwenNPUConfig(25行) - -- **位置**: `mllm/models/qwen_npu/configuration_qwen_npu.hpp` -- **作用**: 从JSON配置文件加载模型参数 -- **包含的参数**: - - `vocab_size`: 词汇表大小 - - `hidden_size`: 隐藏层维度 - - `num_attention_heads`: 注意力头数 - - `num_key_value_heads`: KV缓存头数(GQA) - - `num_hidden_layers`: Transformer层数 - - `intermediate_size`: MLP中间层维度 - - `max_position_embeddings`: 最大位置编码 - - `rope_theta`: RoPE旋转角度 - - `linear_impl_type`: 线性层实现类型(QNN特定) - -#### 7.2 QwenForCausalLM(26行) - -- **位置**: `mllm/models/qwen_npu/modeling_qwen_npu.hpp:445-454` -- **继承关系**: - - `nn::Module`: 神经网络模块基类 - - `ARGeneration`: 自回归生成接口 -- **构造函数逻辑**: - -```cpp -explicit QwenForCausalLM(const std::string& name, const QwenNPUConfig& cfg) - : cfg(cfg), nn::Module(name) { - // 注册主模型(Transformer堆叠) - model = reg("model", cfg); - - // 注册语言模型头(如果未共享权重) - if (!cfg.tie_word_embeddings) { - lm_head_ = reg("lm_head", cfg.hidden_size, cfg.vocab_size, - false, cfg.linear_impl_type); - } - tie_word_embeddings_ = cfg.tie_word_embeddings; -} -``` - -**`reg<>()` 函数**: -- 注册子模块到当前模块 -- 返回子模块的引用 -- 子模块会被添加到模块树中,用于参数加载和计算图构建 - -## 8. 加载模型参数(28-29行) - -```cpp - auto param = mllm::load(model_path, file_version); - model.load(param); -``` - -### 详细说明: - -#### 8.1 mllm::load()(28行) - -- **作用**: 从.mllm文件加载参数 -- **返回**: `ParameterFile::ptr_t`,包含所有模型权重 -- **内部流程**: - 1. 打开模型文件 - 2. 根据file_version解析文件格式 - 3. 读取所有张量数据(权重、偏置等) - 4. 
返回参数容器 - -#### 8.2 model.load()(29行) - -- **位置**: `nn::Module::load()`(继承自Module基类) -- **作用**: 将参数加载到模型结构中 -- **匹配逻辑**: - - 根据模块名称匹配参数 - - 递归加载子模块参数 - - 将权重张量复制到对应的模块中 - -## 9. 创建Trace输入占位符(31行) - -```cpp - mllm::models::ARGenerationOutputPast inputs{{"sequence", mllm::Tensor::empty({1, 32}, mllm::kInt64, mllm::kCPU).alloc()}}; -``` - -### 详细说明: - -#### 9.1 ARGenerationOutputPast - -- **定义**: `mllm/models/ARGeneration.hpp:17` -```cpp -using ARGenerationOutputPast = std::unordered_map; -``` -- **作用**: 模型输入/输出的键值对容器 -- **常用键**: - - `"sequence"`: 输入token序列 - - `"position_ids"`: 位置编码(可选) - - 其他模型特定的输入 - -#### 9.2 Tensor::empty() - -- **位置**: `mllm/core/Tensor.cpp:70-74` -```cpp -static Tensor empty(const std::vector& shape, DataTypes dtype, DeviceTypes device) { - auto storage = TensorStorage::create(shape, dtype, device); - auto impl = TensorViewImpl::create(shape, storage); - return Tensor(impl); -} -``` -- **参数**: - - `{1, 32}`: shape,batch_size=1, seq_len=32 - - `kInt64`: 数据类型,64位整数 - - `kCPU`: 设备类型,CPU内存 -- **注意**: `empty()` **不分配内存**,只创建Tensor对象 - -#### 9.3 .alloc() - -- **位置**: `mllm/core/Tensor.cpp:63-66` -```cpp -Tensor& alloc() { - Context::instance().memoryManager()->alloc(impl_->storage()); - return *this; -} -``` -- **作用**: - - 通过内存管理器分配实际内存 - - 返回Tensor引用(支持链式调用) -- **内存布局**: 分配 `1 * 32 * sizeof(int64_t) = 256` 字节 - -## 10. Trace构建计算图(33行) - -```cpp - auto irs = model.trace(inputs, {}); -``` - -### 详细说明: - -#### 10.1 trace()方法 - -- **位置**: `mllm/models/qwen_npu/modeling_qwen_npu.hpp:514-563` -- **作用**: 构建计算图的中间表示(IR) -- **输入**: - - `inputs`: 占位符输入(用于确定形状) - - `args`: 额外参数(这里为空) - -#### 10.2 trace()内部流程(实际实现): - -```cpp -IROutput trace(const ARGenerationOutputPast& input, const ARGenerationArgs& args) override { - ir::IRContext::ptr_t llm_ir = nullptr; - - // 1. 开始trace:启用操作记录模式 - ir::lowlevel::traceStart(); - - // 2. 获取输入序列 - auto sequence = input.at("sequence"); - - // 3. 
Trace embedding层 - // 在trace模式下,embedding操作会被记录到IR中 - auto input_embeddings = model.embedding_(sequence); - - // 4. 暂停trace:停止记录操作 - // 接下来的操作(如创建position_ids)不需要被trace - ir::lowlevel::traceYield(); - - // 5. 准备RoPE嵌入(不在trace中) - auto batch_size = sequence.shape()[0]; - auto seq_len = sequence.shape()[1]; - auto position_ids = Tensor::empty({batch_size, seq_len}, kInt64, kCPU); - auto llm_embedding_sin = Tensor::empty({...}, kFloat32, kCPU); - auto llm_embedding_cos = Tensor::empty({...}, kFloat32, kCPU); - - // 6. 继续trace:恢复操作记录 - ir::lowlevel::traceContinue(); - - // 7. Trace模型主体(Transformer层) - // traceModule会记录整个模块的计算图 - auto hidden_states = ir::lowlevel::traceModule( - model, input_embeddings, llm_embedding_sin, llm_embedding_cos)[0]; - - // 8. 截取最后一个位置 - auto S = hidden_states.shape()[1]; - hidden_states = hidden_states[{kAll, {S - 1}, kAll}]; - - // 9. Trace语言模型头 - Tensor logits; - if (!tie_word_embeddings_) { - logits = lm_head_(hidden_states); - } - - // 10. 停止trace并获取IR - llm_ir = ir::lowlevel::traceStop(); - - return {{"model", llm_ir}}; -} -``` - -#### 10.2.1 trace函数说明: - -- **`traceStart()`**: - - 启用trace模式 - - 后续的操作会被记录到IR中 - - 操作不会实际执行,只记录计算图结构 - -- **`traceYield()`**: - - 暂停trace - - 接下来的操作不会被记录 - - 用于执行一些辅助操作(如创建辅助张量) - -- **`traceContinue()`**: - - 恢复trace - - 继续记录操作 - -- **`traceModule()`**: - - 专门用于trace模块(Module) - - 会递归trace模块的所有子操作 - - 返回模块的输出 - -- **`traceStop()`**: - - 停止trace - - 构建最终的IR - - 返回IRContext指针 - -#### 10.3 IR(Intermediate Representation) - -- **结构**: 计算图的节点和边 -- **节点**: 操作(Op),如MatMul、Add、Softmax等 -- **边**: 张量(Tensor)的流动 -- **用途**: - - 图优化 - - 后端代码生成 - - 静态分析 - -#### 10.4 返回值 - -- **类型**: `IROutput = std::unordered_map` -- **内容**: `{{"model", ir_context}}` -- **注意**: 此时KV Cache可能被更新,需要后续清理 - -## 11. 
QNN Graph Rewrite Pass(35-39行) - -```cpp - // QNN Graph Rewrite Pass - mllm::ir::PassManager rewritePM(irs["model"]); - rewritePM.reg(mllm::qnn::createQNNGraphIOTensorPass()); - rewritePM.reg(mllm::qnn::createQNNOpNamingPass()); - rewritePM.run(); -``` - -### 详细说明: - -#### 11.1 PassManager - -- **位置**: `mllm/compile/PassManager.hpp` -- **作用**: 管理IR转换Pass的执行 -- **构造函数**: 接收IR上下文 - -#### 11.2 Pass注册和执行 - -- **`reg()`**: 注册Pass到执行队列 -- **`run()`**: 按顺序执行所有Pass - -#### 11.3 QNNGraphIOTensorPass - -- **作用**: - - 识别输入/输出张量 - - 为QNN图准备IO张量 - - 处理形状信息 - -#### 11.4 QNNOpNamingPass - -- **作用**: - - 为QNN操作生成唯一名称 - - 确保操作名称符合QNN要求 - - 便于调试和日志记录 - -## 12. 输出IR到文件(42行) - -```cpp - mllm::redirect("qwen_npu.mir", [&]() { mllm::print(irs["model"]); }); -``` - -### 详细说明: - -- **`mllm::redirect()`**: 重定向输出到文件 -- **`mllm::print()`**: 打印IR的文本表示 -- **用途**: 调试,查看优化后的计算图结构 -- **文件内容**: MIR(MLLM IR)格式的计算图 - -## 13. QNN Graph Build Pass(44-47行) - -```cpp - // QNN Graph Build Pass - mllm::ir::PassManager graphBuildPM(irs["model"]); - graphBuildPM.reg(mllm::qnn::createQNNGraphBuildPass()); - graphBuildPM.run(); -``` - -### 详细说明: - -#### 13.1 QNNGraphBuildPass - -- **位置**: `mllm/backends/qnn/passes/QNNGraphBuildPass.hpp` -- **作用**: 将MLLM IR转换为QNN图 -- **转换过程**: - 1. **遍历IR节点**: 访问计算图中的每个操作节点 - 2. **操作映射**: 将MLLM操作映射到QNN操作 - - MatMul → QNN MatMul - - Add → QNN ElementWiseAdd - - Softmax → QNN Softmax - - 等等 - 3. **创建QNN图**: 使用QNN API创建图结构 - 4. **图优化**: - - 操作融合(如MatMul+Add → FullyConnected) - - 量化处理(w8a16量化) - - 内存优化 - 5. **编译图**: 编译为QNN可执行图 -- **结果**: - - 模型可以在QNN运行时执行 - - 图被编译并优化,准备在NPU上运行 - - 后续forward()调用会使用这个编译好的图 - -## 14. 清空KV Cache(50行) - -```cpp - // cache has been updated due to trace, clear cache - model.model.clearKVCache(); -``` - -### 详细说明: - -- **原因**: trace过程中可能执行了前向传播,更新了KV Cache -- **作用**: 清空所有层的KV Cache,准备新的推理 -- **位置**: `QwenText::clearKVCache()`,递归清空所有注意力层的缓存 - -## 15. 
分词输入文本(52-53行) - -```cpp - auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "How are you?"})["sequence"]; - print(raw_input_tokens); -``` - -### 详细说明: - -#### 15.1 convertMessage() - -- **输入**: `QwenMessage` 结构,包含 `prompt` 字段 -- **处理流程**: - 1. 应用消息模板(添加系统提示等) - 2. BPE分词 - 3. 词汇表查找,转换为token IDs - 4. 创建Tensor并返回 - -#### 15.2 返回值 - -- **类型**: `ARGenerationOutputPast` -- **内容**: `{{"sequence", Tensor}}` -- **Tensor形状**: `[1, token_count]`,例如 `[1, 15]` - -#### 15.3 print() - -- **作用**: 打印Tensor内容(用于调试) -- **输出**: token IDs数组 - -## 16. 手动填充输入(55-59行) - -```cpp - // manually set input data as fill op is not supported in QNN - auto ptr = inputs["sequence"].ptr(); - auto input_data = raw_input_tokens.ptr(); - for (int i = 0; i < raw_input_tokens.shape()[1]; ++i) { ptr[i] = input_data[i]; } - for (int i = raw_input_tokens.shape()[1]; i < 32; ++i) { ptr[i] = -1; } -``` - -### 详细说明: - -#### 16.1 为什么手动填充? - -- **原因**: QNN后端不支持Fill操作 -- **解决方案**: 在CPU上手动填充,然后传递给QNN - -#### 16.2 ptr() - -- **位置**: `mllm/core/Tensor.hpp` -- **作用**: 获取张量的原始指针 -- **类型**: `int64_t*` -- **注意**: 必须确保张量已分配内存(已调用alloc()) - -#### 16.3 填充逻辑 - -```cpp -// 1. 复制有效token -for (int i = 0; i < raw_input_tokens.shape()[1]; ++i) { - ptr[i] = input_data[i]; -} - -// 2. 填充padding(-1表示无效位置) -for (int i = raw_input_tokens.shape()[1]; i < 32; ++i) { - ptr[i] = -1; -} -``` - -**结果**: -- 前15个位置:有效token IDs -- 后17个位置:-1(padding) - -## 17. 前向推理(61行) - -```cpp - auto out = model.forward(inputs, {{"seq_len", mllm::AnyValue((int)raw_input_tokens.shape()[1])}})["sequence"]; -``` - -### 详细说明: - -#### 17.1 forward()方法 - -- **位置**: `mllm/models/qwen_npu/modeling_qwen_npu.hpp:456-512` -- **签名**: -```cpp -ARGenerationOutputPast forward( - const ARGenerationOutputPast& input, - const ARGenerationArgs& args -) override -``` - -#### 17.2 forward()内部流程: - -```cpp -ARGenerationOutputPast forward(...) { - // 1. 
获取输入序列 - auto sequence = input.at("sequence"); - auto batch_size = sequence.shape()[0]; // 1 - auto seq_len = sequence.shape()[1]; // 32 - - // 2. 获取真实序列长度 - auto real_seq = args.count("seq_len") - ? args.at("seq_len").get() // 15 - : seq_len; // 32(fallback) - - // 3. 生成位置编码 - Tensor position_ids = Tensor::empty({batch_size, seq_len}, kInt64, kCPU).alloc(); - // 填充 [0, 1, 2, ..., 31] - - // 4. 生成RoPE嵌入 - auto [llm_embedding_sin, llm_embedding_cos] = - makeRotaryPosEmbedding(position_ids, model.getBuffer("inv_freq"), 1.0f); - - // 5. 文本嵌入 - auto input_embeddings = model.embedding_(sequence); - // shape: [1, 32, hidden_size] - // - // 注意:QwenText中的embedding使用了QNN版本 - // (modeling_qwen_npu.hpp:415: embedding_.to(kQNN)) - // QNN版本的embedding会特殊处理padding token(-1): - // - 可能映射到特殊的embedding向量 - // - 或返回零向量 - // - 确保padding位置不影响计算 - - // 6. Transformer前向传播 - auto hidden_states = model(input_embeddings, llm_embedding_sin, llm_embedding_cos)[0]; - // shape: [1, 32, hidden_size] - - // 7. 截取有效部分 - hidden_states = hidden_states[{kAll, {real_seq - 1}, kAll}]; - // shape: [1, 1, hidden_size](只取最后一个有效位置) - - // 8. 语言模型头 - Tensor logits; - if (!tie_word_embeddings_) { - logits = lm_head_(hidden_states); - } else { - // 共享权重:使用embedding权重 - auto emb_w = model.embedding_.weight(); - logits = nn::functional::matmul(hidden_states, emb_w, false, true); - } - // shape: [1, 1, vocab_size] - - return { - {"sequence", logits}, - {"position_ids", position_ids} - }; -} -``` - -#### 17.3 关键点: - -1. **real_seq参数**: 告知模型真实序列长度,用于: - - 截取输出(只取最后一个有效位置) - - 可能影响attention mask(虽然当前实现可能未完全处理) - -2. **位置编码**: 生成 `[0, 1, 2, ..., 31]`,即使有padding - -3. **Embedding处理**: - - token ID `-1` 可能被映射为特殊embedding - - 或返回零向量 - -4. 
**输出截取**: - ```cpp - hidden_states[{kAll, {real_seq - 1}, kAll}] - ``` - - `kAll`: 保留batch维度 - - `{real_seq - 1}`: 只取第real_seq-1个位置(最后一个有效位置) - - `kAll`: 保留hidden维度 - -#### 17.4 返回值 - -- **类型**: `ARGenerationOutputPast` -- **内容**: - - `"sequence"`: logits,形状 `[1, 1, vocab_size]` - - `"position_ids"`: 位置编码,形状 `[1, 32]` - -## 18. 采样(63行) - -```cpp - auto sampled = model.sampleGreedy(out); -``` - -### 详细说明: - -#### 18.1 sampleGreedy() - -- **位置**: `mllm/models/ARGeneration.cpp` -- **作用**: 贪心采样,选择概率最高的token -- **实现**: - -```cpp -int64_t ARGeneration::sampleGreedy(Tensor& logits) { - // 1. 获取最后一个位置的logits - auto last_logits = getLastLogits(logits); - // shape: [vocab_size] - - // 2. 找到最大值索引 - int64_t max_idx = 0; - float max_val = last_logits.ptr()[0]; - for (int i = 1; i < vocab_size; ++i) { - if (last_logits.ptr()[i] > max_val) { - max_val = last_logits.ptr()[i]; - max_idx = i; - } - } - - return max_idx; -} -``` - -- **输入**: logits,形状 `[1, 1, vocab_size]` -- **输出**: token ID(int64_t) - -## 19. 输出结果(64行) - -```cpp - std::wcout << "token: " << sampled << " " << qwen_tokenizer.detokenize(sampled) << "\n"; -``` - -### 详细说明: - -#### 19.1 detokenize() - -- **位置**: `mllm/models/qwen_npu/tokenization_qwen.hpp` -- **作用**: 将token ID转换回文本 -- **流程**: - 1. 查找词汇表,获取token字符串 - 2. 合并BPE tokens - 3. 解码为UTF-8文本 - -#### 19.2 输出 - -- **格式**: `token: ` -- **示例**: `token: 1234 I'm` - -## 20. 返回(66行) - -```cpp - return 0; -``` - -### 详细说明: - -- 返回0表示程序成功执行 -- `MLLM_MAIN`宏会捕获返回值并传递给系统 - -## 数据流总结 - -### 完整执行流程 - -``` -┌─────────────────────────────────────────────────────────────┐ -│ 1. 初始化阶段 │ -├─────────────────────────────────────────────────────────────┤ -│ initQnnBackend() │ -│ └─> 注册QNN后端操作 │ -│ └─> 初始化QNN运行时环境 │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 2. 
模型加载阶段 │ -├─────────────────────────────────────────────────────────────┤ -│ QwenNPUConfig(config_path) │ -│ └─> 从JSON读取模型配置 │ -│ └─> 解析架构参数(hidden_size, num_layers等) │ -│ └─> 解析量化配置(w8a16) │ -│ │ -│ QwenForCausalLM("", cfg) │ -│ └─> 创建模型结构 │ -│ └─> 注册子模块(QwenText, lm_head等) │ -│ │ -│ mllm::load(model_path, file_version) │ -│ └─> 打开.mllm文件 │ -│ └─> 读取所有权重张量 │ -│ └─> 返回ParameterFile │ -│ │ -│ model.load(param) │ -│ └─> 递归加载子模块参数 │ -│ └─> 将权重复制到对应模块 │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 3. 图构建阶段(Trace) │ -├─────────────────────────────────────────────────────────────┤ -│ inputs = {{"sequence", Tensor::empty({1, 32}, ...)}} │ -│ └─> 创建占位符输入(用于确定形状) │ -│ │ -│ model.trace(inputs, {}) │ -│ ├─> traceStart() │ -│ ├─> model.embedding_(sequence) [记录到IR] │ -│ ├─> traceYield() │ -│ ├─> 创建position_ids, RoPE嵌入 [不记录] │ -│ ├─> traceContinue() │ -│ ├─> traceModule(model, ...) [记录整个模型] │ -│ ├─> lm_head_(hidden_states) [记录到IR] │ -│ └─> traceStop() → 返回IR │ -│ │ -│ PassManager: QNNGraphIOTensorPass │ -│ └─> 识别输入/输出张量 │ -│ └─> 为QNN图准备IO张量 │ -│ │ -│ PassManager: QNNOpNamingPass │ -│ └─> 为QNN操作生成唯一名称 │ -│ │ -│ PassManager: QNNGraphBuildPass │ -│ └─> 将MLLM IR转换为QNN图 │ -│ └─> 操作映射(MatMul → QNN MatMul) │ -│ └─> 图优化(融合、量化) │ -│ └─> 编译为QNN可执行图 │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 4. 推理准备阶段 │ -├─────────────────────────────────────────────────────────────┤ -│ model.model.clearKVCache() │ -│ └─> 清空所有注意力层的KV Cache │ -│ │ -│ qwen_tokenizer.convertMessage({.prompt = "How are you?"}) │ -│ ├─> 应用消息模板 │ -│ ├─> BPE分词 │ -│ ├─> 词汇表查找 → token IDs │ -│ └─> 创建Tensor: [1, 15] │ -│ │ -│ 手动填充输入 │ -│ ├─> 复制有效token (0-14) │ -│ └─> 填充padding (-1) (15-31) │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 5. 
前向推理阶段 │ -├─────────────────────────────────────────────────────────────┤ -│ model.forward(inputs, {{"seq_len", 15}}) │ -│ ├─> 获取输入序列 [1, 32] │ -│ ├─> 生成position_ids [0, 1, 2, ..., 31] │ -│ ├─> 生成RoPE嵌入 (sin, cos) │ -│ ├─> model.embedding_(sequence) │ -│ │ └─> QNN版本处理padding token (-1) │ -│ │ └─> 输出: [1, 32, hidden_size] │ -│ ├─> model(input_embeddings, sin, cos) │ -│ │ ├─> 遍历所有Transformer层 │ -│ │ ├─> 每层: Attention + MLP │ -│ │ └─> 输出: [1, 32, hidden_size] │ -│ ├─> 截取最后一个有效位置 │ -│ │ └─> hidden_states[{kAll, {14}, kAll}] │ -│ │ └─> 输出: [1, 1, hidden_size] │ -│ ├─> lm_head_(hidden_states) │ -│ │ └─> 输出: [1, 1, vocab_size] │ -│ └─> 返回: {{"sequence", logits}, {"position_ids", ...}} │ -└─────────────────────────────────────────────────────────────┘ - ↓ -┌─────────────────────────────────────────────────────────────┐ -│ 6. 采样和输出阶段 │ -├─────────────────────────────────────────────────────────────┤ -│ model.sampleGreedy(out) │ -│ ├─> 获取最后一个位置的logits │ -│ ├─> 找到最大值索引 │ -│ └─> 返回token ID (int64_t) │ -│ │ -│ qwen_tokenizer.detokenize(sampled) │ -│ ├─> 查找词汇表 │ -│ ├─> 合并BPE tokens │ -│ └─> 解码为UTF-8文本 │ -└─────────────────────────────────────────────────────────────┘ -``` - -### 关键数据流 - -``` -文本输入: "How are you?" - ↓ -分词器: convertMessage() - ↓ -Token IDs: [1234, 5678, 9012, ...] (15个tokens) - ↓ -填充: [1234, 5678, ..., -1, -1, ...] (32个位置) - ↓ -Embedding: [1, 32, hidden_size] - ↓ -Transformer层 × N: [1, 32, hidden_size] - ↓ -截取: [1, 1, hidden_size] (只取最后一个有效位置) - ↓ -LM Head: [1, 1, vocab_size] - ↓ -采样: token ID (int64_t) - ↓ -Detokenize: "I'm" -``` - -## 关键数据结构 - -### Tensor -- **位置**: `mllm/core/Tensor.hpp` -- **组成**: - - `TensorViewImpl`: 视图实现(形状、步长) - - `TensorStorage`: 存储(实际数据) -- **生命周期**: - 1. `Tensor::empty()`: 创建对象(无内存) - 2. `.alloc()`: 分配内存 - 3. 使用指针操作数据 - 4. 
自动析构释放内存 -- **设备管理**: - - 默认在CPU上创建 - - 可以通过`.to(device)`转换设备 - - QNN后端会自动处理设备转换 - -### 设备类型 -- **`kCPU`**: CPU内存,用于: - - 输入/输出张量 - - 辅助计算(如position_ids) - - 不支持QNN的操作 -- **`kQNN`**: QNN设备,用于: - - 模型权重 - - 主要计算(在NPU上执行) - - 需要QNN支持的操作 -- **设备转换**: - - 自动转换:操作会自动将输入转换到正确设备 - - 手动转换:`.to(device)`显式转换 - - 注意:QNN操作要求输入在QNN设备上 - -### ARGenerationOutputPast -- **类型**: `std::unordered_map` -- **用途**: 模型输入/输出的统一接口 -- **键**: 模型特定的字符串标识符 - -### IRContext -- **位置**: `mllm/compile/ir/` -- **组成**: 计算图的节点和边 -- **用途**: 图优化和代码生成 - -## 与其他文件的连接 - -1. **模型定义**: `modeling_qwen_npu.hpp` - - `QwenForCausalLM`: 主模型类(继承ARGeneration) - - `forward()`: 前向传播实现 - - `trace()`: 图构建实现 - - `QwenText`: Transformer堆叠(modeling_qwen_npu.hpp:403) - - `decode_blocks_`: 多层QwenDecoder - - `norm_`: RMSNorm层 - - `embedding_`: 词嵌入层(使用QNN版本处理padding) - - `forward()`: 执行所有Transformer层 - - `QwenDecoder`: 单个Transformer层 - - 包含注意力层和MLP层 - - `QwenAttentionMatmul`: 注意力层 - - 包含QKV投影、RoPE、CausalMask、Softmax等 - - `QwenMLP`: MLP层 - - Gate、Up、Down投影,SiLU激活 - -2. **分词器**: `tokenization_qwen.hpp` - - `QwenTokenizer`: 分词器类 - - BPE实现 - -3. **后端**: `mllm/backends/qnn/` - - QNN操作实现 - - Pass实现 - - 运行时集成 - -4. **核心**: `mllm/core/` - - `Tensor`: 张量实现 - - `Context`: 全局上下文 - - `MemoryManager`: 内存管理 - -5. 
**编译**: `mllm/compile/` - - `PassManager`: Pass管理 - - `ir/`: IR定义和操作 - diff --git a/examples/qwen_npu/main.cpp b/examples/qwen_npu/main.cpp index 3cdc09862..c50394ead 100644 --- a/examples/qwen_npu/main.cpp +++ b/examples/qwen_npu/main.cpp @@ -67,6 +67,7 @@ MLLM_MAIN({ model.model.clearKVCache(); auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "提示:海洋世界里,鲸鱼是地球上体型最为庞大的哺乳动物,它们拥有流线型的身躯,主要通过头顶的喷水孔进行呼吸。与终生生活在水下并利用鱼鳃从水中提取溶解氧的鱼类有着本质区别。鲸鱼无法在水下直接呼吸氧气,因此它们需要耗费大量的体力,定时浮出水面完成一次快速而彻底的换气过程。令人惊奇的是,当它们处于睡眠状态时,为了确保不会因为忘记呼吸而发生危险,它们只会关闭大脑的一半来进行休息,另一半大脑则始终保持清醒和警觉,以便及时引导身体浮上水面。这种独特的生存机制是它们在深海中延续生命的关键。问题:鲸鱼与鱼类在呼吸方式上的根本区别是什么?它们在睡觉时会采取什么特殊的措施来保证安全和生存?"})["sequence"]; + // auto raw_input_tokens = qwen_tokenizer.convertMessage({.prompt = "提示:海洋世界里,鲸鱼是体型庞大的哺乳动物,它们通过喷水孔呼吸。与鱼类不同,鲸鱼无法在水下直接呼吸氧气。它们会定时浮出水面进行换气,每次换气需要消耗大量的体力。当它们睡觉时,只会关闭大脑的一半,另一半则保持清醒,以确保不忘记浮出水面呼吸。问题:鲸鱼与鱼类在呼吸方式上的根本区别是什么?它们在睡觉时会采取什么特殊的措施来保证安全?"})["sequence"]; print(raw_input_tokens); MLLM_INFO("raw_input_tokens shape: {} {}", raw_input_tokens.shape()[0], raw_input_tokens.shape()[1]); @@ -126,6 +127,11 @@ MLLM_MAIN({ model.forward(prefill_inputs, {{"seq_len", mllm::AnyValue(mllm::any_copy_tag, chunk_prompt_len)}}); auto& chunk_logits = chunk_output["sequence"]; + // auto tmp_next_token = model.sampleGreedy(chunk_logits); + // std::wcout << qwen_tokenizer.detokenize(tmp_next_token) << "\n"; + // 打印原字符串当前位置的detokenize结果 + // std::wcout << qwen_tokenizer.detokenize(sequence_ptr[chunk_start + chunk_prompt_len]) << "\n"; + if (!is_last_prompt_chunk) { MLLM_INFO("Chunk {} processed as prompt only, moving to next chunk", chunk_index); chunk_logits.delete_(); @@ -153,10 +159,10 @@ MLLM_MAIN({ auto emit_token = [&](int64_t token_id) { std::wcout << qwen_tokenizer.detokenize(token_id); - // if (token_id == eos_token_id) { - // MLLM_INFO("EOS token detected, stopping decode"); - // reached_eos = true; - // } + if (token_id == eos_token_id) { + MLLM_INFO("EOS token detected, stopping decode"); + reached_eos = true; + } }; int 
current_chunk_len = chunk_prompt_len; diff --git a/mllm/backends/cpu/ops/CausalMaskOp.cpp b/mllm/backends/cpu/ops/CausalMaskOp.cpp index b91dda5d6..074ca0199 100644 --- a/mllm/backends/cpu/ops/CausalMaskOp.cpp +++ b/mllm/backends/cpu/ops/CausalMaskOp.cpp @@ -53,8 +53,8 @@ void CPUCausalMaskOp::forward(const std::vector& inputs, std::vector copy_count ? (D - copy_count) : 0; + const size_t copy_count = D - S + r + 1; + const size_t fill_count = std::max(D - copy_count, (size_t)0); memcpy(o_ptr + r * D, i_ptr + r * D, copy_count * sizeof(float)); @@ -68,8 +68,8 @@ void CPUCausalMaskOp::forward(const std::vector& inputs, std::vector copy_count ? (D - copy_count) : 0; + const size_t copy_count = D - S + r + 1; + const size_t fill_count = std::max(D - copy_count, (size_t)0); memcpy(o_ptr + r * D, i_ptr + r * D, copy_count * sizeof(float)); diff --git a/tests/qnn/CMakeLists.txt b/tests/qnn/CMakeLists.txt deleted file mode 100644 index 59dc90870..000000000 --- a/tests/qnn/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) MLLM Team. -# Licensed under the MIT License. - -if(MLLM_BUILD_QNN_BACKEND) - add_executable(QNNOutputOrderTest QNNOutputOrderTest.cpp) - target_link_libraries(QNNOutputOrderTest PRIVATE MllmRT MllmCPUBackend MllmQNNBackend) - target_include_directories(QNNOutputOrderTest PRIVATE ${MLLM_INCLUDE_DIR}) -endif() - diff --git a/tests/qnn/QNNOutputOrderTest.cpp b/tests/qnn/QNNOutputOrderTest.cpp deleted file mode 100644 index 6b893075e..000000000 --- a/tests/qnn/QNNOutputOrderTest.cpp +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) MLLM Team. -// Licensed under the MIT License. 
- -#include "mllm/backends/qnn/QNNModel.hpp" -#include "mllm/backends/qnn/QNNUtils.hpp" -#include "mllm/core/DataTypes.hpp" -#include "mllm/utils/Log.hpp" -#include -#include -#include - -using namespace mllm; -using namespace mllm::qnn; - -// Mock QNN interface for testing -struct MockQnnInterface { - // Minimal mock implementation -}; - -// Test QNNModel output order mapping -void testOutputOrderMapping() { - MLLM_INFO("Testing QNNModel output order mapping..."); - - // Create a mock QNN interface (in real usage, this would be from QNN SDK) - // For testing purposes, we'll create a minimal test - - // Note: This test requires actual QNN backend initialization - // In a real test environment, you would: - // 1. Initialize QNN backend - // 2. Create a QNNModel - // 3. Add tensors in a specific order - // 4. Set expected output order - // 5. Verify the mapping is correct - - MLLM_INFO("QNNModel output order mapping test structure:"); - MLLM_INFO(" 1. Create QNNModel with expected output order"); - MLLM_INFO(" 2. Add output tensors (QNN order)"); - MLLM_INFO(" 3. Verify qnnOutputNameToIndex_ mapping is correct"); - MLLM_INFO(" 4. Verify getQnnOutputIndex() returns correct indices"); - MLLM_INFO(" 5. Verify getExpectedOutputOrder() returns correct order"); - - // Example test scenario: - // Expected order (MLLM): ["output_0", "output_1", "output_2"] - // QNN order (actual): ["output_2", "output_0", "output_1"] - // Mapping should be: - // MLLM[0] = QNN[1] (output_0) - // MLLM[1] = QNN[2] (output_1) - // MLLM[2] = QNN[0] (output_2) - - MLLM_INFO("Test structure created. 
Integration test requires QNN backend."); -} - -// Test output reordering logic -void testOutputReordering() { - MLLM_INFO("Testing output reordering logic..."); - - // Simulate the reordering logic - std::vector expectedOrder = {"output_0", "output_1", "output_2"}; - std::map qnnOutputNameToIndex = { - {"output_2", 0}, // QNN returns in this order - {"output_0", 1}, - {"output_1", 2} - }; - - // Simulate output tensors (in QNN order) - std::vector qnnOutputs = {"output_2", "output_0", "output_1"}; - - // Reorder according to expected order - std::vector reorderedIndices; - for (const auto& expected_name : expectedOrder) { - auto it = qnnOutputNameToIndex.find(expected_name); - if (it != qnnOutputNameToIndex.end()) { - reorderedIndices.push_back(it->second); - MLLM_INFO(" Mapping: MLLM[{}] = QNN[{}] (tensor: {})", - reorderedIndices.size() - 1, it->second, expected_name); - } - } - - // Verify the mapping - assert(reorderedIndices.size() == expectedOrder.size()); - assert(reorderedIndices[0] == 1); // output_0 is at QNN index 1 - assert(reorderedIndices[1] == 2); // output_1 is at QNN index 2 - assert(reorderedIndices[2] == 0); // output_2 is at QNN index 0 - - MLLM_INFO("Output reordering logic test passed!"); -} - -int main() { - MLLM_INFO("=== QNN Output Order Test ==="); - - testOutputOrderMapping(); - testOutputReordering(); - - MLLM_INFO("=== All tests passed ==="); - return 0; -} - From 079a01efe9c2d2b0251cb4547c87843a339d7d63 Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Wed, 19 Nov 2025 14:59:29 +0000 Subject: [PATCH 5/8] update examples/qwen_npu/main.cpp --- examples/qwen_npu/main.cpp | 39 +++++++++++++------------------------- 1 file changed, 13 insertions(+), 26 deletions(-) diff --git a/examples/qwen_npu/main.cpp b/examples/qwen_npu/main.cpp index c50394ead..e64b8d194 100644 --- a/examples/qwen_npu/main.cpp +++ b/examples/qwen_npu/main.cpp @@ -51,18 +51,6 @@ MLLM_MAIN({ graphBuildPM.reg(mllm::qnn::createQNNGraphBuildPass()); 
graphBuildPM.run(); - // Debug: Check registered buffer count after graph build - { - auto qnn_backend = mllm::Context::instance().getBackend(mllm::kQNN); - if (qnn_backend) { - auto allocator = std::static_pointer_cast(qnn_backend->allocator()); - if (allocator) { - auto stats = allocator->getRegisteredBufferStats(); - MLLM_INFO("After graph build: {} buffers registered, {} MB", stats.count, stats.total_bytes / (1024 * 1024)); - } - } - } - // cache has been updated due to trace, clear cache model.model.clearKVCache(); @@ -98,16 +86,16 @@ MLLM_MAIN({ for (int i = 0; i < chunk_prompt_len; ++i) { sequence_ptr[i] = input_data[chunk_start + i]; } for (int i = chunk_prompt_len; i < chunk_size; ++i) { sequence_ptr[i] = -1; } - MLLM_INFO("=== Prefill Chunk {} ===", chunk_index); - MLLM_INFO("Chunk start: {}, Chunk prompt length: {}", chunk_start, chunk_prompt_len); + // MLLM_INFO("=== Prefill Chunk {} ===", chunk_index); + // MLLM_INFO("Chunk start: {}, Chunk prompt length: {}", chunk_start, chunk_prompt_len); // Calculate absolute sequence length from the start of the entire sequence const int absolute_seq_len = chunk_start + chunk_prompt_len; - MLLM_INFO("Absolute sequence length: {}", absolute_seq_len); + // MLLM_INFO("Absolute sequence length: {}", absolute_seq_len); // Align KV cache so StaticCache writes start at the chunk's absolute offset model.setKVCacheSeqCnt(chunk_start); - MLLM_INFO("KV cache seq_cnt set to: {}", chunk_start); + // MLLM_INFO("KV cache seq_cnt set to: {}", chunk_start); // Generate position_ids starting from chunk_start for multi-chunk scenarios auto position_ids_tensor = mllm::Tensor::empty({1, chunk_size}, mllm::kInt64, mllm::kCPU).alloc(); @@ -129,11 +117,10 @@ MLLM_MAIN({ // auto tmp_next_token = model.sampleGreedy(chunk_logits); // std::wcout << qwen_tokenizer.detokenize(tmp_next_token) << "\n"; - // 打印原字符串当前位置的detokenize结果 // std::wcout << qwen_tokenizer.detokenize(sequence_ptr[chunk_start + chunk_prompt_len]) << "\n"; if 
(!is_last_prompt_chunk) { - MLLM_INFO("Chunk {} processed as prompt only, moving to next chunk", chunk_index); + // MLLM_INFO("Chunk {} processed as prompt only, moving to next chunk", chunk_index); chunk_logits.delete_(); chunk_output.clear(); continue; @@ -146,7 +133,7 @@ MLLM_MAIN({ break; } - MLLM_INFO("=== Decode Phase (Chunk {}) ===", chunk_index); + // MLLM_INFO("=== Decode Phase (Chunk {}) ===", chunk_index); // Use the prefill logits as the first decode step auto next_token = model.sampleGreedy(chunk_logits); @@ -158,7 +145,7 @@ MLLM_MAIN({ chunk_output.clear(); auto emit_token = [&](int64_t token_id) { - std::wcout << qwen_tokenizer.detokenize(token_id); + std::wcout << qwen_tokenizer.detokenize(token_id) << std::flush; if (token_id == eos_token_id) { MLLM_INFO("EOS token detected, stopping decode"); reached_eos = true; @@ -178,8 +165,8 @@ MLLM_MAIN({ // Calculate absolute sequence length from the start of the entire sequence const int absolute_seq_len = chunk_start + current_chunk_len; - MLLM_INFO("--- Chunk {} Decode Step {} ---", chunk_index, total_decode_steps); - MLLM_INFO("Current chunk length: {} (relative), Absolute sequence length: {} (absolute)", current_chunk_len, absolute_seq_len); + // MLLM_INFO("--- Chunk {} Decode Step {} ---", chunk_index, total_decode_steps); + // MLLM_INFO("Current chunk length: {} (relative), Absolute sequence length: {} (absolute)", current_chunk_len, absolute_seq_len); // Keep padding clean for the remaining area for (int i = current_chunk_len; i < chunk_size; ++i) { sequence_ptr[i] = -1; } @@ -187,7 +174,7 @@ MLLM_MAIN({ // Set KV cache to absolute sequence length (where the next token will be written) // [Maybe Wrong] model.setKVCacheSeqCnt(chunk_start); - MLLM_INFO("KV cache seq_cnt set to: {} (relative position)", chunk_start); + // MLLM_INFO("KV cache seq_cnt set to: {} (relative position)", chunk_start); // Prepare decode input with position_ids from previous step mllm::models::ARGenerationOutputPast 
decode_inputs{ @@ -212,9 +199,9 @@ MLLM_MAIN({ current_chunk_len++; } - MLLM_INFO("=== Chunk {} Decode Complete ===", chunk_index); - MLLM_INFO("Chunk final length: {}", current_chunk_len); - MLLM_INFO("Remaining capacity: {}", chunk_size - current_chunk_len); + // MLLM_INFO("=== Chunk {} Decode Complete ===", chunk_index); + // MLLM_INFO("Chunk final length: {}", current_chunk_len); + // MLLM_INFO("Remaining capacity: {}", chunk_size - current_chunk_len); } std::wcout << L"\n"; From cdb7dfa479f98fd6796c3660a1ca7280c0847aac Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Thu, 20 Nov 2025 03:14:23 +0000 Subject: [PATCH 6/8] Add code comments to QNN backend --- mllm/backends/qnn/QNNAllocator.cpp | 145 ++++++++++++++++++++++++++++- mllm/backends/qnn/QNNAllocator.hpp | 54 +++++++++-- mllm/backends/qnn/QNNBackend.cpp | 13 ++- mllm/backends/qnn/QNNUtils.cpp | 9 ++ 4 files changed, 204 insertions(+), 17 deletions(-) diff --git a/mllm/backends/qnn/QNNAllocator.cpp b/mllm/backends/qnn/QNNAllocator.cpp index b910d9aa1..877e2aebc 100644 --- a/mllm/backends/qnn/QNNAllocator.cpp +++ b/mllm/backends/qnn/QNNAllocator.cpp @@ -75,10 +75,27 @@ bool QNNAllocator::alloc(Storage* storage) { return true; } +/** + * @brief Free a storage buffer and manage QNN memory handle lifecycle + * + * This function handles the complex lifecycle of QNN shared buffers: + * 1. Checks if the buffer is already freed or never allocated + * 2. Detects if multiple pointers share the same mem_handle (aliases) + * 3. Only de-registers mem_handle when it's the last reference + * 4. 
Updates tensor ID/name mappings to point to alternative pointers if needed + * + * Key design considerations: + * - QNN doesn't support re-registering a de-registered buffer (fd may be invalidated) + * - Multiple buffer pointers can share the same mem_handle (common in decode phase) + * - Tensor mappings must be updated when pointers are redirected to aliases + * + * @param storage Pointer to the storage object containing the buffer to free + */ void QNNAllocator::free(Storage* storage) { auto ptr = storage->ptr_; // Early return if ptr is nullptr or not in qnnMemPtrSet_ (already freed or never allocated) + // This is common during decode phase when buffers are reused, so we silently ignore if (ptr == nullptr) { // too noisy during decode; silently ignore nullptr frees return; @@ -89,6 +106,9 @@ void QNNAllocator::free(Storage* storage) { return; } + // Check if any other buffer pointer shares the same mem_handle (alias detection) + // This is important because in decode phase, multiple tensor wrappers may reference + // the same underlying buffer through different pointers void* alternative_ptr = nullptr; // Another ptr using the same mem_handle, if any if (ptrToFdAndMemHandleMap_.count(ptr)) { @@ -96,6 +116,7 @@ void QNNAllocator::free(Storage* storage) { auto mem_handle = iter->second.second; // Check if any other ptr is using the same mem_handle + // This handles the case where buffer reuse creates multiple pointers to the same mem_handle for (const auto& [other_ptr, fd_and_handle] : ptrToFdAndMemHandleMap_) { if (other_ptr != ptr && fd_and_handle.second == mem_handle) { alternative_ptr = other_ptr; @@ -104,7 +125,9 @@ void QNNAllocator::free(Storage* storage) { } // Only deRegister if this is the last ptr using this mem_handle + // If there are aliases, we must keep the mem_handle registered if (alternative_ptr == nullptr) { + // No aliases found, safe to de-register the mem_handle auto status = qnnInterface_.memDeRegister(&mem_handle, 1); if (status != 
QNN_SUCCESS) { MLLM_WARN("QNNAllocator::free memDeRegister failed, status=0x{:x}, ptr={}, fd={}", status, ptr, iter->second.first); @@ -114,6 +137,7 @@ void QNNAllocator::free(Storage* storage) { ptrToFdAndMemHandleMap_.erase(iter); ptrToSizeMap_.erase(ptr); } else { + // Aliases exist, skip de-registration to avoid breaking other references QNN_ALLOCATOR_VERBOSE("QNNAllocator::free skipping deRegister for ptr={} because other ptrs use the mem_handle", ptr); ptrToFdAndMemHandleMap_.erase(iter); ptrToSizeMap_.erase(ptr); @@ -135,6 +159,7 @@ void QNNAllocator::free(Storage* storage) { // Otherwise, free the buffer and clear mappings if (alternative_ptr != nullptr) { // Update mappings to point to alternative_ptr instead of deleting them + // This ensures that future tensor lookups will find the correct buffer for (auto& entry : tensorIdToPtrMap_) { if (entry.second == ptr) { entry.second = alternative_ptr; } } @@ -152,10 +177,29 @@ void QNNAllocator::free(Storage* storage) { rpcmem_free(ptr); eraseTensorMappingsForPtr(ptr, "free(ptr) -> mem_handle released"); clearLastRegistrationIfMatches(ptr, "free(ptr) -> mem_handle released"); - } +} storage->ptr_ = nullptr; } +/** + * @brief Register a tensor's buffer to QNN shared memory + * + * This function implements a sophisticated buffer reuse mechanism to avoid duplicate registrations + * of the same tensor across prefill and decode phases. It uses a multi-level fallback strategy: + * + * 1. Check if the buffer is already registered (by ptr) + * 2. Check if a buffer exists for the same tensor ID (primary lookup) + * 3. Check if a buffer exists for the same tensor name (fallback lookup) + * 4. Check if we can reuse the last successfully registered buffer (last resort) + * 5. If all fallbacks fail, attempt new registration + * + * This is critical for decode phase where the same tensor (e.g., KV cache) is used repeatedly, + * and QNN HTP device has limited memory resources (~2.5GB typically). 
+ * + * @param storage Storage object containing the buffer to register + * @param qnn_tensor QNN tensor structure to update with mem_handle + * @return true if registration succeeded, false otherwise + */ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_t& qnn_tensor) { MLLM_RT_ASSERT(storage != nullptr); void* ptr = storage->ptr_; @@ -164,13 +208,17 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ MLLM_RT_ASSERT(ptr != nullptr); MLLM_RT_ASSERT(qnnMemPtrSet_.count(ptr)); + // Save original tensor state in case we need to restore on failure auto original_mem_type = QNN_TENSOR_GET_MEM_TYPE(qnn_tensor); Qnn_MemHandle_t original_mem_handle = QNN_TENSOR_GET_MEM_HANDLE(qnn_tensor); + // Extract tensor identification information + // Tensor ID is the primary identifier (more reliable than name) uint32_t tensor_id = QNN_TENSOR_GET_ID(qnn_tensor); const char* tensor_name_cstr = QNN_TENSOR_GET_NAME(qnn_tensor); std::string tensor_name = tensor_name_cstr ? 
tensor_name_cstr : "unknown"; + // Calculate buffer size from tensor dimensions and data type uint32_t rank = QNN_TENSOR_GET_RANK(qnn_tensor); uint32_t* dims_ptr = QNN_TENSOR_GET_DIMENSIONS(qnn_tensor); Qnn_DataType_t data_type = QNN_TENSOR_GET_DATA_TYPE(qnn_tensor); @@ -188,6 +236,7 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ } size_t total_bytes = element_cnt * element_bytes; + // Format shape string for error messages std::string shape_str = "[]"; if (!dims.empty()) { shape_str = "["; @@ -202,12 +251,31 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ "registerQnnTensorToSharedBuffer: ptr={}, tensor_id={}, tensor_name={}, tensorIdToPtrMap_.size()={}", ptr, tensor_id, tensor_name, tensorIdToPtrMap_.size()); + /** + * @brief Update tensor ID/name mappings and size tracking + * + * This lambda updates the internal mappings that allow us to find existing buffers + * for the same tensor in future registration attempts. + */ auto updateMappings = [&](void* mapped_ptr) { tensorIdToPtrMap_[tensor_id] = mapped_ptr; if (tensor_name != "unknown") { tensorNameToPtrMap_[tensor_name] = mapped_ptr; } ptrToSizeMap_[mapped_ptr] = total_bytes; }; + /** + * @brief Reuse an existing registered buffer for this tensor + * + * This lambda implements the core buffer reuse logic: + * 1. Verifies the existing buffer is still registered + * 2. Copies data from new buffer to existing buffer if needed + * 3. Updates tensor to use existing mem_handle + * 4. Updates internal mappings + * 5. 
Frees the new buffer to avoid memory leak + * + * @param existing_ptr Pointer to the existing registered buffer + * @return true if reuse succeeded, false if buffer is no longer registered + */ auto reuseExistingBuffer = [&](void* existing_ptr) -> bool { auto fd_handle_iter = ptrToFdAndMemHandleMap_.find(existing_ptr); if (fd_handle_iter == ptrToFdAndMemHandleMap_.end()) { return false; } @@ -215,11 +283,14 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ Qnn_MemHandle_t existing_mem_handle = fd_handle_iter->second.second; size_t existing_size = ptrToSizeMap_.count(existing_ptr) > 0 ? ptrToSizeMap_[existing_ptr] : 0; + // If pointers differ, copy data from new buffer to existing buffer + // This handles the case where a new buffer was allocated but we want to reuse the old one if (existing_ptr != ptr) { size_t bytes_to_copy = total_bytes; if (existing_size > 0) { bytes_to_copy = std::min(bytes_to_copy, existing_size); } if (bytes_to_copy > 0) { std::memcpy(existing_ptr, ptr, bytes_to_copy); } + // Free the new buffer since we're reusing the existing one if (qnnMemPtrSet_.count(ptr) > 0) { qnnMemPtrSet_.erase(ptr); rpcmem_free(ptr); @@ -227,6 +298,7 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ storage->ptr_ = existing_ptr; } + // Update tensor to use existing mem_handle QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, QNN_TENSORMEMTYPE_MEMHANDLE); QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, existing_mem_handle); updateMappings(existing_ptr); @@ -234,7 +306,8 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ return true; }; - // if already registered, just set the mem handle + // Level 1: Check if this exact buffer pointer is already registered + // This is the fastest path and handles the common case in decode phase if (ptrToFdAndMemHandleMap_.count(ptr) > 0) { Qnn_MemHandle_t mem_handle = ptrToFdAndMemHandleMap_[ptr].second; QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, 
QNN_TENSORMEMTYPE_MEMHANDLE); @@ -244,18 +317,22 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ return true; } - // Check if we can reuse an existing buffer for the same tensor ID + // Level 2: Check if we can reuse an existing buffer for the same tensor ID + // Tensor ID is the primary identifier and is more reliable than name + // This handles decode phase where the same tensor is used repeatedly if (tensorIdToPtrMap_.count(tensor_id) > 0) { void* existing_ptr = tensorIdToPtrMap_[tensor_id]; QNN_ALLOCATOR_VERBOSE("Found existing mapping for tensor_id={}: existing_ptr={}", tensor_id, existing_ptr); if (existing_ptr == nullptr) { + // Mapping exists but buffer was freed, clean up and register new buffer QNN_ALLOCATOR_VERBOSE( "Existing mapping for tensor_id={} has nullptr ptr (buffer was freed), will register new buffer", tensor_id); tensorIdToPtrMap_.erase(tensor_id); } else if (reuseExistingBuffer(existing_ptr)) { return true; } else { + // Buffer exists but is no longer registered, clean up mapping MLLM_WARN("Existing ptr {} for tensor_id={} is no longer registered, removing from map", existing_ptr, tensor_id); tensorIdToPtrMap_.erase(tensor_id); } @@ -263,12 +340,14 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ QNN_ALLOCATOR_VERBOSE("No existing mapping found for tensor_id={}", tensor_id); } - // Also check by tensor name as fallback (in case ID changed) + // Level 3: Check by tensor name as fallback (in case ID changed or is 0) + // Some tensors may have ID=0, so name becomes the fallback identifier if (tensor_name != "unknown" && tensorNameToPtrMap_.count(tensor_name) > 0) { void* existing_ptr = tensorNameToPtrMap_[tensor_name]; QNN_ALLOCATOR_VERBOSE("Found existing mapping for tensor_name={}: existing_ptr={}", tensor_name, existing_ptr); if (existing_ptr == nullptr) { + // Mapping exists but buffer was freed, clean up and register new buffer QNN_ALLOCATOR_VERBOSE( "Existing mapping 
for tensor_name={} has nullptr ptr (mem_handle was deRegistered), will register new buffer", tensor_name); @@ -276,6 +355,7 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ } else if (reuseExistingBuffer(existing_ptr)) { return true; } else { + // Buffer exists but is no longer registered, clean up mapping MLLM_WARN("Existing ptr {} for tensor_name={} is no longer registered", existing_ptr, tensor_name); tensorNameToPtrMap_.erase(tensor_name); } @@ -301,6 +381,11 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ Qnn_MemHandle_t mem_handle = QNN_TENSOR_GET_MEM_HANDLE(qnn_tensor); auto status = qnnInterface_.memRegister(context_, &mem_descriptor, 1u, &mem_handle); + // Attempt to register the buffer with QNN + // This can fail if: + // 1. QNN HTP device memory is exhausted (typically ~2.5GB limit) + // 2. FastRPC memory mapping fails + // 3. SMMU (System Memory Management Unit) mapping fails if (status != QNN_SUCCESS) { auto stats = getRegisteredBufferStats(); MLLM_ERROR("QNNAllocator::registerQnnTensorToSharedBuffer memRegister failed, status=0x{:x}, ptr={}, fd={}, bytes={}, " @@ -308,8 +393,11 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ status, ptr, mem_fd, total_bytes, shape_str, static_cast(mem_descriptor.dataType), tensor_id, tensor_name); MLLM_ERROR("Current registered buffers: {} buffers, {} MB", stats.count, stats.total_bytes / (1024 * 1024)); - // Try to reuse existing buffer for the same tensor ID or name as fallback + // Multi-level fallback strategy when registration fails + // This is critical when QNN device memory is exhausted bool fallback_success = false; + + // Fallback Level 1: Try to reuse buffer by tensor ID if (tensorIdToPtrMap_.count(tensor_id) > 0) { void* existing_ptr = tensorIdToPtrMap_[tensor_id]; if (existing_ptr != nullptr) { @@ -318,6 +406,8 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ 
fallback_success = reuseExistingBuffer(existing_ptr); } } + + // Fallback Level 2: Try to reuse buffer by tensor name if (!fallback_success && tensor_name != "unknown" && tensorNameToPtrMap_.count(tensor_name) > 0) { void* existing_ptr = tensorNameToPtrMap_[tensor_name]; if (existing_ptr != nullptr) { @@ -327,6 +417,8 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ } } + // Fallback Level 3: Try to reuse last successfully registered buffer + // This is a last resort when memory is exhausted and we can't find exact matches if (!fallback_success && hasLastRegistrationInfo_) { bool same_tensor_id = tensor_id != 0 && tensor_id == lastRegistrationInfo_.tensor_id; bool same_tensor_name = tensor_name != "unknown" && !tensor_name.empty() @@ -344,6 +436,8 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ } } + // If all fallbacks failed, we must free the buffer and return failure + // The caller should handle this gracefully (e.g., by retrying or using CPU fallback) if (!fallback_success) { MLLM_ERROR("QNNAllocator::registerQnnTensorToSharedBuffer: memRegister failed and fallback also failed. 
" "Buffer ptr={} will be freed, tensor registration cannot proceed.", ptr); @@ -358,12 +452,14 @@ bool QNNAllocator::registerQnnTensorToSharedBuffer(Storage* storage, Qnn_Tensor_ total_bytes); } + // Restore original tensor state QNN_TENSOR_SET_MEM_HANDLE(qnn_tensor, original_mem_handle); QNN_TENSOR_SET_MEM_TYPE(qnn_tensor, original_mem_type); return false; } return true; } else { + // Registration succeeded, log verbose information QNN_ALLOCATOR_VERBOSE("Register shared buffer ptr={}, fd={}, bytes={}, shape={}, dtype={}, tensor_id={}, tensor_name={}", ptr, mem_fd, total_bytes, shape_str, static_cast(mem_descriptor.dataType), tensor_id, tensor_name); @@ -415,9 +511,19 @@ size_t QNNAllocator::getRegisteredBufferSize(void* ptr) const { return it->second; } +/** + * @brief Erase all tensor ID and name mappings that point to a specific buffer pointer + * + * When a buffer is freed or de-registered, we need to clean up all mappings that reference it. + * This ensures that future lookups won't find stale pointers. 
+ * + * @param ptr The buffer pointer to remove from mappings + * @param reason Reason for erasure (for debugging/logging purposes) + */ void QNNAllocator::eraseTensorMappingsForPtr(void* ptr, std::string_view reason) { if (ptr == nullptr) { return; } + // Remove all tensor ID mappings that point to this ptr for (auto it = tensorIdToPtrMap_.begin(); it != tensorIdToPtrMap_.end();) { if (it->second == ptr) { it = tensorIdToPtrMap_.erase(it); @@ -426,6 +532,7 @@ void QNNAllocator::eraseTensorMappingsForPtr(void* ptr, std::string_view reason) } } + // Remove all tensor name mappings that point to this ptr for (auto it = tensorNameToPtrMap_.begin(); it != tensorNameToPtrMap_.end();) { if (it->second == ptr) { it = tensorNameToPtrMap_.erase(it); @@ -435,6 +542,24 @@ void QNNAllocator::eraseTensorMappingsForPtr(void* ptr, std::string_view reason) } } +/** + * @brief Remember the last successful buffer registration for fallback purposes + * + * This function stores information about the most recent successful registration. + * This information is used as a last-resort fallback when: + * 1. New registration fails (e.g., memory exhausted) + * 2. Exact tensor ID/name matches are not found + * 3. The last registered buffer is still valid and matches the tensor + * + * This is particularly useful in decode phase where memory pressure is high + * and we want to maximize buffer reuse. 
+ * + * @param tensor_id Tensor ID of the registered tensor + * @param tensor_name Tensor name of the registered tensor + * @param ptr Buffer pointer that was successfully registered + * @param mem_handle QNN memory handle from successful registration + * @param total_bytes Size of the registered buffer in bytes + */ void QNNAllocator::rememberLastRegistration(uint32_t tensor_id, const std::string& tensor_name, void* ptr, Qnn_MemHandle_t mem_handle, size_t total_bytes) { if (ptr == nullptr || mem_handle == nullptr) { return; } @@ -447,6 +572,16 @@ void QNNAllocator::rememberLastRegistration(uint32_t tensor_id, const std::strin // Note: Remembered registration info is used as fallback mechanism, logging removed for performance } +/** + * @brief Clear the last registration info if it matches the given pointer + * + * When a buffer is freed or de-registered, we should clear the last registration + * info if it references that buffer. This prevents using stale registration info + * in future fallback attempts. 
+ * + * @param ptr The buffer pointer to check against + * @param reason Reason for clearing (for debugging/logging purposes) + */ void QNNAllocator::clearLastRegistrationIfMatches(void* ptr, std::string_view reason) { if (!hasLastRegistrationInfo_ || ptr == nullptr) { return; } if (lastRegistrationInfo_.ptr == ptr) { diff --git a/mllm/backends/qnn/QNNAllocator.hpp b/mllm/backends/qnn/QNNAllocator.hpp index 6942eb6e7..c9fe7b399 100644 --- a/mllm/backends/qnn/QNNAllocator.hpp +++ b/mllm/backends/qnn/QNNAllocator.hpp @@ -6,7 +6,6 @@ #include #include #include -#include #include "QnnCommon.h" #include "QnnInterface.h" #include "mllm/backends/base/Allocator.hpp" @@ -98,25 +97,60 @@ class QNNAllocator final : public Allocator { std::map> ptrToFdAndMemHandleMap_; // Track buffer sizes for statistics std::map ptrToSizeMap_; - // Map tensor name to registered buffer ptr for reuse + // Map tensor name to registered buffer ptr for reuse (fallback identifier) + // Used when tensor ID is 0 or unavailable std::map tensorNameToPtrMap_; - // Map tensor ID to registered buffer ptr for reuse (more reliable than name) + + // Map tensor ID to registered buffer ptr for reuse (primary identifier) + // Tensor ID is more reliable than name and is used as the primary lookup key + // This enables buffer reuse across prefill and decode phases std::map tensorIdToPtrMap_; + /** + * @brief Information about the last successful buffer registration + * + * This structure stores metadata about the most recent successful registration, + * which is used as a last-resort fallback when: + * - New registration fails (e.g., memory exhausted) + * - Exact tensor ID/name matches are not found + * - The last registered buffer is still valid and matches the tensor + * + * This is particularly useful in decode phase where memory pressure is high. 
+ */ struct LastRegistrationInfo { - uint32_t tensor_id = 0; - std::string tensor_name; - void* ptr = nullptr; - Qnn_MemHandle_t mem_handle = nullptr; - size_t bytes = 0; + uint32_t tensor_id = 0; // Tensor ID of the registered tensor + std::string tensor_name; // Tensor name of the registered tensor + void* ptr = nullptr; // Buffer pointer that was successfully registered + Qnn_MemHandle_t mem_handle = nullptr; // QNN memory handle from successful registration + size_t bytes = 0; // Size of the registered buffer in bytes }; - LastRegistrationInfo lastRegistrationInfo_{}; - bool hasLastRegistrationInfo_ = false; + LastRegistrationInfo lastRegistrationInfo_{}; // Last successful registration info + bool hasLastRegistrationInfo_ = false; // Whether last registration info is valid + /** + * @brief Erase all tensor ID and name mappings that point to a specific buffer pointer + * @param ptr The buffer pointer to remove from mappings + * @param reason Reason for erasure (for debugging/logging purposes) + */ void eraseTensorMappingsForPtr(void* ptr, std::string_view reason); + + /** + * @brief Remember the last successful buffer registration for fallback purposes + * @param tensor_id Tensor ID of the registered tensor + * @param tensor_name Tensor name of the registered tensor + * @param ptr Buffer pointer that was successfully registered + * @param mem_handle QNN memory handle from successful registration + * @param total_bytes Size of the registered buffer in bytes + */ void rememberLastRegistration(uint32_t tensor_id, const std::string& tensor_name, void* ptr, Qnn_MemHandle_t mem_handle, size_t total_bytes); + + /** + * @brief Clear the last registration info if it matches the given pointer + * @param ptr The buffer pointer to check against + * @param reason Reason for clearing (for debugging/logging purposes) + */ void clearLastRegistrationIfMatches(void* ptr, std::string_view reason); }; diff --git a/mllm/backends/qnn/QNNBackend.cpp b/mllm/backends/qnn/QNNBackend.cpp 
index 67c483087..f2025698e 100644 --- a/mllm/backends/qnn/QNNBackend.cpp +++ b/mllm/backends/qnn/QNNBackend.cpp @@ -535,6 +535,8 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& return; } + // Prepare QNN input tensors by copying data from runtime inputs to graph input wrappers + // This handles the case where input tensor sizes may differ between prefill and decode phases std::vector qnn_inputs; std::vector qnn_outputs; for (int i = 0; i < model->getGraphInputTensorWrappers().size(); i++) { @@ -542,6 +544,7 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& auto& wrapper_tensor = wrapper->getDataContainer(); const auto& runtime_input = inputs[i]; + // Validate input tensors if (runtime_input.isNil()) { MLLM_ERROR("Input tensor {} is nil for graph '{}'", i, graphName); return; @@ -552,6 +555,7 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& return; } + // Check for size mismatches (can occur in decode phase where inputs may be smaller) size_t dst_bytes = wrapper_tensor.bytes(); size_t src_bytes = runtime_input.bytes(); if (dst_bytes != src_bytes) { @@ -574,14 +578,17 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& return; } if (dst_ptr && src_ptr && dst_ptr != src_ptr) { - // Copy source data + // Copy source data to destination buffer + // This ensures that the graph input wrapper has the correct data for execution if (bytes_to_copy > 0) { std::memcpy(dst_ptr, src_ptr, bytes_to_copy); } // If source is smaller than destination, zero out the remaining bytes // This is important for decode phase where input tensors may be smaller than prefill + // For example, decode phase may use [1, 1] input while wrapper expects [1, 128] // Note: In current implementation with full [1, 128] tensor, this should not trigger + // but it's kept as a safety measure for future optimizations if (src_bytes < dst_bytes) { size_t remaining_bytes = dst_bytes - src_bytes; 
std::memset(static_cast(dst_ptr) + bytes_to_copy, 0, remaining_bytes); @@ -592,7 +599,9 @@ void QNNBackend::graphExecute(const std::string& graphName, std::vector& } } - wrapper->alloc(); // QNNAllocator will handle registered memory descriptor when needed + // Allocate and register the wrapper tensor with QNN allocator + // QNNAllocator will handle registered memory descriptor when needed + wrapper->alloc(); qnn_inputs.push_back(*(wrapper->getNativeTensor())); } diff --git a/mllm/backends/qnn/QNNUtils.cpp b/mllm/backends/qnn/QNNUtils.cpp index 2d4b8f734..13c577931 100644 --- a/mllm/backends/qnn/QNNUtils.cpp +++ b/mllm/backends/qnn/QNNUtils.cpp @@ -386,13 +386,22 @@ void QNNTensorWrapper::alloc() { size_t requiredBytes = dataContainer_.bytes(); + // Check if we have a previously registered buffer pointer + // This handles the case where tensor dimensions change (e.g., in decode phase) + // and the existing registered buffer is too small if (registeredPtr_) { + // Verify that the registered buffer is still valid if (!allocator->isRegistered(registeredPtr_)) { + // Buffer was de-registered, clear the reference registeredPtr_ = nullptr; isAlloc_ = false; } else { + // Check if the registered buffer is large enough for current requirements + // If not, we need to de-register it and allocate a new one size_t registeredBytes = allocator->getRegisteredBufferSize(registeredPtr_); if (registeredBytes > 0 && registeredBytes < requiredBytes) { + // Registered buffer is too small, de-register it + // A new buffer will be allocated and registered below allocator->deRegisterQnnTensorFromSharedBuffer(registeredPtr_); registeredPtr_ = nullptr; isAlloc_ = false; From 834db98b78d04a82a1db92ba0844d80bbf2c80be Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Thu, 20 Nov 2025 03:56:18 +0000 Subject: [PATCH 7/8] chore: update kleidiai submodule to v1.12.0 --- mllm/backends/cpu/vendors/kleidiai | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/mllm/backends/cpu/vendors/kleidiai b/mllm/backends/cpu/vendors/kleidiai index 84796ece2..8ca226712 160000 --- a/mllm/backends/cpu/vendors/kleidiai +++ b/mllm/backends/cpu/vendors/kleidiai @@ -1 +1 @@ -Subproject commit 84796ece210fbf736c2c51236f4690e0b8812861 +Subproject commit 8ca226712975f24f13f71d04cda039a0ee9f9e2f From 2f6077bcd1bd160d31ffa93b861e64bcf15f03e6 Mon Sep 17 00:00:00 2001 From: jialilve <3485723235@qq.com> Date: Thu, 20 Nov 2025 06:33:54 +0000 Subject: [PATCH 8/8] chore: bump kleidiai submodule to 84796ec --- mllm/backends/cpu/vendors/kleidiai | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mllm/backends/cpu/vendors/kleidiai b/mllm/backends/cpu/vendors/kleidiai index 8ca226712..84796ece2 160000 --- a/mllm/backends/cpu/vendors/kleidiai +++ b/mllm/backends/cpu/vendors/kleidiai @@ -1 +1 @@ -Subproject commit 8ca226712975f24f13f71d04cda039a0ee9f9e2f +Subproject commit 84796ece210fbf736c2c51236f4690e0b8812861